Change the architecture of the code

This commit is contained in:
zjp-shadow 2022-07-27 09:50:10 +08:00
parent 64f5c7549d
commit a151cc58aa
31 changed files with 129 additions and 32 deletions

View File

@@ -1,3 +1,11 @@
 # JSparse
 JSparse is a high-performance auto-differentiation library for sparse tensors.
+
+## How to Install
+```
+cd python
+python setup.py install
+```
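
A quick post-install check: the sketch below reuses only names that appear later in this commit (the imports and the positional `SparseTensor(coords, feats, 1, 1)` call from examples/example.py), so treat it as a hedged smoke test rather than documented API.

```
# Post-install smoke test -- a sketch that mirrors examples/example.py below.
import numpy as np
import jittor

from JSparse import SparseTensor
from JSparse import nn as spnn

pts = np.random.uniform(-100, 100, size=(1000, 4))
coords = jittor.Var(pts - pts.min(axis=0, keepdims=True))  # shift coords to be non-negative
feats = jittor.Var(pts)

x = SparseTensor(coords, feats, 1, 1)  # positional args: indices, values, stride, size
y = spnn.Conv3d(4, 8, 3)(x)            # one sparse 3-D convolution layer
print(type(y))
```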

examples/example.py Normal file
View File

@@ -0,0 +1,73 @@
import numpy as np
import jittor
from jittor import nn

import JSparse
from JSparse import SparseTensor
from JSparse import nn as spnn
from JSparse.utils.quantize import sparse_quantize

class RandomDataset(jittor.dataset.Dataset):
    def __init__(self, input_size: int, voxel_size: float) -> None:
        super().__init__()
        self.set_attrs(total_len=input_size)
        self.voxel_size = voxel_size

    def __getitem__(self, _: int):
        # Each item is a random point cloud: total_len 4-D points, 10 classes.
        inputs = np.random.uniform(-100, 100, size=(self.total_len, 4))
        labels = np.random.choice(10, size=self.total_len)
        coords, feats = inputs[:, :], inputs
        coords -= np.min(coords, axis=0, keepdims=True)
        # coords, indices = sparse_quantize(coords, self.voxel_size, return_index=True)
        coords = jittor.Var(coords)
        feats = jittor.Var(feats)
        labels = jittor.Var(labels)
        # coords = jittor.Var(coords, dtype=jittor.int64)
        # feats = jittor.Var(feats[indices], dtype=jittor.float64)
        # labels = jittor.Var(labels[indices], dtype=jittor.int64)
        inputs = SparseTensor(coords, feats, 1, 1)
        labels = SparseTensor(coords, labels, 1, 1)
        return inputs, labels

if __name__ == '__main__':
    np.random.seed(0)

    dataset = RandomDataset(input_size=10000, voxel_size=0.2)
    model = nn.Sequential(
        spnn.Conv3d(4, 32, 3),
        spnn.BatchNorm(32),
        spnn.ReLU(True),
        spnn.Conv3d(32, 64, 2, stride=2),
        spnn.BatchNorm(64),
        spnn.ReLU(True),
        spnn.Conv3d(64, 64, 2, stride=2, transposed=True),
        spnn.BatchNorm(64),
        spnn.ReLU(True),
        spnn.Conv3d(64, 32, 3),
        spnn.BatchNorm(32),
        spnn.ReLU(True),
        spnn.Conv3d(32, 10, 1),
    )

    criterion = nn.CrossEntropyLoss()
    optimizer = jittor.optim.Adam(model.parameters(), lr=1e-3)

    model.train()
    lens = len(dataset)
    for batch_idx, (inputs, labels) in enumerate(dataset):
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        optimizer.step(loss)  # Jittor's step(loss) runs backward and the update in one call
        if batch_idx % 10 == 0:
            print('Training: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx, lens, 100. * batch_idx / lens, loss.numpy()[0]))

View File

@@ -1,8 +1,8 @@
 import jittor as jt
 import jittor.nn as nn
-from python.jsparse import SparseTensor
-from python.jsparse.nn.utils import fapply
+from JSparse import SparseTensor
+from JSparse.nn.utils import fapply
 __all__ = ['relu', 'leaky_relu']
 # __all__ = ['relu', 'leaky_relu', 'ReLU', 'LeakyReLU']

View File

@@ -4,10 +4,10 @@ import jittor as jt
 from jittor import Function
 from jittor.misc import _pair, _triple
-from python.jsparse import SparseTensor
-from python.jsparse.nn import functional as F
-from python.jsparse.nn.utils import get_kernel_offsets
-from python.jsparse import make_ntuple
+from JSparse import SparseTensor
+from JSparse.nn import functional as F
+from JSparse.nn.utils import get_kernel_offsets
+from JSparse import make_ntuple
 __all__ = ['conv3d', 'Convolution']

View File

@@ -1,7 +1,7 @@
 import jittor as jt
 from jittor import Function
-from python.jsparse import SparseTensor
+from JSparse import SparseTensor
 __all__ = ['calc_ti_weights', 'spdevoxelize']

View File

@@ -3,8 +3,8 @@ from typing import Tuple, Union
 import jittor as jt
 from jittor.misc import _pair, _triple
-from python.jsparse.nn.utils import get_kernel_offsets
-from python.jsparse.utils import make_ntuple, trunc
+from JSparse.nn.utils import get_kernel_offsets
+from JSparse.utils import make_ntuple, trunc
 __all__ = ['spdownsample']

View File

@@ -1,6 +1,6 @@
 import jittor as jt
-from python.jsparse import SparseTensor
+from JSparse import SparseTensor
 __all__ = ['global_avg_pool', 'global_max_pool']

View File

@@ -1,7 +1,7 @@
 import jittor as jt
 from jittor import Function
-from python.jsparse import SparseTensor
+from JSparse import SparseTensor
 __all__ = ['spvoxelize']

View File

@@ -1,9 +1,9 @@
 import jittor as jt
 from jittor import nn
-from python.jsparse import SparseTensor
-from python.jsparse.nn.functional import relu, leaky_relu
-# from python.nn.utils import fapply
+from JSparse import SparseTensor
+from JSparse.nn.functional import relu, leaky_relu
+# from nn.utils import fapply
 __all__ = ['ReLU', 'LeakyReLU']

View File

@@ -7,8 +7,8 @@ from jittor import nn
 from jittor import init
 from jittor.misc import _pair, _triple
-from python.jsparse import SparseTensor
-from python.jsparse.nn import functional as F
+from JSparse import SparseTensor
+from JSparse.nn import functional as F
 # from utils import make_ntuple
 __all__ = ['Conv3d']

View File

@@ -2,8 +2,8 @@ import jittor as jt
 from jittor import nn
 from numpy import kaiser
-from python.jsparse import SparseTensor
-from python.jsparse.nn.utils import fapply
+from JSparse import SparseTensor
+from JSparse.nn.utils import fapply
 __all__ = ['BatchNorm', 'GroupNorm']

View File

@@ -2,8 +2,8 @@ from ast import Global
 import jittor as jt
 from jittor import nn
-from python.jsparse import SparseTensor
-from python.jsparse.nn.functional import global_avg_pool, global_max_pool
+from JSparse import SparseTensor
+from JSparse.nn.functional import global_avg_pool, global_max_pool
 __all__ = ['GlobalAvgPool', 'GlobalMaxPool']

View File

@@ -2,7 +2,7 @@ from typing import Callable
 import jittor as jt
-from python.jsparse import SparseTensor
+from JSparse import SparseTensor
 __all__ = ['fapply']

View File

@@ -3,7 +3,7 @@ from typing import Tuple, Union
 import numpy as np
 import jittor as jt
-from python.jsparse.utils import make_ntuple, trunc
+from JSparse.utils import make_ntuple, trunc
 __all__ = ['get_kernel_offsets']

View File

@@ -5,26 +5,27 @@ import jittor as jt
 from jittor.misc import _pair, _triple
 from typing import Any, Dict, Tuple, Union
 from type_check import type_check
-from python.jsparse.utils import make_ntuple, sparse_quantize, set_hash
+from JSparse.utils import make_ntuple, sparse_quantize, set_hash
 # from .utils.quantize import sparse_quantize
 # from indice_manager import IndiceManager
 class SparseTensor:
     @type_check
     def __init__(
         self,
         indices: jt.Var,
         values: jt.Var,
         stride: Union[int, Tuple[int, ...]],
-        size,
+        size=None,
+        quantize=True,
+        voxel_size=1,
-        coalesce_mode='sum',
+        coalesce_mode: str = 'sum',
         indice_manager=None,
         device=None,
     ):
         assert isinstance(indices, jt.Var) and isinstance(values, jt.Var)
         assert (values.ndim == 2)
         # self.indices = indices
         # self.values = values
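
This signature change is the core of the commit: `size` becomes optional, quantization moves into the tensor itself via the new `quantize` and `voxel_size` keywords, and `coalesce_mode` gains a type annotation. A minimal construction sketch follows, assuming these keywords behave as their names suggest; only the argument names come from the diff above.

```
import numpy as np
import jittor as jt
from JSparse import SparseTensor

# Toy point cloud shaped like the one in examples/example.py.
pts = np.random.uniform(-100, 100, size=(1000, 4))
coords = jt.Var(pts - pts.min(axis=0, keepdims=True))
feats = jt.Var(pts)  # 2-D, satisfying the `values.ndim == 2` assertion above

# `size` may now be omitted; quantization is configured per tensor.
x = SparseTensor(coords, feats, stride=1,
                 quantize=True, voxel_size=0.2, coalesce_mode='sum')
```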


python/setup.py Normal file
View File

@@ -0,0 +1,15 @@
from setuptools import find_packages, setup

# Use the top-level README as the package description.
with open("../README.md", "r") as file:
    description = file.read()

setup(
    name='JSparse',
    version='0.1',
    description=description,
    packages=find_packages(),
    install_requires=["jittor", "type_check"],
)

View File

@@ -11,12 +11,12 @@ from jittor.misc import _pair, _triple
 from itertools import repeat
 from typing import List, Tuple, Union
-from python.jsparse import SparseTensor
-from python.jsparse import PointTensor
-from python.jsparse.utils import make_ntuple
-from python.jsparse.nn import functional as F
-from python.jsparse.nn.utils import get_kernel_offsets
-from python.jsparse.nn.functional import Convolution
+from JSparse import SparseTensor
+from JSparse import PointTensor
+from JSparse.utils import make_ntuple
+from JSparse.nn import functional as F
+from JSparse.nn.utils import get_kernel_offsets
+from JSparse.nn.functional import Convolution
 import torchsparse
 from torchsparse import nn as spnn