add activation, pool and norm

jkay 2022-07-25 22:32:30 +08:00
parent b5c14d1488
commit 52beb6acf1
31 changed files with 225 additions and 4 deletions

python/__init__.py Normal file (+1)

@@ -0,0 +1 @@
from .sparse import *


python/indice_manager.py Normal file (+34)

@@ -0,0 +1,34 @@
import os
import numpy as np
from typing import Union, List, Tuple

import jittor as jt
from jittor import Function


class IndiceManager:
    def __init__(
        self,
        ndim,
        # indice_map_type,
        # sparse_algorithm,  # set m_hashtable_occupancy for concurrent_unordered_map
    ):
        # if indice_map_type == 'GPU':
        #     assert(jt.has_cuda)
        self.ndim = ndim
        # self.indice_map_type = indice_map_type
        # self.sparse_algorithm = sparse_algorithm
        self.stride_key_manager = {}
        self.indice_map_manager = {}
        self.kernel_map_manager = {}

    def insert(self, stride, indice_key, indice_hash):
        self.stride_key_manager[stride] = indice_key
        self.indice_map_manager[indice_key] = indice_hash

# class IndiceMapManager
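
For reference, a minimal usage sketch of the new manager; the key and hash table below are stand-ins, not part of this commit:

im = IndiceManager(ndim=3)
stride = (1, 1, 1)
indice_key = 'subm1'      # hypothetical user-chosen key
indice_hash = {}          # stand-in for the coordinate hash table
im.insert(stride, indice_key, indice_hash)
assert im.stride_key_manager[stride] == 'subm1'
assert im.indice_map_manager['subm1'] is indice_hash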

python/nn/__init__.py Normal file (+1)

@@ -0,0 +1 @@
from .modules import *


python/nn/functional/__init__.py

@@ -1,10 +1,10 @@
-# from .activation import *
+from .activation import *
 from .conv import *
 # from .count import *
 # from .crop import *
 # from .devoxelize import *
 from .downsample import *
 from .hash import *
-# from .pooling import *
+from .pooling import *
 from .query import *
 # from .voxelize import *

python/nn/functional/activation.py

@@ -0,0 +1,24 @@
import jittor as jt
import jittor.nn as nn

from python import SparseTensor
from python.nn.utils import fapply

__all__ = ['relu', 'leaky_relu']
# __all__ = ['relu', 'leaky_relu', 'ReLU', 'LeakyReLU']


def relu(input: SparseTensor) -> SparseTensor:
    return fapply(input, nn.relu)


def leaky_relu(input: SparseTensor, scale: float = 0.01) -> SparseTensor:
    return fapply(input, nn.leaky_relu, scale=scale)

# Relu = jt.make_module(relu)
# ReLU = Relu
# Leaky_relu = jt.make_module(leaky_relu, 2)
# LeakyReLU = Leaky_relu
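
A sketch of the intended behaviour, assuming SparseTensor can be constructed from indices and values alone, with the remaining constructor arguments left at their defaults:

import jittor as jt
from python import SparseTensor
from python.nn.functional import relu

indices = jt.array([[0, 0, 0, 0], [0, 1, 0, 0]])   # (batch, x, y, z) per row
values = jt.array([[-1.0, 2.0], [3.0, -4.0]])
x = SparseTensor(indices=indices, values=values)

y = relu(x)   # values become [[0, 2], [3, 0]]; indices are unchanged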

python/nn/functional/conv.py

@@ -270,6 +270,7 @@ def conv3d(
    bias: Optional[jt.Var] = None,
    stride: Union[int, Tuple[int, ...]] = 1,
    dilation: Union[int, Tuple[int, ...]] = 1,
    group: int = 1,
    transposed: bool = False,
) -> SparseTensor:
    # kernel_size = make_ntuple(kernel_size, ndim=3)
@@ -337,10 +338,12 @@ def conv3d(
    if bias is not None:
        output_values += bias
    # size has to be set
    output = SparseTensor(
        indices=output_indices,
        values=output_values,
        stride=output_stride,
        size=input.size
    )
    output.cmaps = input.cmaps
    output.cmaps.setdefault(output_stride, output_indices)
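
The setdefault call caches the output coordinate map under output_stride so a later layer running at the same stride can reuse it instead of rebuilding the map. A minimal illustration of the first-writer-wins semantics:

cmaps = {}
cmaps.setdefault((2, 2, 2), 'indices at stride 2')   # stored: key was absent
cmaps.setdefault((2, 2, 2), 'other indices')         # ignored: key already present
assert cmaps[(2, 2, 2)] == 'indices at stride 2'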

python/nn/functional/pooling.py

@@ -0,0 +1,26 @@
import jittor as jt

from python import SparseTensor

__all__ = ['global_avg_pool', 'global_max_pool']


def global_avg_pool(inputs: SparseTensor) -> jt.Var:
    batch_size = jt.max(inputs.indices[:, 0]).item() + 1
    outputs = []
    for k in range(batch_size):
        input = inputs.values[inputs.indices[:, 0] == k]
        output = jt.mean(input, dim=0)
        outputs.append(output)
    outputs = jt.stack(outputs, dim=0)
    return outputs


def global_max_pool(inputs: SparseTensor) -> jt.Var:
    batch_size = jt.max(inputs.indices[:, 0]).item() + 1
    outputs = []
    for k in range(batch_size):
        input = inputs.values[inputs.indices[:, 0] == k]
        output = jt.max(input, dim=0)[0]
        outputs.append(output)
    outputs = jt.stack(outputs, dim=0)
    return outputs
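
A worked sketch with two batch items, under the same SparseTensor constructor assumptions as above:

import jittor as jt
from python import SparseTensor
from python.nn.functional import global_avg_pool

indices = jt.array([[0, 0, 0, 0],
                    [0, 1, 1, 0],
                    [1, 0, 0, 0]])   # column 0 is the batch index
values = jt.array([[1.0], [3.0], [5.0]])
x = SparseTensor(indices=indices, values=values)

out = global_avg_pool(x)   # dense jt.Var of shape (2, 1): [[2.0], [5.0]]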

python/nn/modules/__init__.py

@@ -0,0 +1,6 @@
from .activation import *
# from .bev import *
from .conv import *
# from .crop import *
from .norm import *
from .pooling import *


python/nn/modules/activation.py

@@ -0,0 +1,21 @@
import jittor as jt
from jittor import nn

from python import SparseTensor
from python.nn.functional import relu, leaky_relu
# from python.nn.utils import fapply

__all__ = ['ReLU', 'LeakyReLU']

# class ReLU(nn.ReLU):
#     def execute(self, input: SparseTensor) -> SparseTensor:
#         return fapply(input, super().execute)

# class LeakyReLU(nn.LeakyReLU):
#     def execute(self, input: SparseTensor) -> SparseTensor:
#         return fapply(input, super().execute)

Relu = jt.make_module(relu)
ReLU = Relu
Leaky_relu = jt.make_module(leaky_relu, 2)
LeakyReLU = Leaky_relu
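
Usage sketch, assuming jt.make_module forwards constructor arguments to the wrapped function the way jittor.nn does for its own ReLU/LeakyReLU wrappers:

# x is a SparseTensor as in the earlier sketches
act = ReLU()
y = act(x)              # same as relu(x)

lrelu = LeakyReLU(0.1)
z = lrelu(x)            # same as leaky_relu(x, scale=0.1)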

python/nn/modules/conv.py

@@ -9,7 +9,7 @@ from jittor.misc import _pair, _triple
 from python import SparseTensor
 from python.nn import functional as F
-from utils import make_ntuple
+# from utils import make_ntuple
 __all__ = ['Conv3d']

python/nn/modules/norm.py Normal file (+34)

@@ -0,0 +1,34 @@
import jittor as jt
from jittor import nn

from python import SparseTensor
from python.nn.utils import fapply

__all__ = ['BatchNorm', 'GroupNorm']


class BatchNorm(nn.BatchNorm):
    def execute(self, input: SparseTensor) -> SparseTensor:
        return fapply(input, super().execute)


class GroupNorm(nn.GroupNorm):
    def execute(self, input: SparseTensor) -> SparseTensor:
        indices, values, stride, size = input.indices, input.values, input.stride, input.size
        batch_size = jt.max(indices[:, 0]).item() + 1
        num_channels = values.shape[1]
        n_values = jt.zeros_like(values)
        for k in range(batch_size):
            idx = indices[:, 0] == k
            b_values = values[idx]
            b_values = b_values.t().reshape(1, num_channels, -1)
            b_values = super().execute(b_values)
            b_values = b_values.reshape(num_channels, -1).t()
            n_values[idx] = b_values
        output = SparseTensor(indices=indices, values=n_values, stride=stride, size=size)
        output.cmaps = input.cmaps
        output.kmaps = input.kmaps
        return output
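
GroupNorm normalizes each batch item separately because nn.GroupNorm expects a dense (N, C, *) layout. Shape walk-through for one batch item with N active sites and C channels, annotating the loop body above:

b_values = values[idx]                  # (N, C) feature rows of this batch item
b_values = b_values.t()                 # (C, N)
b_values = b_values.reshape(1, C, -1)   # (1, C, N), the layout nn.GroupNorm expects
b_values = super().execute(b_values)    # normalized, still (1, C, N)
b_values = b_values.reshape(C, -1).t()  # back to the (N, C) sparse layout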

python/nn/modules/pooling.py

@@ -0,0 +1,11 @@
import jittor as jt
from jittor import nn

from python import SparseTensor
from python.nn.functional import global_avg_pool, global_max_pool

__all__ = ['GlobalAvgPool', 'GlobalMaxPool']

GlobalAvgPool = jt.make_module(global_avg_pool)
GlobalMaxPool = jt.make_module(global_max_pool)
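
Usage sketch; the modules are stateless wrappers over the functionals:

# x is a SparseTensor as in the earlier sketches
pool = GlobalAvgPool()
feat = pool(x)   # identical to global_avg_pool(x); returns a dense jt.Var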

python/nn/utils/__init__.py

@@ -1 +1,2 @@
+from .apply import *
 from .kernel import *


python/nn/utils/apply.py Normal file (+15)

@@ -0,0 +1,15 @@
from typing import Callable

import jittor as jt

from python import SparseTensor

__all__ = ['fapply']


def fapply(input: SparseTensor, fn: Callable[..., jt.Var], *args,
           **kwargs) -> SparseTensor:
    values = fn(input.values, *args, **kwargs)
    output = SparseTensor(indices=input.indices, values=values,
                          stride=input.stride, size=input.size)
    output.cmaps = input.cmaps
    output.kmaps = input.kmaps
    return output
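
fapply lifts any values-to-values function onto a SparseTensor while carrying the indices and cached maps through. Sketch with a hypothetical scaling function:

# x is a SparseTensor as in the earlier sketches
def scale_values(v, factor):
    return v * factor

y = fapply(x, scale_values, factor=2.0)
assert y.indices is x.indices   # same coordinates, new features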

python/sparse.py

@@ -24,11 +24,13 @@ class SparseTensor:
        assert (values.ndim == 2)
        # self.indices = indices
        # self.values = values
        self.shape = size
        self.size = size
        self.ndim = indices.shape[1] - 1
        self.stride = make_ntuple(stride, ndim=self.ndim)
        self.voxel_size = voxel_size
        self.coalesce_mode = coalesce_mode
        self.cmaps = {}
        self.kmaps = {}

        ##########################
        # Setup CoordsManager
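
cmaps caches coordinate maps keyed by stride and kmaps caches kernel maps; both start empty and are filled by layers such as conv3d above. A layer can seed the cache at the input stride, e.g.:

x.cmaps.setdefault(x.stride, x.indices)   # hypothetical seeding, mirroring conv3d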

@@ -0,0 +1,42 @@
import jittor as jt
from jittor import Function


def spmm(
    rows: jt.Var,
    cols: jt.Var,
    vals: jt.Var,
    size: jt.NanoVector,
    mat: jt.Var,
    spmm_mode='scatter',
    is_sorted: bool = False,
    cuda_spmm_alg: int = 1,
) -> jt.Var:
    assert len(rows) == len(cols), "Invalid length"
    assert len(rows) == len(vals), "Invalid length"
    assert vals.dtype == mat.dtype, "dtype mismatch"

    if jt.flags.use_cuda > 1:
        assert jt.has_cuda == 1, "No GPUs available"
        rows = rows.int()
        cols = cols.int()
        '''
        TODO: use the coo_spmm of cuSPARSE on GPU
        result = coo_spmm_int32(
            rows, cols, vals, size[0], size[1], mat, cuda_spmm_alg, is_sorted
        )
        '''
    else:
        if spmm_mode == 'scatter':
            class SPMM(Function):
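
For reference, what scatter-mode spmm is expected to compute, sketched densely in plain numpy; the truncated SPMM Function above presumably implements this with jittor gather/scatter ops:

import numpy as np

rows = np.array([0, 0, 1])
cols = np.array([1, 2, 0])
vals = np.array([2.0, 3.0, 4.0])
mat = np.arange(6, dtype=np.float64).reshape(3, 2)   # dense right-hand side

out = np.zeros((2, mat.shape[1]))        # size[0] rows in the result
for r, c, v in zip(rows, cols, vals):
    out[r] += v * mat[c]                 # out = sparse(rows, cols, vals) @ mat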