forked from maxjhandsome/jittor
unique type
This commit is contained in:
parent adf7ef85d9
commit 0a5bd61bf4
@@ -24,7 +24,7 @@ CubArgReduceOp::CubArgReduceOp(Var* x, Var* offsets, NanoString op, bool keepdim
     : x(x), offsets(offsets), op(op), keepdims(keepdims) {
     flags.set(NodeFlags::_cpu, 0);
     flags.set(NodeFlags::_cuda, 1);
-    ASSERT(offsets->dtype()==ns_int || offsets->dtype()==ns_int32);
+    ASSERT(offsets->dtype()==ns_int32);
     y = create_output(nullptr, ns_int32);
     y_key = create_output(nullptr, x->dtype());
 }

@@ -23,7 +23,7 @@ CubArgsortOp::CubArgsortOp(Var* x, Var* indexes, Var* offsets, bool descending,
     : x(x), indexes(indexes), offsets(offsets), descending(descending) {
     flags.set(NodeFlags::_cpu, 0);
     flags.set(NodeFlags::_cuda, 1);
-    ASSERT(offsets->dtype()==ns_int || offsets->dtype()==ns_int32);
+    ASSERT(offsets->dtype()==ns_int32);
     y = create_output(nullptr, dtype);
     y_key = create_output(nullptr, x->dtype());
 }

@@ -13,7 +13,7 @@ namespace jittor {
 
 struct CurandRandomOp : Op {
     Var* output;
-    CurandRandomOp(NanoVector shape, NanoString dtype=ns_float);
+    CurandRandomOp(NanoVector shape, NanoString dtype=ns_float32);
 
     const char* name() const override { return "curand_random"; }
     DECLARE_jit_run;

@@ -33,7 +33,7 @@ namespace jittor {
 
 struct CustomOp : Op {
     Var* output;
-    CustomOp(NanoVector shape, NanoString dtype=ns_float);
+    CustomOp(NanoVector shape, NanoString dtype=ns_float32);
 
     const char* name() const override { return "custom"; }
     DECLARE_jit_run;

@@ -304,11 +304,11 @@ Var.masked_fill = masked_fill
 def sqr(x): return x*x
 Var.sqr = sqr
 
-def argmax(x, dim:int, keepdims:bool=False):
+def argmax(x, dim, keepdims:bool=False):
     return x.arg_reduce("max", dim, keepdims)
 Var.argmax = argmax
 
-def argmin(x, dim:int, keepdims:bool=False):
+def argmin(x, dim, keepdims:bool=False):
     return x.arg_reduce("min", dim, keepdims)
 Var.argmin = argmin
 
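With the annotation removed, dim no longer has to be a plain Python int, so values such as NumPy integers pass straight through to arg_reduce. A minimal usage sketch (assuming the usual import jittor as jt; the sample values are made up for illustration):

import jittor as jt

x = jt.array([[1, 5, 2],
              [7, 3, 4]])
row_max = x.argmax(1)      # arg_reduce("max", ...) along dim 1
col_min = jt.argmin(x, 0)  # module-level function, also installed on Var
print(row_max, col_min)
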
@@ -480,11 +480,11 @@ class Module:
             end = 0
             for k in key_:
                 if isinstance(v, nn.Sequential):
-                    if np.int(k) >= len(v.layers):
+                    if ori_int(k) >= len(v.layers):
                         end = 1
                         break
                     else:
-                        v = v[np.int(k)]
+                        v = v[ori_int(k)]
                 else:
                     if hasattr(v, k):
                         v = getattr(v, k)

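The switch from np.int(k) to ori_int(k) is needed because this commit rebinds the module-level name int to the int32 dtype (see the __init__.py hunk that follows), so the builtin integer conversion has to be reached through the saved alias. A rough sketch of the distinction, with the dtype stood in by a plain string since the real object is a Jittor type:

ori_int = int      # keep a handle on the Python builtin before rebinding
int = "int32"      # stand-in for the jittor int32 dtype that shadows the name

k = "3"
layers = ["conv", "bn", "relu", "pool"]
# int(k) would now refer to the dtype, not the builtin, so indexing code
# inside the package uses ori_int(k) instead.
print(layers[ori_int(k)])  # -> pool
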
@@ -622,6 +622,16 @@ Var.__str__ = lambda x: str(x.data)
 Var.__repr__ = lambda x: str(x.data)
 Var.peek = lambda x: f"{x.dtype}{x.shape}"
 
+ori_int = int
+
+int = int32
+Var.int = Var.int32
+float = float32
+Var.float = Var.float32
+double = float64
+Var.double = Var.float64
+
 from . import nn
 from .nn import matmul
 from . import contrib

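These aliases keep the familiar user-facing names while pointing them at the canonical sized types: jt.int, jt.float and jt.double become synonyms for int32, float32 and float64, and ori_int preserves the Python builtin for internal use. A short usage sketch (assuming import jittor as jt):

import jittor as jt

a = jt.random([2, 3])
print(a.dtype)           # float32 is the default dtype
b = a.int()              # Var.int is now Var.int32
c = jt.array([1.5, 2.5]).double()  # Var.double is Var.float64
print(b.dtype, c.dtype)  # int32 float64
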
@@ -16,7 +16,7 @@ def expect_error(func):
 
 class TestCore(unittest.TestCase):
     def test_number_of_hold_vars(self):
-        assert jt.random([1,2,3]).peek() == "float[1,2,3,]"
+        assert jt.random([1,2,3]).peek() == "float32[1,2,3,]"
         assert jt.core.number_of_hold_vars() == 0
         x = jt.random([1,2,3])
         assert jt.core.number_of_hold_vars() == 1

@@ -16,7 +16,7 @@ namespace jittor {
 
 struct CustomOp : Op {
     Var* output;
-    CustomOp(NanoVector shape, NanoString dtype=ns_float);
+    CustomOp(NanoVector shape, NanoString dtype=ns_float32);
 
     const char* name() const override { return "custom"; }
     DECLARE_jit_run;

@@ -75,7 +75,7 @@ class TestCustomOp(unittest.TestCase):
         my_op = jt.compile_custom_op("""
         struct MyOp : Op {
             Var* output;
-            MyOp(NanoVector shape, NanoString dtype=ns_float);
+            MyOp(NanoVector shape, NanoString dtype=ns_float32);
 
             const char* name() const override { return "my"; }
             DECLARE_jit_run;

@@ -10,7 +10,7 @@ from .test_core import expect_error
 import os
 
 mid = 0
-if os.uname()[1] == "jittor-ce":
+if "jittor" in os.uname()[1]:
     mid = 1
 
 class TestNanoString(unittest.TestCase):

@@ -27,7 +27,8 @@ class TestNanoString(unittest.TestCase):
         assert t < [1.5e-7, 1.7e-7][mid], t
 
         assert (jt.hash("asdasd") == 4152566416)
-        assert str(jt.NanoString("float"))=="float"
+        assert str(jt.NanoString("float"))=="float32"
+        assert jt.NanoString("float")=="float32"
         # pybind11: 7
         # Tuple call: 1.3
         # fast call (with or with not): 0.9

@@ -38,14 +39,14 @@ class TestNanoString(unittest.TestCase):
 
     def test_type(self):
         import numpy as np
-        assert str(jt.NanoString(float)) == "float"
-        assert str(jt.NanoString(np.float)) == "float"
+        assert str(jt.NanoString(float)) == "float32"
+        assert str(jt.NanoString(np.float)) == "float32"
         assert str(jt.NanoString(np.float32)) == "float32"
         assert str(jt.NanoString(np.float64)) == "float64"
         assert str(jt.NanoString(np.int8)) == "int8"
         assert str(jt.NanoString(np.array([1,2,3]).dtype)) == "int64"
 
-        assert str(jt.NanoString(jt.float)) == "float"
+        assert str(jt.NanoString(jt.float)) == "float32"
         assert str(jt.NanoString(jt.float32)) == "float32"
         assert str(jt.NanoString(jt.float64)) == "float64"
         assert str(jt.NanoString(jt.int8)) == "int8"

setup.py
@@ -21,7 +21,7 @@ with open(os.path.join(path, "README.md"), "r", encoding='utf8') as fh:
 
 setuptools.setup(
     name='jittor',
-    version='1.1.5.1',
+    version='1.1.5.2',
     # scripts=[],
     author="Jittor Group",
     author_email="ran.donglang@gmail.com",

@@ -9,9 +9,6 @@
 namespace jittor {
 
 #define FOR_ALL_TYPES(m) \
-    m(float) \
-    m(double) \
-    m(int) \
     m(bool) \
     m(int8) \
     m(int16) \

@@ -151,6 +148,10 @@ static void init_ns() {
     NanoString::__string_to_ns["sum"] = ns_add;
     NanoString::__string_to_ns["min"] = ns_minimum;
     NanoString::__string_to_ns["max"] = ns_maximum;
+    NanoString::__string_to_ns["float"] = ns_float32;
+    NanoString::__string_to_ns["double"] = ns_float64;
+    NanoString::__string_to_ns["int"] = ns_int32;
+    NanoString::__string_to_ns["uint"] = ns_uint32;
     LOGvv << "init __string_to_ns" << NanoString::__string_to_ns;
     LOGvv << "init __ns_to_string" << NanoString::__ns_to_string;
 }

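With these entries the legacy spellings are resolved to the canonical sized types at string-lookup time instead of existing as separate NanoString values, which is what the updated tests elsewhere in this commit assert. A quick check of the user-visible behaviour, assuming import jittor as jt:

import jittor as jt

# "float", "double", "int" and "uint" are now plain lookup aliases.
assert str(jt.NanoString("float")) == "float32"
assert str(jt.NanoString("double")) == "float64"
assert str(jt.NanoString("int")) == "int32"
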
@@ -12,9 +12,6 @@ namespace jittor {
 #define FOR_ALL_NS(m) \
 \
     m(void) \
-    m(float) \
-    m(double) \
-    m(int) \
     m(bool) \
     m(int8) \
     m(int16) \

@@ -20,7 +20,7 @@ struct ArrayOp : Op {
     Var* output;
     Allocation allocation;
     // @pybind(None)
-    ArrayOp(const void* ptr, NanoVector shape, NanoString dtype=ns_float);
+    ArrayOp(const void* ptr, NanoVector shape, NanoString dtype=ns_float32);
 
     ArrayOp(ArrayArgs&& args);
     template<class T>

@@ -73,7 +73,7 @@ FetchOp::FetchOp(vector<Var*>&& inputs, FetchFunc&& func)
     // stream needs to be created after nccl plugin
     static Init init_fetch;
 #endif
-    VarPtr vp(0, ns_int);
+    VarPtr vp(0, ns_int32);
     outputs_holder.emplace_back(vp);
     fetcher.emplace_front(move(vp));
     fetcher_iter = fetcher.begin();

@@ -16,7 +16,7 @@ static auto make_broadcast_to = get_op_info("broadcast_to")
     .get_constructor<VarPtr, Var*, Var*, NanoVector>();
 
 VarPtr make_number(float number, Var* x) {
-    VarPtr nums = make_array(&number, 1, ns_float);
+    VarPtr nums = make_array(&number, 1, ns_float32);
     nums = make_broadcast_to(nums, x, {});
     return make_unary(nums, x->dtype());
 }

@@ -10,7 +10,7 @@ namespace jittor {
 
 struct RandomOp : Op {
     Var* output;
-    RandomOp(NanoVector shape, NanoString dtype=ns_float);
+    RandomOp(NanoVector shape, NanoString dtype=ns_float32);
 
     const char* name() const override { return "random"; }
     DECLARE_jit_run;

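The default dtype of random (and of the curand variant above) is now written as ns_float32 directly rather than through the removed ns_float alias, so the observable default is unchanged. A minimal check, mirroring the assertion in test_core earlier in this diff:

import jittor as jt

x = jt.random([1, 2, 3])
# peek() reports dtype and shape; with unique types this prints
# "float32[1,2,3,]" rather than "float[1,2,3,]".
assert x.peek() == "float32[1,2,3,]"
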
@@ -22,9 +22,6 @@ static auto make_number = get_op_info("number")
     .get_constructor<VarPtr, float, Var*>();
 
 static unordered_set<string> unary_ops = {
-    "float",
-    "double",
-    "int",
     "bool",
     "int8",
     "int16",

@@ -229,7 +229,7 @@ void ConvTuner::forwardTune(FusedOp* fop) {
         if (!(bop->x->input()->type()==OpType::broadcast && bop->y->input()->type()==OpType::broadcast)) return;
 
         // only support float32 currently
-        if (bop->z->dtype() != ns_float && bop->z->dtype() != ns_float32)
+        if (bop->z->dtype() != ns_float32)
             continue;
         Op* ops[3] = {op, bop->x->input(), bop->y->input()};
         int ok = 0;

@@ -76,12 +76,12 @@ inline int get_typenum(NanoString ns) {
     if (ns == ns_uint8) return 2;
     if (ns == ns_int16) return 3;
     if (ns == ns_uint16) return 4;
-    if (ns == ns_int32 || ns == ns_int) return 5;
+    if (ns == ns_int32) return 5;
     if (ns == ns_uint32) return 6;
     if (ns == ns_int64) return 7;
     if (ns == ns_uint64) return 8;
-    if (ns == ns_float32 || ns == ns_float) return 11;
-    if (ns == ns_float64 || ns == ns_double) return 12;
+    if (ns == ns_float32) return 11;
+    if (ns == ns_float64) return 12;
     LOGf << ns;
     return -1;
 }

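The integers returned by get_typenum appear to follow NumPy's type-number enumeration (NPY_INT is 5, NPY_FLOAT is 11, NPY_DOUBLE is 12), so removing the alias branches leaves exactly one NanoString per typenum. A quick cross-check in Python, assuming that correspondence:

import numpy as np

# dtype.num exposes the same enumeration the C++ helper returns;
# the float entries are platform-independent.
assert np.dtype(np.float32).num == 11
assert np.dtype(np.float64).num == 12
print(np.dtype(np.int32).num)  # 5 on common platforms, matching ns_int32
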