Commit

debug onnx test cases
joddiy committed May 28, 2020
1 parent 27051ae commit 70a0d7e
Showing 2 changed files with 167 additions and 99 deletions.
219 changes: 142 additions & 77 deletions python/singa/sonnx.py
@@ -24,7 +24,7 @@
import onnx
from onnx.backend.base import Backend, BackendRep
from onnx import (checker, helper, numpy_helper, GraphProto, NodeProto,
TensorProto, OperatorSetIdProto, optimizer)
TensorProto, OperatorSetIdProto, optimizer, mapping)
import warnings

from singa import device
@@ -38,8 +38,30 @@
OrderedDict = collections.OrderedDict
namedtuple = collections.namedtuple

# tensor type to numpy dtype
singa_type_map = {tensor.float32: np.float32, tensor.int32: np.int32}
# singa only supports float32 and int32
NP_TYPE_TO_SINGA_SUPPORT_TYPE = {
np.dtype('float32'): np.dtype('float32'),
np.dtype('uint8'): None,
np.dtype('int8'): np.dtype('int32'),
np.dtype('uint16'): None,
np.dtype('int16'): np.dtype('int32'),
np.dtype('int32'): np.dtype('int32'),
np.dtype('int64'): np.dtype('int32'),
np.dtype('bool'): np.dtype('float32'),
np.dtype('float16'): np.dtype('float32'),
np.dtype('float64'): np.dtype('float32'),
np.dtype('complex64'): None,
np.dtype('complex128'): None,
np.dtype('uint32'): None,
np.dtype('uint64'): None,
np.dtype(np.object): None
}


def onnx_type_to_singa_type(onnx_type):
np_type = mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_type]
return NP_TYPE_TO_SINGA_SUPPORT_TYPE[np_type]
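
A quick illustration of how the two tables above compose (illustrative only, assuming the module's numpy and onnx imports are in scope; these calls are not part of the commit): unsupported element types resolve to None, everything else is folded into float32 or int32.

# illustrative sketch, not part of this diff
from onnx import TensorProto
assert onnx_type_to_singa_type(TensorProto.FLOAT16) == np.dtype('float32')
assert onnx_type_to_singa_type(TensorProto.INT64) == np.dtype('int32')
assert onnx_type_to_singa_type(TensorProto.UINT64) is None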


gpu_dev = None
if singa.USE_CUDA:
@@ -1038,61 +1060,61 @@ class SingaBackend(Backend):
'Asinh': 'Asinh',
'Atan': 'Atan',
'Atanh': 'Atanh',
# 'Equal': 'Equal',
# 'Less': 'Less',
'Equal': 'Equal',
'Less': 'Less',
'Sign': 'Sign',
'Div': 'Div',
'Sub': 'Sub',
'Sqrt': 'Sqrt',
'Log': 'Log',
# 'Greater': 'Greater',
'Greater': 'Greater',
'Identity': 'Identity',
'Softplus': 'SoftPlus',
'Softsign': 'SoftSign',
'Mean': 'Mean',
# 'Pow': 'Pow',
'Pow': 'Pow',
'PRelu': 'PRelu',
'Mul': 'Mul',
# 'Max': 'Max',
'Max': 'Max',
'Min': 'Min',
# 'Shape': 'Shape',
# 'And': 'And',
# 'Or': 'Or',
# 'Xor': 'Xor',
# 'Not': 'Not',
'Shape': 'Shape',
'And': 'And',
'Or': 'Or',
'Xor': 'Xor',
'Not': 'Not',
'Neg': 'Negative',
'Reciprocal': 'Reciprocal',
# 'Unsqueeze': 'Unsqueeze',
# 'NonZero': 'NonZero',
'Unsqueeze': 'Unsqueeze',
'NonZero': 'NonZero',
'Ceil': 'Ceil',
# # special op
# 'Cast': 'Cast',
# 'Split': 'Split',
# 'Squeeze': 'Squeeze',
'Cast': 'Cast',
'Split': 'Split',
'Squeeze': 'Squeeze',
'GlobalAveragePool': 'GlobalAveragePool',
'LeakyRelu': 'LeakyRelu',
'ReduceSum': 'ReduceSum',
'ReduceMean': 'ReduceMean',
'Dropout': 'Dropout',
# 'ConstantOfShape': 'ConstantOfShape',
# 'Transpose': 'Transpose',
'ConstantOfShape': 'ConstantOfShape',
'Transpose': 'Transpose',
'HardSigmoid': 'HardSigmoid',
'Elu': 'Elu',
'Selu': 'SeLU',
# 'Concat': 'Concat',
'Concat': 'Concat',
'Softmax': 'SoftMax',
'Flatten': 'Flatten',
# 'OneHot': 'OneHot',
'OneHot': 'OneHot',
'Tile': 'Tile',
# 'Gather': 'Gather',
# 'Reshape': 'Reshape',
# 'Slice': 'Slice',
# 'Clip': 'Clip',
# 'Gemm': 'layer.Gemm', # layer
# 'BatchNormalization': 'layer.BatchNorm2d', # layer
# 'Conv': 'layer.Conv2d', # layer
# 'MaxPool': 'layer.Pooling2d', # layer
# 'AveragePool': 'layer.Pooling2d', # layer
'Gather': 'Gather',
'Reshape': 'Reshape',
'Slice': 'Slice',
'Clip': 'Clip',
'Gemm': 'layer.Gemm', # layer
'BatchNormalization': 'layer.BatchNorm2d', # layer
'Conv': 'layer.Conv2d', # layer
'MaxPool': 'layer.Pooling2d', # layer
'AveragePool': 'layer.Pooling2d', # layer
}

# this dict indicates the operators that need extra handle
@@ -1128,19 +1150,6 @@ class SingaBackend(Backend):
'AveragePool': '_create_max_avg_pool',
}

# singa only supports float32 and int32
_type_map = {
TensorProto.FLOAT: tensor.float32, # FLOAT to float32
TensorProto.UINT8: None, # UINT8
TensorProto.INT8: tensor.int32, # INT8 to int32
TensorProto.UINT16: None, # UINT16
TensorProto.INT16: tensor.int32, # INT16 to int32
TensorProto.INT32: tensor.int32, # INT32 to int32
TensorProto.INT64: tensor.int32, # INT64 to int32
TensorProto.STRING: None, # string
TensorProto.BOOL: None, # bool
}

@classmethod
def _create_cast(cls, onnx_node, operator, opset_version=_opset_version):
"""
@@ -1152,9 +1161,12 @@ def _create_cast(cls, onnx_node, operator, opset_version=_opset_version):
Returns:
singa operator instance
"""
to_type = cls._type_map[onnx_node.getattr("to")]
to_type = onnx_type_to_singa_type(onnx_node.getattr("to"))
assert to_type != None, "not support cast type: {}".format(to_type)
return operator(to_type)
if to_type == np.dtype('float32'):
return operator(tensor.float32)
else:
return operator(tensor.int32)

@classmethod
def _create_split(cls, onnx_node, operator, opset_version=_opset_version):
@@ -1382,7 +1394,17 @@ def _create_gemm(cls, onnx_node, operator, opset_version=_opset_version):
beta = onnx_node.getattr('beta', 1.)
transA = onnx_node.getattr('transA', 0)
transB = onnx_node.getattr('transB', 0)
return operator(alpha=alpha, beta=beta, transA=transA, transB=transB)
onnx_node.set_weight_inputs(onnx_node.inputs[1], 'W')
bias = False
if len(onnx_node.inputs) == 3:
onnx_node.set_attr_inputs(onnx_node.inputs[2], 'b')
bias = True
return operator(None,
alpha=alpha,
beta=beta,
transA=transA,
transB=transB,
bias=bias)

@classmethod
def _create_flatten(cls, onnx_node, operator, opset_version=_opset_version):
@@ -1470,10 +1492,10 @@ def _create_slice(cls, onnx_node, operator, opset_version=_opset_version):
"""
onnx_node.set_attr_inputs(onnx_node.inputs[1], 'starts')
onnx_node.set_attr_inputs(onnx_node.inputs[2], 'ends')
if len(onnx_node.inputs) >= 3 and onnx_node.inputs[2] != '':
onnx_node.set_attr_inputs(onnx_node.inputs[2], 'axes')
if len(onnx_node.inputs) == 4:
onnx_node.set_attr_inputs(onnx_node.inputs[3], 'steps')
if len(onnx_node.inputs) >= 4 and onnx_node.inputs[3] != '':
onnx_node.set_attr_inputs(onnx_node.inputs[3], 'axes')
if len(onnx_node.inputs) == 5 and onnx_node.inputs[4] != '':
onnx_node.set_attr_inputs(onnx_node.inputs[4], 'steps')
return operator(None, None, None, None)

@classmethod
@@ -1487,10 +1509,9 @@ def _create_clip(cls, onnx_node, operator, opset_version=_opset_version):
Returns:
singa operator instance
"""
onnx_node.set_attr_inputs(onnx_node.inputs[1], 'starts')
if len(onnx_node.inputs) >= 2 and onnx_node.inputs[1] != '':
onnx_node.set_attr_inputs(onnx_node.inputs[1], 'min')
if len(onnx_node.inputs) == 3:
if len(onnx_node.inputs) == 3 and onnx_node.inputs[2] != '':
onnx_node.set_attr_inputs(onnx_node.inputs[2], 'max')
return operator(None, None)

@@ -1575,7 +1596,7 @@ def _create_max_avg_pool(cls,
auto_pad = utils.force_unicode(onnx_node.getattr('auto_pad', 'NOTSET'))

# ceil_mode and count_include_pad are not supported
ceil_mode = onnx_node.getattr('pads', 0)
ceil_mode = onnx_node.getattr('ceil_mode', 0)
count_include_pad = onnx_node.getattr('count_include_pad', 0)
if ceil_mode != 0 or count_include_pad != 0:
raise ValueError(
@@ -1599,7 +1620,7 @@ def _onnx_constant_to_np(cls, onnx_node, opset_version):
a numpy ndarray
"""
onnx_tensor = onnx_node.getattr('value')
np_dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_tensor.data_type]
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_tensor.data_type]
np_tensor = np.frombuffer(onnx_tensor.raw_data, dtype=np_dtype)
return tensor.from_numpy(np_tensor)

@@ -1618,14 +1639,17 @@ def _onnx_node_to_singa_op(cls, onnx_node, opset_version=_opset_version):
onnx_op_type)
renamed_op = cls._rename_operators[onnx_op_type]
if renamed_op.startswith('layer.'):
singa_op = getattr(layer, renamed_op[6:])
op_class = getattr(layer, renamed_op[6:])
else:
singa_op = getattr(autograd, renamed_op)
op_class = getattr(autograd, renamed_op)
if onnx_node.op_type in cls._special_operators:
translator = getattr(cls, cls._special_operators[onnx_node.op_type])
return translator(onnx_node, singa_op, opset_version)
op = translator(onnx_node, op_class, opset_version)
else:
return singa_op()
op = op_class()
# drop empty (optional) inputs from the ONNXNode
onnx_node.inputs = [inp for inp in onnx_node.inputs if inp != '']
return op

@classmethod
def run_node(cls, node, inputs, device='CPU', opset_version=_opset_version):
@@ -1650,7 +1674,7 @@ def run_node(cls, node, inputs, device='CPU', opset_version=_opset_version):
weights = {}
_inputs = []
for (key, val) in zip(valid_inputs, inputs):
val = val.astype(cls._type_map[val.dtype])
val = val.astype(onnx_type_to_singa_type(val.dtype))
if key in node.weight_inputs:
weights[key] = val
else:
@@ -1703,7 +1727,7 @@ def _parse_graph_params(cls, graph, device):
initializers = {t.name for t in graph.initializer}
for tp in graph.initializer:
val = numpy_helper.to_array(tp)
val = val.astype(cls._type_map[val.dtype])
val = val.astype(onnx_type_to_singa_type(val.dtype))
params[tp.name] = val
return params

@@ -1854,14 +1878,53 @@ def to_tensors(self, x):
tensor_dict = {}
# init inputs as Tensor
for (key, val) in zip(self.inputs, x):
singa_type = SingaBackend._type_map[key.dtype]
np_type = singa_type_map[singa_type]
val = val.astype(np_type)
ts = tensor.from_numpy(val)
ts.to_device(self.dev)
tensor_dict[key.name] = ts
val = val.astype(onnx_type_to_singa_type(key.dtype))
# todo: scalar (0-d) inputs are left as numpy values for now
if val.ndim != 0:
val = tensor.from_numpy(val)
val.to_device(self.dev)
tensor_dict[key.name] = val
return tensor_dict

def get_states(self, name, node, tensor_dict):
"""
get state from the node's weights or tensor_dict
Args:
name (str): name of the state
node (ONNXNode): ONNX node
tensor_dict ({}): tensor dict
Returns:
the states
"""
if name in node.attr_inputs:
return tensor_dict[name]
else:
return self.states[name]
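
In other words, a value wired in through attr_inputs (and thus present in tensor_dict) takes precedence over the stored initializer state. A tiny standalone sketch of that precedence, with plain dicts standing in for the real node and backend state (names illustrative, not from this commit):

# illustrative sketch, not part of this diff
attr_inputs = {'W': True}                # stands in for node.attr_inputs
tensor_dict = {'W': 'runtime tensor'}    # tensors computed so far
states = {'W': 'graph initializer'}      # weights parsed from the graph
name = 'W'
value = tensor_dict[name] if name in attr_inputs else states[name]
assert value == 'runtime tensor'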

def handle_special_ops(self, node, op, tensor_dict):
"""
handle some special operations whose attributes depend on the input shapes
Args:
node (ONNXNode): ONNX node
op: the singa operator or layer created for this node
tensor_dict ({}): tensor dict
"""
# todo, hard code
# Conv2d nb_kernels
if node.op_type == "Conv":
shape = self.get_states(node.inputs[1], node, tensor_dict).shape
op.nb_kernels = shape[0]
# Gemm nb_kernels and bias_shape
elif node.op_type == "Gemm":
nb_kernels_flag = 0 if op.transB == 1 else 1
shape = self.get_states(node.inputs[1], node, tensor_dict).shape
op.nb_kernels = shape[nb_kernels_flag]
if op.bias:
shape = self.get_states(node.inputs[2], node, tensor_dict).shape
op.bias_shape = shape
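
The Gemm branch above only decides which axis of the weight holds the output dimension; a minimal standalone sketch of that index flip (function name and shapes are illustrative, not from this commit):

# illustrative sketch, not part of this diff
def gemm_nb_kernels(w_shape, transB):
    # transB == 1 means W is stored as (nb_kernels, K); otherwise as (K, nb_kernels)
    return w_shape[0 if transB == 1 else 1]

assert gemm_nb_kernels((128, 784), transB=1) == 128
assert gemm_nb_kernels((784, 128), transB=0) == 128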

def run(self, x, **kwargs):
"""
run the forward of singa model
@@ -1871,6 +1934,7 @@ def run(self, x, **kwargs):
a list of outputs
"""
outputs_dict = OrderedDict([(outp.name, None) for outp in self.outputs])
outputs_info = {outp.name: outp for outp in self.outputs}
# last_layers: run only the first `last_layers` layers of the model
last_layers = kwargs.get('last_layers', len(self._layers))
if last_layers != len(self._layers):
@@ -1882,14 +1946,7 @@ def run(self, x, **kwargs):
# run the layers in topological order
for node in self._layers[:last_layers]:
op = self.__dict__[node.name]
# hard code: handle the nb_kernels of conv2d
if node.op_type == "Conv":
if "W" in node.attr_inputs:
shape = tensor_dict["W"].shape
else:
shape = self.states["W"].shape
# assert False, "{}".format(shape)
op.nb_kernels = shape[0]
self.handle_special_ops(node, op, tensor_dict)
inputs = [
tensor_dict[inp]
for inp in node.inputs
@@ -1908,7 +1965,10 @@ def run(self, x, **kwargs):

# replace attrs by inputs
for key, name in node.attr_inputs.items():
states[name] = tensor.to_numpy(tensor_dict[key])
ts = tensor_dict[key]
if isinstance(ts, tensor.Tensor):
ts = tensor.to_numpy(ts)
states[name] = ts
# set states
if callable(getattr(op, "set_states", None)):
op.set_states(**states)
@@ -1928,7 +1988,12 @@ def run(self, x, **kwargs):
for (outp, val) in zip(node.outputs, outputs):
tensor_dict[outp] = val
if outp in outputs_dict:
outputs_dict[outp] = tensor.to_numpy(val)
np_tensor = tensor.to_numpy(val)
if outp in outputs_info:
np_dtyp = mapping.TENSOR_TYPE_TO_NP_TYPE[
outputs_info[outp].dtype]
np_tensor = np_tensor.astype(np_dtyp)
outputs_dict[outp] = np_tensor
return outputs_dict.values()



0 comments on commit 70a0d7e
