[Frontend] Prevent tflite frontend from producing int64 shape/parameters (apache#7030)
merrymercy authored and electriclilies committed Feb 18, 2021
1 parent b1cb468 commit c8122eb
Showing 2 changed files with 30 additions and 14 deletions.
11 changes: 11 additions & 0 deletions python/tvm/relay/frontend/common.py
@@ -601,3 +601,14 @@ def __call__(self, inputs, attrs, *args):
         if "tvm_custom" in attrs:
             attrs.pop("tvm_custom")
         return get_relay_op(self._new_name)(*inputs, **attrs)
+
+
+def to_int_list(np_array):
+    """Convert a np array to a python int list.
+
+    Note: This function converts np.int32 to python's int.
+    If we don't do this conversion, numpy's automatic upcast will make
+    the shape / parameters be converted to int64 IntImm in relay and
+    cause problems in relay/TOPI.
+    """
+    return [int(x) for x in np_array]
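The helper's effect is easy to check in plain Python. A minimal sketch (not from the patch; the shape value is invented) contrasting element types with and without the conversion:

import numpy as np

# A shape as TFLite's flatbuffer accessors return it: an int32 array.
shape = np.array([1, 224, 224, 3], dtype=np.int32)

# list()/tuple() keep NumPy scalar types in each element ...
print(type(list(shape)[0]))              # <class 'numpy.int32'>

# ... while to_int_list yields plain python ints, which relay can keep
# as int32 IntImm instead of upcasting to int64 (per the docstring above).
print(type([int(x) for x in shape][0]))  # <class 'int'>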
33 changes: 19 additions & 14 deletions python/tvm/relay/frontend/tflite.py
Expand Up @@ -30,7 +30,7 @@
from .. import qnn as _qnn
from ... import nd as _nd
from .common import ExprTable
from .common import infer_shape as _infer_shape
from .common import infer_shape as _infer_shape, to_int_list
from .tflite_flexbuffer import FlexBufferDecoder


@@ -345,7 +345,7 @@ def get_tensor_value(self, tensor_wrapper):
         data = tensor_wrapper.buffer.DataAsNumpy()
 
         if tensor_wrapper.tensor.ShapeLength() != 0:
-            shape = tensor_wrapper.tensor.ShapeAsNumpy()
+            shape = to_int_list(tensor_wrapper.tensor.ShapeAsNumpy())
         else:
             shape = []
 
@@ -503,7 +503,7 @@ def convert_reshape(self, op):
         op_options = op.BuiltinOptions()
         reshape_options = ReshapeOptions()
         reshape_options.Init(op_options.Bytes, op_options.Pos)
-        target_shape = tuple(reshape_options.NewShapeAsNumpy())
+        target_shape = to_int_list(reshape_options.NewShapeAsNumpy())
 
         in_expr = self.get_expr(input_tensor_idx)
 
@@ -1387,7 +1387,7 @@ def convert_gather(self, op):
         axis = gather_options.Axis()
 
         # Check the indices are with in bounds.
-        data_shape = list(input_tensors[0].tensor.ShapeAsNumpy())
+        data_shape = to_int_list(input_tensors[0].tensor.ShapeAsNumpy())
         data_dim = len(data_shape)
 
         axis = data_dim + axis if axis < 0 else axis
@@ -1505,7 +1505,7 @@ def convert_strided_slice(self, op):
         new_axis_mask = options.NewAxisMask()
         shrink_axis_mask = options.ShrinkAxisMask()
 
-        data_shape = list(input_tensors[0].tensor.ShapeAsNumpy())
+        data_shape = to_int_list(input_tensors[0].tensor.ShapeAsNumpy())
         data_dim = len(data_shape)
         stride_dim = len(stride)
 
@@ -1757,7 +1757,7 @@ def convert_fully_connected(self, op):
         output_tensor_type = output_tensor.tensor.Type()
         output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
 
-        weight_tensor_shape = weight_tensor.tensor.ShapeAsNumpy()
+        weight_tensor_shape = to_int_list(weight_tensor.tensor.ShapeAsNumpy())
 
         # Weight should have only 2 dimensions(TFLite convention)
         assert len(weight_tensor_shape) == 2, "Weight should be only 2-dim"
@@ -1951,15 +1951,17 @@ def convert_conv(self, op, conv_type):
         padding = conv_options.Padding()
         fused_activation_fn = conv_options.FusedActivationFunction()
 
-        _, input_h, input_w, input_c = input_tensor.tensor.ShapeAsNumpy()
+        _, input_h, input_w, input_c = to_int_list(input_tensor.tensor.ShapeAsNumpy())
 
         if is_depthwise_conv:
             # TFLite depthwise convolution kernel layout is:
             # 1 KH KW C(input_c * depth_multiplier)
-            _, kernel_h, kernel_w, in_channels = weight_tensor.tensor.ShapeAsNumpy()
+            _, kernel_h, kernel_w, in_channels = to_int_list(weight_tensor.tensor.ShapeAsNumpy())
             assert in_channels == input_c * depth_multiplier
         else:
-            output_channels, kernel_h, kernel_w, _ = weight_tensor.tensor.ShapeAsNumpy()
+            output_channels, kernel_h, kernel_w, _ = to_int_list(
+                weight_tensor.tensor.ShapeAsNumpy()
+            )
 
         dilated_kernel_h = dilation_h * (kernel_h - 1) + 1
         dilated_kernel_w = dilation_w * (kernel_w - 1) + 1
@@ -2007,6 +2009,7 @@ def convert_conv(self, op, conv_type):
             pass
         elif padding == Padding.SAME:
             pad_top, pad_bottom = get_pad_value(input_h, dilated_kernel_h, stride_h)
+
             pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w, stride_w)
             do_pad = not (pad_top == 0 and pad_bottom == 0 and pad_left == 0 and pad_right == 0)
             if do_pad:
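The SAME-padding branch above relies on get_pad_value, which this commit leaves untouched. A rough sketch of that helper, written from the standard TFLite SAME-padding formula rather than copied from the file:

import math

def get_pad_value(data, kernel, stride):
    # SAME padding: choose pads so the output extent = ceil(data / stride).
    out = int(math.ceil(float(data) / float(stride)))
    pad = max(0, (out - 1) * stride + kernel - data)
    pad_before = pad // 2
    pad_after = pad - pad_before
    return pad_before, pad_after

# Example: width 224, dilated kernel 3, stride 2 -> one pixel of padding.
print(get_pad_value(224, 3, 2))  # (0, 1)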
@@ -2160,7 +2163,7 @@ def convert_slice(self, op):
         size = list(self.get_tensor_value(input_tensors[2]))
         # strided_slice(Relay) needs the slice's end indices, not the size
         end = size
-        input_tensor_shape = input_tensor.tensor.ShapeAsNumpy()
+        input_tensor_shape = to_int_list(input_tensor.tensor.ShapeAsNumpy())
         input_tensor_rank = len(input_tensor_shape)
         for i in range(input_tensor_rank):
             if size[i] == -1:
@@ -2322,7 +2325,7 @@ def convert_pool2d(self, op, pool_type):
 
         in_expr = self.get_expr(input_tensor_idx)
 
-        _, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()
+        _, input_h, input_w, _ = to_int_list(input_tensor.tensor.ShapeAsNumpy())
         if padding == Padding.VALID:
             pass
         elif padding == Padding.SAME:
@@ -2701,10 +2704,12 @@ def convert_transpose_conv(self, op):
 
         # Input (data) Tensor. NHWC layout
         input_tensor = input_tensors[2]
-        _, input_h, input_w, input_c = input_tensor.tensor.ShapeAsNumpy()
+        _, input_h, input_w, input_c = to_int_list(input_tensor.tensor.ShapeAsNumpy())
         # Weights tensor. TFLite uses OHWI layout
         weights_tensor = input_tensors[1]
-        out_channels, kernel_h, kernel_w, in_channels = weights_tensor.tensor.ShapeAsNumpy()
+        out_channels, kernel_h, kernel_w, in_channels = to_int_list(
+            weights_tensor.tensor.ShapeAsNumpy()
+        )
         assert (
             input_c == in_channels
         ), "Input channel in the filter should match to channel in the input"
@@ -3120,7 +3125,7 @@ def convert_matrix_diag(self, op):
         ), "TFLite MATRIX_DIAG requires diagonal and output tensors' \
             scale and zero points to be equal"
 
-        shape = diagonal.tensor.ShapeAsNumpy()
+        shape = to_int_list(diagonal.tensor.ShapeAsNumpy())
         shape = np.append(shape, shape[-1])
         dtype = self.get_tensor_type_str(diagonal.tensor.Type())
 
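One more sketch (not from the patch; values invented) of why the tuple-unpacking call sites above were converted as well: unpacking a raw NumPy shape yields NumPy scalars, which then leak into every derived parameter:

import numpy as np

nhwc = np.array([1, 224, 224, 3], dtype=np.int32)

# Unpacking the raw array gives np.int32 scalars, not python ints.
_, input_h, input_w, input_c = nhwc
print(type(input_h))                # <class 'numpy.int32'>

# After the to_int_list-style conversion, derived values such as a
# dilated kernel extent stay plain python ints.
_, input_h, input_w, input_c = [int(x) for x in nhwc]
print(type(3 * (input_h - 1) + 1))  # <class 'int'>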
