Follow up from CMSIS-NN pooling failure (apache#9708)
This commit fixes a few comments in the RelayToTIR pass of the CMSIS-NN target.
These comments specify the layout expected by the CMSIS-NN API
for the input and filter shapes.

Another fix was made to the filter layout calculations.
Instead of hard-coded dimension indices,
kernel_layout.find("H") and kernel_layout.find("W") are used to locate
the height and width positions in the layout string.
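A minimal Python sketch of the idea (the pass itself does this in C++ via std::string::find; the "HWOI" layout and the shape below are assumed example values, not taken from the commit):

    kernel_layout = "HWOI"                  # assumed example layout string
    filter_shape = [3, 3, 64, 1]            # dims ordered as H, W, O, I
    filter_pos_h = kernel_layout.find("H")  # 0 for this layout
    filter_pos_w = kernel_layout.find("W")  # 1 for this layout
    filter_h = filter_shape[filter_pos_h]   # 3, located via the layout string
    filter_w = filter_shape[filter_pos_w]   # 3, instead of a hard-coded index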

A third fix updates the padding helper used by the Conv2D and Pooling tests
so that it always computes TFLu's "SAME" padding.
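Worked through by hand for an assumed 28x28 input with a 3x3 kernel, stride 2, and dilation 1 (values chosen for illustration only; this mirrors the helper in the diff below):

    import math

    in_dim, kernel, stride, dilation = 28, 3, 2, 1
    dilated_kernel = dilation * (kernel - 1) + 1                # 3
    out = int(math.ceil(float(in_dim) / float(stride)))         # 14
    pad = max(0, (out - 1) * stride + dilated_kernel - in_dim)  # 1
    pad_top, pad_bottom = pad, 0  # CMSIS-NN expects the odd padding at top/left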
ashutosh-arm authored and baoxinqi committed Dec 27, 2021
1 parent 259c877 commit f2195b1
Showing 2 changed files with 19 additions and 11 deletions.
11 changes: 8 additions & 3 deletions src/relay/backend/contrib/cmsisnn/relay_to_tir.cc
@@ -156,10 +156,12 @@ class RelayToTIRVisitor : public MixedModeMutator {
                           ToArg(dilation_w), ToArg(dilation_h), ToArg(clip_min),
                           ToArg(clip_max)};

-    // layout NHWC
+    // CMSIS-NN data structure "cmsis_nn_dims" for ifm expects input layout as NHWC
+    // This is the same layout we expect in Relay
     Array<PrimExpr> input_shape = conv2d_call->args[0]->type_as<TensorTypeNode>()->shape;

-    // OHWI for Conv2D and IHWO for depthwise
+    // CMSIS-NN data structure "cmsis_nn_dims" for weights expects following layouts
+    // OHWI for Conv2D and IHWO for Depthwise convolutions
     Array<PrimExpr> filter_shape = conv2d_call->args[1]->type_as<TensorTypeNode>()->shape;

     Array<PrimExpr> bias_shape{1, 1, 1, out_channels};
@@ -179,7 +181,10 @@ class RelayToTIRVisitor : public MixedModeMutator {
     std::string cmsisnn_api = "arm_convolve_wrapper_s8";
     if (depth_multiplier != -1) {
       cmsisnn_api = "arm_depthwise_conv_wrapper_s8";
-      Array<PrimExpr> depthwise_filter_shape{1, filter_shape[0], filter_shape[1], out_channels};
+      int filter_pos_h = kernel_layout.find("H");
+      int filter_pos_w = kernel_layout.find("W");
+      Array<PrimExpr> depthwise_filter_shape{1, filter_shape[filter_pos_h],
+                                             filter_shape[filter_pos_w], out_channels};
       filter_shape = depthwise_filter_shape;
     }

19 changes: 11 additions & 8 deletions tests/python/contrib/test_cmsisnn/utils.py
@@ -86,17 +86,20 @@ def make_module(func):
     return mod


-def get_same_padding(data, kernel, dilation, stride, cmsisnn_padding=True):
-    """Provides CMSIS-NN padding when output dim == input dim"""
+def get_same_padding(in_shape, kernel, dilation, stride):
+    """
+    Provides CMSIS-NN padding when output dim == input dim.
+    This is TFLu's "SAME" padding case.
+    """
     dilated_kernel_h = dilation[0] * (kernel[0] - 1) + 1
     dilated_kernel_w = dilation[1] * (kernel[1] - 1) + 1
-    out = int(math.ceil(float(data[0]) / float(stride[0])))
-    pad = max(0, (out - 1) * stride[0] + dilated_kernel_h - data[0])
-    pad_top, pad_bottom = (pad, 0) if cmsisnn_padding else (0, pad)
+    out = int(math.ceil(float(in_shape[0]) / float(stride[0])))
+    pad = max(0, (out - 1) * stride[0] + dilated_kernel_h - in_shape[0])
+    pad_top, pad_bottom = (pad, 0)

-    out = int(math.ceil(float(data[1]) / float(stride[1])))
-    pad = max(0, (out - 1) * stride[1] + dilated_kernel_w - data[1])
-    pad_left, pad_right = (pad, 0) if cmsisnn_padding else (0, pad)
+    out = int(math.ceil(float(in_shape[1]) / float(stride[1])))
+    pad = max(0, (out - 1) * stride[1] + dilated_kernel_w - in_shape[1])
+    pad_left, pad_right = (pad, 0)
     return [pad_top, pad_left, pad_bottom, pad_right]

