Skip to content

Commit

Permalink
using tensor=OutputNode, using operator=Node
Browse files Browse the repository at this point in the history
  • Loading branch information
csy0225 committed Mar 2, 2022
1 parent 537f450 commit 558a882
Show file tree
Hide file tree
Showing 15 changed files with 91 additions and 102 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ int ConvertBatchNormalization(Converter* converter,
core::Operation* operation) {
BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
&dilation_width);
}

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand All @@ -58,7 +58,7 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
auto ov_pads_end =
ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom),
static_cast<std::ptrdiff_t>(pad_width_right)});
std::shared_ptr<OutputNode> output_tensor{nullptr};
std::shared_ptr<Tensor> output_tensor{nullptr};
auto conv2d_op = std::make_shared<default_opset::Convolution>(*input_tensor,
*filter_tensor,
ov_strides,
Expand All @@ -68,10 +68,10 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
ov_auto_pad);
output_tensor = MAP_OUTPUT(output_operand, conv2d_op, 0);
// Bias
auto unsqueeze_output_tensor = converter->AddUnsqueezeOutputNode(
auto unsqueeze_op = converter->AddUnsqueezeOperator(
bias_operand, std::vector<size_t>({3}), std::vector<int64_t>({0, 2, 3}));
auto add_op = std::make_shared<default_opset::Add>(*output_tensor,
*unsqueeze_output_tensor);
unsqueeze_op->output(0));
output_tensor = MAP_OUTPUT(output_operand, add_op, 0);
// Fuse activation
switch (fuse_code) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ namespace intel_openvino {
#undef REGISTER_CONVERTER

int Converter::Apply(core::Model* model) {
// Convert the NNAdapter operations to the aml operators
std::vector<core::Operation*> operations =
SortOperationsInTopologicalOrder(model);
for (auto operation : operations) {
Expand All @@ -57,57 +56,54 @@ int Converter::Apply(core::Model* model) {
return NNADAPTER_NO_ERROR;
}

std::shared_ptr<OutputNode> Converter::GetMappedOutputNode(
core::Operand* operand) {
auto it = output_nodes_->find(operand);
if (it != output_nodes_->end()) {
std::shared_ptr<Tensor> Converter::GetMappedTensor(core::Operand* operand) {
auto it = tensor_map_->find(operand);
if (it != tensor_map_->end()) {
return it->second.back();
}
return nullptr;
}

std::shared_ptr<OutputNode> Converter::UpdateOutputNodeMap(
core::Operand* operand, std::shared_ptr<OutputNode> output_node) {
auto it = output_nodes_->find(operand);
if (it == output_nodes_->end()) {
auto result = output_nodes_->insert(
std::make_pair(operand, std::vector<std::shared_ptr<OutputNode>>()));
std::shared_ptr<Tensor> Converter::UpdateTensorMap(
core::Operand* operand, std::shared_ptr<Tensor> tensor) {
auto it = tensor_map_->find(operand);
if (it == tensor_map_->end()) {
auto result = tensor_map_->insert(
std::make_pair(operand, std::vector<std::shared_ptr<Tensor>>()));
NNADAPTER_CHECK(result.second);
it = result.first;
}
output_node->set_names({OperandIdToString(operand)});
it->second.push_back(output_node);
return output_node;
tensor->set_names({OperandIdToString(operand)});
it->second.push_back(tensor);
return tensor;
}

std::shared_ptr<OutputNode> Converter::ConvertOperand(
std::shared_ptr<Tensor> Converter::ConvertOperand(
core::Operand* operand, std::vector<int32_t> dimensions) {
if (dimensions.empty()) {
for (uint32_t i = 0; i < operand->type.dimensions.count; i++) {
dimensions.push_back(operand->type.dimensions.data[i]);
}
}
if (IsConstantOperand(operand)) {
auto constant_node = std::make_shared<default_opset::Constant>(
auto constant_op = std::make_shared<default_opset::Constant>(
ConvertToOVElementType(operand->type.precision),
ConvertToOVShape(dimensions),
operand->buffer);
std::shared_ptr<OutputNode> output_node =
std::make_shared<OutputNode>(constant_node->output(0));
UpdateOutputNodeMap(operand, output_node);
return output_node;
auto output_tensor = std::make_shared<Tensor>(constant_op->output(0));
UpdateTensorMap(operand, output_tensor);
return output_tensor;
} else if (IsModelInputOperand(operand)) {
auto parameter_node = std::make_shared<default_opset::Parameter>(
ConvertToOVElementType(operand->type.precision),
ConvertToOVShape(dimensions));
parameter_nodes_->push_back(parameter_node);
std::shared_ptr<OutputNode> output_node =
std::make_shared<OutputNode>(parameter_node->output(0));
UpdateOutputNodeMap(operand, output_node);
return output_node;
auto output_tensor = std::make_shared<Tensor>(parameter_node->output(0));
UpdateTensorMap(operand, output_tensor);
return output_tensor;
}
NNADAPTER_LOG(FATAL) << "Only constant and model input operands can be "
"converted to OpenVINO OutputNode!";
"converted to OpenVINO Tensor!";
return nullptr;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,47 +27,51 @@ class Converter {
public:
explicit Converter(
std::vector<std::shared_ptr<default_opset::Parameter>>* paramter_nodes,
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
output_nodes)
: parameter_nodes_(paramter_nodes), output_nodes_(output_nodes) {}
std::map<core::Operand*, std::vector<std::shared_ptr<Tensor>>>*
tensor_map)
: parameter_nodes_(paramter_nodes), tensor_map_(tensor_map) {}

~Converter() {}

// Convert a NNAdapter model to an intel openvino graph
// Convert a NNAdapter model to OpenVINO graph
int Apply(core::Model* model);

// Convert a NNAdapter operand to an intel openvino OutputNode
std::shared_ptr<OutputNode> ConvertOperand(
core::Operand* operand, std::vector<int32_t> dimensions = {});
// Convert a NNAdapter operand to OpenVINO Tensor
std::shared_ptr<Tensor> ConvertOperand(core::Operand* operand,
std::vector<int32_t> dimensions = {});

std::shared_ptr<OutputNode> UpdateOutputNodeMap(
core::Operand* operand, std::shared_ptr<OutputNode> output_node);
std::shared_ptr<Tensor> UpdateTensorMap(core::Operand* operand,
std::shared_ptr<Tensor> tensor);

std::shared_ptr<OutputNode> GetMappedOutputNode(core::Operand* operand);
std::shared_ptr<Tensor> GetMappedTensor(core::Operand* operand);

template <typename T>
std::shared_ptr<OutputNode> AddUnsqueezeOutputNode(
core::Operand* operand,
std::vector<size_t> dimensions,
std::vector<T> axes) {
auto axes_node = AddConstOutputNode(dimensions, axes);
auto y_node = ConvertOperand(operand);
auto unsqueeze_node =
std::make_shared<default_opset::Unsqueeze>(*y_node, *axes_node);
return std::make_shared<OutputNode>(unsqueeze_node->output(0));
std::shared_ptr<Operator> AddUnsqueezeOperator(core::Operand* operand,
std::vector<size_t> dimensions,
std::vector<T> axes) {
auto input_tensor = ConvertOperand(operand);
auto axes_operator = AddConstantOperator(dimensions, axes);
return std::make_shared<default_opset::Unsqueeze>(*input_tensor,
axes_operator->output(0));
}

template <typename T>
std::shared_ptr<Operator> AddConstantOperator(std::vector<size_t> dimensions,
std::vector<T> values) {
return std::make_shared<default_opset::Constant>(
GetElementType<T>(), Shape(dimensions), values);
}

private:
std::vector<std::shared_ptr<default_opset::Parameter>>* parameter_nodes_;
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
output_nodes_;
std::map<core::Operand*, std::vector<std::shared_ptr<Tensor>>>* tensor_map_;
};

// Wraps output `output_index` of `op` in a Tensor and records it as the
// current binding for `output_operand`; evaluates to the new Tensor (uses the
// GNU statement-expression extension). NOTE: the macro parameter must not be
// named `operator` — that is a C++ keyword and non-portable as a parameter.
#define MAP_OUTPUT(output_operand, op, output_index)        \
  ({                                                        \
    converter->UpdateTensorMap(                             \
        output_operand,                                     \
        std::make_shared<Tensor>(op->output(output_index))); \
  })

} // namespace intel_openvino
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,16 +23,16 @@ namespace intel_openvino {
int ConvertElementwise(Converter* converter, core::Operation* operation) {
ELEMENTWISE_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input0_tensor = converter->GetMappedOutputNode(input0_operand);
// Convert operand to OpenVINO Tensor
auto input0_tensor = converter->GetMappedTensor(input0_operand);
if (!input0_tensor) {
input0_tensor = converter->ConvertOperand(input0_operand);
}
auto input1_tensor = converter->GetMappedOutputNode(input1_operand);
auto input1_tensor = converter->GetMappedTensor(input1_operand);
if (!input1_tensor) {
input1_tensor = converter->ConvertOperand(input1_operand);
}
std::shared_ptr<OutputNode> output_tensor{nullptr};
std::shared_ptr<Tensor> output_tensor{nullptr};
switch (operation->type) {
#define CONVERT_ELEMENTWISE(type, class_name) \
case NNADAPTER_##type: { \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,12 @@ namespace intel_openvino {
int ConvertMatMul(Converter* converter, core::Operation* operation) {
MAT_MUL_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto x_tensor = converter->GetMappedOutputNode(x_operand);
// Convert operand to OpenVINO Tensor
auto x_tensor = converter->GetMappedTensor(x_operand);
if (!x_tensor) {
x_tensor = converter->ConvertOperand(x_operand);
}
auto y_tensor = converter->GetMappedOutputNode(y_operand);
auto y_tensor = converter->GetMappedTensor(y_operand);
if (!y_tensor) {
y_tensor = converter->ConvertOperand(y_operand);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ namespace intel_openvino {
int ConvertPool2D(Converter* converter, core::Operation* operation) {
POOL_2D_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand All @@ -42,12 +42,12 @@ int ConvertPool2D(Converter* converter, core::Operation* operation) {
std::shared_ptr<Node> pool2d_op{nullptr};
if (operation->type == NNADAPTER_AVERAGE_POOL_2D) {
if (global_pooling) {
auto axes_tensor = AddConstOutputNode(
auto axes = converter->AddConstantOperator(
{2},
std::vector<int64_t>({input_operand->type.dimensions.count - 2,
input_operand->type.dimensions.count - 1}));
pool2d_op = std::make_shared<default_opset::ReduceMean>(
*input_tensor, *axes_tensor, true);
*input_tensor, axes->output(0), true);
} else {
pool2d_op = std::make_shared<default_opset::AvgPool>(*input_tensor,
ov_strides,
Expand All @@ -60,12 +60,12 @@ int ConvertPool2D(Converter* converter, core::Operation* operation) {
}
} else if (operation->type == NNADAPTER_MAX_POOL_2D) {
if (global_pooling) {
auto axes_tensor = AddConstOutputNode(
auto axes = converter->AddConstantOperator(
{2},
std::vector<int64_t>({input_operand->type.dimensions.count - 2,
input_operand->type.dimensions.count - 1}));
pool2d_op = std::make_shared<default_opset::ReduceMax>(
*input_tensor, *axes_tensor, true);
*input_tensor, axes->output(0), true);
} else {
pool2d_op = std::make_shared<default_opset::MaxPool>(*input_tensor,
ov_strides,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ namespace intel_openvino {
int ConvertReshape(Converter* converter, core::Operation* operation) {
RESHAPE_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand All @@ -39,11 +39,11 @@ int ConvertReshape(Converter* converter, core::Operation* operation) {
shape_data[i] = input_operand->type.dimensions.data[i];
}
}
auto shape_tensor = AddConstOutputNode(
auto shape_operator = converter->AddConstantOperator(
{shape_count},
std::vector<int32_t>(shape_data, shape_data + shape_count));
auto reshape_op = std::make_shared<default_opset::Reshape>(
*input_tensor, *shape_tensor, true);
*input_tensor, shape_operator->output(0), true);
MAP_OUTPUT(output_operand, reshape_op, 0);
} else {
NNADAPTER_LOG(FATAL) << "Unsupported shape lifetime: "
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ namespace intel_openvino {
int ConvertSoftmax(Converter* converter, core::Operation* operation) {
SOFTMAX_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ namespace intel_openvino {
int ConvertUnaryActivations(Converter* converter, core::Operation* operation) {
UNARY_ACTIVATIONS_OPERATION_EXTRACT_INPUTS_OUTPUTS

// Convert operand to OpenVINO OutputNode
auto input_tensor = converter->GetMappedOutputNode(input_operand);
// Convert operand to OpenVINO Tensor
auto input_tensor = converter->GetMappedTensor(input_operand);
if (!input_tensor) {
input_tensor = converter->ConvertOperand(input_operand);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,5 +26,7 @@ using PadType = ov::op::PadType;
using ElementType = ov::element::Type;
using Shape = ov::Shape;

// Naming aliases for this backend: a "Tensor" is an OpenVINO OutputNode and
// an "Operator" is an OpenVINO Node (see the commit title: using
// tensor=OutputNode, using operator=Node).
using Tensor = OutputNode;
using Operator = Node;
} // namespace intel_openvino
} // namespace nnadapter
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ int CreateProgram(void* context,
core::Model* model,
core::Cache* cache,
void** program) {
NNADAPTER_LOG(INFO) << "Create program for intel_openvino.";
NNADAPTER_LOG(INFO) << "Create program for Intel OpenVINO.";
if (!context || !(model || (cache && cache->buffer.size())) || !program) {
return NNADAPTER_INVALID_PARAMETER;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,7 @@ Context::~Context() {}
int Program::Build(core::Model* model, core::Cache* cache) {
NNADAPTER_LOG(INFO) << "OpenVINO runtime version - "
<< ov::get_openvino_version();
// Initialize OpenVINO Runtime Core object
runtime_core_ = std::make_shared<ov::Core>();
NNADAPTER_LOG(INFO)
<< "NNAdapter has already loaded the OpenVINO Runtime Core!";
auto device_name = context_->GetFirtSelectedDeviceName();
NNADAPTER_LOG(INFO) << device_name << " version - "
<< runtime_core_->get_versions(device_name);
Expand All @@ -76,7 +73,7 @@ int Program::BuildFromCache(core::Cache* cache) {

int Program::BuildFromModel(core::Model* model) {
NNADAPTER_VLOG(5) << "NNAdapter model:" << std::endl << Visualize(model);
Converter converter(&parameter_nodes_, &output_nodes_);
Converter converter(&parameter_nodes_, &tensor_map_);
NNADAPTER_CHECK_EQ(converter.Apply(model), NNADAPTER_NO_ERROR);
// Indentify the inputs and outputs
auto input_count = model->input_operands.size();
Expand All @@ -87,7 +84,7 @@ int Program::BuildFromModel(core::Model* model) {
for (size_t i = 0; i < input_count; i++) {
auto operand = model->input_operands[i];
const auto& type = operand->type;
NNADAPTER_CHECK(output_nodes_.find(operand) != output_nodes_.end());
NNADAPTER_CHECK(tensor_map_.find(operand) != tensor_map_.end());
input_types_[i] = type;
}
}
Expand All @@ -98,13 +95,13 @@ int Program::BuildFromModel(core::Model* model) {
for (size_t i = 0; i < output_count; i++) {
auto operand = model->output_operands[i];
const auto& type = operand->type;
NNADAPTER_CHECK(output_nodes_.find(operand) != output_nodes_.end());
NNADAPTER_CHECK(tensor_map_.find(operand) != tensor_map_.end());
output_types_[i] = type;
auto result_node =
std::make_shared<default_opset::Result>(*output_nodes_[operand].back());
std::make_shared<default_opset::Result>(*tensor_map_[operand].back());
result_nodes_.push_back(result_node);
}
// Convert a NNAdapter model to an Intel OpenVINO's model
// Convert NNAdapter model to OpenVINO model
std::shared_ptr<ov::Model> ov_model = std::make_shared<ov::Model>(
result_nodes_, parameter_nodes_, "openvino_graph");
compiled_model_ =
Expand Down
Loading

0 comments on commit 558a882

Please sign in to comment.