diff --git a/README.md b/README.md
index 5da757c..8c98310 100644
--- a/README.md
+++ b/README.md
@@ -22,11 +22,11 @@ This package is powered by [NVIDIA Isaac Transport for ROS (NITROS)](https://dev
The following table summarizes the per-platform performance statistics of sample graphs that use this package, with links included to the full benchmark output. These benchmark configurations are taken from the [Isaac ROS Benchmark](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark#list-of-isaac-ros-benchmarks) collection, based on the [`ros2_benchmark`](https://github.com/NVIDIA-ISAAC-ROS/ros2_benchmark) framework.
-| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 3060 Ti |
-| ----------------------------------------------------------------------------------------------------------------------------------------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [Rectify Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_rectify_node.py) | 1080p | [875 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-agx_orin.json)
1.8 ms | [479 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-orin_nx.json)
3.8 ms | [333 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-orin_nano_8gb.json)
4.0 ms | [1540 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-x86_64_rtx_3060Ti.json)
0.37 ms |
-| [Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_disparity_node.py) | 1080p | [151 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-agx_orin.json)
9.0 ms | [73.7 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-orin_nx.json)
14 ms | [51.6 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-orin_nano_8gb.json)
22 ms | [451 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-x86_64_rtx_3060Ti.json)
2.9 ms |
-| [Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_disparity_graph.py) | 1080p | [141 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-agx_orin.json)
10 ms | [72.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-orin_nx.json)
16 ms | [48.7 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-orin_nano_8gb.json)
24 ms | [425 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-x86_64_rtx_3060Ti.json)
3.6 ms |
+| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 4060 Ti |
+| ----------------------------------------------------------------------------------------------------------------------------------------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
+| [Rectify Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_rectify_node.py) | 1080p | [509 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-agx_orin.json)
5.6 ms | [385 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-orin_nx.json)
5.1 ms | [283 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-orin_nano.json)
8.5 ms | [819 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_rectify_node-nuc_4060ti.json)
4.4 ms |
+| [Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_disparity_node.py) | 1080p | [158 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-agx_orin.json)
10 ms | [76.8 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-orin_nx.json)
17 ms | [53.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-orin_nano.json)
23 ms | [397 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_node-nuc_4060ti.json)
5.8 ms |
+| [Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_disparity_graph.py) | 1080p | [162 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-agx_orin.json)
14 ms | [75.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-orin_nx.json)
20 ms | [50.6 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-orin_nano.json)
28 ms | [387 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_disparity_graph-nuc_4060ti.json)
8.3 ms |
## Table of Contents
@@ -60,7 +60,7 @@ The following table summarizes the per-platform performance statistics of sample
## Latest Update
-Update 2023-04-05: Source available GXF extensions
+Update 2023-05-25: Improved stereo rectification.
## Supported Platforms
@@ -248,6 +248,7 @@ admin@workstation:/workspaces/isaac_ros-dev$ ros2 launch realsense2_camera rs_l
| Date | Changes |
| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 2023-05-25 | Improved stereo rectification |
| 2023-04-05 | Source available GXF extensions |
| 2022-10-19 | Updated OSS licensing |
| 2022-08-31 | Image flip support and update to be compatible with JetPack 5.0.2 |
diff --git a/isaac_ros_image_pipeline/package.xml b/isaac_ros_image_pipeline/package.xml
index 85e6ab8..972fd11 100644
--- a/isaac_ros_image_pipeline/package.xml
+++ b/isaac_ros_image_pipeline/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
isaac_ros_image_pipeline
- 0.30.0
+ 0.31.0
Core image processing
Hemal Shah
diff --git a/isaac_ros_image_proc/CMakeLists.txt b/isaac_ros_image_proc/CMakeLists.txt
index 9df7655..a33c78d 100644
--- a/isaac_ros_image_proc/CMakeLists.txt
+++ b/isaac_ros_image_proc/CMakeLists.txt
@@ -75,6 +75,10 @@ install(TARGETS gxf_image_flip DESTINATION share/${PROJECT_NAME}/gxf/lib/image_p
add_subdirectory(gxf/tensorops)
install(TARGETS gxf_tensorops DESTINATION share/${PROJECT_NAME}/gxf/lib/image_proc)
+# RectifyParamsGenerator
+add_subdirectory(gxf/rectify_params_generator)
+install(TARGETS gxf_rectify_params_generator DESTINATION share/${PROJECT_NAME}/gxf/lib/image_proc)
+
### End extensions
if(BUILD_TESTING)
@@ -88,12 +92,14 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_image_format_converter_grayscale_test.py)
add_launch_test(test/isaac_ros_image_format_converter_nv24_test.py)
add_launch_test(test/isaac_ros_rectify_test.py)
+ add_launch_test(test/isaac_ros_rectify_timestamp_match_test.py)
+ add_launch_test(test/isaac_ros_rectify_oss_comparison_test.py)
+ add_launch_test(test/isaac_ros_rectify_stereo_epipolar_test.py)
add_launch_test(test/isaac_ros_resize_test.py)
add_launch_test(test/isaac_ros_resize_invalid_test.py)
add_launch_test(test/isaac_ros_image_flip_180_test.py)
add_launch_test(test/isaac_ros_image_flip_hori_test.py)
add_launch_test(test/isaac_ros_image_flip_vert_test.py)
-
endif()
ament_auto_package(INSTALL_TO_SHARE launch config)
diff --git a/isaac_ros_image_proc/config/image_format_converter_substitution.yaml b/isaac_ros_image_proc/config/image_format_converter_substitution.yaml
index cb9fad0..d20efe6 100644
--- a/isaac_ros_image_proc/config/image_format_converter_substitution.yaml
+++ b/isaac_ros_image_proc/config/image_format_converter_substitution.yaml
@@ -22,7 +22,7 @@ body:
groups:
- entities:
- components:
- - type: nvidia::cvcore::tensor_ops::ConvertColorFormat
+ - type: nvidia::isaac::tensor_ops::ConvertColorFormat
parameters:
output_type: "BGR_U8"
receiver: data_receiver
@@ -34,7 +34,7 @@ body:
overriding_parameters: []
- entities:
- components:
- - type: nvidia::cvcore::tensor_ops::StreamConvertColorFormat
+ - type: nvidia::isaac::tensor_ops::StreamConvertColorFormat
parameters:
output_type: "NV24"
receiver: data_receiver
diff --git a/isaac_ros_image_proc/config/namespace_injector_rule_image_format_converter.yaml b/isaac_ros_image_proc/config/namespace_injector_rule_image_format_converter.yaml
index c5fb076..38b48a8 100644
--- a/isaac_ros_image_proc/config/namespace_injector_rule_image_format_converter.yaml
+++ b/isaac_ros_image_proc/config/namespace_injector_rule_image_format_converter.yaml
@@ -20,7 +20,7 @@ name: Image Format Converter Namespace Injector Rule
operation: namespace_injector
body:
components:
- - type: nvidia::cvcore::tensor_ops::ConvertColorFormat
+ - type: nvidia::isaac::tensor_ops::ConvertColorFormat
path_parameter_keys: [input_adapter, output_adapter]
- - type: nvidia::cvcore::tensor_ops::StreamConvertColorFormat
+ - type: nvidia::isaac::tensor_ops::StreamConvertColorFormat
path_parameter_keys: [input_adapter, output_adapter, stream]
diff --git a/isaac_ros_image_proc/config/namespace_injector_rule_rectify.yaml b/isaac_ros_image_proc/config/namespace_injector_rule_rectify.yaml
index 80d38ac..656b9b2 100644
--- a/isaac_ros_image_proc/config/namespace_injector_rule_rectify.yaml
+++ b/isaac_ros_image_proc/config/namespace_injector_rule_rectify.yaml
@@ -20,5 +20,5 @@ name: Image Rectify Namespace Injector Rule
operation: namespace_injector
body:
components:
- - type: nvidia::cvcore::tensor_ops::StreamUndistort
+ - type: nvidia::isaac::tensor_ops::StreamUndistort
path_parameter_keys: [input_adapter, output_adapter, stream]
diff --git a/isaac_ros_image_proc/config/namespace_injector_rule_resize.yaml b/isaac_ros_image_proc/config/namespace_injector_rule_resize.yaml
index b692a89..2288d76 100644
--- a/isaac_ros_image_proc/config/namespace_injector_rule_resize.yaml
+++ b/isaac_ros_image_proc/config/namespace_injector_rule_resize.yaml
@@ -20,5 +20,5 @@ name: Image Resize Namespace Injector Rule
operation: namespace_injector
body:
components:
- - type: nvidia::cvcore::tensor_ops::Resize
+ - type: nvidia::isaac::tensor_ops::Resize
path_parameter_keys: [input_adapter, output_adapter]
diff --git a/isaac_ros_image_proc/config/nitros_image_flip_node.yaml b/isaac_ros_image_proc/config/nitros_image_flip_node.yaml
index f42966c..36e6148 100644
--- a/isaac_ros_image_proc/config/nitros_image_flip_node.yaml
+++ b/isaac_ros_image_proc/config/nitros_image_flip_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -53,7 +53,7 @@ components:
backends: "CUDA"
mode: "BOTH"
---
-name: vault
+name: sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -64,24 +64,27 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
components:
- type: nvidia::gxf::Connection
parameters:
source: image_flip/data_transmitter
- target: vault/signal
+ target: sink/signal
---
name: utils
components:
- name: clock
type: nvidia::gxf::RealtimeClock
-- type: nvidia::gxf::GreedyScheduler
+- type: nvidia::gxf::MultiThreadScheduler
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_ms: 1
+ worker_thread_number: 2
+- type: nvidia::gxf::JobStatistics
+ parameters:
+ clock: clock
diff --git a/isaac_ros_image_proc/config/nitros_image_format_converter_node.yaml b/isaac_ros_image_proc/config/nitros_image_format_converter_node.yaml
index cc9a331..fab5f37 100644
--- a/isaac_ros_image_proc/config/nitros_image_format_converter_node.yaml
+++ b/isaac_ros_image_proc/config/nitros_image_format_converter_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -43,7 +43,7 @@ components:
block_size: 14400000
num_blocks: 40
- name: converter
- type: nvidia::cvcore::tensor_ops::ConvertColorFormat
+ type: nvidia::isaac::tensor_ops::ConvertColorFormat
parameters:
output_type: "RGB_U8"
receiver: data_receiver
@@ -53,7 +53,7 @@ components:
output_adapter: resource/adapter
output_name: "image"
---
-name: vault
+name: sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -64,21 +64,19 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
name: resource
components:
- name: adapter
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "VideoBuffer"
- name: stream
- type: nvidia::cvcore::tensor_ops::TensorStream
+ type: nvidia::isaac::tensor_ops::TensorStream
parameters:
backend_type: "VPI"
engine_type: "GPU"
@@ -87,7 +85,7 @@ components:
- type: nvidia::gxf::Connection
parameters:
source: imageConverter/data_transmitter
- target: vault/signal
+ target: sink/signal
---
name: utils
components:
@@ -97,3 +95,7 @@ components:
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_us: 100
+- type: nvidia::gxf::JobStatistics
+ parameters:
+ clock: clock
diff --git a/isaac_ros_image_proc/config/nitros_rectify_node.yaml b/isaac_ros_image_proc/config/nitros_rectify_node.yaml
index 28f9b2c..9b5d4f9 100644
--- a/isaac_ros_image_proc/config/nitros_rectify_node.yaml
+++ b/isaac_ros_image_proc/config/nitros_rectify_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -73,7 +73,7 @@ components:
transmitter: data_transmitter
min_size: 1
- name: input_camera
- type: nvidia::cvcore::tensor_ops::CameraModel
+ type: nvidia::isaac::tensor_ops::CameraModel
parameters:
distortion_type: "FisheyeEquidistant"
distortion_coefficients: [0, 0, 0, 0,
@@ -82,7 +82,7 @@ components:
principle_point: [0, 0]
skew_value: 0.0
- name: output_camera
- type: nvidia::cvcore::tensor_ops::CameraModel
+ type: nvidia::isaac::tensor_ops::CameraModel
parameters:
distortion_type: "FisheyeEquidistant"
distortion_coefficients: [0, 0, 0, 0,
@@ -91,7 +91,7 @@ components:
principle_point: [0, 0]
skew_value: 0.0
- name: reference_frame
- type: nvidia::cvcore::tensor_ops::Frame3D
+ type: nvidia::isaac::tensor_ops::Frame3D
parameters:
rotation: [1, 0, 0, 0, 1, 0, 0, 0, 1]
translation: [0, 0, 0]
@@ -102,7 +102,7 @@ components:
block_size: 7372800
num_blocks: 40
- name: undistort_algo
- type: nvidia::cvcore::tensor_ops::StreamUndistort
+ type: nvidia::isaac::tensor_ops::StreamUndistort
parameters:
input_camera_model: input_camera
reference_frame: reference_frame
@@ -126,7 +126,7 @@ components:
- name: data_receiver
type: nvidia::gxf::DoubleBufferReceiver
parameters:
- capacity: 1
+ capacity: 2
policy: 0
- type: nvidia::gxf::MessageAvailableSchedulingTerm
parameters:
@@ -157,7 +157,7 @@ components:
image_tx: image_transmitter
camera_model_tx: camera_model_transmitter
---
-name: image_vault
+name: image_sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -168,14 +168,12 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
-name: camerainfo_vault
+name: camerainfo_sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -186,22 +184,20 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
name: resource
components:
- name: stream
- type: nvidia::cvcore::tensor_ops::TensorStream
+ type: nvidia::isaac::tensor_ops::TensorStream
parameters:
backend_type: "VPI"
engine_type: "GPU"
- name: adapter
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "VideoBuffer"
---
@@ -217,17 +213,22 @@ components:
- type: nvidia::gxf::Connection
parameters:
source: splitter/image_transmitter
- target: image_vault/signal
+ target: image_sink/signal
- type: nvidia::gxf::Connection
parameters:
source: splitter/camera_model_transmitter
- target: camerainfo_vault/signal
+ target: camerainfo_sink/signal
---
name: utils
components:
- name: clock
type: nvidia::gxf::RealtimeClock
-- type: nvidia::gxf::GreedyScheduler
+- type: nvidia::gxf::MultiThreadScheduler
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_ms: 1
+ worker_thread_number: 2
+- type: nvidia::gxf::JobStatistics
+ parameters:
+ clock: clock
diff --git a/isaac_ros_image_proc/config/nitros_resize_node.yaml b/isaac_ros_image_proc/config/nitros_resize_node.yaml
index a523e6a..7d2e67e 100644
--- a/isaac_ros_image_proc/config/nitros_resize_node.yaml
+++ b/isaac_ros_image_proc/config/nitros_resize_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -79,7 +79,7 @@ components:
block_size: 7372800
num_blocks: 40
- name: StreamResize
- type: nvidia::cvcore::tensor_ops::Resize
+ type: nvidia::isaac::tensor_ops::Resize
parameters:
output_width: 400
output_height: 300
@@ -98,7 +98,7 @@ components:
- name: data_receiver
type: nvidia::gxf::DoubleBufferReceiver
parameters:
- capacity: 1
+ capacity: 2
policy: 0
- type: nvidia::gxf::MessageAvailableSchedulingTerm
parameters:
@@ -129,7 +129,7 @@ components:
image_tx: image_transmitter
camera_model_tx: camera_model_transmitter
---
-name: image_vault
+name: image_sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -140,14 +140,12 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
-name: camerainfo_vault
+name: camerainfo_sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -158,21 +156,19 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
name: resource
components:
- name: adapter
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "VideoBuffer"
- name: stream
- type: nvidia::cvcore::tensor_ops::TensorStream
+ type: nvidia::isaac::tensor_ops::TensorStream
parameters:
backend_type: "VPI"
engine_type: "CPU"
@@ -189,17 +185,22 @@ components:
- type: nvidia::gxf::Connection
parameters:
source: splitter/image_transmitter
- target: image_vault/signal
+ target: image_sink/signal
- type: nvidia::gxf::Connection
parameters:
source: splitter/camera_model_transmitter
- target: camerainfo_vault/signal
+ target: camerainfo_sink/signal
---
name: utils
components:
- name: clock
type: nvidia::gxf::RealtimeClock
-- type: nvidia::gxf::GreedyScheduler
+- type: nvidia::gxf::MultiThreadScheduler
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_ms: 1
+ worker_thread_number: 2
+- type: nvidia::gxf::JobStatistics
+ parameters:
+ clock: clock
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/CMakeLists.txt b/isaac_ros_image_proc/gxf/rectify_params_generator/CMakeLists.txt
new file mode 100644
index 0000000..b6ab1cb
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/CMakeLists.txt
@@ -0,0 +1,70 @@
+# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+project(gxf_rectify_params_generator LANGUAGES C CXX)
+
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ add_compile_options(-fPIC -w)
+endif()
+
+# Dependencies
+include(YamlCpp)
+find_package(GXF ${ISAAC_ROS_GXF_VERSION} MODULE REQUIRED
+ multimedia
+ isaac_messages
+)
+find_package(OpenCV REQUIRED)
+find_package(Eigen3 3.3 REQUIRED NO_MODULE)
+
+# Create extension
+add_library(gxf_rectify_params_generator SHARED
+ extensions/rectify/rectify.cpp
+ extensions/rectify/components/rectify_params_generator.cpp
+ extensions/rectify/components/rectify_params_generator.hpp
+ extensions/rectify/components/stereo_extrinsics_normalizer.cpp
+ extensions/rectify/components/stereo_extrinsics_normalizer.hpp
+)
+target_include_directories(gxf_rectify_params_generator PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+
+
+add_library(gxf_rectify_utils STATIC
+extensions/rectify/utils/camera_utils.cpp
+extensions/rectify/utils/camera_utils.hpp
+extensions/rectify/utils/utils.cpp
+extensions/rectify/utils/utils.hpp
+)
+target_include_directories(gxf_rectify_utils PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+
+target_link_libraries(gxf_rectify_utils
+ PUBLIC
+ yaml-cpp
+ PRIVATE
+ Eigen3::Eigen
+ GXF::multimedia
+ GXF::isaac_messages
+ ${OpenCV_LIBS}
+)
+
+target_link_libraries(gxf_rectify_params_generator
+ PUBLIC
+ GXF::multimedia
+ GXF::isaac_messages
+ ${OpenCV_LIBS}
+ PRIVATE
+ gxf_rectify_utils
+ Eigen3::Eigen
+)
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.cpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.cpp
new file mode 100644
index 0000000..8cf2046
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.cpp
@@ -0,0 +1,342 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include "extensions/rectify/components/rectify_params_generator.hpp"
+
+#include <cuda_runtime.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "extensions/messages/camera_message.hpp"
+#include "extensions/rectify/utils/camera_utils.hpp"
+#include "gems/gxf_helpers/expected_macro.hpp"
+#include "gxf/multimedia/video.hpp"
+#include "gxf/std/timestamp.hpp"
+
+namespace nvidia {
+namespace isaac {
+namespace {
+
+void PrintRectifyParameters(cv::Matx33d R1, cv::Matx33d R2,
+    cv::Matx34d P1, cv::Matx34d P2, cv::Matx44d Q) {
+  GXF_LOG_DEBUG("left camera Rotation: \n[%g, %g, %g, \n%g, %g, %g, \n%g, %g, %g]\n",
+    R1(0, 0), R1(0, 1), R1(0, 2), R1(1, 0), R1(1, 1), R1(1, 2), R1(2, 0), R1(2, 1), R1(2, 2));
+
+  GXF_LOG_DEBUG("right camera Rotation: \n[%g, %g, %g, \n%g, %g, %g, \n%g, %g, %g]\n",
+    R2(0, 0), R2(0, 1), R2(0, 2), R2(1, 0), R2(1, 1), R2(1, 2), R2(2, 0), R2(2, 1), R2(2, 2));
+
+  GXF_LOG_DEBUG("left camera projection: \n[%g, %g, %g, %g, \n%g, %g, %g, %g,"
+    "\n%g, %g, %g, %g]\n",
+    P1(0, 0), P1(0, 1), P1(0, 2), P1(0, 3),
+    P1(1, 0), P1(1, 1), P1(1, 2), P1(1, 3),
+    P1(2, 0), P1(2, 1), P1(2, 2), P1(2, 3));
+
+  GXF_LOG_DEBUG("right camera projection: \n[%g, %g, %g, %g, \n%g, %g, %g, %g,"
+    "\n%g, %g, %g, %g]\n",
+    P2(0, 0), P2(0, 1), P2(0, 2), P2(0, 3),
+    P2(1, 0), P2(1, 1), P2(1, 2), P2(1, 3),
+    P2(2, 0), P2(2, 1), P2(2, 2), P2(2, 3));
+
+  GXF_LOG_DEBUG("disparity-to-depth: \n[%g, %g, %g, %g, \n%g, %g, %g, %g,"
+    "\n%g, %g, %g, %g, \n%g, %g, %g, %g]\n",
+    Q(0, 0), Q(0, 1), Q(0, 2), Q(0, 3),
+    Q(1, 0), Q(1, 1), Q(1, 2), Q(1, 3),
+    Q(2, 0), Q(2, 1), Q(2, 2), Q(2, 3),
+    Q(3, 0), Q(3, 1), Q(3, 2), Q(3, 3));
+}
+
+void InitCameraMatrixFromModel(
+ const gxf::CameraModel& intrinsics,
+ cv::Matx33d& camera_matrix) {
+ camera_matrix = {intrinsics.focal_length.x, 0.0, intrinsics.principal_point.x, \
+ 0.0, intrinsics.focal_length.y, intrinsics.principal_point.y, \
+ 0.0, 0.0, 1.0};
+}
+
+void InitDistortionCoefficients(
+ const gxf::CameraModel& intrinsics,
+    cv::Matx<double, 8, 1>& coefficents) {
+ coefficents = {
+ intrinsics.distortion_coefficients[0],
+ intrinsics.distortion_coefficients[1],
+ intrinsics.distortion_coefficients[6],
+ intrinsics.distortion_coefficients[7],
+ intrinsics.distortion_coefficients[2],
+ intrinsics.distortion_coefficients[3],
+ intrinsics.distortion_coefficients[4],
+ intrinsics.distortion_coefficients[5]
+ };
+}
+
+gxf::Expected<void> CopyVideoBuffer(
+    gxf::Handle<gxf::VideoBuffer> input,
+    gxf::Handle<gxf::VideoBuffer> output) {
+ if (!input || !output) {
+ return gxf::Unexpected{GXF_ARGUMENT_NULL};
+ }
+ // Copy image frame using same storage type
+ cudaMemcpyKind operation;
+ switch (input->storage_type()) {
+ case gxf::MemoryStorageType::kHost:
+ case gxf::MemoryStorageType::kSystem: {
+ operation = cudaMemcpyHostToHost;
+ } break;
+ case gxf::MemoryStorageType::kDevice: {
+ operation = cudaMemcpyDeviceToDevice;
+ } break;
+ default:
+ return gxf::Unexpected{GXF_MEMORY_INVALID_STORAGE_MODE};
+ }
+
+ // TODO(tazhang) zero-copy forwarding when GXF implements GXF-490
+ const cudaError_t error = cudaMemcpy(
+ output->pointer(),
+ input->pointer(),
+ input->size(),
+ operation);
+ if (error != cudaSuccess) {
+ GXF_LOG_ERROR("%s", cudaGetErrorString(error));
+ return gxf::Unexpected{GXF_FAILURE};
+ }
+
+ return gxf::Success;
+}
+
+} // namespace
+
+RectifyParamsGenerator::RectifyParamsGenerator() {}
+
+RectifyParamsGenerator::~RectifyParamsGenerator() {}
+
+gxf_result_t RectifyParamsGenerator::registerInterface(gxf::Registrar* registrar) {
+  gxf::Expected<void> result;
+ result &= registrar->parameter(
+ left_camera_input_, "left_camera_input", "Left Camera Model input",
+ "Camera model input for left camera");
+ result &= registrar->parameter(
+ left_camera_output_, "left_camera_output", "Left Camera Model output",
+ "Camera model output for left camera rectification");
+ result &= registrar->parameter(
+ right_camera_input_, "right_camera_input", "Right Camera Model input",
+      "Camera model input for right camera");
+ result &= registrar->parameter(
+ right_camera_output_, "right_camera_output", "Right Camera Model output",
+ "Camera model output for right camera rectification");
+ result &= registrar->parameter(
+ allocator_, "allocator", "Allocator",
+ "Memory allocator");
+ result &= registrar->parameter(
+ alpha_, "alpha", "Alpha",
+ "Free scaling parameter between 0 and 1", 0.0);
+ result &= registrar->parameter(
+ left_camera_model_file_, "left_camera_model_file", "File name for left camera model",
+      "File that contains parameters that define a camera model",
+ gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
+ result &= registrar->parameter(
+ right_camera_model_file_, "right_camera_model_file", "File name for right camera model",
+      "File that contains parameters that define a camera model",
+ gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
+ return gxf::ToResultCode(result);
+}
+
+gxf_result_t RectifyParamsGenerator::start() {
+ use_camera_model_file_ = false;
+ std::string filename[2];
+ filename[0] = left_camera_model_file_.try_get().value_or("");
+ if (filename[0].empty()) {
+ return GXF_SUCCESS;
+ }
+ filename[1] = right_camera_model_file_.try_get().value_or("");
+ if (filename[1].empty()) {
+ return GXF_SUCCESS;
+ }
+
+ for (auto idx = 0; idx < 2; idx++) {
+ const auto maybe_json = ::isaac::serialization::TryLoadJsonFromFile(filename[idx]);
+ if (!maybe_json) {
+ GXF_LOG_ERROR("Failed to read json from file '%s'", filename[idx].c_str());
+ return GXF_ARGUMENT_NULL;
+ }
+ const nlohmann::json& json = *maybe_json;
+ if (!cameraUtils::importCameraIntrinsics(json["model"], camera_intrinsics_[idx])) {
+ GXF_LOG_ERROR("Can't read model from json");
+ return GXF_ARGUMENT_NULL;
+ }
+ camera_intrinsics_[idx].distortion_type = gxf::DistortionType::Polynomial;
+ if (!cameraUtils::importCameraExtrinsics(json["model"], camera_extrinsics_[idx], true)) {
+ GXF_LOG_ERROR("Can't read camera extrinsics from json");
+ return GXF_ARGUMENT_NULL;
+ }
+ cameraUtils::printCameraInfo(camera_intrinsics_[idx], camera_extrinsics_[idx]);
+ }
+
+ use_camera_model_file_ = true;
+ return GXF_SUCCESS;
+}
+
+gxf_result_t RectifyParamsGenerator::tick() {
+  // Per-frame work: read one left and one right camera message, compute the
+  // stereo rectification parameters, then republish both messages augmented
+  // with a "target_camera" (rectified intrinsics) and a
+  // "target_extrinsics_delta" (rectified_camera_rotation_raw_camera).
+
+  // Receive messages
+  CameraMessageParts left_input;
+  CameraMessageParts right_input;
+
+  if (use_camera_model_file_) {
+    // Calibration was loaded from JSON in start(); only the frame, sequence
+    // number, and timestamp are taken from the incoming entities.
+    GXF_RETURN_IF_ERROR(left_camera_input_->receive().assign_to(left_input.entity));
+    left_input.frame = GXF_UNWRAP_OR_RETURN(left_input.entity.get());
+    left_input.sequence_number =
+        GXF_UNWRAP_OR_RETURN(left_input.entity.get("sequence_number"));
+    left_input.timestamp = GXF_UNWRAP_OR_RETURN(left_input.entity.get());
+
+    GXF_RETURN_IF_ERROR(right_camera_input_->receive().assign_to(right_input.entity));
+    right_input.frame = GXF_UNWRAP_OR_RETURN(right_input.entity.get());
+    right_input.sequence_number =
+        GXF_UNWRAP_OR_RETURN(right_input.entity.get("sequence_number"));
+    right_input.timestamp = GXF_UNWRAP_OR_RETURN(right_input.entity.get());
+
+    GXF_RETURN_IF_ERROR(computeRectifyParams(camera_intrinsics_[0], camera_intrinsics_[1],
+                                             camera_extrinsics_[1]));
+
+  } else {
+    // Use the calibration carried on the messages themselves.
+    left_input = GXF_UNWRAP_OR_RETURN(left_camera_input_->receive().map(GetCameraMessage));
+    right_input = GXF_UNWRAP_OR_RETURN(right_camera_input_->receive().map(GetCameraMessage));
+    GXF_RETURN_IF_ERROR(computeRectifyParams(*left_input.intrinsics, *right_input.intrinsics,
+                                             *right_input.extrinsics));
+  }
+
+  // Create left output message
+  CameraMessageParts left_output = GXF_UNWRAP_OR_RETURN(CreateCameraMessage(
+      context(), left_input.frame->video_frame_info(), left_input.frame->size(),
+      left_input.frame->storage_type(), allocator_));
+
+  gxf::Handle left_target_camera =
+      GXF_UNWRAP_OR_RETURN(left_output.entity.add("target_camera"));
+  gxf::Handle left_target_extrinsics =
+      GXF_UNWRAP_OR_RETURN(left_output.entity.add("target_extrinsics_delta"));
+  // We put rectified_camera_rotation_raw_camera here
+  *left_target_extrinsics = rectify_extrinsics_[0];
+  GXF_RETURN_IF_ERROR(CopyVideoBuffer(left_input.frame, left_output.frame));
+  *left_target_camera = rectify_intrinsics_[0];
+  // Source of intrinsics/extrinsics depends on the calibration mode chosen
+  // above (file-based vs message-carried).
+  *left_output.intrinsics = use_camera_model_file_ ? camera_intrinsics_[0] : *left_input.intrinsics;
+  // This puts stereo extrinsics in
+  *left_output.extrinsics = use_camera_model_file_ ? camera_extrinsics_[0] : *left_input.extrinsics;
+  *left_output.sequence_number = *left_input.sequence_number;
+  *left_output.timestamp = *left_input.timestamp;
+
+  // Create right output message (same layout as the left one above).
+  CameraMessageParts right_output = GXF_UNWRAP_OR_RETURN(CreateCameraMessage(
+      context(), right_input.frame->video_frame_info(), right_input.frame->size(),
+      right_input.frame->storage_type(), allocator_));
+  // We put rectified_camera_rotation_raw_camera here
+  gxf::Handle right_target_extrinsics =
+      GXF_UNWRAP_OR_RETURN(right_output.entity.add("target_extrinsics_delta"));
+  *right_target_extrinsics = rectify_extrinsics_[1];
+  gxf::Handle right_target_camera =
+      GXF_UNWRAP_OR_RETURN(right_output.entity.add("target_camera"));
+  GXF_RETURN_IF_ERROR(CopyVideoBuffer(right_input.frame, right_output.frame));
+  *right_target_camera = rectify_intrinsics_[1];
+  *right_output.intrinsics = use_camera_model_file_ ? camera_intrinsics_[1] :
+      *right_input.intrinsics;
+  *right_output.extrinsics =
+      use_camera_model_file_ ? camera_extrinsics_[1] : *right_input.extrinsics;
+  *right_output.sequence_number = *right_input.sequence_number;
+  *right_output.timestamp = *right_input.timestamp;
+
+  // Publish with the input acquisition time so downstream timestamps match.
+  GXF_RETURN_IF_ERROR(left_camera_output_->publish(left_output.entity,
+                                                   left_input.timestamp->acqtime));
+  GXF_RETURN_IF_ERROR(right_camera_output_->publish(right_output.entity,
+                                                    right_input.timestamp->acqtime));
+
+  return GXF_SUCCESS;
+}
+
+gxf::Expected RectifyParamsGenerator::computeRectifyParams(
+    const gxf::CameraModel& left_camera_intrinsics,
+    const gxf::CameraModel& right_camera_intrinsics,
+    const gxf::Pose3D& right_extrinsics) {
+  // Runs cv::stereoRectify on the stereo pair and caches the results in
+  // rectify_intrinsics_ / rectify_extrinsics_ (index 0 = left, 1 = right).
+  // `right_extrinsics` feeds Rlr/Tlr below — presumably the left-to-right
+  // transform expected by cv::stereoRectify; confirm the convention against
+  // the caller.
+  // Image size is taken from the left camera only; both cameras are assumed
+  // to share dimensions (see the shared `dimensions` assignment below).
+  cv::Size imagesize(left_camera_intrinsics.dimensions.x,
+                     left_camera_intrinsics.dimensions.y);
+
+  cv::Matx33d left_intrinsics;
+  InitCameraMatrixFromModel(left_camera_intrinsics, left_intrinsics);
+  cv::Matx left_coefficients;
+  InitDistortionCoefficients(left_camera_intrinsics, left_coefficients);
+
+  cv::Matx33d right_intrinsics;
+  InitCameraMatrixFromModel(right_camera_intrinsics, right_intrinsics);
+  cv::Matx right_coefficients;
+  InitDistortionCoefficients(right_camera_intrinsics, right_coefficients);
+
+  // Stereo baseline translation and row-major 3x3 rotation.
+  cv::Matx31d Tlr = { right_extrinsics.translation[0],
+                      right_extrinsics.translation[1],
+                      right_extrinsics.translation[2]};
+
+  cv::Matx33d Rlr = { right_extrinsics.rotation[0],
+                      right_extrinsics.rotation[1],
+                      right_extrinsics.rotation[2],
+                      right_extrinsics.rotation[3],
+                      right_extrinsics.rotation[4],
+                      right_extrinsics.rotation[5],
+                      right_extrinsics.rotation[6],
+                      right_extrinsics.rotation[7],
+                      right_extrinsics.rotation[8]};
+
+  // R = rectification rotations, P = 3x4 rectified projection matrices,
+  // Q = disparity-to-depth mapping (computed but unused beyond logging).
+  cv::Matx33d R[2];
+  cv::Matx34d P[2];
+  cv::Matx44d Q;
+  cv::stereoRectify(
+      left_intrinsics, left_coefficients,
+      right_intrinsics, right_coefficients,
+      imagesize, Rlr, Tlr,
+      R[0], R[1],
+      P[0], P[1],
+      Q, cv::CALIB_ZERO_DISPARITY, alpha_);
+
+  PrintRectifyParameters(R[0], R[1], P[0], P[1], Q);
+
+  for (int i = 0; i < 2; i++) {
+    rectify_intrinsics_[i].dimensions = left_camera_intrinsics.dimensions;
+
+    // Rectified output is an ideal pinhole: no distortion coefficients.
+    rectify_intrinsics_[i].distortion_type = gxf::DistortionType::Perspective;
+    memset(rectify_intrinsics_[i].distortion_coefficients, 0,
+           rectify_intrinsics_[i].kMaxDistortionCoefficients * sizeof(float));
+
+    // NOTE(review): P[i] is 3x4; indices like (0, 5)/(0, 6) rely on
+    // operator()(row, col) effectively flat-indexing row-major storage
+    // (element 5 == (1,1) fy, 6 == (1,2) cy). OpenCV debug builds assert on
+    // col >= 4 — prefer explicit P[i](1, 1) / P[i](1, 2). The same applies
+    // to the R[i](0, 3..8) reads on the 3x3 rotation below.
+    rectify_intrinsics_[i].focal_length.x = P[i](0, 0);
+    rectify_intrinsics_[i].focal_length.y = P[i](0, 5);
+    rectify_intrinsics_[i].principal_point.x = P[i](0, 2);
+    rectify_intrinsics_[i].principal_point.y = P[i](0, 6);
+    rectify_intrinsics_[i].skew_value = 0;
+
+    // Rotation-only delta: rectification does not translate the camera.
+    rectify_extrinsics_[i].translation = {0, 0, 0};
+    rectify_extrinsics_[i].rotation = {
+        static_cast(R[i](0, 0)),
+        static_cast(R[i](0, 1)),
+        static_cast(R[i](0, 2)),
+        static_cast(R[i](0, 3)),
+        static_cast(R[i](0, 4)),
+        static_cast(R[i](0, 5)),
+        static_cast(R[i](0, 6)),
+        static_cast(R[i](0, 7)),
+        static_cast(R[i](0, 8))};
+
+    cameraUtils::printCameraInfo(rectify_intrinsics_[i], rectify_extrinsics_[i]);
+  }
+
+  return gxf::Success;
+}
+
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.hpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.hpp
new file mode 100644
index 0000000..e0cfb29
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/rectify_params_generator.hpp
@@ -0,0 +1,71 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include
+#include
+#include
+
+#include "gxf/multimedia/camera.hpp"
+#include "gxf/std/allocator.hpp"
+#include "gxf/std/codelet.hpp"
+#include "gxf/std/receiver.hpp"
+#include "gxf/std/transmitter.hpp"
+
+namespace nvidia {
+namespace isaac {
+
+// Takes a camera model as input and generates rectification parameters.
+class RectifyParamsGenerator : public gxf::Codelet {
+ public:
+  // Constructor/destructor declared here, defined in the .cpp.
+  // NOTE(review): the previous comment referenced "AprilTagsData", which
+  // does not appear in this extension — likely a copy-paste leftover.
+  RectifyParamsGenerator();
+  ~RectifyParamsGenerator();
+
+  gxf_result_t registerInterface(gxf::Registrar* registrar) override;
+  gxf_result_t initialize() { return GXF_SUCCESS; }
+  gxf_result_t deinitialize() { return GXF_SUCCESS; }
+
+  // Loads the optional camera-model JSON files; sets use_camera_model_file_.
+  gxf_result_t start() override;
+  // Computes rectification for each stereo pair and republishes the messages.
+  gxf_result_t tick() override;
+  gxf_result_t stop() override { return GXF_SUCCESS; }
+
+ private:
+  // Runs cv::stereoRectify; fills rectify_intrinsics_/rectify_extrinsics_.
+  // NOTE(review): template argument lists appear stripped throughout this
+  // header (e.g. `gxf::Parameter>`) — restore from the original source.
+  gxf::Expected computeRectifyParams(
+      const gxf::CameraModel& left_camera_intrinsics,
+      const gxf::CameraModel& right_camera_intrinsics,
+      const gxf::Pose3D& right_extrinsics);
+
+  gxf::Parameter> left_camera_input_;
+  gxf::Parameter> right_camera_input_;
+  gxf::Parameter> left_camera_output_;
+  gxf::Parameter> right_camera_output_;
+  gxf::Parameter> allocator_;
+  // Optional JSON calibration files; when both are set they override the
+  // intrinsics/extrinsics carried on incoming messages.
+  gxf::Parameter left_camera_model_file_;
+  gxf::Parameter right_camera_model_file_;
+  // Free scaling parameter forwarded to cv::stereoRectify (0..1).
+  gxf::Parameter alpha_;
+
+  // Index 0 = left camera, index 1 = right camera throughout.
+  gxf::CameraModel rectify_intrinsics_[2];
+  gxf::Pose3D rectify_extrinsics_[2];
+  gxf::CameraModel camera_intrinsics_[2];
+  gxf::Pose3D camera_extrinsics_[2];
+  bool use_camera_model_file_;
+};
+
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.cpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.cpp
new file mode 100644
index 0000000..4ffe004
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.cpp
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include "extensions/rectify/components/stereo_extrinsics_normalizer.hpp"
+
+#include "extensions/rectify/utils/utils.hpp"
+#include "gems/gxf_helpers/expected_macro.hpp"
+#include "gxf/multimedia/camera.hpp"
+
+namespace nvidia {
+namespace isaac {
+
+gxf_result_t StereoExtrinsicsNormalizer::registerInterface(gxf::Registrar* registrar) {
+  // Registers the four mandatory message ports: left/right inputs carrying
+  // extrinsics from the upstream undistort stage, and left/right outputs.
+  gxf::Expected result;
+  result &= registrar->parameter(
+      left_camera_input_, "left_camera_input", "Left camera input",
+      "Left camera input containing extrinsics from StreamUndistort module");
+  result &= registrar->parameter(
+      right_camera_input_, "right_camera_input", "Right camera input",
+      "Right camera input containing extrinsics from StreamUndistort module");
+  result &= registrar->parameter(
+      left_camera_output_, "left_camera_output", "Left camera output",
+      "Left camera output containing extrinsics from StreamUndistort module");
+  result &= registrar->parameter(
+      right_camera_output_, "right_camera_output", "Right camera output",
+      "Right camera output containing extrinsics from StreamUndistort module");
+  return gxf::ToResultCode(result);
+}
+
+gxf::Expected StereoExtrinsicsNormalizer::postProcessTransformation(
+    gxf::Entity& input_left_message, gxf::Entity& input_right_message) {
+  // Rewrites the "extrinsics" component of both messages so that, after
+  // rectification:
+  //   left  "extrinsics" = origin_T_rectified_left
+  //   right "extrinsics" = rectified_right_T_rectified_left
+  // using the per-camera "target_extrinsics_delta" (rectified_T_raw) added
+  // upstream, then publishes both messages.
+
+  // Modify extrinsics information for left camera
+  gxf::Handle left_camera_extrinsics =
+      GXF_UNWRAP_OR_RETURN(input_left_message.get("extrinsics"));
+  // Modify extrinsics information for right camera
+  gxf::Handle right_camera_extrinsics =
+      GXF_UNWRAP_OR_RETURN(input_right_message.get("extrinsics"));
+  // Modify target extrinsics information for left camera
+  gxf::Handle left_camera_target_extrinsics =
+      GXF_UNWRAP_OR_RETURN(input_left_message.get("target_extrinsics_delta"));
+  // Modify target extrinsics information for right camera
+  gxf::Handle right_camera_target_extrinsics =
+      GXF_UNWRAP_OR_RETURN(input_right_message.get("target_extrinsics_delta"));
+
+  // Do post processing of extrinsic data. Naming convention: a_T_b maps
+  // points in frame b to frame a.
+  ::isaac::Pose3d left_T_origin =
+      ConvertToIsaacPose(*left_camera_extrinsics.get());
+  ::isaac::Pose3d right_T_left =
+      ConvertToIsaacPose(*right_camera_extrinsics.get());
+  ::isaac::Pose3d left_rectified_T_left =
+      ConvertToIsaacPose(*left_camera_target_extrinsics.get());
+  ::isaac::Pose3d right_rectified_T_right =
+      ConvertToIsaacPose(*right_camera_target_extrinsics.get());
+  // Chain: rectified-right <- right <- left <- rectified-left.
+  ::isaac::Pose3d rectified_right_T_rectified_left =
+      right_rectified_T_right * right_T_left * left_rectified_T_left.inverse();
+
+  gxf::Pose3D processed_right_extrinsics = ConvertToGxfPose(
+      rectified_right_T_rectified_left);
+  ::isaac::Pose3d origin_T_rectified_left = (left_rectified_T_left * left_T_origin).inverse();
+
+  gxf::Pose3D processed_left_extrinsics = ConvertToGxfPose(
+      origin_T_rectified_left);
+
+  // Overwrite the messages' extrinsics in place.
+  *left_camera_extrinsics.get() = processed_left_extrinsics;
+  *right_camera_extrinsics.get() = processed_right_extrinsics;
+
+  // Publish messages to left camera output and right camera output
+  return left_camera_output_->publish(input_left_message).and_then([&] {
+    return right_camera_output_->publish(input_right_message);
+  });
+}
+
+gxf_result_t StereoExtrinsicsNormalizer::tick() {
+  // Pulls one message from each input and runs the extrinsics
+  // normalization, which also publishes both results.
+  // Receiving the left data
+  gxf::Entity input_left_message = GXF_UNWRAP_OR_RETURN(left_camera_input_->receive());
+  // Receiving the right camera data
+  gxf::Entity input_right_message = GXF_UNWRAP_OR_RETURN(right_camera_input_->receive());
+  return gxf::ToResultCode(postProcessTransformation(input_left_message, input_right_message));
+}
+
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.hpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.hpp
new file mode 100644
index 0000000..b0445ce
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/components/stereo_extrinsics_normalizer.hpp
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include
+#include
+
+#include "gxf/std/codelet.hpp"
+#include "gxf/std/receiver.hpp"
+#include "gxf/std/transmitter.hpp"
+
+namespace nvidia {
+namespace isaac {
+
+// Takes input messages from the Undistort module and applies post-processing to the extrinsics messages.
+// The post processing outputs a GXF message under "extrinsics" (gxf::Pose3D) for both left and
+// right imagers such that:
+// 1. rectified_right_T_rectified_left: Position of rectified left camera with respect to the
+// rectified right camera
+// 2. origin_T_rectified_left: Position of the rectified left camera with respect to
+// the stereo camera origin
+class StereoExtrinsicsNormalizer : public gxf::Codelet {
+ public:
+  gxf_result_t registerInterface(gxf::Registrar* registrar) override;
+  gxf_result_t initialize() { return GXF_SUCCESS; }
+  gxf_result_t deinitialize() { return GXF_SUCCESS; }
+
+  gxf_result_t start() override { return GXF_SUCCESS; }
+  // Receives one left and one right message per tick and forwards them
+  // through postProcessTransformation().
+  gxf_result_t tick() override;
+  gxf_result_t stop() override { return GXF_SUCCESS; }
+
+ private:
+  // Rewrites both messages' "extrinsics" components (see class comment)
+  // and publishes them on the output ports.
+  gxf::Expected postProcessTransformation(
+      gxf::Entity& input_left_message, gxf::Entity& input_right_message);
+
+  // Message ports (NOTE(review): template arguments appear stripped here,
+  // e.g. `gxf::Parameter>` — restore from the original source).
+  gxf::Parameter> left_camera_input_;
+  gxf::Parameter> right_camera_input_;
+  gxf::Parameter> left_camera_output_;
+  gxf::Parameter> right_camera_output_;
+};
+
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/rectify.cpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/rectify.cpp
new file mode 100644
index 0000000..3d31e22
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/rectify.cpp
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include "extensions/rectify/components/rectify_params_generator.hpp"
+#include "extensions/rectify/components/stereo_extrinsics_normalizer.hpp"
+#include "gxf/std/extension_factory_helper.hpp"
+
+GXF_EXT_FACTORY_BEGIN()
+
+GXF_EXT_FACTORY_SET_INFO(0xd8d7816ec0485ad4, 0xff795a414bd445ca, "RECTIFY",
+                         "Extension containing components for rectify",
+                         "Isaac SDK", "1.0.0", "LICENSE");
+
+GXF_EXT_FACTORY_ADD(0xa9ddb12454d54aeb, 0x9739263d9fd1f635,
+                    nvidia::isaac::RectifyParamsGenerator, nvidia::gxf::Codelet,
+                    "Generate rectification parameters.");
+
+// NOTE: the two codelet UUIDs intentionally differ by one hex digit
+// (0x9739... vs 0x9738...); keep them distinct when adding components.
+GXF_EXT_FACTORY_ADD(0xa9ddb12454d54aeb, 0x9738263d9fd1f635,
+                    nvidia::isaac::StereoExtrinsicsNormalizer, nvidia::gxf::Codelet,
+                    "Generate rectification undistortion post processing parameters.");
+
+GXF_EXT_FACTORY_END()
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.cpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.cpp
new file mode 100644
index 0000000..0e86a84
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.cpp
@@ -0,0 +1,190 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include "extensions/rectify/utils/camera_utils.hpp"
+
+#include
+#include
+#include
+
+#include "engine/gems/serialization/json.hpp"
+#include "engine/gems/serialization/json_formatter.hpp"
+#include "gxf/multimedia/camera.hpp"
+#include "gxf/std/codelet.hpp"
+
+namespace nvidia {
+
+namespace isaac {
+
+namespace cameraUtils {
+using Json = nlohmann::json;
+
+bool importCameraIntrinsics(const Json& json, gxf::CameraModel& intrinsics) {
+  // Fills `intrinsics` (dimensions, focal length, principal point, skew,
+  // distortion model and coefficients) from a camera-model JSON object.
+  // Returns true on success; on a missing key, short array, or type
+  // mismatch it logs the json error and returns false (`intrinsics` may be
+  // partially written in that case).
+  try {
+    // Use .at() consistently: unlike operator[] on a const json, it throws
+    // (caught below) instead of asserting when a key is absent.
+    intrinsics.dimensions.x = json.at("dimensions").at("width");
+    intrinsics.dimensions.y = json.at("dimensions").at("height");
+    intrinsics.focal_length.x = json.at("focal_length").at("fx");
+    intrinsics.focal_length.y = json.at("focal_length").at("fy");
+    intrinsics.principal_point.x = json.at("principal_point").at("cx");
+    intrinsics.principal_point.y = json.at("principal_point").at("cy");
+    intrinsics.skew_value = json.at("skew");
+
+    // Map the textual distortion type onto the GXF enum. Unrecognized
+    // strings leave `intrinsics.distortion_type` unchanged.
+    std::string type = json.at("distortion_type");
+    if (!type.compare("Polynomial"))
+      intrinsics.distortion_type = gxf::DistortionType::Polynomial;
+    else if (!type.compare("Perspective"))
+      intrinsics.distortion_type = gxf::DistortionType::Perspective;
+    else if (!type.compare("Equidistant"))
+      intrinsics.distortion_type = gxf::DistortionType::FisheyeEquidistant;
+    else if (!type.compare("Equisolid"))
+      intrinsics.distortion_type = gxf::DistortionType::FisheyeEquisolid;
+    else if (!type.compare("OrthoGraphic"))
+      intrinsics.distortion_type = gxf::DistortionType::FisheyeOrthoGraphic;
+    else if (!type.compare("Stereographic"))
+      intrinsics.distortion_type = gxf::DistortionType::FisheyeStereographic;
+    else if (!type.compare("Brown"))
+      intrinsics.distortion_type = gxf::DistortionType::Brown;
+    const auto& coefficients = json.at("distortion_coefficients");
+    for (auto idx = 0; idx < intrinsics.kMaxDistortionCoefficients; idx++) {
+      // .at(idx) throws (instead of invoking UB) if the array is shorter
+      // than kMaxDistortionCoefficients.
+      intrinsics.distortion_coefficients[idx] = coefficients.at(idx);
+    }
+  } catch (const Json::exception& e) {
+    // Json::exception also covers type_error, which the previous
+    // out_of_range-only handler let escape uncaught.
+    LOG_WARNING(e.what());
+    return false;
+  }
+  return true;
+}
+
+bool importCameraExtrinsics(const Json& json,
+                            gxf::Pose3D& extrinsics, bool use_rodrigez) {
+  // Fills `extrinsics` (translation + row-major 3x3 rotation) from a
+  // camera-model JSON object. When `use_rodrigez` is true the rotation is
+  // read as a 3-element Rodrigues vector under "rotation_rodrigez" (the
+  // spelling is part of the on-disk format) and expanded via cv::Rodrigues;
+  // otherwise the 9-element "rotation" array is copied directly. Returns
+  // false after logging on any missing or wrongly-typed field.
+  try {
+    for (auto idx = 0; idx < 3; idx++) {
+      // .at(idx) throws on a short array instead of invoking UB.
+      extrinsics.translation[idx] = json.at("translation").at(idx);
+    }
+    if (use_rodrigez) {
+      cv::Matx33d Rlr;
+      cv::Matx31d aux(json.at("rotation_rodrigez").at(0),
+                      json.at("rotation_rodrigez").at(1),
+                      json.at("rotation_rodrigez").at(2));
+      cv::Rodrigues(aux, Rlr);
+      for (auto j = 0; j < 9; j++) {
+        extrinsics.rotation[j] = Rlr(j / 3, j % 3);
+      }
+    } else {
+      for (auto idx = 0; idx < 9; idx++) {
+        extrinsics.rotation[idx] = json.at("rotation").at(idx);
+      }
+    }
+  } catch (const Json::exception& e) {
+    // Broadened from Json::out_of_range so type mismatches are also
+    // reported here instead of escaping as uncaught exceptions.
+    LOG_WARNING(e.what());
+    return false;
+  }
+  return true;
+}
+
+void printCameraInfo(const gxf::CameraModel& intrinsics,
+                     const gxf::Pose3D& extrinsics) {
+  // Dumps a single camera's intrinsics and extrinsics at DEBUG log level.
+  // Only the first 8 distortion coefficients are printed.
+  GXF_LOG_DEBUG("intrinsics dimension: (%d, %d)", intrinsics.dimensions.x,
+                intrinsics.dimensions.y);
+  GXF_LOG_DEBUG("intrinsics principal_point: (%4.6f, %4.6f)", intrinsics.principal_point.x,
+                intrinsics.principal_point.y);
+  GXF_LOG_DEBUG("intrinsics focal_length: (%4.6f, %4.6f)", intrinsics.focal_length.x,
+                intrinsics.focal_length.y);
+  GXF_LOG_DEBUG("skew value: %g", intrinsics.skew_value);
+  GXF_LOG_DEBUG("distortion coeffs: [%g, %g, %g, %g, %g, %g, %g, %g]",
+                intrinsics.distortion_coefficients[0],
+                intrinsics.distortion_coefficients[1],
+                intrinsics.distortion_coefficients[2],
+                intrinsics.distortion_coefficients[3],
+                intrinsics.distortion_coefficients[4],
+                intrinsics.distortion_coefficients[5],
+                intrinsics.distortion_coefficients[6],
+                intrinsics.distortion_coefficients[7]);
+
+  GXF_LOG_DEBUG("translation matrix:[%g, %g, %g]",
+                extrinsics.translation[0],
+                extrinsics.translation[1],
+                extrinsics.translation[2]);
+  GXF_LOG_DEBUG("rotation matrix:[%g, %g, %g, %g, %g, %g, %g, %g, %g]",
+                extrinsics.rotation[0],
+                extrinsics.rotation[1],
+                extrinsics.rotation[2],
+                extrinsics.rotation[3],
+                extrinsics.rotation[4],
+                extrinsics.rotation[5],
+                extrinsics.rotation[6],
+                extrinsics.rotation[7],
+                extrinsics.rotation[8]);
+}
+
+void printCameraInfo(const gxf::CameraModel& source_intrinsics,
+                     const gxf::CameraModel& target_intrinsics,
+                     const gxf::Pose3D& extrinsics) {
+  // Dumps a source/target camera pair plus the extrinsics between them at
+  // DEBUG log level. Only the first 8 distortion coefficients are printed
+  // for each camera.
+  GXF_LOG_DEBUG("=========SOURCE CAMERA===========");
+  GXF_LOG_DEBUG("intrinsics dimension: (%d, %d)", source_intrinsics.dimensions.x,
+                source_intrinsics.dimensions.y);
+  GXF_LOG_DEBUG("intrinsics principal_point: (%4.6f, %4.6f)", source_intrinsics.principal_point.x,
+                source_intrinsics.principal_point.y);
+  GXF_LOG_DEBUG("intrinsics focal_length: (%4.6f, %4.6f)", source_intrinsics.focal_length.x,
+                source_intrinsics.focal_length.y);
+  GXF_LOG_DEBUG("skew value: %g", source_intrinsics.skew_value);
+  GXF_LOG_DEBUG("distortion coeffs: [%g, %g, %g, %g, %g, %g, %g, %g]",
+                source_intrinsics.distortion_coefficients[0],
+                source_intrinsics.distortion_coefficients[1],
+                source_intrinsics.distortion_coefficients[2],
+                source_intrinsics.distortion_coefficients[3],
+                source_intrinsics.distortion_coefficients[4],
+                source_intrinsics.distortion_coefficients[5],
+                source_intrinsics.distortion_coefficients[6],
+                source_intrinsics.distortion_coefficients[7]);
+
+  GXF_LOG_DEBUG("=========TARGET CAMERA===========");
+  GXF_LOG_DEBUG("intrinsics dimension: (%d, %d)", target_intrinsics.dimensions.x,
+                target_intrinsics.dimensions.y);
+  GXF_LOG_DEBUG("intrinsics principal_point: (%4.6f, %4.6f)", target_intrinsics.principal_point.x,
+                target_intrinsics.principal_point.y);
+  GXF_LOG_DEBUG("intrinsics focal_length: (%4.6f, %4.6f)", target_intrinsics.focal_length.x,
+                target_intrinsics.focal_length.y);
+  GXF_LOG_DEBUG("skew value: %g", target_intrinsics.skew_value);
+  GXF_LOG_DEBUG("distortion coeffs: [%g, %g, %g, %g, %g, %g, %g, %g]",
+                target_intrinsics.distortion_coefficients[0],
+                target_intrinsics.distortion_coefficients[1],
+                target_intrinsics.distortion_coefficients[2],
+                target_intrinsics.distortion_coefficients[3],
+                target_intrinsics.distortion_coefficients[4],
+                target_intrinsics.distortion_coefficients[5],
+                target_intrinsics.distortion_coefficients[6],
+                target_intrinsics.distortion_coefficients[7]);
+
+  GXF_LOG_DEBUG("=========EXTRINSICS===========");
+  GXF_LOG_DEBUG("translation matrix: [%g, %g, %g]",
+                extrinsics.translation[0],
+                extrinsics.translation[1],
+                extrinsics.translation[2]);
+  GXF_LOG_DEBUG("rotation matrix: [%g, %g, %g, %g, %g, %g, %g, %g, %g]",
+                extrinsics.rotation[0],
+                extrinsics.rotation[1],
+                extrinsics.rotation[2],
+                extrinsics.rotation[3],
+                extrinsics.rotation[4],
+                extrinsics.rotation[5],
+                extrinsics.rotation[6],
+                extrinsics.rotation[7],
+                extrinsics.rotation[8]);
+}
+
+} // namespace cameraUtils
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.hpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.hpp
new file mode 100644
index 0000000..03f1b92
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/camera_utils.hpp
@@ -0,0 +1,36 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include "engine/gems/serialization/json.hpp"
+#include "gxf/multimedia/camera.hpp"
+
+namespace nvidia {
+namespace isaac {
+namespace cameraUtils {
+// Parses intrinsics fields from a camera-model JSON object; returns false
+// (after logging) on a missing/invalid field.
+bool importCameraIntrinsics(const nlohmann::json& json, gxf::CameraModel& intrinsics);
+// Parses translation/rotation (Rodrigues form when use_rodrigez is true)
+// from a camera-model JSON object; returns false on a missing/invalid field.
+bool importCameraExtrinsics(const nlohmann::json& json, gxf::Pose3D& extrinsics,
+                            bool use_rodrigez);
+// Logs one camera's intrinsics and extrinsics at DEBUG level.
+void printCameraInfo(const gxf::CameraModel& intrinsics,
+                     const gxf::Pose3D& extrinsics);
+// Logs a source/target camera pair and their extrinsics at DEBUG level.
+void printCameraInfo(const gxf::CameraModel& source_intrinsics,
+                     const gxf::CameraModel& target_intrinsics,
+                     const gxf::Pose3D& extrinsics);
+
+} // namespace cameraUtils
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.cpp b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.cpp
new file mode 100644
index 0000000..8dfffa3
--- /dev/null
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.cpp
@@ -0,0 +1,54 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include "extensions/rectify/utils/utils.hpp"
+
+#include "engine/core/math/pose3.hpp"
+#include "gxf/multimedia/camera.hpp"
+namespace nvidia {
+namespace isaac {
+
+// TODO(kchahal) remove copy of ConvertToGxfPose and ConvertToIsaacPose in
+// `calibration/file/utils.cpp`
+gxf::Pose3D ConvertToGxfPose(const ::isaac::Pose3d& pose) {
+  // Converts an Isaac Pose3d to a GXF Pose3D: the SO3 rotation is expanded
+  // into a row-major 3x3 matrix and the translation is copied.
+  // NOTE(review): the static_cast target types appear stripped here
+  // (presumably <float>, matching gxf::Pose3D storage) — restore from the
+  // original source before compiling.
+  gxf::Pose3D res;
+  auto matrix = pose.rotation.matrix();
+  for (int i = 0; i < 3; i++) {
+    for (int j = 0; j < 3; j++) {
+      res.rotation[(i * 3) + j] = matrix(i, j);
+    }
+  }
+  res.translation = {
+      static_cast(pose.translation.x()), static_cast(pose.translation.y()),
+      static_cast(pose.translation.z())};
+  return res;
+}
+
+::isaac::Pose3d ConvertToIsaacPose(const gxf::Pose3D& pose) {
+  // Converts a GXF Pose3D to an Isaac Pose3d: rebuilds the row-major 3x3
+  // rotation matrix, converts it through a quaternion into SO3, and copies
+  // the translation.
+  ::isaac::Matrix3d matrix;
+  for (int i = 0; i < 3; i++) {
+    for (int j = 0; j < 3; j++) {
+      matrix(i, j) = pose.rotation[(i * 3) + j];
+    }
+  }
+  auto quaternion = ::isaac::SO3d::FromQuaternion(::isaac::Quaterniond(matrix));
+  auto translation =
+      ::isaac::Vector3d(pose.translation[0], pose.translation[1], pose.translation[2]);
+  return ::isaac::Pose3d{quaternion, translation};
+}
+
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorList.h b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.hpp
similarity index 55%
rename from isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorList.h
rename to isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.hpp
index 4ec6695..30f9966 100644
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorList.h
+++ b/isaac_ros_image_proc/gxf/rectify_params_generator/extensions/rectify/utils/utils.hpp
@@ -1,5 +1,5 @@
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,24 +14,19 @@
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
+#pragma once
-#ifndef CVCORE_TENSORLIST_H
-#define CVCORE_TENSORLIST_H
+#include "engine/core/math/pose3.hpp"
+#include "gxf/multimedia/camera.hpp"
-#include
+namespace nvidia {
+namespace isaac {
-#include "Traits.h"
-#include "Array.h"
-#include "Tensor.h"
+// Converts an isaac pose object to a GXF pose object
+gxf::Pose3D ConvertToGxfPose(const ::isaac::Pose3d& pose);
-namespace cvcore {
+// Converts GXFPose to Isaac pose
+::isaac::Pose3d ConvertToIsaacPose(const gxf::Pose3D& pose);
-/**
- * @brief Implementation of a list of tensors of the same rank but, potentially, different dimensions
- */
-template
-using TensorList = typename std::enable_if::value, Array>::type;
-
-} // namespace cvcore
-
-#endif // CVCORE_TENSORLIST_H
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/CMakeLists.txt b/isaac_ros_image_proc/gxf/tensorops/CMakeLists.txt
index 2792795..f07564b 100644
--- a/isaac_ros_image_proc/gxf/tensorops/CMakeLists.txt
+++ b/isaac_ros_image_proc/gxf/tensorops/CMakeLists.txt
@@ -25,105 +25,121 @@ endif()
find_package(CUDAToolkit)
find_package(vpi REQUIRED)
find_package(GXF ${ISAAC_ROS_GXF_VERSION} MODULE REQUIRED
+ core
cuda
multimedia
)
# Create extension
add_library(gxf_tensorops SHARED
- CameraModel.cpp
- CameraModel.hpp
- CMakeLists.txt
- ConvertColorFormat.cpp
- ConvertColorFormat.hpp
- CropAndResize.cpp
- CropAndResize.hpp
- Frame3D.cpp
- Frame3D.hpp
- ImageAdapter.cpp
- ImageAdapter.hpp
- ImageUtils.cpp
- ImageUtils.hpp
- InterleavedToPlanar.cpp
- InterleavedToPlanar.hpp
- Normalize.cpp
- Normalize.hpp
- Reshape.cpp
- Reshape.hpp
- Resize.cpp
- Resize.hpp
- TensorOperator.cpp
- TensorOperator.hpp
- TensorOps.cpp
- TensorStream.cpp
- TensorStream.hpp
- Undistort.cpp
- Undistort.hpp
- detail/ImageAdapterTensorImpl.cpp
- detail/ImageAdapterTensorImpl.hpp
- detail/ImageAdapterVideoBufferImpl.cpp
- detail/ImageAdapterVideoBufferImpl.hpp
+ extensions/tensorops/tensorops.cpp
+ extensions/tensorops/components/CameraModel.cpp
+ extensions/tensorops/components/CameraModel.hpp
+ extensions/tensorops/components/ConvertColorFormat.cpp
+ extensions/tensorops/components/ConvertColorFormat.hpp
+ extensions/tensorops/components/CropAndResize.cpp
+ extensions/tensorops/components/CropAndResize.hpp
+ extensions/tensorops/components/Frame3D.cpp
+ extensions/tensorops/components/Frame3D.hpp
+ extensions/tensorops/components/ImageAdapter.cpp
+ extensions/tensorops/components/ImageAdapter.hpp
+ extensions/tensorops/components/ImageUtils.cpp
+ extensions/tensorops/components/ImageUtils.hpp
+ extensions/tensorops/components/InterleavedToPlanar.cpp
+ extensions/tensorops/components/InterleavedToPlanar.hpp
+ extensions/tensorops/components/Normalize.cpp
+ extensions/tensorops/components/Normalize.hpp
+ extensions/tensorops/components/Reshape.cpp
+ extensions/tensorops/components/Reshape.hpp
+ extensions/tensorops/components/Resize.cpp
+ extensions/tensorops/components/Resize.hpp
+ extensions/tensorops/components/TensorOperator.cpp
+ extensions/tensorops/components/TensorOperator.hpp
+ extensions/tensorops/components/TensorStream.cpp
+ extensions/tensorops/components/TensorStream.hpp
+ extensions/tensorops/components/Undistort.cpp
+ extensions/tensorops/components/Undistort.hpp
+ extensions/tensorops/components/detail/ImageAdapterTensorImpl.cpp
+ extensions/tensorops/components/detail/ImageAdapterTensorImpl.hpp
+ extensions/tensorops/components/detail/ImageAdapterVideoBufferImpl.cpp
+ extensions/tensorops/components/detail/ImageAdapterVideoBufferImpl.hpp
)
-target_include_directories(gxf_tensorops PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cvcore/include)
+target_include_directories(gxf_tensorops PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
-add_library(cvcore STATIC
+add_library(corelib STATIC
# Tensorops
- cvcore/src/tensor_ops/ArithmeticOperations.cpp
- cvcore/src/tensor_ops/BBoxUtils.cpp
- cvcore/src/tensor_ops/ColorConversions.cpp
- cvcore/src/tensor_ops/DBScan.cpp
- cvcore/src/tensor_ops/Errors.cpp
- cvcore/src/tensor_ops/Filters.cpp
- cvcore/src/tensor_ops/Filters.h
- cvcore/src/tensor_ops/FusedOperations.cpp
- cvcore/src/tensor_ops/GeometryTransforms.cpp
- cvcore/src/tensor_ops/IImageWarp.cpp
- cvcore/src/tensor_ops/NppUtils.cpp
- cvcore/src/tensor_ops/NppUtils.h
- cvcore/src/tensor_ops/OneEuroFilter.cpp
- cvcore/src/tensor_ops/TensorOperators.cpp
-
- # Tensorops/VPI
- cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.cpp
- cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.h
- cvcore/src/tensor_ops/vpi/VPIEnumMapping.h
- cvcore/src/tensor_ops/vpi/VPIImageWarp.h
- cvcore/src/tensor_ops/vpi/VPIRemapImpl.cpp
- cvcore/src/tensor_ops/vpi/VPIRemapImpl.h
- cvcore/src/tensor_ops/vpi/VPIResizeImpl.cpp
- cvcore/src/tensor_ops/vpi/VPIResizeImpl.h
- cvcore/src/tensor_ops/vpi/VPIStatusMapping.cpp
- cvcore/src/tensor_ops/vpi/VPIStatusMapping.h
- cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.cpp
- cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.h
- cvcore/src/tensor_ops/vpi/VPITensorOperators.cpp
- cvcore/src/tensor_ops/vpi/VPITensorOperators.h
-
- # Core
- cvcore/src/core/cvcore/Array.cpp
- cvcore/src/core/cvcore/Dummy.cu
- cvcore/src/core/cvcore/MathTypes.cpp
- cvcore/src/core/cvcore/Tensor.cpp
- cvcore/src/core/utility/CVError.cpp
- cvcore/src/core/utility/Instrumentation.cpp
- cvcore/src/core/utility/Memory.cpp
- cvcore/src/core/utility/ProfileUtils.cpp
+ extensions/tensorops/core/ArithmeticOperations.cpp
+ extensions/tensorops/core/BBoxUtils.cpp
+ extensions/tensorops/core/ColorConversions.cpp
+ extensions/tensorops/core/DBScan.cpp
+ extensions/tensorops/core/Errors.cpp
+ extensions/tensorops/core/Filters.cpp
+ extensions/tensorops/core/FusedOperations.cpp
+ extensions/tensorops/core/GeometryTransforms.cpp
+ extensions/tensorops/core/IImageWarp.cpp
+ extensions/tensorops/core/OneEuroFilter.cpp
+ extensions/tensorops/core/TensorOperators.cpp
+ extensions/tensorops/core/VPIColorConvertImpl.cpp
+ extensions/tensorops/core/VPIRemapImpl.cpp
+ extensions/tensorops/core/VPIResizeImpl.cpp
+ extensions/tensorops/core/VPIStatusMapping.cpp
+ extensions/tensorops/core/VPIStereoDisparityEstimatorImpl.cpp
+ extensions/tensorops/core/VPITensorOperators.cpp
+ extensions/tensorops/core/BBoxUtils.h
+ extensions/tensorops/core/DBScan.h
+ extensions/tensorops/core/Errors.h
+ extensions/tensorops/core/Filters.h
+ extensions/tensorops/core/IImageWarp.h
+ extensions/tensorops/core/ITensorOperatorContext.h
+ extensions/tensorops/core/ITensorOperatorStream.h
+ extensions/tensorops/core/ImageUtils.h
+ extensions/tensorops/core/OneEuroFilter.h
+ extensions/tensorops/core/TensorOperators.h
+ extensions/tensorops/core/VPIColorConvertImpl.h
+ extensions/tensorops/core/VPIEnumMapping.h
+ extensions/tensorops/core/VPIImageWarp.h
+ extensions/tensorops/core/VPIRemapImpl.h
+ extensions/tensorops/core/VPIResizeImpl.h
+ extensions/tensorops/core/VPIStatusMapping.h
+ extensions/tensorops/core/VPIStereoDisparityEstimatorImpl.h
+ extensions/tensorops/core/VPITensorOperators.h
+ extensions/tensorops/core/NppUtils.cpp
+ extensions/tensorops/core/NppUtils.h
+ extensions/tensorops/core/Filters.cpp
+ extensions/tensorops/core/Filters.h
+ extensions/tensorops/core/Array.cpp
+ extensions/tensorops/core/CVError.cpp
+ extensions/tensorops/core/MathTypes.cpp
+ extensions/tensorops/core/Memory.cpp
+ extensions/tensorops/core/Tensor.cpp
+ extensions/tensorops/core/Array.h
+ extensions/tensorops/core/BBox.h
+ extensions/tensorops/core/CVError.h
+ extensions/tensorops/core/CameraModel.h
+ extensions/tensorops/core/ComputeEngine.h
+ extensions/tensorops/core/Core.h
+ extensions/tensorops/core/Image.h
+ extensions/tensorops/core/MathTypes.h
+ extensions/tensorops/core/Memory.h
+ extensions/tensorops/core/Tensor.h
+ extensions/tensorops/core/Traits.h
)
-target_include_directories(cvcore PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cvcore/include)
-target_compile_definitions(cvcore PRIVATE -DENABLE_VPI=1)
+target_include_directories(corelib PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+# target_compile_definitions(corelib PRIVATE -DENABLE_VPI=1)
-target_link_libraries(cvcore PUBLIC
- CUDA::cudart
- CUDA::nppc
- CUDA::nppial
- CUDA::nppicc
- CUDA::nppidei
- CUDA::nppif
- CUDA::nppig
- CUDA::nppisu
- vpi
+target_link_libraries(corelib
+ PUBLIC
+ CUDA::cudart
+ CUDA::nppc
+ CUDA::nppial
+ CUDA::nppicc
+ CUDA::nppidei
+ CUDA::nppif
+ CUDA::nppig
+ CUDA::nppisu
+ vpi
)
target_link_libraries(gxf_tensorops
@@ -132,5 +148,5 @@ target_link_libraries(gxf_tensorops
GXF::multimedia
yaml-cpp
PRIVATE
- cvcore
+ corelib
)
diff --git a/isaac_ros_image_proc/gxf/tensorops/ConvertColorFormat.cpp b/isaac_ros_image_proc/gxf/tensorops/ConvertColorFormat.cpp
deleted file mode 100644
index 074e013..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/ConvertColorFormat.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "ConvertColorFormat.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT>
-gxf_result_t ConvertColorFormatImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle<ImageAdapter> output_adapter, gxf::Handle<ImageAdapter> input_adapter,
- gxf::Handle<gxf::Allocator> allocator,
- ::cvcore::tensor_ops::ColorConversionType type, cudaStream_t stream) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
-
- ::cvcore::tensor_ops::ConvertColorFormat(output_image.value(), input_image.value(), type, stream);
- return GXF_SUCCESS;
-}
-
-template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT>
-gxf_result_t ConvertColorFormatStreamImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle<TensorStream> stream, gxf::Handle<ImageAdapter> output_adapter,
- gxf::Handle<ImageAdapter> input_adapter,
- gxf::Handle<gxf::Allocator> allocator) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
-
- auto err_code = stream->getStream()->ColorConvert(output_image.value(), input_image.value());
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("color conversion operation failed.");
- return GXF_FAILURE;
- }
-
- return GXF_SUCCESS;
-}
-
-} // namespace detail
-
-template<bool USE_TENSOR_STREAM>
-gxf_result_t ConvertColorFormatBase<USE_TENSOR_STREAM>::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected<void> result;
-
- result &= registrar->parameter(output_type_, "output_type");
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_, "stream", "tensor stream", "tensor stream object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-template<bool USE_TENSOR_STREAM>
-gxf::Expected<ImageInfo> ConvertColorFormatBase<USE_TENSOR_STREAM>::doInferOutputInfo(gxf::Entity& input) {
- // Set output type
- auto output_type = GetImageTypeFromString(output_type_);
- if (!output_type) {
- return gxf::Unexpected{GXF_FAILURE};
- }
- // Check if no-op is needed
- no_op_ = output_type.value() == input_info_.type;
- return ImageInfo{output_type.value(), input_info_.width, input_info_.height, input_info_.is_cpu};
-}
-
-template<bool USE_TENSOR_STREAM>
-gxf_result_t ConvertColorFormatBase<USE_TENSOR_STREAM>::doUpdateCameraMessage(gxf::Handle<gxf::CameraModel>& output,
- gxf::Handle<gxf::CameraModel>& input) {
- *output = *input;
- return GXF_SUCCESS;
-}
-
-#define DEFINE_CONVERT_COLOR_FORMAT(INPUT_TYPE, OUTPUT_TYPE, CONVERSION_TYPE) \
- if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \
- return detail::ConvertColorFormatImpl( \
- output, input, output_info_, input_info_, output_name, input_name, output_adapter_.get(), input_adapter_.get(), \
- pool_.get(), CONVERSION_TYPE, stream); \
- }
-
-#define DEFINE_STREAM_CONVERT_COLOR_FORMAT(INPUT_TYPE, OUTPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \
- return detail::ConvertColorFormatStreamImpl( \
- output, input, output_info_, input_info_, output_name, input_name, stream_.try_get().value(), \
- output_adapter_.get(), input_adapter_.get(), pool_.get()); \
- }
-
-template<>
-gxf_result_t ConvertColorFormatBase<true>::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute convert color format");
-
- // Run the color conversion operation
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::BGR_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::RGB_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV12, ::cvcore::ImageType::BGR_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::NV12);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV12, ::cvcore::ImageType::RGB_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::NV12);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV24, ::cvcore::ImageType::BGR_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::NV24);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV24, ::cvcore::ImageType::RGB_U8);
- DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::NV24);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image color conversion.");
- return GXF_FAILURE;
-}
-
-template<>
-gxf_result_t ConvertColorFormatBase<false>::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute convert color format");
-
- // Run the color conversion operation
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::BGR_U8,
- ::cvcore::tensor_ops::ColorConversionType::RGB2BGR);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::BGR_U16,
- ::cvcore::tensor_ops::ColorConversionType::RGB2BGR);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::BGR_F32,
- ::cvcore::tensor_ops::ColorConversionType::RGB2BGR);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::RGB_U8,
- ::cvcore::tensor_ops::ColorConversionType::BGR2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::RGB_U16,
- ::cvcore::tensor_ops::ColorConversionType::BGR2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::RGB_F32,
- ::cvcore::tensor_ops::ColorConversionType::BGR2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::Y_U8,
- ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::Y_U16,
- ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::Y_F32,
- ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::Y_U8,
- ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::Y_U16,
- ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::Y_F32,
- ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::RGB_U8,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::RGB_U16,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::RGB_F32,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::BGR_U8,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::BGR_U16,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR);
- DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::BGR_F32,
- ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image color conversion.");
- return GXF_FAILURE;
-}
-
-template class ConvertColorFormatBase<true>;
-template class ConvertColorFormatBase<false>;
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/CropAndResize.cpp b/isaac_ros_image_proc/gxf/tensorops/CropAndResize.cpp
deleted file mode 100644
index d0c2872..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/CropAndResize.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "CropAndResize.hpp"
-#include "Resize.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T>
-gxf_result_t CropAndResizeImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle<ImageAdapter> output_adapter, gxf::Handle<ImageAdapter> input_adapter,
- gxf::Handle<gxf::Allocator> allocator, const std::vector<::cvcore::BBox>& src_rois,
- ::cvcore::tensor_ops::InterpolationType interp_type, cudaStream_t stream) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- const size_t num_output = src_rois.size();
-
- for (size_t i = 0; i < num_output; i++) {
- const std::string output_name_i = std::string(output_name) + "_" + std::to_string(i);
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name_i.c_str());
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name_i.c_str());
- if (!output_image) {
- return GXF_FAILURE;
- }
- ::cvcore::tensor_ops::CropAndResize(output_image.value(), input_image.value(), src_rois[i], interp_type, stream);
- }
-
- return GXF_SUCCESS;
-}
-
-} // namespace detail
-
-gxf_result_t CropAndResize::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected<void> result;
-
- result &= registrar->parameter(output_width_, "output_width");
- result &= registrar->parameter(output_height_, "output_height");
- result &= registrar->parameter(interp_type_, "interp_type");
- result &= registrar->parameter(keep_aspect_ratio_, "keep_aspect_ratio");
- result &= registrar->parameter(receiver_bbox_, "receiver_bbox");
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-gxf::Expected<ImageInfo> CropAndResize::doInferOutputInfo(gxf::Entity& input) {
- // Set crop regions
- auto input_bbox_message = receiver_bbox_->receive();
- if (!input_bbox_message) {
- return gxf::Unexpected{GXF_FAILURE};
- }
- auto bbox_tensor = input_bbox_message.value().get();
- if (!bbox_tensor) {
- return gxf::Unexpected{GXF_FAILURE};
- }
- const gxf::Shape bbox_shape = bbox_tensor.value()->shape();
- if (bbox_shape.rank() != 2 || bbox_shape.dimension(1) != 4) {
- GXF_LOG_ERROR("invalid input bbox dimension.");
- return gxf::Unexpected{GXF_FAILURE};
- }
- const size_t num_bbox = bbox_shape.dimension(0);
- auto bbox_pointer = bbox_tensor.value()->data();
- if (!bbox_pointer) {
- GXF_LOG_ERROR("empty bbox input.");
- return gxf::Unexpected{GXF_FAILURE};
- }
- std::vector<::cvcore::BBox> rois;
- for (size_t i = 0; i < num_bbox; i++) {
- const int index = i * 4;
- rois.push_back({bbox_pointer.value()[index], bbox_pointer.value()[index + 1], bbox_pointer.value()[index + 2],
- bbox_pointer.value()[index + 3]});
- }
- input_rois_ = std::move(rois);
- // Check if no-op is needed
- no_op_ = input_rois_.size() == 1 && input_rois_[0].xmin == 0 &&
- input_rois_[0].xmax == static_cast<int>(input_info_.width) && input_rois_[0].ymin == 0 &&
- input_rois_[0].ymax == static_cast<int>(input_info_.height);
-
- return ImageInfo{input_info_.type, output_width_.get(), output_height_.get(), input_info_.is_cpu};
-}
-
-gxf_result_t CropAndResize::doUpdateCameraMessage(gxf::Handle<gxf::CameraModel>& output,
- gxf::Handle<gxf::CameraModel>& input) {
- auto crop_result = GetCroppedCameraModel(*input, input_rois_[0]);
- if (!crop_result) {
- return GXF_FAILURE;
- }
- *output = GetScaledCameraModel(crop_result.value(), output_info_.width, output_info_.height, false).value();
- return GXF_SUCCESS;
-}
-
-#define DEFINE_CROP_AND_RESIZE(INPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE) { \
- return detail::CropAndResizeImpl(output, input, output_info_, input_info_, output_name, input_name, \
- output_adapter_.get(), input_adapter_.get(), pool_.get(), \
- input_rois_, interp.value(), stream); \
- }
-
-gxf_result_t CropAndResize::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute crop_and_resize.");
- // Check if interpolation type is valid
- auto interp = GetInterpolationType(interp_type_);
- if (!interp) {
- return interp.error();
- }
-
- // Run the image resizing operation
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_U8);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_U16);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_F32);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_U8);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_U16);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_F32);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_U8);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_U16);
- DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_F32);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image crop_and_resize.");
- return GXF_FAILURE;
-}
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/InterleavedToPlanar.cpp b/isaac_ros_image_proc/gxf/tensorops/InterleavedToPlanar.cpp
deleted file mode 100644
index 7b63825..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/InterleavedToPlanar.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "InterleavedToPlanar.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT>
-gxf_result_t InterleavedToPlanarImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle<ImageAdapter> output_adapter, gxf::Handle<ImageAdapter> input_adapter,
- gxf::Handle<gxf::Allocator> allocator, cudaStream_t stream) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
- ::cvcore::tensor_ops::InterleavedToPlanar(output_image.value(), input_image.value(), stream);
- return GXF_SUCCESS;
-}
-
-} // namespace detail
-
-gxf_result_t InterleavedToPlanar::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected<void> result;
-
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-gxf::Expected<ImageInfo> InterleavedToPlanar::doInferOutputInfo(gxf::Entity& input) {
- // Output type is planar
- ::cvcore::ImageType output_type;
- switch (input_info_.type) {
- case ::cvcore::ImageType::RGB_U8: {
- output_type = ::cvcore::ImageType::PLANAR_RGB_U8;
- break;
- }
- case ::cvcore::ImageType::RGB_U16: {
- output_type = ::cvcore::ImageType::PLANAR_RGB_U16;
- break;
- }
- case ::cvcore::ImageType::RGB_F32: {
- output_type = ::cvcore::ImageType::PLANAR_RGB_F32;
- break;
- }
- case ::cvcore::ImageType::BGR_U8: {
- output_type = ::cvcore::ImageType::PLANAR_BGR_U8;
- break;
- }
- case ::cvcore::ImageType::BGR_U16: {
- output_type = ::cvcore::ImageType::PLANAR_BGR_U16;
- break;
- }
- case ::cvcore::ImageType::BGR_F32: {
- output_type = ::cvcore::ImageType::PLANAR_BGR_F32;
- break;
- }
- case ::cvcore::ImageType::PLANAR_RGB_U8:
- case ::cvcore::ImageType::PLANAR_RGB_U16:
- case ::cvcore::ImageType::PLANAR_RGB_F32:
- case ::cvcore::ImageType::PLANAR_BGR_U8:
- case ::cvcore::ImageType::PLANAR_BGR_U16:
- case ::cvcore::ImageType::PLANAR_BGR_F32: {
- output_type = input_info_.type;
- no_op_ = true;
- break;
- }
- default: {
- GXF_LOG_ERROR("invalid input type for interleaved to planar conversion.");
- return gxf::Unexpected{GXF_FAILURE};
- }
- }
- return ImageInfo{output_type, input_info_.width, input_info_.height, input_info_.is_cpu};
-}
-
-gxf_result_t InterleavedToPlanar::doUpdateCameraMessage(gxf::Handle<gxf::CameraModel>& output,
- gxf::Handle<gxf::CameraModel>& input) {
- *output = *input;
- return GXF_SUCCESS;
-}
-
-#define DEFINE_INTERLEAVED_TO_PLANAR(INPUT_TYPE, OUTPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \
- return detail::InterleavedToPlanarImpl(output, input, output_info_, input_info_, \
- output_name, input_name, output_adapter_.get(), \
- input_adapter_.get(), pool_.get(), stream); \
- }
-
-gxf_result_t InterleavedToPlanar::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute interleaved_to_planar conversion");
- // Run the interleaved to planar operation
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::PLANAR_RGB_U8);
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::PLANAR_RGB_U16);
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::PLANAR_RGB_F32);
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::PLANAR_BGR_U8);
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::PLANAR_BGR_U16);
- DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::PLANAR_BGR_F32);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image interleaved to planar conversion.");
- return GXF_FAILURE;
-}
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/Normalize.cpp b/isaac_ros_image_proc/gxf/tensorops/Normalize.cpp
deleted file mode 100644
index 2429abb..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/Normalize.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "Normalize.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT>
-gxf_result_t NormalizeC1Impl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle output_adapter, gxf::Handle input_adapter,
- gxf::Handle allocator, const std::vector& scales,
- const std::vector& offsets, cudaStream_t stream) {
- if (scales.size() != 1 || offsets.size() != 1) {
- GXF_LOG_ERROR("invalid scales/offsets dimension");
- return GXF_FAILURE;
- }
-
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
- ::cvcore::tensor_ops::Normalize(output_image.value(), input_image.value(), scales[0], offsets[0], stream);
- return GXF_SUCCESS;
-}
-
-template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT>
-gxf_result_t NormalizeC3Impl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle output_adapter, gxf::Handle input_adapter,
- gxf::Handle allocator, const std::vector& scales,
- const std::vector& offsets, cudaStream_t stream) {
- if (scales.size() != 3 || offsets.size() != 3) {
- GXF_LOG_ERROR("invalid scales/offsets dimension");
- return GXF_FAILURE;
- }
-
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
- const float scales_value[3] = {scales[0], scales[1], scales[2]};
- const float offsets_value[3] = {offsets[0], offsets[1], offsets[2]};
- ::cvcore::tensor_ops::Normalize(output_image.value(), input_image.value(), scales_value, offsets_value, stream);
- return GXF_SUCCESS;
-}
-
-} // namespace detail
-
-gxf_result_t Normalize::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected result;
-
- result &= registrar->parameter(scales_, "scales");
- result &= registrar->parameter(offsets_, "offsets");
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-gxf::Expected Normalize::doInferOutputInfo(gxf::Entity& input) {
- // Output type is F32
- ::cvcore::ImageType output_type;
- switch (input_info_.type) {
- case ::cvcore::ImageType::Y_U8:
- case ::cvcore::ImageType::Y_U16:
- case ::cvcore::ImageType::Y_F32: {
- output_type = ::cvcore::ImageType::Y_F32;
- break;
- }
- case ::cvcore::ImageType::RGB_U8:
- case ::cvcore::ImageType::RGB_U16:
- case ::cvcore::ImageType::RGB_F32: {
- output_type = ::cvcore::ImageType::RGB_F32;
- break;
- }
- case ::cvcore::ImageType::BGR_U8:
- case ::cvcore::ImageType::BGR_U16:
- case ::cvcore::ImageType::BGR_F32: {
- output_type = ::cvcore::ImageType::BGR_F32;
- break;
- }
- default: {
- GXF_LOG_ERROR("invalid input type for normalize.");
- return gxf::Unexpected{GXF_FAILURE};
- }
- }
- // Operation must be performed under any condition
- no_op_ = false;
- return ImageInfo{output_type, input_info_.width, input_info_.height, input_info_.is_cpu};
-}
-
-gxf_result_t Normalize::doUpdateCameraMessage(gxf::Handle& output,
- gxf::Handle& input) {
- *output = *input;
- return GXF_SUCCESS;
-}
-
-#define DEFINE_NORMALIZE_C1(INPUT_TYPE, OUTPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \
- return detail::NormalizeC1Impl(output, input, output_info_, input_info_, output_name, \
- input_name, output_adapter_.get(), input_adapter_.get(), \
- pool_.get(), scales_.get(), offsets_.get(), stream); \
- }
-
-#define DEFINE_NORMALIZE_C3(INPUT_TYPE, OUTPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \
- return detail::NormalizeC3Impl(output, input, output_info_, input_info_, output_name, \
- input_name, output_adapter_.get(), input_adapter_.get(), \
- pool_.get(), scales_.get(), offsets_.get(), stream); \
- }
-
-gxf_result_t Normalize::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name,
- const char* input_name) {
- GXF_LOG_INFO("execute normalize");
-
- // Run the image normalization operation
- DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::Y_F32);
- DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::Y_F32);
- DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::Y_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::RGB_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::RGB_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::RGB_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::BGR_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::BGR_F32);
- DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::BGR_F32);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image normalize.");
- return GXF_FAILURE;
-}
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/Resize.cpp b/isaac_ros_image_proc/gxf/tensorops/Resize.cpp
deleted file mode 100644
index 943ac5a..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/Resize.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "Resize.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T>
-gxf_result_t ResizeImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle output_adapter, gxf::Handle input_adapter,
- gxf::Handle allocator, bool keep_aspect_ratio,
- ::cvcore::tensor_ops::InterpolationType interp_type, cudaStream_t stream) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
- ::cvcore::tensor_ops::Resize(output_image.value(), input_image.value(), keep_aspect_ratio, interp_type, stream);
- return GXF_SUCCESS;
-}
-
-template<::cvcore::ImageType T>
-gxf_result_t ResizeStreamImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle stream, gxf::Handle output_adapter,
- gxf::Handle input_adapter, gxf::Handle allocator,
- ::cvcore::tensor_ops::InterpolationType interp_type,
- ::cvcore::tensor_ops::BorderType border_type) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
-
- auto err_code = stream->getStream()->Resize(output_image.value(), input_image.value(), interp_type, border_type);
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("resize operation failed.");
- return GXF_FAILURE;
- }
-
- return GXF_SUCCESS;
-}
-
-} // namespace detail
-
-template
-gxf_result_t ResizeBase::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected result;
-
- result &= registrar->parameter(output_width_, "output_width");
- result &= registrar->parameter(output_height_, "output_height");
- result &= registrar->parameter(interp_type_, "interp_type");
- result &= registrar->parameter(border_type_, "border_type");
- result &= registrar->parameter(keep_aspect_ratio_, "keep_aspect_ratio");
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_, "stream", "tensor stream", "tensor stream object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-template
-gxf::Expected ResizeBase::doInferOutputInfo(gxf::Entity& input) {
- // Check if no-op is needed
- no_op_ = output_width_.get() == input_info_.width && output_height_.get() == input_info_.height;
- return ImageInfo{input_info_.type, output_width_.get(), output_height_.get(), input_info_.is_cpu};
-}
-
-template
-gxf_result_t ResizeBase::doUpdateCameraMessage(gxf::Handle& output,
- gxf::Handle& input) {
- *output = GetScaledCameraModel(*input, output_info_.width, output_info_.height, keep_aspect_ratio_.get()).value();
- return GXF_SUCCESS;
-}
-
-#define DEFINE_RESIZE(INPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE) { \
- return detail::ResizeImpl(output, input, output_info_, input_info_, output_name, input_name, \
- output_adapter_.get(), input_adapter_.get(), pool_.get(), \
- keep_aspect_ratio_.get(), interp.value(), stream); \
- }
-
-#define DEFINE_STREAM_RESIZE(INPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE) { \
- return detail::ResizeStreamImpl(output, input, output_info_, input_info_, output_name, input_name, \
- stream_.try_get().value(), output_adapter_.get(), \
- input_adapter_.get(), pool_.get(), interp.value(), border.value()); \
- }
-
-template<>
-gxf_result_t ResizeBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute resize.");
- // Check if interpolation type is valid
- auto interp = GetInterpolationType(interp_type_);
- if (!interp) {
- return interp.error();
- }
- auto border = GetBorderType(border_type_);
- if (!border) {
- return border.error();
- }
-
- // Run the image resizing operation
- DEFINE_STREAM_RESIZE(::cvcore::ImageType::RGB_U8);
- DEFINE_STREAM_RESIZE(::cvcore::ImageType::BGR_U8);
- DEFINE_STREAM_RESIZE(::cvcore::ImageType::NV12);
- DEFINE_STREAM_RESIZE(::cvcore::ImageType::NV24);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image resize.");
- return GXF_FAILURE;
-}
-
-template<>
-gxf_result_t ResizeBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute resize.");
- // Check if interpolation type is valid
- auto interp = GetInterpolationType(interp_type_);
- if (!interp) {
- return interp.error();
- }
-
- // Run the image resizing operation
- DEFINE_RESIZE(::cvcore::ImageType::Y_U8);
- DEFINE_RESIZE(::cvcore::ImageType::Y_U16);
- DEFINE_RESIZE(::cvcore::ImageType::Y_F32);
- DEFINE_RESIZE(::cvcore::ImageType::RGB_U8);
- DEFINE_RESIZE(::cvcore::ImageType::RGB_U16);
- DEFINE_RESIZE(::cvcore::ImageType::RGB_F32);
- DEFINE_RESIZE(::cvcore::ImageType::BGR_U8);
- DEFINE_RESIZE(::cvcore::ImageType::BGR_U16);
- DEFINE_RESIZE(::cvcore::ImageType::BGR_F32);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image resize.");
- return GXF_FAILURE;
-}
-
-template class ResizeBase;
-template class ResizeBase;
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/TensorOps.cpp b/isaac_ros_image_proc/gxf/tensorops/TensorOps.cpp
deleted file mode 100644
index 1694d95..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/TensorOps.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-#include "CameraModel.hpp"
-#include "ConvertColorFormat.hpp"
-#include "CropAndResize.hpp"
-#include "Frame3D.hpp"
-#include "ImageAdapter.hpp"
-#include "InterleavedToPlanar.hpp"
-#include "Normalize.hpp"
-#include "Reshape.hpp"
-#include "Resize.hpp"
-#include "TensorOperator.hpp"
-#include "TensorStream.hpp"
-#include "Undistort.hpp"
-
-#include "gxf/std/extension_factory_helper.hpp"
-
-GXF_EXT_FACTORY_BEGIN()
-GXF_EXT_FACTORY_SET_INFO(0x6eae64ff97a94d9b, 0xb324f85e6a98a75a, "NvCvTensorOpsExtension",
- "Generic CVCORE tensor_ops interfaces", "Nvidia_Gxf", "3.1.0", "LICENSE");
-
-GXF_EXT_FACTORY_ADD(0xd073a92344ba4b81, 0xbd0f18f4996048e9, nvidia::cvcore::tensor_ops::CameraModel,
- nvidia::gxf::Component,
- "Construct Camera distortion model / Camera intrinsic compatible with CVCORE");
-
-GXF_EXT_FACTORY_ADD(0x6c9419223e4b4c2b, 0x899a4d65279c6507, nvidia::cvcore::tensor_ops::Frame3D, nvidia::gxf::Component,
- "Construct Camera extrinsic compatible with CVCORE");
-
-GXF_EXT_FACTORY_ADD(0xd94385e5b35b4634, 0x9adb0d214a3865f6, nvidia::cvcore::tensor_ops::TensorStream,
- nvidia::gxf::Component, "Wrapper of CVCORE ITensorOperatorStream/ITensorOperatorContext");
-
-GXF_EXT_FACTORY_ADD(0xd0c4ddad486a4a91, 0xb69c8a5304b205ef, nvidia::cvcore::tensor_ops::ImageAdapter,
- nvidia::gxf::Component, "Utility component for conversion between message and cvcore image type");
-
-GXF_EXT_FACTORY_ADD(0xadebc792bd0b4a56, 0x99c1405fd2ea0727, nvidia::cvcore::tensor_ops::StreamUndistort,
- nvidia::gxf::Codelet, "Codelet for stream image undistortion in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0xa58141ac7eca4ea5, 0x9b545446fe379a11, nvidia::cvcore::tensor_ops::Resize, nvidia::gxf::Codelet,
- "Codelet for image resizing in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0xeb8b5f5b36d44b48, 0x81f959fd28e6f677, nvidia::cvcore::tensor_ops::StreamResize,
- nvidia::gxf::Codelet, "Codelet for stream image resizing in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0x4a7ff422de3841bc, 0x9e743ac10d9294b6, nvidia::cvcore::tensor_ops::CropAndResize,
- nvidia::gxf::Codelet, "Codelet for crop and resizing operation in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0x7018f0b9034c462b, 0xa9fbaf7ee012974f, nvidia::cvcore::tensor_ops::Normalize, nvidia::gxf::Codelet,
- "Codelet for image normalization in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0x269d4237f3c3479d, 0xbcca9ecc44c71a70, nvidia::cvcore::tensor_ops::InterleavedToPlanar,
- nvidia::gxf::Codelet, "Codelet for convert interleaved image to planar image in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0xfc4d7b4d8fcc4daa, 0xa286056e0fcafa78, nvidia::cvcore::tensor_ops::ConvertColorFormat,
- nvidia::gxf::Codelet, "Codelet for image color conversion in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0x5ab4a4d8f7a34552, 0xa90be52660b076fd, nvidia::cvcore::tensor_ops::StreamConvertColorFormat,
- nvidia::gxf::Codelet, "Codelet for stream image color conversion in tensor_ops");
-
-GXF_EXT_FACTORY_ADD(0x26789b7d5a8d4e84, 0x86b845ec5f4cd12a, nvidia::cvcore::tensor_ops::Reshape, nvidia::gxf::Codelet,
- "Codelet for image reshape in tensor_ops");
-GXF_EXT_FACTORY_END()
diff --git a/isaac_ros_image_proc/gxf/tensorops/Undistort.cpp b/isaac_ros_image_proc/gxf/tensorops/Undistort.cpp
deleted file mode 100644
index b1eed4e..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/Undistort.cpp
+++ /dev/null
@@ -1,285 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#include
-
-#include "ImageUtils.hpp"
-#include "Undistort.hpp"
-#include "gxf/multimedia/camera.hpp"
-
-namespace nvidia {
-namespace cvcore {
-namespace tensor_ops {
-
-namespace detail {
-
-template<::cvcore::ImageType T>
-gxf_result_t UndistortImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info,
- const ImageInfo& input_info, const char* output_name, const char* input_name,
- gxf::Handle stream, gxf::Handle output_adapter,
- gxf::Handle input_adapter, gxf::Handle allocator,
- ::cvcore::tensor_ops::ImageWarp warp, ::cvcore::tensor_ops::InterpolationType interp_type,
- ::cvcore::tensor_ops::BorderType border_type) {
- auto input_image = input_adapter->WrapImageFromMessage(input, input_name);
- if (!input_image) {
- return GXF_FAILURE;
- }
-
- auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator,
- output_info.is_cpu, output_name);
- if (error != GXF_SUCCESS) {
- return GXF_FAILURE;
- }
-
- auto output_image = output_adapter->WrapImageFromMessage(output, output_name);
- if (!output_image) {
- return GXF_FAILURE;
- }
-
- auto err_code = stream->getStream()->Remap(output_image.value(), input_image.value(), warp, interp_type, border_type);
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("undistort operation failed.");
- return GXF_FAILURE;
- }
-
- return GXF_SUCCESS;
-}
-
-gxf::Expected<::cvcore::CameraIntrinsics> GetIntrinsicsFromMessage(gxf::Handle& camera_model) {
- return ::cvcore::CameraIntrinsics(camera_model->focal_length.x, camera_model->focal_length.y,
- camera_model->principal_point.x, camera_model->principal_point.y,
- camera_model->skew_value);
-}
-
-gxf::Expected<::cvcore::CameraExtrinsics> GetExtrinsicsFromMessage(gxf::Handle& pose) {
- float raw_matrix[3][4];
- for (size_t i = 0; i < 9; i++) {
- raw_matrix[i / 3][i % 3] = pose->rotation[i];
- }
- for (size_t i = 0; i < 3; i++) {
- raw_matrix[i][3] = pose->translation[i];
- }
- return ::cvcore::CameraExtrinsics(raw_matrix);
-}
-
-gxf::Expected<::cvcore::CameraDistortionModel> GetDistortionsFromMessage(gxf::Handle& camera_model) {
- auto distortion_type = GetCameraDistortionType(camera_model->distortion_type);
- if (!distortion_type) {
- return gxf::Unexpected{GXF_FAILURE};
- }
- auto distortion_model = ::cvcore::CameraDistortionModel();
- for (size_t i = 0; i < 8; i++) {
- distortion_model.coefficients[i] = camera_model->distortion_coefficients[i];
- }
- distortion_model.type = distortion_type.value();
- return distortion_model;
-}
-
-} // namespace detail
-
-gxf_result_t UndistortBase::start() {
- // Load grid object
- image_grid_.numHorizRegions = regions_width_.get().size();
- image_grid_.numVertRegions = regions_height_.get().size();
- if (regions_width_.get().size() != horizontal_intervals_.get().size() ||
- regions_height_.get().size() != vertical_intervals_.get().size()) {
- GXF_LOG_ERROR("invalid image grid.");
- return GXF_FAILURE;
- }
- std::copy(regions_width_.get().begin(), regions_width_.get().end(), image_grid_.regionWidth.begin());
- std::copy(regions_height_.get().begin(), regions_height_.get().end(), image_grid_.regionHeight.begin());
- std::copy(horizontal_intervals_.get().begin(), horizontal_intervals_.get().end(), image_grid_.horizInterval.begin());
- std::copy(vertical_intervals_.get().begin(), vertical_intervals_.get().end(), image_grid_.vertInterval.begin());
- output_shape_.x = static_cast(
- std::accumulate(image_grid_.regionWidth.begin(), image_grid_.regionWidth.end(), 0));
- output_shape_.y = static_cast(
- std::accumulate(image_grid_.regionHeight.begin(), image_grid_.regionHeight.end(), 0));
-
- // Generate Image Warp if possible
- if (input_camera_model_.try_get() && reference_frame_.try_get()) {
- input_camera_info_ = {input_camera_model_.try_get().value()->getCameraIntrinsics(),
- reference_frame_.try_get().value()->getCameraExtrinsics(),
- input_camera_model_.try_get().value()->getDistortionModel()};
-
- output_camera_intrinsics_ = output_camera_model_.try_get()
- ? output_camera_model_.try_get().value()->getCameraIntrinsics()
- : input_camera_info_.intrinsic;
-
- auto err_code = stream_->getStream()->GenerateWarpFromCameraModel(image_warp_, image_grid_, input_camera_info_,
- output_camera_intrinsics_);
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("image warp creation failed.");
- return GXF_FAILURE;
- }
- }
-
- return GXF_SUCCESS;
-}
-
-gxf_result_t UndistortBase::stop() {
- auto err_code = stream_->getStream()->DestroyWarp(image_warp_);
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("image warp de-allocation failed.");
- return GXF_FAILURE;
- }
- return GXF_SUCCESS;
-}
-
-gxf_result_t UndistortBase::registerInterface(gxf::Registrar* registrar) {
- gxf::Expected result;
-
- result &= registrar->parameter(input_camera_model_, "input_camera_model", "", "",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(reference_frame_, "reference_frame", "", "", gxf::Registrar::NoDefaultParameter(),
- GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_camera_model_, "output_camera_model", "", "",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(regions_width_, "regions_width");
- result &= registrar->parameter(regions_height_, "regions_height");
- result &= registrar->parameter(horizontal_intervals_, "horizontal_intervals");
- result &= registrar->parameter(vertical_intervals_, "vertical_intervals");
- result &= registrar->parameter(interp_type_, "interp_type");
- result &= registrar->parameter(border_type_, "border_type");
- result &= registrar->parameter(receiver_, "receiver");
- result &= registrar->parameter(transmitter_, "transmitter");
- result &= registrar->parameter(pool_, "pool");
- result &= registrar->parameter(stream_, "stream");
- result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(input_adapter_, "input_adapter");
- result &= registrar->parameter(output_adapter_, "output_adapter");
- result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
- result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name",
- gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL);
-
- return gxf::ToResultCode(result);
-}
-
-gxf::Expected UndistortBase::doInferOutputInfo(gxf::Entity& input) {
- // Check if the input distortion type is Perpective
- auto maybe_camera_message = input.get();
- if (maybe_camera_message) {
- no_op_ = maybe_camera_message.value()->distortion_type == gxf::DistortionType::Perspective;
- }
- // Output size may vary, but the format must be the same
- return ImageInfo{input_info_.type, static_cast(output_shape_.x), static_cast(output_shape_.y),
- input_info_.is_cpu};
-}
-
-gxf_result_t UndistortBase::doUpdateCameraMessage(gxf::Handle& output,
- gxf::Handle& input) {
- *output = *input;
- (*output).distortion_type = gxf::DistortionType::Perspective;
- for (size_t i = 0; i < gxf::CameraModel::kMaxDistortionCoefficients; i++) {
- (*output).distortion_coefficients[i] = 0.;
- }
- (*output).dimensions = output_shape_;
- (*output).focal_length.x = output_camera_intrinsics_.fx();
- (*output).focal_length.y = output_camera_intrinsics_.fy();
- (*output).principal_point.x = output_camera_intrinsics_.cx();
- (*output).principal_point.y = output_camera_intrinsics_.cy();
- (*output).skew_value = output_camera_intrinsics_.skew();
- return GXF_SUCCESS;
-}
-
-#define DEFINE_UNDISTORT(INPUT_TYPE) \
- if (input_info_.type == INPUT_TYPE) { \
- return detail::UndistortImpl(output, input, output_info_, input_info_, output_name, input_name, \
- stream_.get(), output_adapter_.get(), input_adapter_.get(), pool_.get(), \
- image_warp_, interp.value(), border.value()); \
- }
-
-gxf_result_t UndistortBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream,
- const char* output_name, const char* input_name) {
- GXF_LOG_INFO("execute undistort.");
- auto maybe_camera_message = input.get();
- auto maybe_pose3d_message = input.get();
- if (!maybe_camera_message) {
- if (image_warp_ == nullptr) {
- GXF_LOG_ERROR("no camera information found.");
- return GXF_FAILURE;
- }
- } else {
- auto maybe_intrinsics = detail::GetIntrinsicsFromMessage(maybe_camera_message.value());
- auto maybe_distortions = detail::GetDistortionsFromMessage(maybe_camera_message.value());
- if (!maybe_intrinsics || !maybe_distortions) {
- return GXF_FAILURE;
- }
- const auto& new_intrinsics = maybe_intrinsics.value();
- const auto& new_distortions = maybe_distortions.value();
-
- auto new_extrinsics = maybe_pose3d_message ? detail::GetExtrinsicsFromMessage(maybe_pose3d_message.value()).value()
- : ::cvcore::CameraExtrinsics();
-
- const bool reset = image_warp_ == nullptr || new_intrinsics != input_camera_info_.intrinsic ||
- new_distortions != input_camera_info_.distortion ||
- new_extrinsics != input_camera_info_.extrinsic;
-
- if (reset) {
- auto new_width = static_cast(output_shape_.x);
- auto new_height = static_cast(output_shape_.y);
-
- // These two parameters (width_scale and height_scale) can be
- // used to determine a crop or pad regime depending on which dimension to
- // preserve in the case of keep_aspect ratio. In this case, we assume
- // always_crop=True, or that we will always use the largest dimension
- // change.
- auto width_scale = new_width / static_cast(input_info_.width);
- auto height_scale = new_height / static_cast(input_info_.height);
- auto scale = std::max({width_scale, height_scale}); // Always crop
-
- input_camera_info_ = {new_intrinsics, new_extrinsics, new_distortions};
- output_camera_intrinsics_ = new_intrinsics;
- output_camera_intrinsics_.m_intrinsics[0][0] = scale * new_intrinsics.m_intrinsics[0][0];
- output_camera_intrinsics_.m_intrinsics[0][1] = scale * new_intrinsics.m_intrinsics[0][1];
- output_camera_intrinsics_.m_intrinsics[1][1] = scale * new_intrinsics.m_intrinsics[1][1];
- output_camera_intrinsics_.m_intrinsics[0][2] = scale * new_intrinsics.m_intrinsics[0][2];
- output_camera_intrinsics_.m_intrinsics[1][2] = scale * new_intrinsics.m_intrinsics[1][2];
-
- auto err_code = stream_->getStream()->GenerateWarpFromCameraModel(image_warp_, image_grid_, input_camera_info_,
- output_camera_intrinsics_);
- if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) {
- GXF_LOG_ERROR("image warp creation failed.");
- return GXF_FAILURE;
- }
- }
- }
-
- auto interp = GetInterpolationType(interp_type_);
- if (!interp) {
- return interp.error();
- }
- auto border = GetBorderType(border_type_);
- if (!border) {
- return border.error();
- }
-
- // Run the image undistortion operation
- DEFINE_UNDISTORT(::cvcore::ImageType::BGR_U8);
- DEFINE_UNDISTORT(::cvcore::ImageType::RGB_U8);
- DEFINE_UNDISTORT(::cvcore::ImageType::NV12);
- DEFINE_UNDISTORT(::cvcore::ImageType::NV24);
-
- // Return error code for unsupported type
- GXF_LOG_ERROR("invalid input/output type for image undistort.");
- return GXF_FAILURE;
-}
-
-} // namespace tensor_ops
-} // namespace cvcore
-} // namespace nvidia
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Instrumentation.h b/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Instrumentation.h
deleted file mode 100644
index 6324b8d..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Instrumentation.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#ifndef CVCORE_INSTRUMENTATION_H
-#define CVCORE_INSTRUMENTATION_H
-
-#include
-
-namespace cvcore { namespace profiler {
-
-/**
- * A enum class used to find out the type of profiler output required
- */
-enum class ProfilerJsonOutputType : uint32_t
-{
- JSON_OFF, /**< print the aggregate values of each timer in pretty print format */
- JSON_AGGREGATE, /**< print the aggregate values of each timer in JSON format
- along with the pretty print format. Pretty print format
- gets printed on the terminal */
- JSON_SEPARATE /**< print all the elapsed times for all timers along with the
- aggregate values from JSON_AGGREGATE option */
-};
-
-/**
-* Flush call to print the timer values in a file input
-* @param jsonHelperType used to find out the type of profiler output required
-* @return filename used to write the timer values
-*/
-void flush(const std::string& filename, ProfilerJsonOutputType jsonHelperType);
-
-/**
-* Flush call to print the timer values in a output stream
-* @param jsonHelperType used to find out the type of profiler output required
-* @return output stream used to write the timer values
-*/
-void flush(std::ostream& output, ProfilerJsonOutputType jsonHelperType);
-
-/**
-* Flush call to print the timer values on the terminal
-* @param jsonHelperType used to find out the type of profiler output required
-*/
-void flush(ProfilerJsonOutputType jsonHelperType);
-
-
-/**
-* Clear all the profile timers
-*/
-void clear();
-
-}}
-#endif
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Model.h b/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Model.h
deleted file mode 100644
index 4a14945..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/Model.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#ifndef CVCORE_MODEL_H
-#define CVCORE_MODEL_H
-
-#include
-
-#include "Image.h"
-
-namespace cvcore {
-
-/**
- * Struct to describe input type required by the model
- */
-struct ModelInputParams
-{
- size_t maxBatchSize; /**< maxbatchSize supported by network*/
- size_t inputLayerWidth; /**< Input layer width */
- size_t inputLayerHeight; /**< Input layer Height */
- ImageType modelInputType; /**< Input Layout type */
-};
-
-/**
- * Struct to describe the model
- */
-struct ModelInferenceParams
-{
- std::string engineFilePath; /**< Engine file path. */
- std::vector inputLayers; /**< names of input layers. */
- std::vector outputLayers; /**< names of output layers. */
-};
-
-} // namespace cvcore
-
-#endif // CVCORE_MODEL_H
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/ProfileUtils.h b/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/ProfileUtils.h
deleted file mode 100644
index 0a4e9a5..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/ProfileUtils.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#ifndef CVCORE_PROFILE_UTILS_H
-#define CVCORE_PROFILE_UTILS_H
-
-#include
-
-namespace cvcore {
-
-/**
- * Export one profiling item to specified json file.
- * @param outputPath output json file path.
- * @param taskName item name showing in the output json file.
- * @param tMin minimum running time in milliseconds.
- * @param tMax maximum running time in milliseconds.
- * @param tAvg average running time in milliseconds.
- * @param isCPU whether CPU or GPU time.
- * @param iterations number of iterations.
- */
-void ExportToJson(const std::string outputPath, const std::string taskName, float tMin, float tMax, float tAvg,
- bool isCPU, int iterations = 100);
-
-} // namespace cvcore
-
-#endif // CVCORE_PROFILE_UTILS_H
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorMap.h b/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorMap.h
deleted file mode 100644
index 93c8783..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/core/TensorMap.h
+++ /dev/null
@@ -1,534 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#ifndef CVCORE_TENSORMAP_H
-#define CVCORE_TENSORMAP_H
-
-#include
-#include
-#include
-#include
-#include
-
-#include "Traits.h"
-#include "TensorList.h"
-
-namespace cvcore {
-
-/**
- * @brief Implementation of a map of tensors of the same rank but, potentially, different dimensions, over the batch dimension
- *
- * @tparam TensorType Any CVCORE tensor type
- * @tparam KeyType Any STL hashable data type
- */
-template
-class TensorMap {};
-
-template
-class TensorMap, KT,
- typename std::enable_if>::value>::type>
-{
- using my_type = TensorMap, KT>;
-
- public:
- using key_type = KT;
- using unit_type = Tensor;
- using element_type = traits::remove_batch_t;
- using frame_type = TensorList;
- using buffer_type = TensorList;
-
- template
- struct dim_data_type
- {
- std::size_t height;
- std::size_t width;
- };
-
- template
- struct dim_data_type::type>
- {
- std::size_t height;
- std::size_t width;
- std::size_t channels;
- };
-
- TensorMap() = default;
- TensorMap(const my_type &) = delete;
- TensorMap(my_type && other)
- {
- *this = std::move(other);
- }
-
- /**
- * @brief Construct a new Tensor Map object
- *
- * @param batchSize The batch dimension of all sub-tensors
- * @param dimData The dimensional description of all sub-tensors in at least HWC format
- * @param isCPU A boolean flag specifying what device to allocate the sub-tensors
- */
- template::type>
- TensorMap(std::size_t batchSize,
- const std::vector> & dimData,
- bool isCPU = true)
- : m_maxBatchSize{batchSize}, m_size{dimData.size()},
- m_isCPU{isCPU}, m_buffer{dimData.size(), true}
- {
- m_buffer.setSize(m_size);
-
- int i = 0;
- for(auto dim: dimData)
- {
- m_buffer[i] = std::move(unit_type(dim.width,
- dim.height,
- m_maxBatchSize,
- m_isCPU));
- ++i;
- }
-
- for(std::size_t i = 0; i < m_maxBatchSize; ++i)
- {
- m_pool.insert(i);
- }
- }
-
- /**
- * @brief Construct a new Tensor Map object
- *
- * @param batchSize The batch dimension of all sub-tensors
- * @param dimData The dimensional description of all sub-tensors in at least HWC format
- * @param isCPU A boolean flag specifying what device to allocate the sub-tensors
- */
- template::type>
- TensorMap(std::size_t batchSize,
- const std::vector> & dimData,
- bool isCPU = true)
- : m_maxBatchSize{batchSize}, m_size{dimData.size()},
- m_isCPU{isCPU}, m_buffer{dimData.size(), true}
- {
- m_buffer.setSize(m_size);
-
- int i = 0;
- for(auto dim: dimData)
- {
- m_buffer[i] = std::move(unit_type(dim.width,
- dim.height,
- m_maxBatchSize,
- dim.channels,
- m_isCPU));
- ++i;
- }
-
- for(std::size_t i = 0; i < m_maxBatchSize; ++i)
- {
- m_pool.insert(i);
- }
- }
-
- ~TensorMap() = default;
-
- my_type & operator=(const my_type &) = delete;
- my_type & operator=(my_type && other)
- {
- std::swap(m_mapping, other.m_mapping);
- std::swap(m_pool, other.m_pool);
- std::swap(m_maxBatchSize, other.m_maxBatchSize);
- std::swap(m_size, other.m_size);
- std::swap(m_isCPU, other.m_isCPU);
- std::swap(m_buffer, other.m_buffer);
-
- return *this;
- }
-
- /**
- * @brief A mapping of the batch dimension index to a given key
- *
- * @details Given a set of pairs such that the keys AND values are unique
- * respectively, the key-wise mapping of the batch dimension is reset
- * to the provided values.
- *
- * @param pairs An unordered map of the uniqe key value pairs
- * @return true If the length of ``pairs`` is less than the max batch size
- * and the key value pairs are one-to-one and onto.
- * @return false If the conditions of ``true`` are not met.
- */
- bool remap(const std::unordered_map & pairs)
- {
- bool result = false;
-
- if(pairs.size() <= m_maxBatchSize)
- {
- for(std::size_t i = 0; i < m_maxBatchSize; ++i)
- {
- m_pool.insert(i);
- }
-
- m_mapping.clear();
- for(auto mapping: pairs)
- {
- if(m_pool.erase(mapping.second))
- {
- m_mapping[mapping.first] = mapping.second;
- }
- }
-
- if((pairs.size() + m_pool.size()) == m_maxBatchSize)
- {
- result = true;
- }
- }
-
- return result;
- }
-
- /**
- * @brief Associates a given key with the first available batch index
- *
- * @details Assuming the associated keys has not reached `maxBatchSize``
- * then this function associates a given key with the first available
- * batch index and returns that index value. If no batch index is
- * available -1 is returned. NOTE: if ``key`` is already associated
- * with a batch index, the that index is returned.
- *
- * @param key The key to be associated with a batch index value
- * @return std::intmax_t The batch index associated with the key or -1
- * if no index is available. NOTE: because std::intmax_t is not a full
- * covering of std::size_t, it is possible for wrap around to happen.
- */
- std::intmax_t map(const key_type & key)
- {
- auto it = m_mapping.find(key);
-
- if(it == m_mapping.end() && !m_pool.empty())
- {
- auto value = m_pool.begin();
- it = m_mapping.insert({key, *value}).first;
- m_pool.erase(value);
- }
-
- return static_cast(it != m_mapping.end() ? it->second : -1);
- }
-
- /**
- * @brief Dissociates a given key with a batch index if possible
- *
- * @details Assuming the given key is associated with a batch index this
- * function removes the association and returns the batch index is was
- * associated with. If no batch index is found associated with the given
- * key, -1 is returned.
- *
- * @param key The key to be dissociated
- * @return std::intmax_t The batch index associated with the key or -1
- * if not found. NOTE: because std::intmax_t is not a full covering of
- * std::size_t, it is possible for wrap around to happen.
- */
- std::intmax_t unmap(const key_type & key)
- {
- std::intmax_t result = -1;
-
- auto it = m_mapping.find(key);
-
- if(it != m_mapping.end())
- {
- result = static_cast(it->second);
- m_pool.insert(it->second);
- m_mapping.erase(it);
- }
-
- return result;
- }
-
- /**
- * @brief The number of keys associated with a batch index
- *
- * @return std::size_t
- */
- std::size_t getKeyCount() const noexcept
- {
- return m_mapping.size();
- }
-
- /**
- * @brief The maximum number of batch index
- *
- * @return std::size_t
- */
- std::size_t getMaxBatchSize() const noexcept
- {
- return m_maxBatchSize;
- }
-
- /**
- * @brief The number of sub-tensors
- *
- * @return std::size_t
- */
- std::size_t getUnitCount() const
- {
- return m_size;
- }
-
- /**
- * Get the size of given dimension.
- * @param dimIdx dimension index.
- * @return size of the specified dimension.
- */
- std::size_t getTensorSize(std::size_t unitIdx, std::size_t dimIdx) const
- {
- return m_buffer[unitIdx].getSize(dimIdx);
- }
-
- /**
- * Get the stride of given dimension.
- * @param dimIdx dimension index.
- * @return stride of the specified dimension.
- */
- std::size_t getTensorStride(std::size_t unitIdx, std::size_t dimIdx) const
- {
- return m_buffer[unitIdx].getStride(dimIdx);
- }
-
- template::type>
- unit_type getUnit(std::size_t idx)
- {
- unit_type result{m_buffer[idx].getWidth(),
- m_buffer[idx].getHeight(),
- m_buffer[idx].getDepth(),
- m_buffer[idx].getData(),
- m_buffer[idx].isCPU()};
- return result;
- }
-
- template::type>
- unit_type getUnit(std::size_t idx, ChannelCount UNUSED = T)
- {
- unit_type result{m_buffer[idx].getWidth(),
- m_buffer[idx].getHeight(),
- m_buffer[idx].getDepth(),
- m_buffer[idx].getChannelCount(),
- m_buffer[idx].getData(),
- m_buffer[idx].isCPU()};
- return result;
- }
-
- template::type>
- frame_type getFrame(const key_type & idx)
- {
- frame_type result;
-
- if(m_mapping.find(idx) != m_mapping.end())
- {
- std::size_t at = m_mapping[idx];
- result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()});
- result.setSize(m_size);
- for(std::size_t i = 0; i < m_size; ++i)
- {
- element_type element{m_buffer[i].getWidth(),
- m_buffer[i].getHeight(),
- m_buffer[i].getData() +
- at * m_buffer[i].getStride(TensorDimension::DEPTH),
- m_buffer[i].isCPU()};
- result[i] = std::move(element);
- }
- }
-
- return result;
- }
-
- template::type>
- frame_type getFrame(const key_type & idx, ChannelCount UNUSED = T)
- {
- frame_type result;
-
- if(m_mapping.find(idx) != m_mapping.end())
- {
- std::size_t at = m_mapping[idx];
- result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()});
- result.setSize(m_size);
- for(std::size_t i = 0; i < m_size; ++i)
- {
- element_type element{m_buffer[i].getWidth(),
- m_buffer[i].getHeight(),
- m_buffer[i].getChannelCount(),
- m_buffer[i].getData() +
- at * m_buffer[i].getStride(TensorDimension::DEPTH),
- m_buffer[i].isCPU()};
- result[i] = std::move(element);
- }
- }
-
- return result;
- }
-
- template::type>
- frame_type getFrame(key_type && idx)
- {
- frame_type result;
-
- if(m_mapping.find(idx) != m_mapping.end())
- {
- std::size_t at = m_mapping[idx];
- result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()});
- result.setSize(m_size);
- for(std::size_t i = 0; i < m_size; ++i)
- {
- element_type element{m_buffer[i].getWidth(),
- m_buffer[i].getHeight(),
- m_buffer[i].getData() +
- at * m_buffer[i].getStride(TensorDimension::DEPTH),
- m_buffer[i].isCPU()};
- result[i] = std::move(element);
- }
- }
-
- return result;
- }
-
- template::type>
- frame_type getFrame(key_type && idx, ChannelCount UNUSED = T)
- {
- frame_type result;
-
- if(m_mapping.find(idx) != m_mapping.end())
- {
- std::size_t at = m_mapping[idx];
- result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()});
- result.setSize(m_size);
- for(std::size_t i = 0; i < m_size; ++i)
- {
- element_type element{m_buffer[i].getWidth(),
- m_buffer[i].getHeight(),
- m_buffer[i].getChannelCount(),
- m_buffer[i].getData() +
- at * m_buffer[i].getStride(TensorDimension::DEPTH),
- m_buffer[i].isCPU()};
- result[i] = std::move(element);
- }
- }
-
- return result;
- }
-
- template::type>
- element_type getElement(const key_type & keyIdx, std::size_t unitIdx)
- {
- element_type element;
-
- if(m_mapping.find(keyIdx) != m_mapping.end())
- {
- std::size_t at = m_mapping[keyIdx];
- element = std::move(element_type{m_buffer[unitIdx].getWidth(),
- m_buffer[unitIdx].getHeight(),
- m_buffer[unitIdx].getData() +
- at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH),
- m_buffer[unitIdx].isCPU()});
- }
-
- return element;
- }
-
- template::type>
- element_type getElement(const key_type & keyIdx, std::size_t unitIdx, ChannelCount UNUSED = T)
- {
- element_type element;
-
- if(m_mapping.find(keyIdx) != m_mapping.end())
- {
- std::size_t at = m_mapping[keyIdx];
- element = std::move(element_type{m_buffer[unitIdx].getWidth(),
- m_buffer[unitIdx].getHeight(),
- m_buffer[unitIdx].getChannelCount(),
- m_buffer[unitIdx].getData() +
- at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH),
- m_buffer[unitIdx].isCPU()});
- }
-
- return element;
- }
-
- template::type>
- element_type getElement(key_type && keyIdx, std::size_t unitIdx)
- {
- element_type element;
-
- if(m_mapping.find(keyIdx) != m_mapping.end())
- {
- std::size_t at = m_mapping[keyIdx];
- element = std::move(element_type{m_buffer[unitIdx].getWidth(),
- m_buffer[unitIdx].getHeight(),
- m_buffer[unitIdx].getData() +
- at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH),
- m_buffer[unitIdx].isCPU()});
- }
-
- return element;
- }
-
- template::type>
- element_type getElement(key_type && keyIdx, std::size_t unitIdx, ChannelCount UNUSED = T)
- {
- element_type element;
-
- if(m_mapping.find(keyIdx) != m_mapping.end())
- {
- std::size_t at = m_mapping[keyIdx];
- element = std::move(element_type{m_buffer[unitIdx].getWidth(),
- m_buffer[unitIdx].getHeight(),
- m_buffer[unitIdx].getChannelCount(),
- m_buffer[unitIdx].getData() +
- at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH),
- m_buffer[unitIdx].isCPU()});
- }
-
- return element;
- }
-
- /**
- * Get the ChannelType of the Tensor.
- * @return ChannelType of the Tensor.
- */
- constexpr ChannelType getType() const noexcept
- {
- return CT;
- }
-
- /**
- * Get the flag whether the Tensor is allocated in CPU or GPU.
- * @return whether the Tensor is allocated in CPU.
- */
- bool isCPU() const noexcept
- {
- return m_isCPU;
- }
-
- private:
- // Mapping and Pool form a unique-to-unique isometry between
- // the keys and indices of the batch dimension
- mutable std::unordered_map m_mapping;
- mutable std::set m_pool;
-
- std::size_t m_maxBatchSize;
- std::size_t m_size;
- bool m_isCPU;
-
- buffer_type m_buffer;
-};
-
-} // namespace cvcore
-
-#endif // CVCORE_TENSORMAP_H
diff --git a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h b/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h
deleted file mode 100644
index c4de9df..0000000
--- a/isaac_ros_image_proc/gxf/tensorops/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h
+++ /dev/null
@@ -1,251 +0,0 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-#ifndef CVCORE_ITENSOROPERATORSTREAM_H
-#define CVCORE_ITENSOROPERATORSTREAM_H
-
-#include
-#include
-#include
-#include
-
-#include "cv/core/CameraModel.h"
-#include "cv/core/ComputeEngine.h"
-#include "cv/core/Image.h"
-
-#include "Errors.h"
-#include "IImageWarp.h"
-#include "ImageUtils.h"
-namespace cvcore { namespace tensor_ops {
-
-class NotImplementedException : public std::logic_error
-{
-public:
- NotImplementedException()
- : std::logic_error("Method not yet implemented.")
- {
- }
-};
-
-class ITensorOperatorStream
-{
-public:
- // Public Constructor(s)/Destructor
- virtual ~ITensorOperatorStream() noexcept = default;
-
- // Public Accessor Method(s)
- virtual std::error_code Status() noexcept = 0;
-
- virtual std::error_code GenerateWarpFromCameraModel(ImageWarp & warp,
- const ImageGrid & grid,
- const CameraModel & source,
- const CameraIntrinsics & target) = 0;
-
- virtual std::error_code DestroyWarp(ImageWarp & warp) noexcept = 0;
-
- // Public Mutator Method(s)
- virtual std::error_code Remap(Image &outputImage, const Image