From b094410ef05d92209d27d7bedfe7aa86636d78c3 Mon Sep 17 00:00:00 2001
From: Ashwin Vaidya
Date: Mon, 11 Jul 2022 18:56:48 +0200
Subject: [PATCH] =?UTF-8?q?=F0=9F=93=83=20Add=20documentation=20for=20grad?=
 =?UTF-8?q?io=20inference=20(#427)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add documentation for gradio inference

* Minor edits
---
 README.md                           | 10 ++++
 docs/source/guides/inference.rst    | 71 +++++++++++++++++++++++------
 tools/inference/gradio_inference.py |  2 +-
 3 files changed, 67 insertions(+), 16 deletions(-)

diff --git a/README.md b/README.md
index e36c29fc9c..98d761e0f4 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,16 @@ python tools/inference/openvino_inference.py \
 
 > Ensure that you provide path to `meta_data.json` if you want the normalization to be applied correctly.
 
+You can also use Gradio Inference to interact with the trained models using a UI. Refer to our [guide](https://openvinotoolkit.github.io/anomalib/guides/inference.html#gradio-inference) for more details.
+
+A quick example:
+
+```bash
+python tools/inference/gradio_inference.py \
+    --config ./anomalib/models/padim/config.yaml \
+    --weights ./results/padim/mvtec/bottle/weights/model.ckpt
+```
+
 ## Hyperparameter Optimization
 
 To run hyperparameter optimization, use the following command:
diff --git a/docs/source/guides/inference.rst b/docs/source/guides/inference.rst
index 70be3625fa..b99060d549 100644
--- a/docs/source/guides/inference.rst
+++ b/docs/source/guides/inference.rst
@@ -10,7 +10,7 @@ PyTorch (Lightning) Inference
 The entrypoint script in ``tools/inference/lightning.py`` can be used to run inference with a trained PyTorch model. The script runs inference by loading a previously trained model into a PyTorch Lightning trainer and running the ``predict sequence``. The entrypoint script has several command line arguments that can be used to configure inference:
 
 +---------------------+----------+---------------------------------------------------------------------------------+
-| Parameter           | Required | Description                                                                     | 
+| Parameter           | Required | Description                                                                     |
 +=====================+==========+=================================================================================+
 | config              | True     | Path to the model config file.                                                  |
 +---------------------+----------+---------------------------------------------------------------------------------+
@@ -37,20 +37,20 @@ OpenVINO Inference
 ==============
 
 To run OpenVINO inference, first make sure that your model has been exported to the OpenVINO IR format. Once the model has been exported, OpenVINO inference can be triggered by running the OpenVINO entrypoint script in ``tools/inference/openvino.py``. The command line arguments are very similar to PyTorch inference entrypoint script:
 
-+-------------+----------+-------------------------------------------------------------------------------------+
-| Parameter   | Required | Description                                                                         |
-+=============+==========+=====================================================================================+
-| config      | True     | Path to the model config file.                                                      |
-+-------------+----------+-------------------------------------------------------------------------------------+
-| weights     | True     | Path to the OpenVINO IR model file (either ``.xml`` or ``.bin``)                    |
-+-------------+----------+-------------------------------------------------------------------------------------+
-| image       | True     | Path to the image source. This can be a single image or a folder of images.        |
-+-------------+----------+-------------------------------------------------------------------------------------+
-| save_data   | False    | Path to which the output images should be saved. Leave empty for live visualization.|
-+-------------+----------+-------------------------------------------------------------------------------------+
-| meta_data   | True     | Path to the JSON file containing the model's meta data (e.g. normalization         |
-|             |          | parameters and anomaly score threshold).                                            |
-+-------------+----------+-------------------------------------------------------------------------------------+
++-----------+----------+--------------------------------------------------------------------------------------+
+| Parameter | Required | Description                                                                          |
++===========+==========+======================================================================================+
+| config    | True     | Path to the model config file.                                                       |
++-----------+----------+--------------------------------------------------------------------------------------+
+| weights   | True     | Path to the OpenVINO IR model file (either ``.xml`` or ``.bin``)                     |
++-----------+----------+--------------------------------------------------------------------------------------+
+| image     | True     | Path to the image source. This can be a single image or a folder of images.         |
++-----------+----------+--------------------------------------------------------------------------------------+
+| save_data | False    | Path to which the output images should be saved. Leave empty for live visualization. |
++-----------+----------+--------------------------------------------------------------------------------------+
+| meta_data | True     | Path to the JSON file containing the model's meta data (e.g. normalization          |
+|           |          | parameters and anomaly score threshold).                                             |
++-----------+----------+--------------------------------------------------------------------------------------+
 
 For correct inference results, the ``meta_data`` argument should be specified and point to the ``meta_data.json`` file that was generated when exporting the OpenVINO IR model. The file is stored in the same folder as the ``.xml`` and ``.bin`` files of the model.
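The OpenVINO entrypoint wraps the same inferencer class that the Gradio script below instantiates, so it can also be driven directly from Python. The following is a minimal sketch, not part of the patch: it assumes ``OpenVINOInferencer`` is importable from ``anomalib.deploy``, that a ``get_configurable_parameters`` helper in ``anomalib.config`` parses the YAML config, and that the inferencer exposes a ``predict`` method; only the keyword arguments are taken verbatim from the patch.

```python
# Illustrative sketch, not part of the patch: programmatic OpenVINO inference.
# Import paths and the `predict` method are assumptions; the keyword arguments
# mirror the call in tools/inference/gradio_inference.py.
from anomalib.config import get_configurable_parameters  # assumed helper
from anomalib.deploy import OpenVINOInferencer  # assumed import path

# Parse the model config once; the inferencer expects the parsed object,
# not the path (see the one-line fix at the bottom of this patch).
config = get_configurable_parameters(config_path="anomalib/models/padim/config.yaml")

inferencer = OpenVINOInferencer(
    config=config,
    path="results/openvino/model.xml",
    meta_data_path="results/openvino/meta_data.json",
)

# Assumed API: runs pre-processing, inference and normalization on one image.
prediction = inferencer.predict(image="image.png")
```

Passing the parsed config object rather than its path is exactly the contract that the code change in this patch enforces.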
@@ -59,3 +59,44 @@ As an example, OpenVINO inference can be triggered by the following command:
 ``python tools/inference/openvino.py --config padim.yaml --weights results/openvino/model.xml --input image.png --meta_data results/openvino/meta_data.json``
 
 Similar to PyTorch inference, the visualization results will be displayed on the screen, and optionally saved to the file system location specified by the ``save_data`` parameter.
+
+
+
+Gradio Inference
+================
+
+Gradio inference is supported for both PyTorch and OpenVINO models.
+
++-----------+----------+--------------------------------------------------------------------+
+| Parameter | Required | Description                                                        |
++===========+==========+====================================================================+
+| config    | True     | Path to the model config file.                                     |
++-----------+----------+--------------------------------------------------------------------+
+| weights   | True     | Path to model weights (``.ckpt``, ``.onnx``, ``.bin``, ``.xml``).  |
++-----------+----------+--------------------------------------------------------------------+
+| meta_data | False    | Path to the JSON file containing the model's meta data.            |
+|           |          | This is needed only for OpenVINO models.                           |
++-----------+----------+--------------------------------------------------------------------+
+| threshold | False    | Threshold value used for identifying anomalies. Range 1-100.       |
++-----------+----------+--------------------------------------------------------------------+
+| share     | False    | Share the Gradio app via a public ``share_url``.                   |
++-----------+----------+--------------------------------------------------------------------+
+
+To use Gradio with an OpenVINO model, first make sure that your model has been exported to the OpenVINO IR format, and ensure that the ``meta_data`` argument points to the ``meta_data.json`` file that was generated when exporting the OpenVINO IR model. The file is stored in the same folder as the ``.xml`` and ``.bin`` files of the model.
+
+As an example, a PyTorch model can be used with the following command:
+
+.. code-block:: bash
+
+    python tools/inference/gradio_inference.py \
+        --config ./anomalib/models/padim/config.yaml \
+        --weights ./results/padim/mvtec/bottle/weights/model.ckpt
+
+Similarly, an OpenVINO model can be used with the following command:
+
+.. code-block:: bash
+
+    python tools/inference/gradio_inference.py \
+        --config ./anomalib/models/padim/config.yaml \
+        --weights ./results/padim/mvtec/bottle/openvino/openvino_model.onnx \
+        --meta_data ./results/padim/mvtec/bottle/openvino/meta_data.json
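Behind these commands, the entrypoint builds a small Gradio app around the selected inferencer. The sketch below shows the general wiring only and is not the script's actual code: the ``infer`` callback is hypothetical, and a Gradio 3.x-style API (``gr.Interface``, ``gr.Image``, ``launch(share=...)``) is assumed.

```python
# Sketch of the Gradio wiring only; `infer` is a hypothetical callback standing
# in for the script's real handler, which calls inferencer.predict().
import gradio as gr
import numpy as np

def infer(image: np.ndarray) -> np.ndarray:
    """Placeholder: return the input unchanged instead of an anomaly map."""
    return image

interface = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="numpy"),   # uploaded test image
    outputs=gr.Image(type="numpy"),  # anomaly visualization in the real script
    title="Anomalib inference",
)

# The `--share` flag maps onto share=True, which serves the local app on a
# public share_url tunneled through Gradio's servers.
interface.launch(share=False)
```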
diff --git a/tools/inference/gradio_inference.py b/tools/inference/gradio_inference.py
index 9cdafa5274..139bb89dc4 100644
--- a/tools/inference/gradio_inference.py
+++ b/tools/inference/gradio_inference.py
@@ -98,7 +98,7 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona
 
     elif extension in (".onnx", ".bin", ".xml"):
         openvino_inferencer = getattr(module, "OpenVINOInferencer")
-        inferencer = openvino_inferencer(config=config_path, path=weight_path, meta_data_path=meta_data_path)
+        inferencer = openvino_inferencer(config=config, path=weight_path, meta_data_path=meta_data_path)
 
     else:
         raise ValueError(
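The final hunk is a genuine bug fix rather than a documentation change: ``get_inferencer`` parses the YAML config into a config object, but the OpenVINO branch was still passing the raw ``config_path``. Below is a condensed paraphrase of the function after the fix; everything outside the visible hunk (the imports, the ``.ckpt`` branch, the config helper, and the exact error message) is assumed from context rather than copied from the file.

```python
# Condensed paraphrase of get_inferencer after the fix; parts outside the
# visible hunk are assumptions, not a verbatim copy of the script.
from importlib import import_module
from pathlib import Path
from typing import Optional

from anomalib.config import get_configurable_parameters  # assumed helper

def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optional[Path] = None):
    config = get_configurable_parameters(config_path=config_path)  # parsed once, up front
    module = import_module("anomalib.deploy")  # assumed home of the inferencer classes
    extension = weight_path.suffix

    if extension == ".ckpt":  # assumed PyTorch branch
        torch_inferencer = getattr(module, "TorchInferencer")
        inferencer = torch_inferencer(config=config, model_source=weight_path, meta_data_path=meta_data_path)
    elif extension in (".onnx", ".bin", ".xml"):
        openvino_inferencer = getattr(module, "OpenVINOInferencer")
        # The fix: pass the parsed `config`, not the raw `config_path`.
        inferencer = openvino_inferencer(config=config, path=weight_path, meta_data_path=meta_data_path)
    else:
        raise ValueError(f"Model extension is not supported: {extension}")
    return inferencer
```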