From 89205f06d1b740952e79c512d6b0ef6f8db18300 Mon Sep 17 00:00:00 2001 From: Ziniu Yu Date: Tue, 16 Aug 2022 13:41:58 +0800 Subject: [PATCH] chore: update executor docstring (#806) * chore: update executor docstring * chore: update executor docstring * chore: minor update * chore: address comment --- docs/user-guides/server.md | 20 +++++++++---------- server/clip_server/executors/clip_onnx.py | 13 ++++++++++++ server/clip_server/executors/clip_tensorrt.py | 10 ++++++++++ server/clip_server/executors/clip_torch.py | 11 ++++++++++ 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/docs/user-guides/server.md b/docs/user-guides/server.md index bb905abdc..964c2c462 100644 --- a/docs/user-guides/server.md +++ b/docs/user-guides/server.md @@ -76,7 +76,7 @@ Please also note that **different models give different sizes of output dimensio | ViT-B-16 | ✅ | ✅ | ✅ | 512 | 335 | 3.20 | 1.44 | | ViT-B-16-plus-240 | ✅ | ✅ | 🚧 | 640 | 795 | 3.03 | 1.59 | | ViT-L-14 | ✅ | ✅ | ❌ | 768 | 890 | 3.66 | 2.04 | -| ViT-L-14@336px | ✅ | ✅ | ❌ | 768 | 891 | 3.74 | 2.23 | +| ViT-L-14-336 | ✅ | ✅ | ❌ | 768 | 891 | 3.74 | 2.23 | | M-CLIP/XLM-Roberta-Large-Vit-B-32 | ✅ | 🚧 | 🚧 | 512 | 4284 | 5.37 | 1.68 | | M-CLIP/XLM-Roberta-Large-Vit-L-14 | ✅ | 🚧 | ❌ | 768 | 4293 | 4.30 | 4.97 | | M-CLIP/XLM-Roberta-Large-Vit-B-16Plus | ✅ | 🚧 | 🚧 | 640 | 4293 | 4.30 | 4.13 | @@ -262,11 +262,11 @@ executors: For all backends, you can set the following parameters via `with`: -| Parameter | Description | -|-------------------------|--------------------------------------------------------------------------------------------------------------------------------| -| `name` | Model weights, default is `ViT-B-32::openai`. A full list of models and weights can be found [here](#model-support) | -| `num_worker_preprocess` | The number of CPU workers for image & text prerpocessing, default 4. | -| `minibatch_size` | The size of a minibatch for CPU preprocessing and GPU encoding, default 64. Reduce the size of it if you encounter OOM on GPU. | +| Parameter | Description | +|-------------------------|------------------------------------------------------------------------------------------------------------------------------| +| `name` | The name of the model to be used. Default 'ViT-B-32::openai'. A list of available models can be found [here](#model-support) | +| `num_worker_preprocess` | The number of CPU workers to preprocess images and texts. Default is 4. | +| `minibatch_size` | The size of the minibatch for preprocessing and encoding. Default is 32. Reduce this number if you encounter OOM errors. | There are also runtime-specific parameters listed below: @@ -274,8 +274,8 @@ There are also runtime-specific parameters listed below: | Parameter | Description | |-----------|--------------------------------------------------------------------------------------------------------------------------------| -| `device` | `cuda` or `cpu`. Default is `None` means auto-detect. | -| `jit` | If to enable Torchscript JIT, default is `False`. | +| `device` | 'cpu' or 'cuda'. Default is None, which auto-detects the device. | +| `jit` | Whether to use JIT compilation. Default is False. | ```` @@ -283,8 +283,8 @@ There are also runtime-specific parameters listed below: | Parameter | Description | |-----------|--------------------------------------------------------------------------------------------------------------------------------| -| `device` | `cuda` or `cpu`. Default is `None` means auto-detect. 
-| `model_path` | The path to custom CLIP model, default `None`. | +| `device` | 'cpu' or 'cuda'. Default is None, which auto-detects the device. +| `model_path` | The path to the model to be used. If not specified, the model will be downloaded or loaded from the local cache. See [here](#use-custom-model-for-onnx) to learn how to finetune custom models. | ```` diff --git a/server/clip_server/executors/clip_onnx.py b/server/clip_server/executors/clip_onnx.py index 24abd11ab..c14da999d 100644 --- a/server/clip_server/executors/clip_onnx.py +++ b/server/clip_server/executors/clip_onnx.py @@ -27,6 +27,19 @@ def __init__( model_path: Optional[str] = None, **kwargs, ): + """ + :param name: The name of the model to be used. Default 'ViT-B-32::openai'. A list of available models can be + found at https://clip-as-service.jina.ai/user-guides/server/#model-support + :param device: 'cpu' or 'cuda'. Default is None, which auto-detects the device. + :param num_worker_preprocess: The number of CPU workers to preprocess images and texts. Default is 4. + :param minibatch_size: The size of the minibatch for preprocessing and encoding. Default is 32. Reduce this + number if you encounter OOM errors. + :param access_paths: The access paths to traverse on the input documents to get the images and texts to be + processed. Visit https://docarray.jina.ai/fundamentals/documentarray/access-elements for more details. + :param model_path: The path to the model to be used. If not specified, the model will be downloaded or loaded + from the local cache. Visit https://clip-as-service.jina.ai/user-guides/server/#use-custom-model-for-onnx + to learn how to finetune custom models. + """ super().__init__(**kwargs) self._minibatch_size = minibatch_size diff --git a/server/clip_server/executors/clip_tensorrt.py b/server/clip_server/executors/clip_tensorrt.py index 7b519a649..0f13bd52e 100644 --- a/server/clip_server/executors/clip_tensorrt.py +++ b/server/clip_server/executors/clip_tensorrt.py @@ -25,6 +25,16 @@ def __init__( access_paths: str = '@r', **kwargs, ): + """ + :param name: The name of the model to be used. Default 'ViT-B-32::openai'. A list of available models can be + found at https://clip-as-service.jina.ai/user-guides/server/#model-support + :param device: 'cpu' or 'cuda'. Default is 'cuda' since TensorRT is only supported on CUDA. + :param num_worker_preprocess: The number of CPU workers to preprocess images and texts. Default is 4. + :param minibatch_size: The size of the minibatch for preprocessing and encoding. Default is 32. Reduce this + number if you encounter OOM errors. + :param access_paths: The access paths to traverse on the input documents to get the images and texts to be + processed. Visit https://docarray.jina.ai/fundamentals/documentarray/access-elements for more details. + """ super().__init__(**kwargs) self._pool = ThreadPool(processes=num_worker_preprocess) diff --git a/server/clip_server/executors/clip_torch.py b/server/clip_server/executors/clip_torch.py index 5aab4f5f0..64edc8236 100644 --- a/server/clip_server/executors/clip_torch.py +++ b/server/clip_server/executors/clip_torch.py @@ -28,6 +28,17 @@ def __init__( access_paths: str = '@r', **kwargs, ): + """ + :param name: The name of the model to be used. Default 'ViT-B-32::openai'. A list of available models can be + found at https://clip-as-service.jina.ai/user-guides/server/#model-support + :param device: 'cpu' or 'cuda'. Default is None, which auto-detects the device. + :param jit: Whether to use JIT compilation. Default is False. 
+ :param num_worker_preprocess: The number of CPU workers to preprocess images and texts. Default is 4. + :param minibatch_size: The size of the minibatch for preprocessing and encoding. Default is 32. Reduce this + number if you encounter OOM errors. + :param access_paths: The access paths to traverse on the input documents to get the images and texts to be + processed. Visit https://docarray.jina.ai/fundamentals/documentarray/access-elements for more details. + """ super().__init__(**kwargs) self._minibatch_size = minibatch_size
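
The docstrings added above document the same knobs that the docs expose through the executor's `with:` section (`name`, `num_worker_preprocess`, `minibatch_size`, plus the runtime-specific `device`, `jit`, and `model_path`). Below is a minimal sketch of how these parameters might be passed when serving the PyTorch executor directly from Python. It is not part of the diff: the class name `CLIPEncoder` is not visible in the hunks and is assumed, the port and all values are illustrative, and `jina` plus `clip-server` are presumed installed.

```python
# A sketch, not part of this patch: serving the PyTorch executor with the
# parameters documented in the docstrings above. Assumes jina>=3 and clip-server
# are installed; the class name CLIPEncoder and the port are assumptions.
from jina import Flow
from clip_server.executors.clip_torch import CLIPEncoder

f = Flow(port=51000).add(
    uses=CLIPEncoder,
    uses_with={
        'name': 'ViT-B-32::openai',   # model name; see the model-support table
        'device': None,               # None auto-detects 'cuda' if available, else 'cpu'
        'jit': False,                 # whether to use TorchScript JIT compilation
        'num_worker_preprocess': 4,   # CPU workers for image/text preprocessing
        'minibatch_size': 32,         # reduce this if encoding runs out of memory
    },
)

with f:
    f.block()  # serve until interrupted
```

When the server is started from a flow YAML instead, the same keys go under the executor's `with:` section, as in the table updated in `docs/user-guides/server.md`.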
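Along the same lines, the `model_path` parameter that the ONNX docstring now documents could be exercised as below. This is again a hedged sketch: the module path mirrors the file touched in this patch, but the class name `CLIPEncoder` is assumed, the path is a placeholder, and the surrounding values are illustrative.

```python
# A sketch of pointing the ONNX executor at a finetuned model. The path is a
# placeholder; see the "use-custom-model-for-onnx" guide linked in the new
# docstring for how such a model is produced.
from jina import Flow
from clip_server.executors.clip_onnx import CLIPEncoder as CLIPOnnxEncoder

f = Flow(port=51000).add(
    uses=CLIPOnnxEncoder,
    uses_with={
        'name': 'ViT-B-32::openai',               # base model name from the model-support table
        'device': 'cpu',                          # or 'cuda'; None auto-detects
        'model_path': '/path/to/finetuned-onnx',  # placeholder path to the custom model
        'minibatch_size': 16,                     # smaller batches are gentler on memory
    },
)
```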