diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000..c88a062b05
Binary files /dev/null and b/.DS_Store differ
diff --git a/.stats.yml b/.stats.yml
index 0151c5a105..e8bca3c6d8 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-de1981b64ac229493473670d618500c6362c195f1057eb7de00bd1bc9184fbd5.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-073331021d48db6af646a3552ab0c682efe31b7fb4e59a109ed1ba539f9b89c5.yml
diff --git a/api.md b/api.md
index 2498340328..eae17d8d84 100644
--- a/api.md
+++ b/api.md
@@ -114,7 +114,7 @@ Methods:
 Types:
 
 ```python
-from openai.types import AudioModel
+from openai.types import AudioModel, AudioResponseFormat
 ```
 
 ## Transcriptions
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index a6009143d4..fd042d1ac3 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -8,6 +8,7 @@
 import httpx
 
 from ... import _legacy_response
+from ...types import AudioResponseFormat
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
 from ..._utils import (
     extract_files,
@@ -22,6 +23,7 @@
 from ..._base_client import make_request_options
 from ...types.audio_model import AudioModel
 from ...types.audio.transcription import Transcription
+from ...types.audio_response_format import AudioResponseFormat
 
 __all__ = ["Transcriptions", "AsyncTranscriptions"]
 
@@ -53,7 +55,7 @@ def create(
         model: Union[str, AudioModel],
         language: str | NotGiven = NOT_GIVEN,
         prompt: str | NotGiven = NOT_GIVEN,
-        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
+        response_format: AudioResponseFormat | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -83,8 +85,8 @@ def create(
              [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
              should match the audio language.
 
-          response_format: The format of the transcript output, in one of these options: `json`, `text`,
-              `srt`, `verbose_json`, or `vtt`.
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
 
           temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
@@ -160,7 +162,7 @@ async def create(
         model: Union[str, AudioModel],
         language: str | NotGiven = NOT_GIVEN,
         prompt: str | NotGiven = NOT_GIVEN,
-        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
+        response_format: AudioResponseFormat | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -190,8 +192,8 @@ async def create(
              [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
              should match the audio language.
 
-          response_format: The format of the transcript output, in one of these options: `json`, `text`,
-              `srt`, `verbose_json`, or `vtt`.
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
 
           temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index 7ec647fb6b..fe08dd550e 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -7,6 +7,7 @@
 import httpx
 
 from ... import _legacy_response
+from ...types import AudioResponseFormat
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
 from ..._utils import (
     extract_files,
@@ -21,6 +22,7 @@
 from ..._base_client import make_request_options
 from ...types.audio_model import AudioModel
 from ...types.audio.translation import Translation
+from ...types.audio_response_format import AudioResponseFormat
 
 __all__ = ["Translations", "AsyncTranslations"]
 
@@ -51,7 +53,7 @@ def create(
         file: FileTypes,
         model: Union[str, AudioModel],
         prompt: str | NotGiven = NOT_GIVEN,
-        response_format: str | NotGiven = NOT_GIVEN,
+        response_format: AudioResponseFormat | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -75,8 +77,8 @@ def create(
              [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
              should be in English.
 
-          response_format: The format of the transcript output, in one of these options: `json`, `text`,
-              `srt`, `verbose_json`, or `vtt`.
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
 
           temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
@@ -143,7 +145,7 @@ async def create(
         file: FileTypes,
         model: Union[str, AudioModel],
         prompt: str | NotGiven = NOT_GIVEN,
-        response_format: str | NotGiven = NOT_GIVEN,
+        response_format: AudioResponseFormat | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -167,8 +169,8 @@ async def create(
              [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
              should be in English.
 
-          response_format: The format of the transcript output, in one of these options: `json`, `text`,
-              `srt`, `verbose_json`, or `vtt`.
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`.
 
           temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index 4dbc1b6b7b..6223be883d 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -38,6 +38,7 @@
 from .batch_create_params import BatchCreateParams as BatchCreateParams
 from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
 from .upload_create_params import UploadCreateParams as UploadCreateParams
+from .audio_response_format import AudioResponseFormat as AudioResponseFormat
 from .image_generate_params import ImageGenerateParams as ImageGenerateParams
 from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
 from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py
index a825fefecb..5ac2bb91e5 100644
--- a/src/openai/types/audio/transcription_create_params.py
+++ b/src/openai/types/audio/transcription_create_params.py
@@ -7,6 +7,7 @@
 
 from ..._types import FileTypes
 from ..audio_model import AudioModel
+from ..audio_response_format import AudioResponseFormat
 
 __all__ = ["TranscriptionCreateParams"]
 
@@ -41,10 +42,10 @@ class TranscriptionCreateParams(TypedDict, total=False):
     should match the audio language.
     """
 
-    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
+    response_format: AudioResponseFormat
     """
-    The format of the transcript output, in one of these options: `json`, `text`,
-    `srt`, `verbose_json`, or `vtt`.
+    The format of the output, in one of these options: `json`, `text`, `srt`,
+    `verbose_json`, or `vtt`.
     """
 
     temperature: float
diff --git a/src/openai/types/audio/translation_create_params.py b/src/openai/types/audio/translation_create_params.py
index 054996a134..6859ed9d30 100644
--- a/src/openai/types/audio/translation_create_params.py
+++ b/src/openai/types/audio/translation_create_params.py
@@ -7,6 +7,7 @@
 
 from ..._types import FileTypes
 from ..audio_model import AudioModel
+from ..audio_response_format import AudioResponseFormat
 
 __all__ = ["TranslationCreateParams"]
 
@@ -33,10 +34,10 @@ class TranslationCreateParams(TypedDict, total=False):
     should be in English.
     """
 
-    response_format: str
+    response_format: AudioResponseFormat
     """
-    The format of the transcript output, in one of these options: `json`, `text`,
-    `srt`, `verbose_json`, or `vtt`.
+    The format of the output, in one of these options: `json`, `text`, `srt`,
+    `verbose_json`, or `vtt`.
     """
 
     temperature: float
diff --git a/src/openai/types/audio_response_format.py b/src/openai/types/audio_response_format.py
new file mode 100644
index 0000000000..f8c8d45945
--- /dev/null
+++ b/src/openai/types/audio_response_format.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["AudioResponseFormat"]
+
+AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"]
diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py
index f5c6c68f0b..b048a1af12 100644
--- a/tests/api_resources/audio/test_translations.py
+++ b/tests/api_resources/audio/test_translations.py
@@ -30,8 +30,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
         translation = client.audio.translations.create(
             file=b"raw file contents",
             model="whisper-1",
-            prompt="string",
-            response_format="string",
+            prompt="prompt",
+            response_format="json",
             temperature=0,
         )
         assert_matches_type(Translation, translation, path=["response"])
@@ -79,8 +79,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
         translation = await async_client.audio.translations.create(
             file=b"raw file contents",
             model="whisper-1",
-            prompt="string",
-            response_format="string",
+            prompt="prompt",
+            response_format="json",
             temperature=0,
         )
         assert_matches_type(Translation, translation, path=["response"])
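Taken together, the patch replaces the inline `Literal[...]` annotation (transcriptions) and the bare `str` annotation (translations) with one shared `AudioResponseFormat` alias. Below is a minimal usage sketch of what this gives callers; it is not part of the generated diff, and the API key, the `speech.mp3` file, and the `fmt` variable are illustrative assumptions.

```python
from openai import OpenAI
from openai.types import AudioResponseFormat  # re-exported via types/__init__.py above

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# The alias narrows response_format to the five literal values, so a type
# checker now rejects typos such as "verbose-json" instead of accepting any str.
fmt: AudioResponseFormat = "verbose_json"

with open("speech.mp3", "rb") as audio_file:  # hypothetical local audio file
    transcription = client.audio.transcriptions.create(
        file=audio_file,
        model="whisper-1",
        response_format=fmt,
    )
    print(transcription.text)
```

The same alias now also constrains `client.audio.translations.create`, which previously accepted any string for `response_format`.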