From 31f1e7b62c64c3b3be630db6471e4a3fc93d87e9 Mon Sep 17 00:00:00 2001 From: iscai-msft <43154838+iscai-msft@users.noreply.github.com> Date: Fri, 1 Oct 2021 18:16:33 -0400 Subject: [PATCH] [conversations] initial (#21019) * Conversations first code gen * First tests * Running test * Latest swagger + autorest * Updated some tests * Updated tests * Removed qna samples * conversations SDK (#20947) * make samples run * fix pipelines run from ci * add feature branch to trigger in ci.yml * fix safe names in ci * fix packaging files * try to fix broken links Co-authored-by: antisch Co-authored-by: Mohamed Shaban --- eng/tox/allowed_pylint_failures.py | 3 +- .../CHANGELOG.md | 6 + .../MANIFEST.in | 7 + .../azure-ai-language-conversations/README.md | 304 +++++ .../azure/__init__.py | 1 + .../azure/ai/__init__.py | 1 + .../azure/ai/language/__init__.py | 1 + .../ai/language/conversations/__init__.py | 19 + .../language/conversations/_configuration.py | 69 + .../_conversation_analysis_client.py | 97 ++ .../ai/language/conversations/_version.py | 9 + .../ai/language/conversations/aio/__init__.py | 10 + .../conversations/aio/_configuration.py | 62 + .../aio/_conversation_analysis_client.py | 87 ++ .../conversations/aio/operations/__init__.py | 13 + .../aio/operations/_operations.py | 86 ++ .../language/conversations/models/__init__.py | 95 ++ .../_conversation_analysis_client_enums.py | 58 + .../language/conversations/models/_models.py | 1070 +++++++++++++++ .../conversations/models/_models_py3.py | 1168 +++++++++++++++++ .../conversations/operations/__init__.py | 13 + .../conversations/operations/_operations.py | 127 ++ .../azure/ai/language/conversations/py.typed | 1 + .../dev_requirements.txt | 5 + .../samples/README.md | 74 ++ .../sample_analyze_conversation_app_async.py | 76 ++ .../sample_analyze_workflow_app_async.py | 75 ++ ..._analyze_workflow_app_with_params_async.py | 94 ++ .../async/sample_authentication_async.py | 50 + .../sample_analyze_conversation_app.py | 71 + .../samples/sample_analyze_workflow_app.py | 70 + ...sample_analyze_workflow_app_with_params.py | 88 ++ .../samples/sample_authentication.py | 46 + .../sdk_packaging.toml | 2 + .../azure-ai-language-conversations/setup.cfg | 2 + .../azure-ai-language-conversations/setup.py | 96 ++ .../tests/asynctestcase.py | 28 + .../tests/conftest.py | 15 + ...onversation_app.test_conversation_app.yaml | 52 + ...test_conversation_app_with_dictparams.yaml | 52 + ...ation_app_async.test_conversation_app.yaml | 38 + ...test_conversation_app_with_dictparams.yaml | 38 + .../test_workflow_app.test_workflow_app.yaml | 215 +++ ...flow_app.test_workflow_app_with_model.yaml | 142 ++ ...app.test_workflow_app_with_parameters.yaml | 142 ++ ..._workflow_app_async.test_workflow_app.yaml | 186 +++ ...pp_async.test_workflow_app_with_model.yaml | 127 ++ ...ync.test_workflow_app_with_parameters.yaml | 127 ++ .../tests/test_conversation_app.py | 91 ++ .../tests/test_conversation_app_async.py | 89 ++ .../tests/test_workflow_app.py | 149 +++ .../tests/test_workflow_app_async.py | 149 +++ .../tests/test_workflow_direct.py | 179 +++ .../tests/test_workflow_direct_async.py | 175 +++ .../tests/testcase.py | 97 ++ sdk/cognitivelanguage/ci.yml | 9 +- shared_requirements.txt | 2 + 57 files changed, 6153 insertions(+), 5 deletions(-) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/MANIFEST.in create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/README.md create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/py.typed create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/dev_requirements.txt create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/setup.cfg create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/setup.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/asynctestcase.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/conftest.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py diff --git a/eng/tox/allowed_pylint_failures.py b/eng/tox/allowed_pylint_failures.py index c929b50632ab..4f2b35c5b718 100644 --- a/eng/tox/allowed_pylint_failures.py +++ b/eng/tox/allowed_pylint_failures.py @@ -58,5 +58,6 @@ "azure-messaging-nspkg", "azure-agrifood-farming", "azure-eventhub", - "azure-ai-language-questionanswering" + "azure-ai-language-questionanswering", + "azure-ai-language-conversations" ] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md new file mode 100644 index 000000000000..f0d9f184e050 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/CHANGELOG.md @@ -0,0 +1,6 @@ +# Release History + +## 1.0.0b1 (unreleased) + +### Features Added +* Initial release diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/MANIFEST.in b/sdk/cognitivelanguage/azure-ai-language-conversations/MANIFEST.in new file mode 100644 index 000000000000..90b1336c6ac5 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/MANIFEST.in @@ -0,0 +1,7 @@ +include *.md +include azure/__init__.py +include azure/ai/__init__.py +include azure/ai/language/__init__.py +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/ai/language/conversations/py.typed \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/README.md b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md new file mode 100644 index 000000000000..14106ed8a3fd --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md @@ -0,0 +1,304 @@ +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/azure-sdk-for-python.client?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=46?branchName=main) + +# Azure Conversational Language Understanding client library for Python +Conversational Language Understanding, aka **CLU** for short, is a cloud-based conversational AI service which is mainly used in bots to extract useful information from user utterance (natural language processing). +The CLU **analyze api** encompasses two projects; deepstack, and workflow projects. +You can use the "deepstack" project if you want to extract intents (intention behind a user utterance] and custom entities. +You can also use the "workflow" project which orchestrates multiple language apps to get the best response (language apps like Question Answering, Luis, and Deepstack). + +[Source code][conversationallanguage_client_src] | [Package (PyPI)][conversationallanguage_pypi_package] | [API reference documentation][conversationallanguage_refdocs] | [Product documentation][conversationallanguage_docs] | [Samples][conversationallanguage_samples] + +## _Disclaimer_ + +_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_ + + +## Getting started + +### Prerequisites + +* Python 2.7, or 3.6 or later is required to use this package. +* An [Azure subscription][azure_subscription] +* An existing Text Analytics resource + +> Note: the new unified Cognitive Language Services are not currently available for deployment. + +### Install the package + +Install the Azure Conversations client library for Python with [pip][pip_link]: + +```bash +pip install azure-ai-language-conversations +``` + +### Authenticate the client +In order to interact with the CLU service, you'll need to create an instance of the [ConversationAnalysisClient][conversationanalysis_client_class] class. You will need an **endpoint**, and an **API key** to instantiate a client object. For more information regarding authenticating with Cognitive Services, see [Authenticate requests to Azure Cognitive Services][cognitive_auth]. + +#### Get an API key +You can get the **endpoint** and an **API key** from the Cognitive Services resource in the [Azure Portal][azure_portal]. + +Alternatively, use the [Azure CLI][azure_cli] command shown below to get the API key from the Cognitive Service resource. + +```powershell +az cognitiveservices account keys list --resource-group --name +``` + + +#### Create ConversationAnalysisClient +Once you've determined your **endpoint** and **API key** you can instantiate a `ConversationAnalysisClient`: + +```python +from azure.core.credentials import AzureKeyCredential +from azure.ai.language.conversations import ConversationAnalysisClient + +endpoint = "https://.cognitiveservices.azure.com/" +credential = AzureKeyCredential("") +client = ConversationAnalysisClient(endpoint, credential) +``` + + +## Key concepts + +### ConversationAnalysisClient +The [ConversationAnalysisClient][conversationanalysis_client_class] is the primary interface for making predictions using your deployed Conversations models. For asynchronous operations, an async `ConversationAnalysisClient` is in the `azure.ai.language.conversation.aio` namespace. + +## Examples +The `azure-ai-language-conversation` client library provides both synchronous and asynchronous APIs. + +The following examples show common scenarios using the `client` [created above](#create-conversationanalysisclient). + +### Analyze a conversation with a Deepstack App +If you would like to extract custom intents and entities from a user utterance, you can call the `client.analyze_conversations()` method with your deepstack's project name as follows: + +```python +# import libraries +import os +from azure.core.credentials import AzureKeyCredential + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import AnalyzeConversationOptions + +# get secrets +conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] +conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] +conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] + +# prepare data +query = "One california maki please." +input = AnalyzeConversationOptions( + query=query +) + +# analyze quey +client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) +with client: + result = client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + +# view result +print("query: {}".format(result.query)) +print("project kind: {}\n".format(result.prediction.project_kind)) + +print("view top intent:") +print("top intent: {}".format(result.prediction.top_intent)) +print("\tcategory: {}".format(result.prediction.intents[0].category)) +print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + +print("view entities:") +for entity in result.prediction.entities: + print("\tcategory: {}".format(entity.category)) + print("\ttext: {}".format(entity.text)) + print("\tconfidence score: {}".format(entity.confidence_score)) +``` + +### Analyze conversation with a Workflow App + +If you would like to pass the user utterance to your orchestrator (worflow) app, you can call the `client.analyze_conversations()` method with your workflow's project name. The orchestrator project simply orchestrates the submitted user utterance between your language apps (Luis, Deepstack, and Question Answering) to get the best response according to the user intent. See the next example: + +```python +# import libraries +import os +from azure.core.credentials import AzureKeyCredential + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import AnalyzeConversationOptions + +# get secrets +conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] +conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] +workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT") + +# prepare data +query = "How do you make sushi rice?", +input = AnalyzeConversationOptions( + query=query +) + +# analyze query +client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) +with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + +# view result +print("query: {}".format(result.query)) +print("project kind: {}\n".format(result.prediction.project_kind)) + +print("view top intent:") +print("top intent: {}".format(result.prediction.top_intent)) +print("\tcategory: {}".format(result.prediction.intents[0].category)) +print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + +print("view Question Answering result:") +print("\tresult: {}\n".format(result.prediction.intents[0].result)) +``` + +### Analyze conversation with a Workflow (Direct) App + +If you would like to use an orchestrator (workflow) app, and you want to call a specific one of your language apps directly, you can call the `client.analyze_conversations()` method with your workflow's project name and the diirect target name which corresponds to your one of you language apps as follows: + +```python +# import libraries +import os +from azure.core.credentials import AzureKeyCredential + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import AnalyzeConversationOptions + +# get secrets +conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] +conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] +workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT") + +# prepare data +query = "How do you make sushi rice?", +target_intent = "SushiMaking" +input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ) + } +) + +# analyze query +client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) +with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + +# view result +print("query: {}".format(result.query)) +print("project kind: {}\n".format(result.prediction.project_kind)) + +print("view top intent:") +print("top intent: {}".format(result.prediction.top_intent)) +print("\tcategory: {}".format(result.prediction.intents[0].category)) +print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + +print("view Question Answering result:") +print("\tresult: {}\n".format(result.prediction.intents[0].result)) +``` + + +## Optional Configuration + +Optional keyword arguments can be passed in at the client and per-operation level. The azure-core [reference documentation][azure_core_ref_docs] describes available configurations for retries, logging, transport protocols, and more. + +## Troubleshooting + +### General + +The Conversations client will raise exceptions defined in [Azure Core][azure_core_exceptions]. + +### Logging + +This library uses the standard +[logging][python_logging] library for logging. +Basic information about HTTP sessions (URLs, headers, etc.) is logged at INFO +level. + +Detailed DEBUG level logging, including request/response bodies and unredacted +headers, can be enabled on a client with the `logging_enable` argument. + +See full SDK logging documentation with examples [here][sdk_logging_docs]. + +```python +import sys +import logging +from azure.core.credentials import AzureKeyCredential +from azure.ai.language.conversations import ConversationAnalysisClient + +# Create a logger for the 'azure' SDK +logger = logging.getLogger('azure') +logger.setLevel(logging.DEBUG) + +# Configure a console output +handler = logging.StreamHandler(stream=sys.stdout) +logger.addHandler(handler) + +endpoint = "https://.cognitiveservices.azure.com/" +credential = AzureKeyCredential("") + +# This client will log detailed information about its HTTP sessions, at DEBUG level +client = ConversationAnalysisClient(endpoint, credential, logging_enable=True) +result = client.analyze_conversations(...) +``` + +Similarly, `logging_enable` can enable detailed logging for a single operation, even when it isn't enabled for the client: + +```python +result = client.analyze_conversations(..., logging_enable=True) +``` + +## Next steps + +## Contributing + +See the [CONTRIBUTING.md][contributing] for details on building, testing, and contributing to this library. + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com][cla]. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. + + +[azure_cli]: https://docs.microsoft.com/cli/azure/ +[azure_portal]: https://portal.azure.com/ +[azure_subscription]: https://azure.microsoft.com/free/ + +[cla]: https://cla.microsoft.com +[coc_contact]: mailto:opencode@microsoft.com +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[cognitive_auth]: https://docs.microsoft.com/azure/cognitive-services/authentication/ +[contributing]: https://github.com/Azure/azure-sdk-for-python/blob/main/CONTRIBUTING.md +[python_logging]: https://docs.python.org/3/library/logging.html +[sdk_logging_docs]: https://docs.microsoft.com/azure/developer/python/azure-sdk-logging +[azure_core_ref_docs]: https://azuresdkdocs.blob.core.windows.net/$web/python/azure-core/latest/azure.core.html +[azure_core_readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/core/azure-core/README.md +[pip_link]:https://pypi.org/project/pip/ +[conversationallanguage_client_src]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations +[conversationallanguage_pypi_package]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations +[conversationallanguage_refdocs]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations +[conversationallanguage_docs]: https://azure.microsoft.com/services/cognitive-services/language-understanding-intelligent-service/ +[conversationallanguage_samples]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md +[conversationanalysis_client_class]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py +[azure_core_exceptions]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/core/azure-core/README.md +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fsdk%2Ftemplate%2Fazure-template%2FREADME.png) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py new file mode 100644 index 000000000000..94bc4a23d401 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._conversation_analysis_client import ConversationAnalysisClient +from ._version import VERSION + +__version__ = VERSION +__all__ = ['ConversationAnalysisClient'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py new file mode 100644 index 000000000000..12a99c2f6eed --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import AzureKeyCredential + + +class ConversationAnalysisClientConfiguration(Configuration): + """Configuration for ConversationAnalysisClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoint (e.g., https://:code:``.api.cognitiveservices.azure.com). + :type endpoint: str + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.AzureKeyCredential + """ + + def __init__( + self, + endpoint, # type: str + credential, # type: AzureKeyCredential + **kwargs # type: Any + ): + # type: (...) -> None + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + super(ConversationAnalysisClientConfiguration, self).__init__(**kwargs) + + self.endpoint = endpoint + self.credential = credential + self.api_version = "2021-07-15-preview" + kwargs.setdefault('sdk_moniker', 'ai-language-conversations/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) -> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py new file mode 100644 index 000000000000..0626bb6e4fa1 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import TYPE_CHECKING + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +from . import models +from ._configuration import ConversationAnalysisClientConfiguration +from .operations import ConversationAnalysisClientOperationsMixin + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Optional + + from azure.core.credentials import AzureKeyCredential + from azure.core.rest import HttpRequest, HttpResponse + +class ConversationAnalysisClient(ConversationAnalysisClientOperationsMixin): + """This API accepts a request and mediates among multiple language projects, such as LUIS Generally Available, Question Answering, LUIS Deepstack, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. + + In some cases, this API needs to forward requests and responses between the caller and an upstream service. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://:code:``.api.cognitiveservices.azure.com). + :type endpoint: str + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.AzureKeyCredential + """ + + def __init__( + self, + endpoint, # type: str + credential, # type: AzureKeyCredential + **kwargs # type: Any + ): + # type: (...) -> None + _endpoint = '{Endpoint}/language' + self._config = ConversationAnalysisClientConfiguration(endpoint, credential, **kwargs) + self._client = PipelineClient(base_url=_endpoint, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + + + def send_request( + self, + request, # type: HttpRequest + **kwargs # type: Any + ): + # type: (...) -> HttpResponse + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, **kwargs) + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> ConversationAnalysisClient + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py new file mode 100644 index 000000000000..e5754a47ce68 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py new file mode 100644 index 000000000000..458d572f9290 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._conversation_analysis_client import ConversationAnalysisClient +__all__ = ['ConversationAnalysisClient'] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py new file mode 100644 index 000000000000..7dc15b360c92 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from .._version import VERSION + + +class ConversationAnalysisClientConfiguration(Configuration): + """Configuration for ConversationAnalysisClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoint (e.g., https://:code:``.api.cognitiveservices.azure.com). + :type endpoint: str + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.AzureKeyCredential + """ + + def __init__( + self, + endpoint: str, + credential: AzureKeyCredential, + **kwargs: Any + ) -> None: + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + super(ConversationAnalysisClientConfiguration, self).__init__(**kwargs) + + self.endpoint = endpoint + self.credential = credential + self.api_version = "2021-07-15-preview" + kwargs.setdefault('sdk_moniker', 'ai-language-conversations/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py new file mode 100644 index 000000000000..aec88d6bbf2b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, Optional + +from azure.core import AsyncPipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.rest import AsyncHttpResponse, HttpRequest +from msrest import Deserializer, Serializer + +from .. import models +from ._configuration import ConversationAnalysisClientConfiguration +from .operations import ConversationAnalysisClientOperationsMixin + +class ConversationAnalysisClient(ConversationAnalysisClientOperationsMixin): + """This API accepts a request and mediates among multiple language projects, such as LUIS Generally Available, Question Answering, LUIS Deepstack, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. + + In some cases, this API needs to forward requests and responses between the caller and an upstream service. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://:code:``.api.cognitiveservices.azure.com). + :type endpoint: str + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.AzureKeyCredential + """ + + def __init__( + self, + endpoint: str, + credential: AzureKeyCredential, + **kwargs: Any + ) -> None: + _endpoint = '{Endpoint}/language' + self._config = ConversationAnalysisClientConfiguration(endpoint, credential, **kwargs) + self._client = AsyncPipelineClient(base_url=_endpoint, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + + + def send_request( + self, + request: HttpRequest, + **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, **kwargs) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ConversationAnalysisClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/__init__.py new file mode 100644 index 000000000000..f90ccbf89a57 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import ConversationAnalysisClientOperationsMixin + +__all__ = [ + 'ConversationAnalysisClientOperationsMixin', +] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py new file mode 100644 index 000000000000..d279fae87db2 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import functools +from typing import Any, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async + +from ... import models as _models +from ...operations._operations import build_analyze_conversations_request + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ConversationAnalysisClientOperationsMixin: + + @distributed_trace_async + async def analyze_conversations( + self, + analyze_conversation_options: "_models.AnalyzeConversationOptions", + *, + project_name: str, + deployment_name: str, + **kwargs: Any + ) -> "_models.AnalyzeConversationResult": + """Analyzes the input conversation utterance. + + :param analyze_conversation_options: Post body of the request. + :type analyze_conversation_options: + ~azure.ai.language.conversations.models.AnalyzeConversationOptions + :keyword project_name: The name of the project to use. + :paramtype project_name: str + :keyword deployment_name: The name of the specific deployment of the project to use. + :paramtype deployment_name: str + :return: AnalyzeConversationResult + :rtype: ~azure.ai.language.conversations.models.AnalyzeConversationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeConversationResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] + + json = self._serialize.body(analyze_conversation_options, 'AnalyzeConversationOptions') + + request = build_analyze_conversations_request( + content_type=content_type, + project_name=project_name, + deployment_name=deployment_name, + json=json, + template_url=self.analyze_conversations.metadata['url'], + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + request.url = self._client.format_url(request.url, **path_format_arguments) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('AnalyzeConversationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + analyze_conversations.metadata = {'url': '/:analyze-conversations'} # type: ignore + diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py new file mode 100644 index 000000000000..69d031432af2 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py @@ -0,0 +1,95 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AnalyzeConversationOptions + from ._models_py3 import AnalyzeConversationResult + from ._models_py3 import AnalyzeParameters + from ._models_py3 import BasePrediction + from ._models_py3 import DSTargetIntentResult + from ._models_py3 import DeepStackEntityResolution + from ._models_py3 import DeepstackCallingOptions + from ._models_py3 import DeepstackEntity + from ._models_py3 import DeepstackIntent + from ._models_py3 import DeepstackParameters + from ._models_py3 import DeepstackPrediction + from ._models_py3 import DeepstackResult + from ._models_py3 import DictionaryNormalizedValueResolution + from ._models_py3 import Error + from ._models_py3 import ErrorResponse + from ._models_py3 import InnerErrorModel + from ._models_py3 import LUISCallingOptions + from ._models_py3 import LUISParameters + from ._models_py3 import LUISTargetIntentResult + from ._models_py3 import QuestionAnsweringParameters + from ._models_py3 import QuestionAnsweringTargetIntentResult + from ._models_py3 import TargetIntentResult + from ._models_py3 import WorkflowPrediction +except (SyntaxError, ImportError): + from ._models import AnalyzeConversationOptions # type: ignore + from ._models import AnalyzeConversationResult # type: ignore + from ._models import AnalyzeParameters # type: ignore + from ._models import BasePrediction # type: ignore + from ._models import DSTargetIntentResult # type: ignore + from ._models import DeepStackEntityResolution # type: ignore + from ._models import DeepstackCallingOptions # type: ignore + from ._models import DeepstackEntity # type: ignore + from ._models import DeepstackIntent # type: ignore + from ._models import DeepstackParameters # type: ignore + from ._models import DeepstackPrediction # type: ignore + from ._models import DeepstackResult # type: ignore + from ._models import DictionaryNormalizedValueResolution # type: ignore + from ._models import Error # type: ignore + from ._models import ErrorResponse # type: ignore + from ._models import InnerErrorModel # type: ignore + from ._models import LUISCallingOptions # type: ignore + from ._models import LUISParameters # type: ignore + from ._models import LUISTargetIntentResult # type: ignore + from ._models import QuestionAnsweringParameters # type: ignore + from ._models import QuestionAnsweringTargetIntentResult # type: ignore + from ._models import TargetIntentResult # type: ignore + from ._models import WorkflowPrediction # type: ignore + +from ._conversation_analysis_client_enums import ( + ErrorCode, + InnerErrorCode, + ProjectKind, + ResolutionKind, + TargetKind, +) + +__all__ = [ + 'AnalyzeConversationOptions', + 'AnalyzeConversationResult', + 'AnalyzeParameters', + 'BasePrediction', + 'DSTargetIntentResult', + 'DeepStackEntityResolution', + 'DeepstackCallingOptions', + 'DeepstackEntity', + 'DeepstackIntent', + 'DeepstackParameters', + 'DeepstackPrediction', + 'DeepstackResult', + 'DictionaryNormalizedValueResolution', + 'Error', + 'ErrorResponse', + 'InnerErrorModel', + 'LUISCallingOptions', + 'LUISParameters', + 'LUISTargetIntentResult', + 'QuestionAnsweringParameters', + 'QuestionAnsweringTargetIntentResult', + 'TargetIntentResult', + 'WorkflowPrediction', + 'ErrorCode', + 'InnerErrorCode', + 'ProjectKind', + 'ResolutionKind', + 'TargetKind', +] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py new file mode 100644 index 000000000000..cdc67ea5d6e5 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from six import with_metaclass +from azure.core import CaseInsensitiveEnumMeta + + +class ErrorCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """Human-readable error code. + """ + + INVALID_REQUEST = "InvalidRequest" + INVALID_ARGUMENT = "InvalidArgument" + UNAUTHORIZED = "Unauthorized" + FORBIDDEN = "Forbidden" + NOT_FOUND = "NotFound" + TOO_MANY_REQUESTS = "TooManyRequests" + INTERNAL_SERVER_ERROR = "InternalServerError" + SERVICE_UNAVAILABLE = "ServiceUnavailable" + +class InnerErrorCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """Human-readable error code. + """ + + INVALID_REQUEST = "InvalidRequest" + INVALID_PARAMETER_VALUE = "InvalidParameterValue" + KNOWLEDGE_BASE_NOT_FOUND = "KnowledgeBaseNotFound" + AZURE_COGNITIVE_SEARCH_NOT_FOUND = "AzureCognitiveSearchNotFound" + AZURE_COGNITIVE_SEARCH_THROTTLING = "AzureCognitiveSearchThrottling" + EXTRACTION_FAILURE = "ExtractionFailure" + +class ProjectKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The type of the project. + """ + + CONVERSATION = "conversation" + WORKFLOW = "workflow" + +class ResolutionKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The type of an entity resolution. + """ + + #: Dictionary normalized entities. + DICTIONARY_NORMALIZED_VALUE = "DictionaryNormalizedValue" + +class TargetKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The type of a target service. + """ + + LUIS = "luis" + LUIS_DEEPSTACK = "luis_deepstack" + QUESTION_ANSWERING = "question_answering" diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py new file mode 100644 index 000000000000..fd2c107aae65 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py @@ -0,0 +1,1070 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AnalyzeConversationOptions(msrest.serialization.Model): + """The request body. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance to be analyzed. + :vartype query: str + :ivar direct_target: The name of the target project this request is sending to directly. + :vartype direct_target: str + :ivar language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information in the response. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :vartype is_logging_enabled: bool + :ivar parameters: A dictionary representing the input for each target project. + :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + """ + + _validation = { + 'query': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'direct_target': {'key': 'directTarget', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + 'parameters': {'key': 'parameters', 'type': '{AnalyzeParameters}'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance to be analyzed. + :paramtype query: str + :keyword direct_target: The name of the target project this request is sending to directly. + :paramtype direct_target: str + :keyword language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :paramtype language: str + :keyword verbose: If true, the service will return more detailed information in the response. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :paramtype is_logging_enabled: bool + :keyword parameters: A dictionary representing the input for each target project. + :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + """ + super(AnalyzeConversationOptions, self).__init__(**kwargs) + self.query = kwargs['query'] + self.direct_target = kwargs.get('direct_target', None) + self.language = kwargs.get('language', None) + self.verbose = kwargs.get('verbose', None) + self.is_logging_enabled = kwargs.get('is_logging_enabled', None) + self.parameters = kwargs.get('parameters', None) + + +class AnalyzeConversationResult(msrest.serialization.Model): + """Represents a conversation analysis response. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance given by the caller. + :vartype query: str + :ivar detected_language: The system detected language for the query. + :vartype detected_language: str + :ivar prediction: Required. The prediction result of a conversation project. + :vartype prediction: ~azure.ai.language.conversations.models.BasePrediction + """ + + _validation = { + 'query': {'required': True}, + 'prediction': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, + 'prediction': {'key': 'prediction', 'type': 'BasePrediction'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance given by the caller. + :paramtype query: str + :keyword detected_language: The system detected language for the query. + :paramtype detected_language: str + :keyword prediction: Required. The prediction result of a conversation project. + :paramtype prediction: ~azure.ai.language.conversations.models.BasePrediction + """ + super(AnalyzeConversationResult, self).__init__(**kwargs) + self.query = kwargs['query'] + self.detected_language = kwargs.get('detected_language', None) + self.prediction = kwargs['prediction'] + + +class AnalyzeParameters(msrest.serialization.Model): + """This is the parameter set of either the conversation application itself or one of the target services. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: LUISParameters, DeepstackParameters, QuestionAnsweringParameters. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'luis': 'LUISParameters', 'luis_deepstack': 'DeepstackParameters', 'question_answering': 'QuestionAnsweringParameters'} + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + """ + super(AnalyzeParameters, self).__init__(**kwargs) + self.target_kind = None # type: Optional[str] + self.api_version = kwargs.get('api_version', None) + + +class BasePrediction(msrest.serialization.Model): + """This is the base class of prediction. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DeepstackPrediction, WorkflowPrediction. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + """ + + _validation = { + 'project_kind': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + } + + _subtype_map = { + 'project_kind': {'conversation': 'DeepstackPrediction', 'workflow': 'WorkflowPrediction'} + } + + def __init__( + self, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + """ + super(BasePrediction, self).__init__(**kwargs) + self.top_intent = kwargs.get('top_intent', None) + self.project_kind = None # type: Optional[str] + + +class DeepstackCallingOptions(msrest.serialization.Model): + """The option to set to call a LUIS Deepstack project. + + :ivar language: The language of the query. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be saved for customers to further review in + authoring, to improve the model quality. + :vartype is_logging_enabled: bool + """ + + _attribute_map = { + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword language: The language of the query. + :paramtype language: str + :keyword verbose: If true, the service will return more detailed information. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be saved for customers to further review + in authoring, to improve the model quality. + :paramtype is_logging_enabled: bool + """ + super(DeepstackCallingOptions, self).__init__(**kwargs) + self.language = kwargs.get('language', None) + self.verbose = kwargs.get('verbose', None) + self.is_logging_enabled = kwargs.get('is_logging_enabled', None) + + +class DeepstackEntity(msrest.serialization.Model): + """The entity extraction result of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar category: Required. The entity category. + :vartype category: str + :ivar text: Required. The predicted entity text. + :vartype text: str + :ivar offset: Required. The starting index of this entity in the query. + :vartype offset: int + :ivar length: Required. The length of the text. + :vartype length: int + :ivar confidence_score: Required. The entity confidence score. + :vartype confidence_score: float + :ivar resolution: A array with extra information about the entity. + :vartype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + """ + + _validation = { + 'category': {'required': True}, + 'text': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'resolution': {'key': 'resolution', 'type': '[DeepStackEntityResolution]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword category: Required. The entity category. + :paramtype category: str + :keyword text: Required. The predicted entity text. + :paramtype text: str + :keyword offset: Required. The starting index of this entity in the query. + :paramtype offset: int + :keyword length: Required. The length of the text. + :paramtype length: int + :keyword confidence_score: Required. The entity confidence score. + :paramtype confidence_score: float + :keyword resolution: A array with extra information about the entity. + :paramtype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + """ + super(DeepstackEntity, self).__init__(**kwargs) + self.category = kwargs['category'] + self.text = kwargs['text'] + self.offset = kwargs['offset'] + self.length = kwargs['length'] + self.confidence_score = kwargs['confidence_score'] + self.resolution = kwargs.get('resolution', None) + + +class DeepStackEntityResolution(msrest.serialization.Model): + """This is the base class of all kinds of entity resolutions. + + All required parameters must be populated in order to send to Azure. + + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + """ + + _validation = { + 'resolution_kind': {'required': True}, + } + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{object}'}, + 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + """ + super(DeepStackEntityResolution, self).__init__(**kwargs) + self.additional_properties = kwargs.get('additional_properties', None) + self.resolution_kind = kwargs['resolution_kind'] + + +class DeepstackIntent(msrest.serialization.Model): + """The intent classification result of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar category: Required. A predicted class. + :vartype category: str + :ivar confidence_score: Required. The confidence score of the class from 0.0 to 1.0. + :vartype confidence_score: float + """ + + _validation = { + 'category': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword category: Required. A predicted class. + :paramtype category: str + :keyword confidence_score: Required. The confidence score of the class from 0.0 to 1.0. + :paramtype confidence_score: float + """ + super(DeepstackIntent, self).__init__(**kwargs) + self.category = kwargs['category'] + self.confidence_score = kwargs['confidence_score'] + + +class DeepstackParameters(AnalyzeParameters): + """This is a set of request parameters for LUIS Deepstack projects. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar calling_options: The option to set to call a LUIS Deepstack project. + :vartype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'DeepstackCallingOptions'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword calling_options: The option to set to call a LUIS Deepstack project. + :paramtype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + """ + super(DeepstackParameters, self).__init__(**kwargs) + self.target_kind = 'luis_deepstack' # type: str + self.calling_options = kwargs.get('calling_options', None) + + +class DeepstackPrediction(BasePrediction): + """Represents the prediction section of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar intents: Required. The intent classification results. + :vartype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :ivar entities: Required. The entity extraction results. + :vartype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + 'entities': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '[DeepstackIntent]'}, + 'entities': {'key': 'entities', 'type': '[DeepstackEntity]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. The intent classification results. + :paramtype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :keyword entities: Required. The entity extraction results. + :paramtype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + """ + super(DeepstackPrediction, self).__init__(**kwargs) + self.project_kind = 'conversation' # type: str + self.intents = kwargs['intents'] + self.entities = kwargs['entities'] + + +class DeepstackResult(msrest.serialization.Model): + """The response returned by a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The same query given in request. + :vartype query: str + :ivar detected_language: The detected language from the query. + :vartype detected_language: str + :ivar prediction: Required. The predicted result for the query. + :vartype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + """ + + _validation = { + 'query': {'required': True}, + 'prediction': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, + 'prediction': {'key': 'prediction', 'type': 'DeepstackPrediction'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword query: Required. The same query given in request. + :paramtype query: str + :keyword detected_language: The detected language from the query. + :paramtype detected_language: str + :keyword prediction: Required. The predicted result for the query. + :paramtype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + """ + super(DeepstackResult, self).__init__(**kwargs) + self.query = kwargs['query'] + self.detected_language = kwargs.get('detected_language', None) + self.prediction = kwargs['prediction'] + + +class DictionaryNormalizedValueResolution(DeepStackEntityResolution): + """The DictionaryNormalizedValue resolution indicates entity values are extracted from a predefined dictionary. For example, Coca could be a normalized name for Coca-Cola. + + All required parameters must be populated in order to send to Azure. + + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + :ivar values: A list of normalized entities. + :vartype values: list[str] + """ + + _validation = { + 'resolution_kind': {'required': True}, + } + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{object}'}, + 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + :keyword values: A list of normalized entities. + :paramtype values: list[str] + """ + super(DictionaryNormalizedValueResolution, self).__init__(**kwargs) + self.values = kwargs.get('values', None) + + +class TargetIntentResult(msrest.serialization.Model): + """This is the base class of an intent prediction. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: LUISTargetIntentResult, DSTargetIntentResult, QuestionAnsweringTargetIntentResult. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'luis': 'LUISTargetIntentResult', 'luis_deepstack': 'DSTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + """ + super(TargetIntentResult, self).__init__(**kwargs) + self.api_version = kwargs.get('api_version', None) + self.confidence_score = kwargs.get('confidence_score', None) + self.target_kind = None # type: Optional[str] + + +class DSTargetIntentResult(TargetIntentResult): + """A wrap up of LUIS Deepstack response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The actual response from a LUIS Deepstack application. + :vartype result: ~azure.ai.language.conversations.models.DeepstackResult + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'DeepstackResult'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a LUIS Deepstack application. + :paramtype result: ~azure.ai.language.conversations.models.DeepstackResult + """ + super(DSTargetIntentResult, self).__init__(**kwargs) + self.target_kind = 'luis_deepstack' # type: str + self.result = kwargs.get('result', None) + + +class Error(msrest.serialization.Model): + """The error object. + + All required parameters must be populated in order to send to Azure. + + :ivar code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", + "TooManyRequests", "InternalServerError", "ServiceUnavailable". + :vartype code: str or ~azure.ai.language.conversations.models.ErrorCode + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar target: The target of the error. + :vartype target: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~azure.ai.language.conversations.models.Error] + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[Error]'}, + 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", + "TooManyRequests", "InternalServerError", "ServiceUnavailable". + :paramtype code: str or ~azure.ai.language.conversations.models.ErrorCode + :keyword message: Required. A human-readable representation of the error. + :paramtype message: str + :keyword target: The target of the error. + :paramtype target: str + :keyword details: An array of details about specific errors that led to this reported error. + :paramtype details: list[~azure.ai.language.conversations.models.Error] + :keyword innererror: An object containing more specific information than the current object + about the error. + :paramtype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + super(Error, self).__init__(**kwargs) + self.code = kwargs['code'] + self.message = kwargs['message'] + self.target = kwargs.get('target', None) + self.details = kwargs.get('details', None) + self.innererror = kwargs.get('innererror', None) + + +class ErrorResponse(msrest.serialization.Model): + """Error response. + + :ivar error: The error object. + :vartype error: ~azure.ai.language.conversations.models.Error + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'Error'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword error: The error object. + :paramtype error: ~azure.ai.language.conversations.models.Error + """ + super(ErrorResponse, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class InnerErrorModel(msrest.serialization.Model): + """An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses. + + All required parameters must be populated in order to send to Azure. + + :ivar code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", + "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure". + :vartype code: str or ~azure.ai.language.conversations.models.InnerErrorCode + :ivar message: Required. Error message. + :vartype message: str + :ivar details: Error details. + :vartype details: dict[str, str] + :ivar target: Error target. + :vartype target: str + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '{str}'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", + "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure". + :paramtype code: str or ~azure.ai.language.conversations.models.InnerErrorCode + :keyword message: Required. Error message. + :paramtype message: str + :keyword details: Error details. + :paramtype details: dict[str, str] + :keyword target: Error target. + :paramtype target: str + :keyword innererror: An object containing more specific information than the current object + about the error. + :paramtype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + super(InnerErrorModel, self).__init__(**kwargs) + self.code = kwargs['code'] + self.message = kwargs['message'] + self.details = kwargs.get('details', None) + self.target = kwargs.get('target', None) + self.innererror = kwargs.get('innererror', None) + + +class LUISCallingOptions(msrest.serialization.Model): + """This customizes how the service calls LUIS Generally Available projects. + + :ivar verbose: Enable verbose response. + :vartype verbose: bool + :ivar log: Save log to add in training utterances later. + :vartype log: bool + :ivar show_all_intents: Set true to show all intents. + :vartype show_all_intents: bool + :ivar timezone_offset: The timezone offset for the location of the request. + :vartype timezone_offset: float + :ivar spell_check: Enable spell checking. + :vartype spell_check: bool + :ivar bing_spell_check_subscription_key: The subscription key to use when enabling Bing spell + check. + :vartype bing_spell_check_subscription_key: str + """ + + _attribute_map = { + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'log': {'key': 'log', 'type': 'bool'}, + 'show_all_intents': {'key': 'show-all-intents', 'type': 'bool'}, + 'timezone_offset': {'key': 'timezoneOffset', 'type': 'float'}, + 'spell_check': {'key': 'spellCheck', 'type': 'bool'}, + 'bing_spell_check_subscription_key': {'key': 'bing-spell-check-subscription-key', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword verbose: Enable verbose response. + :paramtype verbose: bool + :keyword log: Save log to add in training utterances later. + :paramtype log: bool + :keyword show_all_intents: Set true to show all intents. + :paramtype show_all_intents: bool + :keyword timezone_offset: The timezone offset for the location of the request. + :paramtype timezone_offset: float + :keyword spell_check: Enable spell checking. + :paramtype spell_check: bool + :keyword bing_spell_check_subscription_key: The subscription key to use when enabling Bing + spell check. + :paramtype bing_spell_check_subscription_key: str + """ + super(LUISCallingOptions, self).__init__(**kwargs) + self.verbose = kwargs.get('verbose', None) + self.log = kwargs.get('log', None) + self.show_all_intents = kwargs.get('show_all_intents', None) + self.timezone_offset = kwargs.get('timezone_offset', None) + self.spell_check = kwargs.get('spell_check', None) + self.bing_spell_check_subscription_key = kwargs.get('bing_spell_check_subscription_key', None) + + +class LUISParameters(AnalyzeParameters): + """This is a set of request parameters for LUIS Generally Available projects. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar query: The utterance to predict. + :vartype query: str + :ivar calling_options: This customizes how the service calls LUIS Generally Available projects. + :vartype calling_options: ~azure.ai.language.conversations.models.LUISCallingOptions + """ + + _validation = { + 'target_kind': {'required': True}, + 'query': {'max_length': 500, 'min_length': 0}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'additional_properties': {'key': '', 'type': '{object}'}, + 'query': {'key': 'query', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'LUISCallingOptions'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword query: The utterance to predict. + :paramtype query: str + :keyword calling_options: This customizes how the service calls LUIS Generally Available + projects. + :paramtype calling_options: ~azure.ai.language.conversations.models.LUISCallingOptions + """ + super(LUISParameters, self).__init__(**kwargs) + self.target_kind = 'luis' # type: str + self.additional_properties = kwargs.get('additional_properties', None) + self.query = kwargs.get('query', None) + self.calling_options = kwargs.get('calling_options', None) + + +class LUISTargetIntentResult(TargetIntentResult): + """It is a wrap up of LUIS Generally Available response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The actual response from a LUIS Generally Available application. + :vartype result: any + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'object'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a LUIS Generally Available application. + :paramtype result: any + """ + super(LUISTargetIntentResult, self).__init__(**kwargs) + self.target_kind = 'luis' # type: str + self.result = kwargs.get('result', None) + + +class QuestionAnsweringParameters(AnalyzeParameters): + """This is a set of request parameters for Question Answering knowledge bases. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar calling_options: The options sent to a Question Answering KB. + :vartype calling_options: any + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'object'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword calling_options: The options sent to a Question Answering KB. + :paramtype calling_options: any + """ + super(QuestionAnsweringParameters, self).__init__(**kwargs) + self.target_kind = 'question_answering' # type: str + self.calling_options = kwargs.get('calling_options', None) + + +class QuestionAnsweringTargetIntentResult(TargetIntentResult): + """It is a wrap up a Question Answering KB response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The generated answer by a Question Answering KB. + :vartype result: any + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'object'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The generated answer by a Question Answering KB. + :paramtype result: any + """ + super(QuestionAnsweringTargetIntentResult, self).__init__(**kwargs) + self.target_kind = 'question_answering' # type: str + self.result = kwargs.get('result', None) + + +class WorkflowPrediction(BasePrediction): + """This represents the prediction result of an Workflow project. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar intents: Required. A dictionary that contains all intents. A key is an intent name and a + value is its confidence score and target type. The top intent's value also contains the actual + response from the target project. + :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and + a value is its confidence score and target type. The top intent's value also contains the + actual response from the target project. + :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + super(WorkflowPrediction, self).__init__(**kwargs) + self.project_kind = 'workflow' # type: str + self.intents = kwargs['intents'] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py new file mode 100644 index 000000000000..7faf499e3998 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py @@ -0,0 +1,1168 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Dict, List, Optional, Union + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + +from ._conversation_analysis_client_enums import * + + +class AnalyzeConversationOptions(msrest.serialization.Model): + """The request body. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance to be analyzed. + :vartype query: str + :ivar direct_target: The name of the target project this request is sending to directly. + :vartype direct_target: str + :ivar language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information in the response. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :vartype is_logging_enabled: bool + :ivar parameters: A dictionary representing the input for each target project. + :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + """ + + _validation = { + 'query': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'direct_target': {'key': 'directTarget', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + 'parameters': {'key': 'parameters', 'type': '{AnalyzeParameters}'}, + } + + def __init__( + self, + *, + query: str, + direct_target: Optional[str] = None, + language: Optional[str] = None, + verbose: Optional[bool] = None, + is_logging_enabled: Optional[bool] = None, + parameters: Optional[Dict[str, "AnalyzeParameters"]] = None, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance to be analyzed. + :paramtype query: str + :keyword direct_target: The name of the target project this request is sending to directly. + :paramtype direct_target: str + :keyword language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :paramtype language: str + :keyword verbose: If true, the service will return more detailed information in the response. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :paramtype is_logging_enabled: bool + :keyword parameters: A dictionary representing the input for each target project. + :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + """ + super(AnalyzeConversationOptions, self).__init__(**kwargs) + self.query = query + self.direct_target = direct_target + self.language = language + self.verbose = verbose + self.is_logging_enabled = is_logging_enabled + self.parameters = parameters + + +class AnalyzeConversationResult(msrest.serialization.Model): + """Represents a conversation analysis response. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance given by the caller. + :vartype query: str + :ivar detected_language: The system detected language for the query. + :vartype detected_language: str + :ivar prediction: Required. The prediction result of a conversation project. + :vartype prediction: ~azure.ai.language.conversations.models.BasePrediction + """ + + _validation = { + 'query': {'required': True}, + 'prediction': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, + 'prediction': {'key': 'prediction', 'type': 'BasePrediction'}, + } + + def __init__( + self, + *, + query: str, + prediction: "BasePrediction", + detected_language: Optional[str] = None, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance given by the caller. + :paramtype query: str + :keyword detected_language: The system detected language for the query. + :paramtype detected_language: str + :keyword prediction: Required. The prediction result of a conversation project. + :paramtype prediction: ~azure.ai.language.conversations.models.BasePrediction + """ + super(AnalyzeConversationResult, self).__init__(**kwargs) + self.query = query + self.detected_language = detected_language + self.prediction = prediction + + +class AnalyzeParameters(msrest.serialization.Model): + """This is the parameter set of either the conversation application itself or one of the target services. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: LUISParameters, DeepstackParameters, QuestionAnsweringParameters. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'luis': 'LUISParameters', 'luis_deepstack': 'DeepstackParameters', 'question_answering': 'QuestionAnsweringParameters'} + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + """ + super(AnalyzeParameters, self).__init__(**kwargs) + self.target_kind = None # type: Optional[str] + self.api_version = api_version + + +class BasePrediction(msrest.serialization.Model): + """This is the base class of prediction. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DeepstackPrediction, WorkflowPrediction. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + """ + + _validation = { + 'project_kind': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + } + + _subtype_map = { + 'project_kind': {'conversation': 'DeepstackPrediction', 'workflow': 'WorkflowPrediction'} + } + + def __init__( + self, + *, + top_intent: Optional[str] = None, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + """ + super(BasePrediction, self).__init__(**kwargs) + self.top_intent = top_intent + self.project_kind = None # type: Optional[str] + + +class DeepstackCallingOptions(msrest.serialization.Model): + """The option to set to call a LUIS Deepstack project. + + :ivar language: The language of the query. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be saved for customers to further review in + authoring, to improve the model quality. + :vartype is_logging_enabled: bool + """ + + _attribute_map = { + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + } + + def __init__( + self, + *, + language: Optional[str] = None, + verbose: Optional[bool] = None, + is_logging_enabled: Optional[bool] = None, + **kwargs + ): + """ + :keyword language: The language of the query. + :paramtype language: str + :keyword verbose: If true, the service will return more detailed information. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be saved for customers to further review + in authoring, to improve the model quality. + :paramtype is_logging_enabled: bool + """ + super(DeepstackCallingOptions, self).__init__(**kwargs) + self.language = language + self.verbose = verbose + self.is_logging_enabled = is_logging_enabled + + +class DeepstackEntity(msrest.serialization.Model): + """The entity extraction result of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar category: Required. The entity category. + :vartype category: str + :ivar text: Required. The predicted entity text. + :vartype text: str + :ivar offset: Required. The starting index of this entity in the query. + :vartype offset: int + :ivar length: Required. The length of the text. + :vartype length: int + :ivar confidence_score: Required. The entity confidence score. + :vartype confidence_score: float + :ivar resolution: A array with extra information about the entity. + :vartype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + """ + + _validation = { + 'category': {'required': True}, + 'text': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'resolution': {'key': 'resolution', 'type': '[DeepStackEntityResolution]'}, + } + + def __init__( + self, + *, + category: str, + text: str, + offset: int, + length: int, + confidence_score: float, + resolution: Optional[List["DeepStackEntityResolution"]] = None, + **kwargs + ): + """ + :keyword category: Required. The entity category. + :paramtype category: str + :keyword text: Required. The predicted entity text. + :paramtype text: str + :keyword offset: Required. The starting index of this entity in the query. + :paramtype offset: int + :keyword length: Required. The length of the text. + :paramtype length: int + :keyword confidence_score: Required. The entity confidence score. + :paramtype confidence_score: float + :keyword resolution: A array with extra information about the entity. + :paramtype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + """ + super(DeepstackEntity, self).__init__(**kwargs) + self.category = category + self.text = text + self.offset = offset + self.length = length + self.confidence_score = confidence_score + self.resolution = resolution + + +class DeepStackEntityResolution(msrest.serialization.Model): + """This is the base class of all kinds of entity resolutions. + + All required parameters must be populated in order to send to Azure. + + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + """ + + _validation = { + 'resolution_kind': {'required': True}, + } + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{object}'}, + 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, + } + + def __init__( + self, + *, + resolution_kind: Union[str, "ResolutionKind"], + additional_properties: Optional[Dict[str, Any]] = None, + **kwargs + ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + """ + super(DeepStackEntityResolution, self).__init__(**kwargs) + self.additional_properties = additional_properties + self.resolution_kind = resolution_kind + + +class DeepstackIntent(msrest.serialization.Model): + """The intent classification result of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar category: Required. A predicted class. + :vartype category: str + :ivar confidence_score: Required. The confidence score of the class from 0.0 to 1.0. + :vartype confidence_score: float + """ + + _validation = { + 'category': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + *, + category: str, + confidence_score: float, + **kwargs + ): + """ + :keyword category: Required. A predicted class. + :paramtype category: str + :keyword confidence_score: Required. The confidence score of the class from 0.0 to 1.0. + :paramtype confidence_score: float + """ + super(DeepstackIntent, self).__init__(**kwargs) + self.category = category + self.confidence_score = confidence_score + + +class DeepstackParameters(AnalyzeParameters): + """This is a set of request parameters for LUIS Deepstack projects. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar calling_options: The option to set to call a LUIS Deepstack project. + :vartype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'DeepstackCallingOptions'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + calling_options: Optional["DeepstackCallingOptions"] = None, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword calling_options: The option to set to call a LUIS Deepstack project. + :paramtype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + """ + super(DeepstackParameters, self).__init__(api_version=api_version, **kwargs) + self.target_kind = 'luis_deepstack' # type: str + self.calling_options = calling_options + + +class DeepstackPrediction(BasePrediction): + """Represents the prediction section of a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar intents: Required. The intent classification results. + :vartype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :ivar entities: Required. The entity extraction results. + :vartype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + 'entities': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '[DeepstackIntent]'}, + 'entities': {'key': 'entities', 'type': '[DeepstackEntity]'}, + } + + def __init__( + self, + *, + intents: List["DeepstackIntent"], + entities: List["DeepstackEntity"], + top_intent: Optional[str] = None, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. The intent classification results. + :paramtype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :keyword entities: Required. The entity extraction results. + :paramtype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + """ + super(DeepstackPrediction, self).__init__(top_intent=top_intent, **kwargs) + self.project_kind = 'conversation' # type: str + self.intents = intents + self.entities = entities + + +class DeepstackResult(msrest.serialization.Model): + """The response returned by a LUIS Deepstack project. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The same query given in request. + :vartype query: str + :ivar detected_language: The detected language from the query. + :vartype detected_language: str + :ivar prediction: Required. The predicted result for the query. + :vartype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + """ + + _validation = { + 'query': {'required': True}, + 'prediction': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, + 'prediction': {'key': 'prediction', 'type': 'DeepstackPrediction'}, + } + + def __init__( + self, + *, + query: str, + prediction: "DeepstackPrediction", + detected_language: Optional[str] = None, + **kwargs + ): + """ + :keyword query: Required. The same query given in request. + :paramtype query: str + :keyword detected_language: The detected language from the query. + :paramtype detected_language: str + :keyword prediction: Required. The predicted result for the query. + :paramtype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + """ + super(DeepstackResult, self).__init__(**kwargs) + self.query = query + self.detected_language = detected_language + self.prediction = prediction + + +class DictionaryNormalizedValueResolution(DeepStackEntityResolution): + """The DictionaryNormalizedValue resolution indicates entity values are extracted from a predefined dictionary. For example, Coca could be a normalized name for Coca-Cola. + + All required parameters must be populated in order to send to Azure. + + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + :ivar values: A list of normalized entities. + :vartype values: list[str] + """ + + _validation = { + 'resolution_kind': {'required': True}, + } + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{object}'}, + 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + } + + def __init__( + self, + *, + resolution_kind: Union[str, "ResolutionKind"], + additional_properties: Optional[Dict[str, Any]] = None, + values: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: + "DictionaryNormalizedValue". + :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind + :keyword values: A list of normalized entities. + :paramtype values: list[str] + """ + super(DictionaryNormalizedValueResolution, self).__init__(additional_properties=additional_properties, resolution_kind=resolution_kind, **kwargs) + self.values = values + + +class TargetIntentResult(msrest.serialization.Model): + """This is the base class of an intent prediction. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: LUISTargetIntentResult, DSTargetIntentResult, QuestionAnsweringTargetIntentResult. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'luis': 'LUISTargetIntentResult', 'luis_deepstack': 'DSTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + confidence_score: Optional[float] = None, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + """ + super(TargetIntentResult, self).__init__(**kwargs) + self.api_version = api_version + self.confidence_score = confidence_score + self.target_kind = None # type: Optional[str] + + +class DSTargetIntentResult(TargetIntentResult): + """A wrap up of LUIS Deepstack response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The actual response from a LUIS Deepstack application. + :vartype result: ~azure.ai.language.conversations.models.DeepstackResult + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'DeepstackResult'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + confidence_score: Optional[float] = None, + result: Optional["DeepstackResult"] = None, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a LUIS Deepstack application. + :paramtype result: ~azure.ai.language.conversations.models.DeepstackResult + """ + super(DSTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) + self.target_kind = 'luis_deepstack' # type: str + self.result = result + + +class Error(msrest.serialization.Model): + """The error object. + + All required parameters must be populated in order to send to Azure. + + :ivar code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", + "TooManyRequests", "InternalServerError", "ServiceUnavailable". + :vartype code: str or ~azure.ai.language.conversations.models.ErrorCode + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar target: The target of the error. + :vartype target: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~azure.ai.language.conversations.models.Error] + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[Error]'}, + 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}, + } + + def __init__( + self, + *, + code: Union[str, "ErrorCode"], + message: str, + target: Optional[str] = None, + details: Optional[List["Error"]] = None, + innererror: Optional["InnerErrorModel"] = None, + **kwargs + ): + """ + :keyword code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", + "TooManyRequests", "InternalServerError", "ServiceUnavailable". + :paramtype code: str or ~azure.ai.language.conversations.models.ErrorCode + :keyword message: Required. A human-readable representation of the error. + :paramtype message: str + :keyword target: The target of the error. + :paramtype target: str + :keyword details: An array of details about specific errors that led to this reported error. + :paramtype details: list[~azure.ai.language.conversations.models.Error] + :keyword innererror: An object containing more specific information than the current object + about the error. + :paramtype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + super(Error, self).__init__(**kwargs) + self.code = code + self.message = message + self.target = target + self.details = details + self.innererror = innererror + + +class ErrorResponse(msrest.serialization.Model): + """Error response. + + :ivar error: The error object. + :vartype error: ~azure.ai.language.conversations.models.Error + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'Error'}, + } + + def __init__( + self, + *, + error: Optional["Error"] = None, + **kwargs + ): + """ + :keyword error: The error object. + :paramtype error: ~azure.ai.language.conversations.models.Error + """ + super(ErrorResponse, self).__init__(**kwargs) + self.error = error + + +class InnerErrorModel(msrest.serialization.Model): + """An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses. + + All required parameters must be populated in order to send to Azure. + + :ivar code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", + "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure". + :vartype code: str or ~azure.ai.language.conversations.models.InnerErrorCode + :ivar message: Required. Error message. + :vartype message: str + :ivar details: Error details. + :vartype details: dict[str, str] + :ivar target: Error target. + :vartype target: str + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '{str}'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}, + } + + def __init__( + self, + *, + code: Union[str, "InnerErrorCode"], + message: str, + details: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + innererror: Optional["InnerErrorModel"] = None, + **kwargs + ): + """ + :keyword code: Required. One of a server-defined set of error codes. Possible values include: + "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", + "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure". + :paramtype code: str or ~azure.ai.language.conversations.models.InnerErrorCode + :keyword message: Required. Error message. + :paramtype message: str + :keyword details: Error details. + :paramtype details: dict[str, str] + :keyword target: Error target. + :paramtype target: str + :keyword innererror: An object containing more specific information than the current object + about the error. + :paramtype innererror: ~azure.ai.language.conversations.models.InnerErrorModel + """ + super(InnerErrorModel, self).__init__(**kwargs) + self.code = code + self.message = message + self.details = details + self.target = target + self.innererror = innererror + + +class LUISCallingOptions(msrest.serialization.Model): + """This customizes how the service calls LUIS Generally Available projects. + + :ivar verbose: Enable verbose response. + :vartype verbose: bool + :ivar log: Save log to add in training utterances later. + :vartype log: bool + :ivar show_all_intents: Set true to show all intents. + :vartype show_all_intents: bool + :ivar timezone_offset: The timezone offset for the location of the request. + :vartype timezone_offset: float + :ivar spell_check: Enable spell checking. + :vartype spell_check: bool + :ivar bing_spell_check_subscription_key: The subscription key to use when enabling Bing spell + check. + :vartype bing_spell_check_subscription_key: str + """ + + _attribute_map = { + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'log': {'key': 'log', 'type': 'bool'}, + 'show_all_intents': {'key': 'show-all-intents', 'type': 'bool'}, + 'timezone_offset': {'key': 'timezoneOffset', 'type': 'float'}, + 'spell_check': {'key': 'spellCheck', 'type': 'bool'}, + 'bing_spell_check_subscription_key': {'key': 'bing-spell-check-subscription-key', 'type': 'str'}, + } + + def __init__( + self, + *, + verbose: Optional[bool] = None, + log: Optional[bool] = None, + show_all_intents: Optional[bool] = None, + timezone_offset: Optional[float] = None, + spell_check: Optional[bool] = None, + bing_spell_check_subscription_key: Optional[str] = None, + **kwargs + ): + """ + :keyword verbose: Enable verbose response. + :paramtype verbose: bool + :keyword log: Save log to add in training utterances later. + :paramtype log: bool + :keyword show_all_intents: Set true to show all intents. + :paramtype show_all_intents: bool + :keyword timezone_offset: The timezone offset for the location of the request. + :paramtype timezone_offset: float + :keyword spell_check: Enable spell checking. + :paramtype spell_check: bool + :keyword bing_spell_check_subscription_key: The subscription key to use when enabling Bing + spell check. + :paramtype bing_spell_check_subscription_key: str + """ + super(LUISCallingOptions, self).__init__(**kwargs) + self.verbose = verbose + self.log = log + self.show_all_intents = show_all_intents + self.timezone_offset = timezone_offset + self.spell_check = spell_check + self.bing_spell_check_subscription_key = bing_spell_check_subscription_key + + +class LUISParameters(AnalyzeParameters): + """This is a set of request parameters for LUIS Generally Available projects. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. + :vartype additional_properties: dict[str, any] + :ivar query: The utterance to predict. + :vartype query: str + :ivar calling_options: This customizes how the service calls LUIS Generally Available projects. + :vartype calling_options: ~azure.ai.language.conversations.models.LUISCallingOptions + """ + + _validation = { + 'target_kind': {'required': True}, + 'query': {'max_length': 500, 'min_length': 0}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'additional_properties': {'key': '', 'type': '{object}'}, + 'query': {'key': 'query', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'LUISCallingOptions'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + additional_properties: Optional[Dict[str, Any]] = None, + query: Optional[str] = None, + calling_options: Optional["LUISCallingOptions"] = None, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword query: The utterance to predict. + :paramtype query: str + :keyword calling_options: This customizes how the service calls LUIS Generally Available + projects. + :paramtype calling_options: ~azure.ai.language.conversations.models.LUISCallingOptions + """ + super(LUISParameters, self).__init__(api_version=api_version, **kwargs) + self.target_kind = 'luis' # type: str + self.additional_properties = additional_properties + self.query = query + self.calling_options = calling_options + + +class LUISTargetIntentResult(TargetIntentResult): + """It is a wrap up of LUIS Generally Available response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The actual response from a LUIS Generally Available application. + :vartype result: any + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'object'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + confidence_score: Optional[float] = None, + result: Optional[Any] = None, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a LUIS Generally Available application. + :paramtype result: any + """ + super(LUISTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) + self.target_kind = 'luis' # type: str + self.result = result + + +class QuestionAnsweringParameters(AnalyzeParameters): + """This is a set of request parameters for Question Answering knowledge bases. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str + :ivar calling_options: The options sent to a Question Answering KB. + :vartype calling_options: any + """ + + _validation = { + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'calling_options': {'key': 'callingOptions', 'type': 'object'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + calling_options: Optional[Any] = None, + **kwargs + ): + """ + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str + :keyword calling_options: The options sent to a Question Answering KB. + :paramtype calling_options: any + """ + super(QuestionAnsweringParameters, self).__init__(api_version=api_version, **kwargs) + self.target_kind = 'question_answering' # type: str + self.calling_options = calling_options + + +class QuestionAnsweringTargetIntentResult(TargetIntentResult): + """It is a wrap up a Question Answering KB response. + + All required parameters must be populated in order to send to Azure. + + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response. 'luis' means the type is LUIS Generally Available. + 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant + filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar result: The generated answer by a Question Answering KB. + :vartype result: any + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + 'target_kind': {'required': True}, + } + + _attribute_map = { + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'target_kind': {'key': 'targetType', 'type': 'str'}, + 'result': {'key': 'result', 'type': 'object'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = None, + confidence_score: Optional[float] = None, + result: Optional[Any] = None, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The generated answer by a Question Answering KB. + :paramtype result: any + """ + super(QuestionAnsweringTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) + self.target_kind = 'question_answering' # type: str + self.result = result + + +class WorkflowPrediction(BasePrediction): + """This represents the prediction result of an Workflow project. + + All required parameters must be populated in order to send to Azure. + + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar intents: Required. A dictionary that contains all intents. A key is an intent name and a + value is its confidence score and target type. The top intent's value also contains the actual + response from the target project. + :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + } + + _attribute_map = { + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'project_kind': {'key': 'projectType', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, + } + + def __init__( + self, + *, + intents: Dict[str, "TargetIntentResult"], + top_intent: Optional[str] = None, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and + a value is its confidence score and target type. The top intent's value also contains the + actual response from the target project. + :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + super(WorkflowPrediction, self).__init__(top_intent=top_intent, **kwargs) + self.project_kind = 'workflow' # type: str + self.intents = intents diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/__init__.py new file mode 100644 index 000000000000..f90ccbf89a57 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import ConversationAnalysisClientOperationsMixin + +__all__ = [ + 'ConversationAnalysisClientOperationsMixin', +] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py new file mode 100644 index 000000000000..769c2b77e1d8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py @@ -0,0 +1,127 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import functools +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from msrest import Serializer + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +# fmt: off + +def build_analyze_conversations_request( + **kwargs # type: Any +): + # type: (...) -> HttpRequest + content_type = kwargs.pop('content_type', None) # type: Optional[str] + project_name = kwargs.pop('project_name') # type: str + deployment_name = kwargs.pop('deployment_name') # type: str + + api_version = "2021-07-15-preview" + accept = "application/json" + # Construct URL + url = kwargs.pop("template_url", '/:analyze-conversations') + + # Construct parameters + query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] + query_parameters['projectName'] = _SERIALIZER.query("project_name", project_name, 'str') + query_parameters['deploymentName'] = _SERIALIZER.query("deployment_name", deployment_name, 'str') + query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] + if content_type is not None: + header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') + header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') + + return HttpRequest( + method="POST", + url=url, + params=query_parameters, + headers=header_parameters, + **kwargs + ) + +# fmt: on +class ConversationAnalysisClientOperationsMixin(object): + + @distributed_trace + def analyze_conversations( + self, + analyze_conversation_options, # type: "_models.AnalyzeConversationOptions" + **kwargs # type: Any + ): + # type: (...) -> "_models.AnalyzeConversationResult" + """Analyzes the input conversation utterance. + + :param analyze_conversation_options: Post body of the request. + :type analyze_conversation_options: + ~azure.ai.language.conversations.models.AnalyzeConversationOptions + :keyword project_name: The name of the project to use. + :paramtype project_name: str + :keyword deployment_name: The name of the specific deployment of the project to use. + :paramtype deployment_name: str + :return: AnalyzeConversationResult + :rtype: ~azure.ai.language.conversations.models.AnalyzeConversationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeConversationResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] + project_name = kwargs.pop('project_name') # type: str + deployment_name = kwargs.pop('deployment_name') # type: str + + json = self._serialize.body(analyze_conversation_options, 'AnalyzeConversationOptions') + + request = build_analyze_conversations_request( + content_type=content_type, + project_name=project_name, + deployment_name=deployment_name, + json=json, + template_url=self.analyze_conversations.metadata['url'], + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + request.url = self._client.format_url(request.url, **path_format_arguments) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('AnalyzeConversationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + analyze_conversations.metadata = {'url': '/:analyze-conversations'} # type: ignore + diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/py.typed b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/dev_requirements.txt b/sdk/cognitivelanguage/azure-ai-language-conversations/dev_requirements.txt new file mode 100644 index 000000000000..a2928f848ba4 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/dev_requirements.txt @@ -0,0 +1,5 @@ +-e ../../../tools/azure-sdk-tools +-e ../../../tools/azure-devtools +../../core/azure-core +-e ../../cognitiveservices/azure-mgmt-cognitiveservices +aiohttp>=3.0; python_version >= '3.5' \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md new file mode 100644 index 000000000000..326aef0c67ea --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md @@ -0,0 +1,74 @@ +--- +page_type: sample +languages: + - python +products: + - azure + - azure-cognitive-services + - azure-ai-language-understanding +urlFragment: conversationslanguageunderstanding-samples +--- + +# Samples for Azure Conversational Language Understanding client library for Python + +These code samples show common scenario operations with the Azure Conversational Language Understanding client library. +The async versions of the samples require Python 3.6 or later. + +You can authenticate your client with a Conversational Language Understanding API key: + +- See [sample_authentication.py][sample_authentication] and [sample_authentication_async.py][sample_authentication_async] for how to authenticate in the above cases. + +These sample programs show common scenarios for the Conversational Language Understanding client's offerings. + +| **File Name** | **Description** | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [sample_analyze_conversation_app.py][sample_analyze_conversation_app] and [sample_analyze_conversation_app_async.py][sample_analyze_conversation_app_async] | Analyze intents and entities in your utterance using a deepstack (conversation) project | +| [sample_analyze_workflow_app.py][sample_analyze_workflow_app] and [sample_analyze_workflow_app_async.py][sample_analyze_workflow_app_async] | Analyze user utterance using an orchestrator (workflow) project, which uses the best candidate from one of your different apps to analyze user query (ex: Qna, DeepStack, and Luis) | + +## Prerequisites + +- Python 2.7, or 3.6 or later is required to use this package (3.6 or later if using asyncio) +- You must have an [Azure subscription][azure_subscription] and an + [Azure CLU account][azure_clu_account] to run these samples. + +## Setup + +1. Install the Azure Conversational Language Understanding client library for Python with [pip][pip]: + +```bash +pip install azure-ai-language-conversations --pre +``` + +For more information about how the versioning of the SDK corresponds to the versioning of the service's API, see [here][versioning_story_readme]. + +2. Clone or download this sample repository +3. Open the sample folder in Visual Studio Code or your IDE of choice. + +## Running the samples + +1. Open a terminal window and `cd` to the directory that the samples are saved in. +2. Set the environment variables specified in the sample file you wish to run. +3. Follow the usage described in the file, e.g. `python sample_analyze_conversation_app.py` + +## Next steps + +Check out the [API reference documentation][api_reference_documentation] to learn more about +what you can do with the Azure Conversational Language Understanding client library. + +| **Advanced Sample File Name** | **Description** | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [sample_analyze_workflow_app_with_params.py][sample_analyze_workflow_app_with_params] and [sample_analyze_workflow_app_with_params_async.py][sample_analyze_workflow_app_with_params_async] | Same as workflow sample, but with ability to customize call with parameters | + +[azure_subscription]: https://azure.microsoft.com/free/ +[azure_clu_account]: https://language.azure.com/clu/projects +[pip]: https://pypi.org/project/pip/ +[sample_authentication]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py +[sample_authentication_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py +[sample_analyze_conversation_app]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py +[sample_analyze_conversation_app_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py +[sample_analyze_workflow_app]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py +[sample_analyze_workflow_app_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py +[sample_analyze_workflow_app_with_params]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py +[sample_analyze_workflow_app_with_params_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py +[api_reference_documentation]: https://language.azure.com/clu/projects +[versioning_story_readme]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations#install-the-package diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py new file mode 100644 index 000000000000..e500223bc143 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_conversation_app_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query for intents and entities using a deepstack project. + + For more info about how to setup a CLU deepstack project, see the README. + +USAGE: + python sample_analyze_conversation_app_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project. +""" + +import asyncio + +async def sample_analyze_conversation_app_async(): + # [START analyze_conversation_app_async] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations.aio import ConversationAnalysisClient + from azure.ai.language.conversations.models import AnalyzeConversationOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] + + # prepare data + query = "One california maki please." + input = AnalyzeConversationOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + print("top intent: {}".format(result.prediction.top_intent)) + print("\tcategory: {}".format(result.prediction.intents[0].category)) + print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + + print("view entities:") + for entity in result.prediction.entities: + print("\tcategory: {}".format(entity.category)) + print("\ttext: {}".format(entity.text)) + print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_conversation_app_async] + +async def main(): + await sample_analyze_conversation_app_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py new file mode 100644 index 000000000000..87dcbbb6911a --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_workflow_app_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration/workflow project. + In this sample, workflow project's top intent will map to a Question Answering project. + + For more info about how to setup a CLU workflow project, see the README. + +USAGE: + python sample_analyze_workflow_app_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. +""" + +import asyncio + +async def sample_analyze_workflow_app_async(): + # [START analyze_workflow_app] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations.aio import ConversationAnalysisClient + from azure.ai.language.conversations.models import AnalyzeConversationOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("top intent: {}".format(top_intent)) + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view Question Answering result:") + print("\tresult: {}\n".format(top_intent_object.result)) + # [END analyze_workflow_app] + +async def main(): + await sample_analyze_workflow_app_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py new file mode 100644 index 000000000000..ee4e434a6ead --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_workflow_app_with_params_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration/workflow project. + In this sample, worflow project's top intent will map to a Question Answering project. + + For more info about how to setup a CLU workflow project, see the README. + +USAGE: + python sample_analyze_workflow_app_with_params_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. +""" + +import asyncio + +async def sample_analyze_workflow_app_with_params_async(): + # [START analyze_workflow_app_with_params] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations.aio import ConversationAnalysisClient + from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + QuestionAnsweringParameters, + DeepstackParameters, + ) + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("top intent: {}".format(top_intent)) + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view Question Answering result:") + print("\tresult: {}\n".format(top_intent_object.result)) + # [END analyze_workflow_app_with_params] + + +async def main(): + await sample_analyze_workflow_app_with_params_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py new file mode 100644 index 000000000000..15652abaaead --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +""" +FILE: sample_authentication_async.py + +DESCRIPTION: + This sample demonstrates how to authenticate to the Conversational Language Understanding service. + We authenticate using an AzureKeyCredential from azure.core.credentials. + + See more details about authentication here: + https://docs.microsoft.com/azure/cognitive-services/authentication + + Note: the endpoint must be formatted to use the custom domain name for your resource: + https://.cognitiveservices.azure.com/ + +USAGE: + python sample_authentication_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your Conversational Language Understanding resource. + 2) AZURE_CONVERSATIONS_KEY - your Conversational Language Understanding API key +""" + +import os +import asyncio + + +async def sample_authentication_api_key_async(): + # [START create_clu_client_with_key_async] + from azure.core.credentials import AzureKeyCredential + from azure.ai.language.conversations.aio import ConversationAnalysisClient + + endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + key = os.environ["AZURE_CONVERSATIONS_KEY"] + + clu_client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key)) + # [END create_clu_client_with_key_async] + + +async def main(): + await sample_authentication_api_key_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py new file mode 100644 index 000000000000..f7994db10de0 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_conversation_app.py + +DESCRIPTION: + This sample demonstrates how to analyze user query for intents and entities using a deepstack project. + + For more info about how to setup a CLU deepstack project, see the README. + +USAGE: + python sample_analyze_conversation_app.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project. +""" + +def sample_analyze_conversation_app(): + # [START analyze_conversation_app] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import AnalyzeConversationOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] + + # prepare data + query = "One california maki please." + input = AnalyzeConversationOptions( + query=query + ) + + # analyze quey + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + print("top intent: {}".format(result.prediction.top_intent)) + print("\tcategory: {}".format(result.prediction.intents[0].category)) + print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + + print("view entities:") + for entity in result.prediction.entities: + print("\tcategory: {}".format(entity.category)) + print("\ttext: {}".format(entity.text)) + print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_conversation_app] + + +if __name__ == '__main__': + sample_analyze_conversation_app() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py new file mode 100644 index 000000000000..6378346fb3fc --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_workflow_app.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration/workflow project. + In this sample, workflow project's top intent will map to a Qna project. + + For more info about how to setup a CLU workflow project, see the README. + +USAGE: + python sample_analyze_workflow_app.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. +""" + +def sample_analyze_workflow_app(): + # [START analyze_workflow_app] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import AnalyzeConversationOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("top intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view qna result:") + print("\tresult: {}\n".format(top_intent_object.result)) + # [END analyze_workflow_app] + +if __name__ == '__main__': + sample_analyze_workflow_app() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py new file mode 100644 index 000000000000..7c300f690e75 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_workflow_app_with_params.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration/workflow project. + In this sample, worflow project's top intent will map to a Qna project. + + For more info about how to setup a CLU workflow project, see the README. + +USAGE: + python sample_analyze_workflow_app_with_params.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. +""" + +def sample_analyze_workflow_app_with_params(): + # [START analyze_workflow_app_with_params] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + QuestionAnsweringParameters, + DeepstackParameters, + ) + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("top intent: {}".format(top_intent)) + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view Question Answering result:") + print("\tresult: {}\n".format(top_intent_object.result)) + # [END analyze_workflow_app_with_params] + + +if __name__ == '__main__': + sample_analyze_workflow_app_with_params() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py new file mode 100644 index 000000000000..132bab214f5b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_authentication.py @@ -0,0 +1,46 @@ +# coding=utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +""" +FILE: sample_authentication.py + +DESCRIPTION: + This sample demonstrates how to authenticate to the Conversational Language Understanding service. + We authenticate using an AzureKeyCredential from azure.core.credentials. + + See more details about authentication here: + https://docs.microsoft.com/azure/cognitive-services/authentication + + Note: the endpoint must be formatted to use the custom domain name for your resource: + https://.cognitiveservices.azure.com/ + +USAGE: + python sample_authentication.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your Conversational Language Understanding resource. + 2) AZURE_CONVERSATIONS_KEY - your Conversational Language Understanding API key +""" + +import os + + +def sample_authentication_api_key(): + # [START create_dt_client_with_key] + from azure.core.credentials import AzureKeyCredential + from azure.ai.language.conversations import ConversationAnalysisClient + + + endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + key = os.environ["AZURE_CONVERSATIONS_KEY"] + + clu_client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key)) + # [END create_clu_client_with_key] + +if __name__ == '__main__': + sample_authentication_api_key() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml b/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml new file mode 100644 index 000000000000..901bc8ccbfa6 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/setup.cfg b/sdk/cognitivelanguage/azure-ai-language-conversations/setup.cfg new file mode 100644 index 000000000000..3c6e79cf31da --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/setup.py b/sdk/cognitivelanguage/azure-ai-language-conversations/setup.py new file mode 100644 index 000000000000..42ec6f386a01 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/setup.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import re +import os.path +from io import open +from setuptools import find_packages, setup + +# Change the PACKAGE_NAME only to change folder and different name +PACKAGE_NAME = "azure-ai-language-conversations" +PACKAGE_PPRINT_NAME = "Azure Conversational Language Understanding" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace('-', '/') +# a-b-c => a.b.c +namespace_name = PACKAGE_NAME.replace('-', '.') + +# azure v0.x is not compatible with this package +# azure v0.x used to have a __version__ attribute (newer versions don't) +try: + import azure + try: + ver = azure.__version__ + raise Exception( + 'This package is incompatible with azure=={}. '.format(ver) + + 'Uninstall it with "pip uninstall azure".' + ) + except AttributeError: + pass +except ImportError: + pass + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + +with open('README.md', encoding='utf-8') as f: + readme = f.read() +with open('CHANGELOG.md', encoding='utf-8') as f: + changelog = f.read() + +setup( + name=PACKAGE_NAME, + version=version, + include_package_data=True, + description='Microsoft {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), + long_description=readme + "\n\n" + changelog, + long_description_content_type='text/markdown', + license='MIT License', + author='Microsoft Corporation', + author_email='azpysdkhelp@microsoft.com', + url='https://github.com/Azure/azure-sdk-for-python', + classifiers=[ + "Development Status :: 4 - Beta", + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'License :: OSI Approved :: MIT License', + ], + zip_safe=False, + packages=find_packages(exclude=[ + 'tests', + # Exclude packages that will be covered by PEP420 or nspkg + 'azure.ai', + 'azure.ai.language', + ]), + install_requires=[ + "azure-core<2.0.0,>=1.19.0", + "msrest>=0.6.21", + 'azure-common~=1.1', + 'six>=1.11.0', + ], + extras_require={ + ":python_version<'3.0'": ['azure-ai-language-nspkg'], + ":python_version<'3.5'": ['typing'], + }, + project_urls={ + 'Bug Reports': 'https://github.com/Azure/azure-sdk-for-python/issues', + 'Source': 'https://github.com/Azure/azure-sdk-python', + } +) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/asynctestcase.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/asynctestcase.py new file mode 100644 index 000000000000..eef23d2678c3 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/asynctestcase.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import asyncio +import functools +from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function +from azure.core.credentials import AccessToken +from testcase import ConversationTest + + +class AsyncFakeTokenCredential(object): + """Protocol for classes able to provide OAuth tokens. + :param str scopes: Lets you specify the type of access needed. + """ + def __init__(self): + self.token = AccessToken("YOU SHALL NOT PASS", 0) + + async def get_token(self, *args): + return self.token + + +class AsyncConversationTest(ConversationTest): + + def generate_fake_token(self): + return AsyncFakeTokenCredential() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/conftest.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/conftest.py new file mode 100644 index 000000000000..755d2a9305fa --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/conftest.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys + + +# Ignore async tests for Python < 3.5 +collect_ignore_glob = [] +if sys.version_info < (3, 5): + collect_ignore_glob.append("*_async.py") + diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml new file mode 100644 index 000000000000..2c7a6cc30bcd --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml @@ -0,0 +1,52 @@ +interactions: +- request: + body: !!python/unicode '{"query": "One california maki please."}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-project&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"One california maki please.\",\n + \ \"prediction\": {\n \"intents\": [\n {\n \"category\": \"Order\",\n + \ \"confidenceScore\": 1\n }\n ],\n \"entities\": [\n {\n + \ \"category\": \"OrderItem\",\n \"text\": \"california maki\",\n + \ \"offset\": 4,\n \"length\": 15,\n \"confidenceScore\": + 1\n }\n ],\n \"topIntent\": \"Order\",\n \"projectType\": \"conversation\"\n + \ }\n}" + headers: + apim-request-id: + - 02b21bc7-d52c-48f4-8ecb-5ec8b95c0822 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 30 Sep 2021 17:41:07 GMT + pragma: + - no-cache + request-id: + - 02b21bc7-d52c-48f4-8ecb-5ec8b95c0822 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '126' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml new file mode 100644 index 000000000000..fb25b0bf0925 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml @@ -0,0 +1,52 @@ +interactions: +- request: + body: !!python/unicode '{"query": "One california maki please."}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-project&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"One california maki please.\",\n + \ \"prediction\": {\n \"intents\": [\n {\n \"category\": \"Order\",\n + \ \"confidenceScore\": 1\n }\n ],\n \"entities\": [\n {\n + \ \"category\": \"OrderItem\",\n \"text\": \"california maki\",\n + \ \"offset\": 4,\n \"length\": 15,\n \"confidenceScore\": + 1\n }\n ],\n \"topIntent\": \"Order\",\n \"projectType\": \"conversation\"\n + \ }\n}" + headers: + apim-request-id: + - 2c325546-f02f-43fd-afb0-e9d5c2f1b418 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 30 Sep 2021 17:41:09 GMT + pragma: + - no-cache + request-id: + - 2c325546-f02f-43fd-afb0-e9d5c2f1b418 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '73' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml new file mode 100644 index 000000000000..ce0fcdc9e420 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml @@ -0,0 +1,38 @@ +interactions: +- request: + body: '{"query": "One california maki please."}' + headers: + Accept: + - application/json + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"One california maki please.\",\n \"prediction\": + {\n \"intents\": [\n {\n \"category\": \"Order\",\n \"confidenceScore\": + 1\n }\n ],\n \"entities\": [\n {\n \"category\": \"OrderItem\",\n + \ \"text\": \"california maki\",\n \"offset\": 4,\n \"length\": + 15,\n \"confidenceScore\": 1\n }\n ],\n \"topIntent\": \"Order\",\n + \ \"projectType\": \"conversation\"\n }\n}" + headers: + apim-request-id: 577adef9-402b-4f6a-ae8b-abc1c82660a4 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 30 Sep 2021 16:56:53 GMT + pragma: no-cache + request-id: 577adef9-402b-4f6a-ae8b-abc1c82660a4 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '303' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-07-15-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml new file mode 100644 index 000000000000..79a376aa59e2 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml @@ -0,0 +1,38 @@ +interactions: +- request: + body: '{"query": "One california maki please."}' + headers: + Accept: + - application/json + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"One california maki please.\",\n \"prediction\": + {\n \"intents\": [\n {\n \"category\": \"Order\",\n \"confidenceScore\": + 1\n }\n ],\n \"entities\": [\n {\n \"category\": \"OrderItem\",\n + \ \"text\": \"california maki\",\n \"offset\": 4,\n \"length\": + 15,\n \"confidenceScore\": 1\n }\n ],\n \"topIntent\": \"Order\",\n + \ \"projectType\": \"conversation\"\n }\n}" + headers: + apim-request-id: 9ec258d5-b660-4f35-bacb-ef4ad6af3fd9 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 30 Sep 2021 16:56:54 GMT + pragma: no-cache + request-id: 9ec258d5-b660-4f35-bacb-ef4ad6af3fd9 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '51' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-07-15-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml new file mode 100644 index 000000000000..11e5169ed888 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml @@ -0,0 +1,215 @@ +interactions: +- request: + body: !!python/unicode '{"query": "How do you make sushi rice?"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"How do you make sushi rice?\",\n + \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": + \"question_answering\",\n \"result\": {\n \"answers\": [\n + \ {\n \"questions\": [\n \"do you eat + cake?\",\n \"do you ever eat beef?\",\n \"do + you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you + don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't + you ever get hungry?\",\n \"how many calories do you need?\",\n + \ \"What kind of food do you like?\",\n \"What + do you eat for dinner?\",\n \"What do you eat?\",\n \"What + kind of food do you eat?\",\n \"What is your favorite snack?\",\n + \ \"What is your favorite meal?\",\n \"what foods + do you eat?\",\n \"What do you want to eat?\",\n \"What + did you eat for lunch?\",\n \"What do you like to dine on?\",\n + \ \"What kind of foods do you like?\",\n \"What + do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n + \ \"What did you have for lunch?\",\n \"What + did you have for dinner?\",\n \"do you eat vegetables\",\n + \ \"What do you like to eat?\",\n \"will you + ever eat?\",\n \"Are you ever hungry?\",\n \"Do + you eat pasta?\",\n \"do you eat pizza?\",\n \"you + don't need to eat?\",\n \"you don't need food?\",\n \"What + kind of food do you like to eat?\",\n \"will you ever need + to eat?\",\n \"when do you eat?\",\n \"What's + your favorite cuisine?\",\n \"what kinds of foods do you like?\",\n + \ \"What kinds of food do you like to eat?\",\n \"What + kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n + \ \"you don't eat food?\",\n \"Do you eat?\",\n + \ \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: + - c674556f-5ac0-43cd-a1ca-4243b8b3c86a + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: + - Thu, 30 Sep 2021 17:41:11 GMT + pragma: + - no-cache + request-id: + - c674556f-5ac0-43cd-a1ca-4243b8b3c86a + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '812' + status: + code: 200 + message: OK +- request: + body: !!python/unicode '{"query": "I will have sashimi"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '32' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"I will have sashimi\",\n \"prediction\": + {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n + \ \"result\": {\n \"answers\": [\n {\n \"questions\": + [\n \"I could really use a hug\",\n \"Can I + get a little hug?\",\n \"A hug would be nice\",\n \"Can + we hug it out?\",\n \"Let's hug\",\n \"Can I + please get a hug?\",\n \"I want a hug\",\n \"I + could use a hug\",\n \"Can you hug me?\",\n \"Will + you give me a hug?\",\n \"Can I have a big hug?\",\n \"Can + I have a little hug?\",\n \"Can you give me a big hug?\",\n + \ \"Can you give me a hug?\",\n \"Can you give + me a little hug?\",\n \"I need a big hug\",\n \"I + need a hug\",\n \"Will you give me a big hug?\",\n \"Will + you hug me?\",\n \"Would you give me a big hug?\",\n \"Would + you give me a hug?\",\n \"Can I get a big hug?\",\n \"Can + I please have a hug?\",\n \"Can I get a hug?\",\n \"I + really need a hug\",\n \"Can we hug?\",\n \"Would + you give me a little hug?\",\n \"Let's hug it out\",\n \"I'd + love a hug\",\n \"I'd like a hug\",\n \"Do you + want to give me a hug?\"\n ],\n \"answer\": \"Giving + you a virtual hug right now.\",\n \"score\": 2.29,\n \"id\": + 67,\n \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.5102507\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.4897493\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: + - 998ec5bb-3bb7-4d2f-ae48-ba24283f6264 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: + - Thu, 30 Sep 2021 17:41:12 GMT + pragma: + - no-cache + request-id: + - 998ec5bb-3bb7-4d2f-ae48-ba24283f6264 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '737' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml new file mode 100644 index 000000000000..b36ae897cc57 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml @@ -0,0 +1,142 @@ +interactions: +- request: + body: !!python/unicode '{"query": "How do you make sushi rice?", "parameters": + {"SushiMaking": {"callingOptions": {"confidence_score_threshold": 0.1, "top": + 1, "question": "How do you make sushi rice?"}, "targetKind": "question_answering"}, + "SushiOrder": {"callingOptions": {"verbose": true}, "targetKind": "luis_deepstack"}}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '302' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"How do you make sushi rice?\",\n + \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": + \"question_answering\",\n \"result\": {\n \"answers\": [\n + \ {\n \"questions\": [\n \"do you eat + cake?\",\n \"do you ever eat beef?\",\n \"do + you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you + don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't + you ever get hungry?\",\n \"how many calories do you need?\",\n + \ \"What kind of food do you like?\",\n \"What + do you eat for dinner?\",\n \"What do you eat?\",\n \"What + kind of food do you eat?\",\n \"What is your favorite snack?\",\n + \ \"What is your favorite meal?\",\n \"what foods + do you eat?\",\n \"What do you want to eat?\",\n \"What + did you eat for lunch?\",\n \"What do you like to dine on?\",\n + \ \"What kind of foods do you like?\",\n \"What + do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n + \ \"What did you have for lunch?\",\n \"What + did you have for dinner?\",\n \"do you eat vegetables\",\n + \ \"What do you like to eat?\",\n \"will you + ever eat?\",\n \"Are you ever hungry?\",\n \"Do + you eat pasta?\",\n \"do you eat pizza?\",\n \"you + don't need to eat?\",\n \"you don't need food?\",\n \"What + kind of food do you like to eat?\",\n \"will you ever need + to eat?\",\n \"when do you eat?\",\n \"What's + your favorite cuisine?\",\n \"what kinds of foods do you like?\",\n + \ \"What kinds of food do you like to eat?\",\n \"What + kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n + \ \"you don't eat food?\",\n \"Do you eat?\",\n + \ \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: + - f270a6a8-c502-447b-ba35-ebf518b0f004 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: + - Thu, 30 Sep 2021 17:41:13 GMT + pragma: + - no-cache + request-id: + - f270a6a8-c502-447b-ba35-ebf518b0f004 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '471' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml new file mode 100644 index 000000000000..132ea8fff9f6 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml @@ -0,0 +1,142 @@ +interactions: +- request: + body: !!python/unicode '{"query": "(''How do you make sushi rice?'',)", "parameters": + {"SushiMaking": {"callingOptions": {"top": 1, "question": "(''How do you make + sushi rice?'',)", "confidenceScoreThreshold": 0.1}, "targetKind": "question_answering"}, + "SushiOrder": {"callingOptions": {"verbose": true}, "targetKind": "luis_deepstack"}}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '310' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production + response: + body: + string: !!python/unicode "{\n \"query\": \"('How do you make sushi rice?',)\",\n + \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": + \"question_answering\",\n \"result\": {\n \"answers\": [\n + \ {\n \"questions\": [\n \"do you eat + cake?\",\n \"do you ever eat beef?\",\n \"do + you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you + don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't + you ever get hungry?\",\n \"how many calories do you need?\",\n + \ \"What kind of food do you like?\",\n \"What + do you eat for dinner?\",\n \"What do you eat?\",\n \"What + kind of food do you eat?\",\n \"What is your favorite snack?\",\n + \ \"What is your favorite meal?\",\n \"what foods + do you eat?\",\n \"What do you want to eat?\",\n \"What + did you eat for lunch?\",\n \"What do you like to dine on?\",\n + \ \"What kind of foods do you like?\",\n \"What + do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n + \ \"What did you have for lunch?\",\n \"What + did you have for dinner?\",\n \"do you eat vegetables\",\n + \ \"What do you like to eat?\",\n \"will you + ever eat?\",\n \"Are you ever hungry?\",\n \"Do + you eat pasta?\",\n \"do you eat pizza?\",\n \"you + don't need to eat?\",\n \"you don't need food?\",\n \"What + kind of food do you like to eat?\",\n \"will you ever need + to eat?\",\n \"when do you eat?\",\n \"What's + your favorite cuisine?\",\n \"what kinds of foods do you like?\",\n + \ \"What kinds of food do you like to eat?\",\n \"What + kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n + \ \"you don't eat food?\",\n \"Do you eat?\",\n + \ \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.58619076\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.4138092\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: + - a28b94cb-e298-4a2c-838e-af7b67c1060f + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: + - Thu, 30 Sep 2021 17:41:15 GMT + pragma: + - no-cache + request-id: + - a28b94cb-e298-4a2c-838e-af7b67c1060f + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '330' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml new file mode 100644 index 000000000000..a5a0766b79f0 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml @@ -0,0 +1,186 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?"}' + headers: + Accept: + - application/json + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"How do you make sushi rice?\",\n \"prediction\": + {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n + \ \"result\": {\n \"answers\": [\n {\n \"questions\": + [\n \"do you eat cake?\",\n \"do you ever eat + beef?\",\n \"do you ever eat pizza?\",\n \"have + you ever eaten tofu?\",\n \"you don't eat?\",\n \"have + you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n + \ \"how many calories do you need?\",\n \"What + kind of food do you like?\",\n \"What do you eat for dinner?\",\n + \ \"What do you eat?\",\n \"What kind of food + do you eat?\",\n \"What is your favorite snack?\",\n \"What + is your favorite meal?\",\n \"what foods do you eat?\",\n \"What + do you want to eat?\",\n \"What did you eat for lunch?\",\n + \ \"What do you like to dine on?\",\n \"What + kind of foods do you like?\",\n \"What do you eat for lunch?\",\n + \ \"What do you eat for breakfast?\",\n \"What + did you have for lunch?\",\n \"What did you have for dinner?\",\n + \ \"do you eat vegetables\",\n \"What do you + like to eat?\",\n \"will you ever eat?\",\n \"Are + you ever hungry?\",\n \"Do you eat pasta?\",\n \"do + you eat pizza?\",\n \"you don't need to eat?\",\n \"you + don't need food?\",\n \"What kind of food do you like to eat?\",\n + \ \"will you ever need to eat?\",\n \"when do + you eat?\",\n \"What's your favorite cuisine?\",\n \"what + kinds of foods do you like?\",\n \"What kinds of food do you + like to eat?\",\n \"What kinds of food do you eat?\",\n \"What + did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do + you eat?\",\n \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: 1685ca0c-6a9e-407b-883c-3edabb16a15d + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: Thu, 30 Sep 2021 16:57:03 GMT + pragma: no-cache + request-id: 1685ca0c-6a9e-407b-883c-3edabb16a15d + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '246' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview +- request: + body: '{"query": "I will have sashimi"}' + headers: + Accept: + - application/json + Content-Length: + - '32' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"I will have sashimi\",\n \"prediction\": {\n \"intents\": + {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n + \ \"result\": {\n \"answers\": [\n {\n \"questions\": + [\n \"I could really use a hug\",\n \"Can I + get a little hug?\",\n \"A hug would be nice\",\n \"Can + we hug it out?\",\n \"Let's hug\",\n \"Can I + please get a hug?\",\n \"I want a hug\",\n \"I + could use a hug\",\n \"Can you hug me?\",\n \"Will + you give me a hug?\",\n \"Can I have a big hug?\",\n \"Can + I have a little hug?\",\n \"Can you give me a big hug?\",\n + \ \"Can you give me a hug?\",\n \"Can you give + me a little hug?\",\n \"I need a big hug\",\n \"I + need a hug\",\n \"Will you give me a big hug?\",\n \"Will + you hug me?\",\n \"Would you give me a big hug?\",\n \"Would + you give me a hug?\",\n \"Can I get a big hug?\",\n \"Can + I please have a hug?\",\n \"Can I get a hug?\",\n \"I + really need a hug\",\n \"Can we hug?\",\n \"Would + you give me a little hug?\",\n \"Let's hug it out\",\n \"I'd + love a hug\",\n \"I'd like a hug\",\n \"Do you + want to give me a hug?\"\n ],\n \"answer\": \"Giving + you a virtual hug right now.\",\n \"score\": 2.29,\n \"id\": + 67,\n \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.5102507\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.4897493\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: d71eeb28-556b-4b94-a0fe-b650f982bf05 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: Thu, 30 Sep 2021 16:57:03 GMT + pragma: no-cache + request-id: d71eeb28-556b-4b94-a0fe-b650f982bf05 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '204' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml new file mode 100644 index 000000000000..62caf86d9677 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml @@ -0,0 +1,127 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": {"question": "How do + you make sushi rice?", "top": 1, "confidence_score_threshold": 0.1}}, "SushiOrder": + {"targetKind": "luis_deepstack", "callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Content-Length: + - '302' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"How do you make sushi rice?\",\n \"prediction\": + {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n + \ \"result\": {\n \"answers\": [\n {\n \"questions\": + [\n \"do you eat cake?\",\n \"do you ever eat + beef?\",\n \"do you ever eat pizza?\",\n \"have + you ever eaten tofu?\",\n \"you don't eat?\",\n \"have + you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n + \ \"how many calories do you need?\",\n \"What + kind of food do you like?\",\n \"What do you eat for dinner?\",\n + \ \"What do you eat?\",\n \"What kind of food + do you eat?\",\n \"What is your favorite snack?\",\n \"What + is your favorite meal?\",\n \"what foods do you eat?\",\n \"What + do you want to eat?\",\n \"What did you eat for lunch?\",\n + \ \"What do you like to dine on?\",\n \"What + kind of foods do you like?\",\n \"What do you eat for lunch?\",\n + \ \"What do you eat for breakfast?\",\n \"What + did you have for lunch?\",\n \"What did you have for dinner?\",\n + \ \"do you eat vegetables\",\n \"What do you + like to eat?\",\n \"will you ever eat?\",\n \"Are + you ever hungry?\",\n \"Do you eat pasta?\",\n \"do + you eat pizza?\",\n \"you don't need to eat?\",\n \"you + don't need food?\",\n \"What kind of food do you like to eat?\",\n + \ \"will you ever need to eat?\",\n \"when do + you eat?\",\n \"What's your favorite cuisine?\",\n \"what + kinds of foods do you like?\",\n \"What kinds of food do you + like to eat?\",\n \"What kinds of food do you eat?\",\n \"What + did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do + you eat?\",\n \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: dedc30b9-bec0-48c0-8f54-0e40b3964ebe + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: Thu, 30 Sep 2021 16:57:05 GMT + pragma: no-cache + request-id: dedc30b9-bec0-48c0-8f54-0e40b3964ebe + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '364' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml new file mode 100644 index 000000000000..787d7d3ace40 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml @@ -0,0 +1,127 @@ +interactions: +- request: + body: '{"query": "(''How do you make sushi rice?'',)", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": {"question": "(''How + do you make sushi rice?'',)", "top": 1, "confidenceScoreThreshold": 0.1}}, "SushiOrder": + {"targetKind": "luis_deepstack", "callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Content-Length: + - '310' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview + response: + body: + string: "{\n \"query\": \"('How do you make sushi rice?',)\",\n \"prediction\": + {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n + \ \"result\": {\n \"answers\": [\n {\n \"questions\": + [\n \"do you eat cake?\",\n \"do you ever eat + beef?\",\n \"do you ever eat pizza?\",\n \"have + you ever eaten tofu?\",\n \"you don't eat?\",\n \"have + you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n + \ \"how many calories do you need?\",\n \"What + kind of food do you like?\",\n \"What do you eat for dinner?\",\n + \ \"What do you eat?\",\n \"What kind of food + do you eat?\",\n \"What is your favorite snack?\",\n \"What + is your favorite meal?\",\n \"what foods do you eat?\",\n \"What + do you want to eat?\",\n \"What did you eat for lunch?\",\n + \ \"What do you like to dine on?\",\n \"What + kind of foods do you like?\",\n \"What do you eat for lunch?\",\n + \ \"What do you eat for breakfast?\",\n \"What + did you have for lunch?\",\n \"What did you have for dinner?\",\n + \ \"do you eat vegetables\",\n \"What do you + like to eat?\",\n \"will you ever eat?\",\n \"Are + you ever hungry?\",\n \"Do you eat pasta?\",\n \"do + you eat pizza?\",\n \"you don't need to eat?\",\n \"you + don't need food?\",\n \"What kind of food do you like to eat?\",\n + \ \"will you ever need to eat?\",\n \"when do + you eat?\",\n \"What's your favorite cuisine?\",\n \"what + kinds of foods do you like?\",\n \"What kinds of food do you + like to eat?\",\n \"What kinds of food do you eat?\",\n \"What + did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do + you eat?\",\n \"do you need calories to survive?\",\n \"Do + you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n + \ \"Do you get hungry?\",\n \"do you ever need + to eat?\",\n \"What did you have for breakfast?\",\n \"do + you ever eat food?\",\n \"do you need food?\",\n \"do + you eat food?\",\n \"do you consume food?\",\n \"Are + you hungry?\",\n \"Are you going to have lunch?\",\n \"Are + you going to have dinner?\",\n \"Are you going to have breakfast?\",\n + \ \"Do you ever get hungry?\",\n \"have you ever + wanted a snack?\",\n \"What did you eat for breakfast?\",\n + \ \"so you don't eat?\",\n \"how many calories + do you need to eat?\",\n \"how many calories do you need each + day?\",\n \"how many calories do you eat?\",\n \"do + you need calories?\",\n \"have you ever wanted food?\",\n \"do + you need food to survive?\",\n \"have you ever wanted a meal?\",\n + \ \"have you ever been hungry?\",\n \"Don't you + get hungry?\",\n \"do you not need to eat?\",\n \"do + you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so + you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have + you ever eaten toast?\",\n \"do you eat toast?\",\n \"do + you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n + \ \"do you eat bread?\",\n \"so you've really + never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do + you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have + you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do + you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true + or false: you don't get hungry\",\n \"do you eat tofu?\",\n + \ \"do you ever eat pork?\",\n \"have you ever + eaten pork?\",\n \"do you eat pork?\",\n \"so + you never eat?\",\n \"do you eat beef?\",\n \"so + you've really never eaten?\",\n \"true or false: you don't + eat\",\n \"tell me whether or not you eat\",\n \"is + it true that you don't eat?\",\n \"so you've never really eaten + food?\",\n \"so you've never really eaten anything?\",\n \"do + you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do + you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n + \ \"have you ever eaten vegetables?\",\n \"have + you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do + you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do + you ever eat vegetables?\",\n \"do you eat ice cream?\",\n + \ \"have you ever eaten pasta?\",\n \"do you + ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do + you eat pie?\",\n \"do you ever eat cookies?\",\n \"do + you eat steak?\",\n \"do you ever eat fries?\",\n \"have + you ever eaten fries?\",\n \"do you eat fries?\",\n \"do + you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n + \ \"do you eat burgers?\",\n \"have you ever + eaten pie?\",\n \"have you ever eaten steak?\",\n \"have + you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have + you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do + you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n + \ \"do you ever eat tofu?\",\n \"do you ever + eat steak?\"\n ],\n \"answer\": \"I only do food + for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n + \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": + false,\n \"metadata\": [\n {\n \"name\": + \"editorial\",\n \"value\": \"chitchat\"\n }\n + \ ],\n \"context\": {\n \"isContextOnly\": + false,\n \"prompts\": []\n }\n }\n + \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": + 0.58619076\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n + \ \"confidenceScore\": 0.4138092\n },\n \"None\": {\n \"targetType\": + \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": + \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" + headers: + apim-request-id: d8dde644-cd13-4f84-9466-797cbfda2428 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 + date: Thu, 30 Sep 2021 16:57:06 GMT + pragma: no-cache + request-id: d8dde644-cd13-4f84-9466-797cbfda2428 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '234' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py new file mode 100644 index 000000000000..8dd770ff9b4c --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import ( + ConversationTest, + GlobalConversationAccountPreparer +) + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + DeepstackPrediction +) + + +class ConversationAppTests(ConversationTest): + + @GlobalConversationAccountPreparer() + def test_conversation_app(self, conv_account, conv_key, conv_project): + + # prepare data + query = "One california maki please." + input = AnalyzeConversationOptions( + query=query, + ) + + # analyze quey + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, DeepstackPrediction) + assert result.prediction.project_kind == 'conversation' + assert result.prediction.top_intent == 'Order' + assert len(result.prediction.entities) > 0 + assert len(result.prediction.intents) > 0 + assert result.prediction.intents[0].category == 'Order' + assert result.prediction.intents[0].confidence_score > 0 + assert result.prediction.entities[0].category == 'OrderItem' + assert result.prediction.entities[0].text == 'california maki' + assert result.prediction.entities[0].confidence_score > 0 + + + @GlobalConversationAccountPreparer() + def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_project): + + # prepare data + query = "One california maki please." + params = { + "query": query, + } + + # analyze quey + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + params, + project_name=conv_project, + deployment_name='production' + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, DeepstackPrediction) + assert result.prediction.project_kind == 'conversation' + assert result.prediction.top_intent == 'Order' + assert len(result.prediction.entities) > 0 + assert len(result.prediction.intents) > 0 + assert result.prediction.intents[0].category == 'Order' + assert result.prediction.intents[0].confidence_score > 0 + assert result.prediction.entities[0].category == 'OrderItem' + assert result.prediction.entities[0].text == 'california maki' + assert result.prediction.entities[0].confidence_score > 0 + \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py new file mode 100644 index 000000000000..b0ad647aee85 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import GlobalConversationAccountPreparer +from asynctestcase import AsyncConversationTest + +from azure.ai.language.conversations.aio import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + DeepstackPrediction +) + + +class ConversationAppAsyncTests(AsyncConversationTest): + + @GlobalConversationAccountPreparer() + async def test_conversation_app(self, conv_account, conv_key, conv_project): + + # prepare data + query = "One california maki please." + input = AnalyzeConversationOptions( + query=query, + ) + + # analyze quey + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, DeepstackPrediction) + assert result.prediction.project_kind == 'conversation' + assert result.prediction.top_intent == 'Order' + assert len(result.prediction.entities) > 0 + assert len(result.prediction.intents) > 0 + assert result.prediction.intents[0].category == 'Order' + assert result.prediction.intents[0].confidence_score > 0 + assert result.prediction.entities[0].category == 'OrderItem' + assert result.prediction.entities[0].text == 'california maki' + assert result.prediction.entities[0].confidence_score > 0 + + @GlobalConversationAccountPreparer() + async def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_project): + + # prepare data + query = "One california maki please." + params = { + "query": query, + } + + # analyze quey + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + params, + project_name=conv_project, + deployment_name='production' + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, DeepstackPrediction) + assert result.prediction.project_kind == 'conversation' + assert result.prediction.top_intent == 'Order' + assert len(result.prediction.entities) > 0 + assert len(result.prediction.intents) > 0 + assert result.prediction.intents[0].category == 'Order' + assert result.prediction.intents[0].confidence_score > 0 + assert result.prediction.entities[0].category == 'OrderItem' + assert result.prediction.entities[0].text == 'california maki' + assert result.prediction.entities[0].confidence_score > 0 + + \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py new file mode 100644 index 000000000000..98ea790b3462 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import ( + ConversationTest, + GlobalConversationAccountPreparer +) + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + QuestionAnsweringParameters, + DeepstackParameters, + DeepstackCallingOptions, + QuestionAnsweringTargetIntentResult, + WorkflowPrediction, + DSTargetIntentResult +) + +class WorkflowAppTests(ConversationTest): + + @GlobalConversationAccountPreparer() + def test_workflow_app(self, conv_account, conv_key, workflow_project): + + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + + # analyze query + query = "How do you make sushi rice?" + result = client.analyze_conversations( + {"query": query}, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + # analyze query + query = "I will have sashimi" + result = client.analyze_conversations( + {"query": query}, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + # assert result.prediction.top_intent == "SushiOrder" --> wrong top intent! + # assert isinstance(result.prediction.intents, DSTargetIntentResult) + + + @GlobalConversationAccountPreparer() + def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + # assert result.query == query --> weird behavior here! + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + + @GlobalConversationAccountPreparer() + def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question":query, + "top":1, + "confidence_score_threshold":0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options=DeepstackCallingOptions( + verbose=True + ) + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py new file mode 100644 index 000000000000..78052780d63e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import GlobalConversationAccountPreparer +from asynctestcase import AsyncConversationTest + +from azure.ai.language.conversations.aio import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + AnalyzeConversationOptions, + AnalyzeConversationResult, + QuestionAnsweringParameters, + DeepstackParameters, + DeepstackCallingOptions, + QuestionAnsweringTargetIntentResult, + WorkflowPrediction, + DSTargetIntentResult +) + +class WorkflowAppAsyncTests(AsyncConversationTest): + + @GlobalConversationAccountPreparer() + async def test_workflow_app(self, conv_account, conv_key, workflow_project): + + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + + # analyze query + query = "How do you make sushi rice?" + result = await client.analyze_conversations( + {"query": query}, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + # analyze query + query = "I will have sashimi" + result = await client.analyze_conversations( + {"query": query}, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + # assert result.prediction.top_intent == "SushiOrder" --> wrong top intent! + # assert isinstance(result.prediction.intents, DSTargetIntentResult) + + + @GlobalConversationAccountPreparer() + async def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?", + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + # assert result.query == query --> weird behavior here! + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + + @GlobalConversationAccountPreparer() + async def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + input = AnalyzeConversationOptions( + query=query, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question":query, + "top":1, + "confidence_score_threshold":0.1 + } + ), + "SushiOrder": DeepstackParameters( + calling_options=DeepstackCallingOptions( + verbose=True + ) + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == "SushiMaking" + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py new file mode 100644 index 000000000000..02f2aac6a7e6 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import ( + ConversationTest, + GlobalConversationAccountPreparer +) + +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + QuestionAnsweringParameters, + DeepstackParameters, + WorkflowPrediction, + QuestionAnsweringTargetIntentResult, + DSTargetIntentResult, + LUISTargetIntentResult +) + + +class WorkflowAppDirectTests(ConversationTest): + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + target_intent = "SushiMaking" + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + def test_kb_intent_with_model(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + target_intent = "SushiMaking" + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + caling_options={ + "question":query, + "top":1, + "confidence_score_threshold":0.1 + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + def test_deepstack_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "I will have the oyako donburi please." + target_intent = "SushiOrder" + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True, + } + ) + } + ) + + # analyze query + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, DSTargetIntentResult) + + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + def test_luis_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "I will have the oyako donburi please." + target_intent = "SushiOrder" + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True, + } + ) + } + ) + + # analyze query + with client: + result = client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, LUISTargetIntentResult) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py new file mode 100644 index 000000000000..982763cab607 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +from azure.core.exceptions import HttpResponseError, ClientAuthenticationError +from azure.core.credentials import AzureKeyCredential + +from testcase import GlobalConversationAccountPreparer +from asynctestcase import AsyncConversationTest + +from azure.ai.language.conversations.aio import ConversationAnalysisClient +from azure.ai.language.conversations.models import ( + AnalyzeConversationOptions, + AnalyzeConversationResult, + QuestionAnsweringParameters, + DeepstackParameters, + WorkflowPrediction, + QuestionAnsweringTargetIntentResult, + DSTargetIntentResult, + LUISTargetIntentResult +) + +class WorkflowAppDirectAsyncTests(AsyncConversationTest): + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + async def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + target_intent = "SushiMaking" + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + async def test_kb_intent_with_model(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "How do you make sushi rice?" + target_intent = "SushiMaking" + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiMaking": QuestionAnsweringParameters( + calling_options={ + "question": query, + "top": 1, + "confidenceScoreThreshold": 0.1 + } + ) + } + ) + + # analyze query + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + async def test_deepstack_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "I will have the oyako donburi please." + target_intent = "SushiOrder" + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True, + } + ) + } + ) + + # analyze query + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, DSTargetIntentResult) + + @pytest.mark.skip(reason="internal server error!") + @GlobalConversationAccountPreparer() + async def test_luis_intent(self, conv_account, conv_key, workflow_project): + + # prepare data + query = "I will have the oyako donburi please." + target_intent = "SushiOrder" + client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) + input = AnalyzeConversationOptions( + query=query, + direct_target=target_intent, + parameters={ + "SushiOrder": DeepstackParameters( + calling_options={ + "verbose": True, + } + ) + } + ) + + # analyze query + async with client: + result = await client.analyze_conversations( + input, + project_name=workflow_project, + deployment_name='production', + ) + + # assert + assert isinstance(result, AnalyzeConversationResult) + assert result.query == query + assert isinstance(result.prediction, WorkflowPrediction) + assert result.prediction.project_kind == "workflow" + assert result.prediction.top_intent == target_intent + # assert isinstance(result.prediction.intents, LUISTargetIntentResult) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py new file mode 100644 index 000000000000..7894ef03185c --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import os +import pytest + +from azure.core.credentials import AccessToken, AzureKeyCredential +from devtools_testutils import ( + AzureTestCase, + AzureMgmtPreparer, + FakeResource, + ResourceGroupPreparer, +) +from devtools_testutils.cognitiveservices_testcase import CognitiveServicesAccountPreparer +from azure_devtools.scenario_tests import ReplayableTest + + +REGION = 'westus2' + + +class FakeTokenCredential(object): + """Protocol for classes able to provide OAuth tokens. + :param str scopes: Lets you specify the type of access needed. + """ + def __init__(self): + self.token = AccessToken("YOU SHALL NOT PASS", 0) + + def get_token(self, *args): + return self.token + +TEST_ENDPOINT = 'https://test-resource.api.cognitive.microsoft.com' +TEST_KEY = '0000000000000000' +TEST_PROJECT = 'test-project' +TEST_WORKFLOW = 'test-workflow' + + +class ConversationTest(AzureTestCase): + FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key'] + + def __init__(self, method_name): + super(ConversationTest, self).__init__(method_name) + self.scrubber.register_name_pair(os.environ.get("AZURE_CONVERSATIONS_ENDPOINT"), TEST_ENDPOINT) + self.scrubber.register_name_pair(os.environ.get("AZURE_CONVERSATIONS_KEY"), TEST_KEY) + self.scrubber.register_name_pair(os.environ.get("AZURE_CONVERSATIONS_PROJECT"), TEST_PROJECT) + self.scrubber.register_name_pair(os.environ.get("AZURE_CONVERSATIONS_WORKFLOW_PROJECT"), TEST_WORKFLOW) + + def generate_fake_token(self): + return FakeTokenCredential() + + +class GlobalResourceGroupPreparer(AzureMgmtPreparer): + def __init__(self): + super(GlobalResourceGroupPreparer, self).__init__( + name_prefix='', + random_name_length=42 + ) + + def create_resource(self, name, **kwargs): + rg = FakeResource( + name="rgname", + id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rgname" + ) + + return { + 'location': REGION, + 'resource_group': rg, + } + + +class GlobalConversationAccountPreparer(AzureMgmtPreparer): + def __init__(self): + super(GlobalConversationAccountPreparer, self).__init__( + name_prefix='', + random_name_length=42 + ) + + def create_resource(self, name, **kwargs): + if self.is_live: + return { + 'location': REGION, + 'resource_group': "rgname", + 'conv_account': os.environ.get("AZURE_CONVERSATIONS_ENDPOINT"), + 'conv_key': os.environ.get("AZURE_CONVERSATIONS_KEY"), + 'conv_project': os.environ.get("AZURE_CONVERSATIONS_PROJECT"), + 'workflow_project': os.environ.get("AZURE_CONVERSATIONS_WORKFLOW_PROJECT") + } + return { + 'location': REGION, + 'resource_group': "rgname", + 'conv_account': TEST_ENDPOINT, + 'conv_key': TEST_KEY, + 'conv_project': TEST_PROJECT, + 'workflow_project': TEST_WORKFLOW + } diff --git a/sdk/cognitivelanguage/ci.yml b/sdk/cognitivelanguage/ci.yml index 15f72f532b70..91df5c89b6d3 100644 --- a/sdk/cognitivelanguage/ci.yml +++ b/sdk/cognitivelanguage/ci.yml @@ -3,7 +3,6 @@ trigger: branches: include: - - master - main - hotfix/* - release/* @@ -11,12 +10,11 @@ trigger: paths: include: - sdk/cognitivelanguage/ - - scripts/ + - sdk/core/ pr: branches: include: - - master - main - feature/* - hotfix/* @@ -25,6 +23,7 @@ pr: paths: include: - sdk/cognitivelanguage/ + - sdk/core/ extends: template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml @@ -32,4 +31,6 @@ extends: ServiceDirectory: cognitivelanguage Artifacts: - name: azure-ai-language-questionanswering - safeName: questionanswering \ No newline at end of file + safeName: azureailanguagequestionanswering + - name: azure-ai-language-conversations + safeName: azureailanguageconversations \ No newline at end of file diff --git a/shared_requirements.txt b/shared_requirements.txt index 3c9254069d21..b3476641f7ff 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -349,3 +349,5 @@ opentelemetry-sdk<2.0.0,>=1.0.0 #override azure-mgmt-authorization msrest>=0.6.21 #override azure-mgmt-azurearcdata msrest>=0.6.21 #override azure-mgmt-fluidrelay msrest>=0.6.21 +#override azure-ai-language-conversations azure-core<2.0.0,>=1.19.0 +#override azure-ai-language-conversations msrest>=0.6.21