From 0a9b8ed36f28b1ae07f8d40eaaada04704b38668 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Fri, 15 Sep 2023 11:57:59 -0700 Subject: [PATCH] Release v1.45.11 (2023-09-15) (#4990) Release v1.45.11 (2023-09-15) === ### Service Client Updates * `service/appstream`: Updates service API, documentation, and waiters * This release introduces app block builder, allowing customers to provision a resource to package applications into an app block * `service/connect`: Updates service API * `service/datasync`: Updates service documentation * `service/sagemaker`: Updates service API and documentation * This release introduces Skip Model Validation for Model Packages --- CHANGELOG.md | 11 ++ aws/endpoints/defaults.go | 12 ++ aws/version.go | 2 +- models/apis/appstream/2016-12-01/api-2.json | 34 ++-- models/apis/appstream/2016-12-01/docs-2.json | 20 +-- .../apis/appstream/2016-12-01/waiters-2.json | 12 +- models/apis/connect/2017-08-08/api-2.json | 3 +- models/apis/datasync/2018-11-09/docs-2.json | 4 +- models/apis/sagemaker/2017-07-24/api-2.json | 16 +- models/apis/sagemaker/2017-07-24/docs-2.json | 62 ++++--- models/endpoints/endpoints.json | 4 + service/appstream/api.go | 161 ++++-------------- service/appstream/waiters.go | 12 +- service/connect/api.go | 4 + service/datasync/api.go | 8 +- service/sagemaker/api.go | 144 ++++++++++------ 16 files changed, 245 insertions(+), 264 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fd87bef2cf..1d8089e99a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.45.11 (2023-09-15) +=== + +### Service Client Updates +* `service/appstream`: Updates service API, documentation, and waiters + * This release introduces app block builder, allowing customers to provision a resource to package applications into an app block +* `service/connect`: Updates service API +* `service/datasync`: Updates service documentation +* `service/sagemaker`: Updates service API and documentation + * This release introduces Skip Model Validation for Model Packages + Release v1.45.10 (2023-09-14) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index d6a3377b15b..86082f706ad 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -7212,6 +7212,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7242,12 +7245,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7260,6 +7269,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 469a720b246..b3dd2d694e3 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.10" +const SDKVersion = "1.45.11" diff --git a/models/apis/appstream/2016-12-01/api-2.json b/models/apis/appstream/2016-12-01/api-2.json index 85b33be9ac6..6a083b058d9 100644 --- 
a/models/apis/appstream/2016-12-01/api-2.json +++ b/models/apis/appstream/2016-12-01/api-2.json @@ -1487,9 +1487,9 @@ }, "ComputeCapacity":{ "type":"structure", + "required":["DesiredInstances"], "members":{ - "DesiredInstances":{"shape":"Integer"}, - "DesiredSessions":{"shape":"Integer"} + "DesiredInstances":{"shape":"Integer"} } }, "ComputeCapacityStatus":{ @@ -1499,11 +1499,7 @@ "Desired":{"shape":"Integer"}, "Running":{"shape":"Integer"}, "InUse":{"shape":"Integer"}, - "Available":{"shape":"Integer"}, - "DesiredUserSessions":{"shape":"Integer"}, - "AvailableUserSessions":{"shape":"Integer"}, - "ActiveUserSessions":{"shape":"Integer"}, - "ActualUserSessions":{"shape":"Integer"} + "Available":{"shape":"Integer"} } }, "ConcurrentModificationException":{ @@ -1696,8 +1692,7 @@ "Platform":{"shape":"PlatformType"}, "MaxConcurrentSessions":{"shape":"Integer"}, "UsbDeviceFilterStrings":{"shape":"UsbDeviceFilterStrings"}, - "SessionScriptS3Location":{"shape":"S3Location"}, - "MaxSessionsPerInstance":{"shape":"Integer"} + "SessionScriptS3Location":{"shape":"S3Location"} } }, "CreateFleetResult":{ @@ -2191,13 +2186,12 @@ "FleetName" ], "members":{ - "StackName":{"shape":"Name"}, - "FleetName":{"shape":"Name"}, + "StackName":{"shape":"String"}, + "FleetName":{"shape":"String"}, "UserId":{"shape":"UserId"}, "NextToken":{"shape":"String"}, "Limit":{"shape":"Integer"}, - "AuthenticationType":{"shape":"AuthenticationType"}, - "InstanceId":{"shape":"String"} + "AuthenticationType":{"shape":"AuthenticationType"} } }, "DescribeSessionsResult":{ @@ -2546,8 +2540,7 @@ "Platform":{"shape":"PlatformType"}, "MaxConcurrentSessions":{"shape":"Integer"}, "UsbDeviceFilterStrings":{"shape":"UsbDeviceFilterStrings"}, - "SessionScriptS3Location":{"shape":"S3Location"}, - "MaxSessionsPerInstance":{"shape":"Integer"} + "SessionScriptS3Location":{"shape":"S3Location"} } }, "FleetAttribute":{ @@ -2558,8 +2551,7 @@ "DOMAIN_JOIN_INFO", "IAM_ROLE_ARN", "USB_DEVICE_FILTER_STRINGS", - "SESSION_SCRIPT_S3_LOCATION", - "MAX_SESSIONS_PER_INSTANCE" + "SESSION_SCRIPT_S3_LOCATION" ] }, "FleetAttributes":{ @@ -3071,8 +3063,7 @@ "StartTime":{"shape":"Timestamp"}, "MaxExpirationTime":{"shape":"Timestamp"}, "AuthenticationType":{"shape":"AuthenticationType"}, - "NetworkAccessConfiguration":{"shape":"NetworkAccessConfiguration"}, - "InstanceId":{"shape":"String"} + "NetworkAccessConfiguration":{"shape":"NetworkAccessConfiguration"} } }, "SessionConnectionState":{ @@ -3447,7 +3438,7 @@ "members":{ "ImageName":{"shape":"String"}, "ImageArn":{"shape":"Arn"}, - "Name":{"shape":"Name"}, + "Name":{"shape":"String"}, "InstanceType":{"shape":"String"}, "ComputeCapacity":{"shape":"ComputeCapacity"}, "VpcConfig":{"shape":"VpcConfig"}, @@ -3468,8 +3459,7 @@ "Platform":{"shape":"PlatformType"}, "MaxConcurrentSessions":{"shape":"Integer"}, "UsbDeviceFilterStrings":{"shape":"UsbDeviceFilterStrings"}, - "SessionScriptS3Location":{"shape":"S3Location"}, - "MaxSessionsPerInstance":{"shape":"Integer"} + "SessionScriptS3Location":{"shape":"S3Location"} } }, "UpdateFleetResult":{ diff --git a/models/apis/appstream/2016-12-01/docs-2.json b/models/apis/appstream/2016-12-01/docs-2.json index 018893bf58f..28bfa281213 100644 --- a/models/apis/appstream/2016-12-01/docs-2.json +++ b/models/apis/appstream/2016-12-01/docs-2.json @@ -1319,20 +1319,14 @@ "base": null, "refs": { "ComputeCapacity$DesiredInstances": "

The desired number of streaming instances.

", - "ComputeCapacity$DesiredSessions": "

The desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets.

When you create a fleet, you must set either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can’t define both attributes or leave both attributes blank.

", "ComputeCapacityStatus$Desired": "

The desired number of streaming instances.

", "ComputeCapacityStatus$Running": "

The total number of simultaneous streaming instances that are running.

", "ComputeCapacityStatus$InUse": "

The number of instances in use for streaming.

", "ComputeCapacityStatus$Available": "

The number of currently available instances that can be used to stream sessions.

", - "ComputeCapacityStatus$DesiredUserSessions": "

The total number of sessions slots that are either running or pending. This represents the total number of concurrent streaming sessions your fleet can support in a steady state.

DesiredUserSessionCapacity = ActualUserSessionCapacity + PendingUserSessionCapacity

This only applies to multi-session fleets.

", - "ComputeCapacityStatus$AvailableUserSessions": "

The number of idle session slots currently available for user sessions.

AvailableUserSessionCapacity = ActualUserSessionCapacity - ActiveUserSessions

This only applies to multi-session fleets.

", - "ComputeCapacityStatus$ActiveUserSessions": "

The number of user sessions currently being used for streaming sessions. This only applies to multi-session fleets.

", - "ComputeCapacityStatus$ActualUserSessions": "

The total number of session slots that are available for streaming or are currently streaming.

ActualUserSessionCapacity = AvailableUserSessionCapacity + ActiveUserSessions

This only applies to multi-session fleets.

", - "CreateFleetRequest$MaxUserDurationInSeconds": "

The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.

Specify a value between 600 and 432000.

", + "CreateFleetRequest$MaxUserDurationInSeconds": "

The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.

Specify a value between 600 and 360000.

", "CreateFleetRequest$DisconnectTimeoutInSeconds": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

", "CreateFleetRequest$IdleDisconnectTimeoutInSeconds": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.
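As an illustration of how these timeout settings fit together (a minimal sketch, not part of this patch; the fleet name, image, and instance type below are placeholder values), a CreateFleet call with aws-sdk-go might look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := appstream.New(sess)

	out, err := svc.CreateFleet(&appstream.CreateFleetInput{
		Name:         aws.String("example-fleet"),            // placeholder
		ImageName:    aws.String("example-image"),            // placeholder
		InstanceType: aws.String("stream.standard.medium"),   // placeholder
		ComputeCapacity: &appstream.ComputeCapacity{
			DesiredInstances: aws.Int64(2),
		},
		MaxUserDurationInSeconds:       aws.Int64(57600), // maximum session length (16 hours)
		DisconnectTimeoutInSeconds:     aws.Int64(300),   // grace period to rejoin after a disconnect
		IdleDisconnectTimeoutInSeconds: aws.Int64(600),   // a whole number of minutes, per the note above
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Fleet.Arn))
}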

", "CreateFleetRequest$MaxConcurrentSessions": "

The maximum number of concurrent sessions for the Elastic fleet. This is required for Elastic fleets, and not allowed for other fleet types.

", - "CreateFleetRequest$MaxSessionsPerInstance": "

The maximum number of user sessions on an instance. This only applies to multi-session fleets.

", "DescribeAppBlockBuilderAppBlockAssociationsRequest$MaxResults": "

The maximum size of each page of results.

", "DescribeAppBlockBuildersRequest$MaxResults": "

The maximum size of each page of results. The maximum value is 25.

", "DescribeAppBlocksRequest$MaxResults": "

The maximum size of each page of results.

", @@ -1348,14 +1342,12 @@ "Fleet$DisconnectTimeoutInSeconds": "

The amount of time that a streaming session remains active after users disconnect. If they try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

", "Fleet$IdleDisconnectTimeoutInSeconds": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

", "Fleet$MaxConcurrentSessions": "

The maximum number of concurrent sessions for the fleet.

", - "Fleet$MaxSessionsPerInstance": "

The maximum number of user sessions on an instance. This only applies to multi-session fleets.

", "ListEntitledApplicationsRequest$MaxResults": "

The maximum size of each page of results.

", "ScriptDetails$TimeoutInSeconds": "

The run timeout, in seconds, for the script.

", "UpdateFleetRequest$MaxUserDurationInSeconds": "

The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.

Specify a value between 600 and 432000.

", "UpdateFleetRequest$DisconnectTimeoutInSeconds": "

The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.

Specify a value between 60 and 360000.

", "UpdateFleetRequest$IdleDisconnectTimeoutInSeconds": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

", - "UpdateFleetRequest$MaxConcurrentSessions": "

The maximum number of concurrent sessions for a fleet.

", - "UpdateFleetRequest$MaxSessionsPerInstance": "

The maximum number of user sessions on an instance. This only applies to multi-session fleets.

" + "UpdateFleetRequest$MaxConcurrentSessions": "

The maximum number of concurrent sessions for a fleet.

" } }, "InvalidAccountStatusException": { @@ -1494,8 +1486,6 @@ "DescribeEntitlementsRequest$StackName": "

The name of the stack with which the entitlement is associated.

", "DescribeImagePermissionsRequest$Name": "

The name of the private image for which to describe permissions. The image must be one that you own.

", "DescribeImagePermissionsResult$Name": "

The name of the private image.

", - "DescribeSessionsRequest$StackName": "

The name of the stack. This value is case-sensitive.

", - "DescribeSessionsRequest$FleetName": "

The name of the fleet. This value is case-sensitive.

", "DisassociateAppBlockBuilderAppBlockRequest$AppBlockBuilderName": "

The name of the app block builder.

", "DisassociateApplicationFleetRequest$FleetName": "

The name of the fleet.

", "DisassociateApplicationFromEntitlementRequest$StackName": "

The name of the stack with which the entitlement is associated.

", @@ -1510,7 +1500,6 @@ "UpdateApplicationRequest$Name": "

The name of the application. This name is visible to users when display name is not specified.

", "UpdateEntitlementRequest$Name": "

The name of the entitlement.

", "UpdateEntitlementRequest$StackName": "

The name of the stack with which the entitlement is associated.

", - "UpdateFleetRequest$Name": "

A unique name for the fleet.

", "UpdateImagePermissionsRequest$Name": "

The name of the private image.

" } }, @@ -1940,8 +1929,9 @@ "DescribeImagePermissionsResult$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

", "DescribeImagesRequest$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

", "DescribeImagesResult$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

", + "DescribeSessionsRequest$StackName": "

The name of the stack. This value is case-sensitive.

", + "DescribeSessionsRequest$FleetName": "

The name of the fleet. This value is case-sensitive.

", "DescribeSessionsRequest$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

", - "DescribeSessionsRequest$InstanceId": "

The identifier for the instance hosting the session.

", "DescribeSessionsResult$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

", "DescribeStacksRequest$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

", "DescribeStacksResult$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

", @@ -1997,7 +1987,6 @@ "Session$Id": "

The identifier of the streaming session.

", "Session$StackName": "

The name of the stack for the streaming session.

", "Session$FleetName": "

The name of the fleet for the streaming session.

", - "Session$InstanceId": "

The identifier for the instance hosting the session.

", "Stack$Name": "

The name of the stack.

", "Stack$Description": "

The description to display.

", "Stack$DisplayName": "

The stack name to display.

", @@ -2013,6 +2002,7 @@ "UpdateApplicationRequest$WorkingDirectory": "

The working directory of the application.

", "UpdateApplicationRequest$LaunchParameters": "

The launch parameters of the application.

", "UpdateFleetRequest$ImageName": "

The name of the image used to create the fleet.

", + "UpdateFleetRequest$Name": "

A unique name for the fleet.

", "UpdateFleetRequest$InstanceType": "

The instance type to use when launching fleet instances. The following instance types are available:

The following instance types are available for Elastic fleets:

", "UpdateStackRequest$Name": "

The name of the stack.

", "UsageReportSubscription$S3BucketName": "

The Amazon S3 bucket where generated reports are stored.

If you enabled on-instance session scripts and Amazon S3 logging for your session script configuration, AppStream 2.0 created an S3 bucket to store the script output. The bucket is unique to your account and Region. When you enable usage reporting in this case, AppStream 2.0 uses the same bucket to store your usage reports. If you haven't already enabled on-instance session scripts, when you enable usage reports, AppStream 2.0 creates a new S3 bucket.

", diff --git a/models/apis/appstream/2016-12-01/waiters-2.json b/models/apis/appstream/2016-12-01/waiters-2.json index f53f609cb7c..1c8dea0ded5 100644 --- a/models/apis/appstream/2016-12-01/waiters-2.json +++ b/models/apis/appstream/2016-12-01/waiters-2.json @@ -10,19 +10,19 @@ "state": "success", "matcher": "pathAll", "argument": "Fleets[].State", - "expected": "ACTIVE" + "expected": "RUNNING" }, { "state": "failure", "matcher": "pathAny", "argument": "Fleets[].State", - "expected": "PENDING_DEACTIVATE" + "expected": "STOPPING" }, { "state": "failure", "matcher": "pathAny", "argument": "Fleets[].State", - "expected": "INACTIVE" + "expected": "STOPPED" } ] }, @@ -35,19 +35,19 @@ "state": "success", "matcher": "pathAll", "argument": "Fleets[].State", - "expected": "INACTIVE" + "expected": "STOPPED" }, { "state": "failure", "matcher": "pathAny", "argument": "Fleets[].State", - "expected": "PENDING_ACTIVATE" + "expected": "STARTING" }, { "state": "failure", "matcher": "pathAny", "argument": "Fleets[].State", - "expected": "ACTIVE" + "expected": "RUNNING" } ] } diff --git a/models/apis/connect/2017-08-08/api-2.json b/models/apis/connect/2017-08-08/api-2.json index 4a67710cf20..6218f37bd66 100644 --- a/models/apis/connect/2017-08-08/api-2.json +++ b/models/apis/connect/2017-08-08/api-2.json @@ -6915,7 +6915,8 @@ "OnZendeskTicketCreate", "OnZendeskTicketStatusUpdate", "OnSalesforceCaseCreate", - "OnContactEvaluationSubmit" + "OnContactEvaluationSubmit", + "OnMetricDataUpdate" ] }, "FilterV2":{ diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index d03ae5d80fa..12a70990f53 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -38,7 +38,7 @@ "DescribeStorageSystemResources": "

Returns information that DataSync Discovery collects about resources in your on-premises storage system.

", "DescribeTask": "

Provides information about a DataSync transfer task.

", "DescribeTaskExecution": "

Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing transfer or check the results of the transfer.

", - "GenerateRecommendations": "

Creates recommendations about where to migrate your data to in Amazon Web Services. Recommendations are generated based on information that DataSync Discovery collects about your on-premises storage system's resources. For more information, see Recommendations provided by DataSync Discovery.

Once generated, you can view your recommendations by using the DescribeStorageSystemResources operation.

If your discovery job completes successfully, you don't need to use this operation. DataSync Discovery generates the recommendations for you automatically.

", + "GenerateRecommendations": "

Creates recommendations about where to migrate your data to in Amazon Web Services. Recommendations are generated based on information that DataSync Discovery collects about your on-premises storage system's resources. For more information, see Recommendations provided by DataSync Discovery.

Once generated, you can view your recommendations by using the DescribeStorageSystemResources operation.
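A minimal aws-sdk-go sketch of that flow, assuming a completed discovery job (the discovery job ARN and volume IDs below are placeholders), might look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := datasync.New(sess)

	// Placeholder identifiers from a completed DataSync Discovery job.
	jobArn := aws.String("arn:aws:datasync:us-east-1:111122223333:system/storage-system-example/job/discovery-job-example")
	volumeIDs := []*string{aws.String("example-volume-id")}

	// Ask DataSync Discovery to generate recommendations for specific volumes.
	if _, err := svc.GenerateRecommendations(&datasync.GenerateRecommendationsInput{
		DiscoveryJobArn: jobArn,
		ResourceType:    aws.String("VOLUME"),
		ResourceIds:     volumeIDs,
	}); err != nil {
		log.Fatal(err)
	}

	// Then read the recommendations back with DescribeStorageSystemResources.
	out, err := svc.DescribeStorageSystemResources(&datasync.DescribeStorageSystemResourcesInput{
		DiscoveryJobArn: jobArn,
		ResourceType:    aws.String("VOLUME"),
		ResourceIds:     volumeIDs,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}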

", "ListAgents": "

Returns a list of DataSync agents that belong to an Amazon Web Services account in the Amazon Web Services Region specified in the request.

With pagination, you can reduce the number of agents returned in a response. If you get a truncated list of agents in a response, the response contains a marker that you can specify in your next request to fetch the next page of agents.

ListAgents is eventually consistent. This means the result of running the operation might not reflect that you just created or deleted an agent. For example, if you create an agent with CreateAgent and then immediately run ListAgents, that agent might not show up in the list right away. In situations like this, you can always confirm whether an agent has been created (or deleted) by using DescribeAgent.
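A minimal aws-sdk-go sketch of paging through agents with the SDK's built-in paginator (illustrative only) might look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := datasync.New(sess)

	// Walk every page; the SDK follows the pagination marker for us.
	err := svc.ListAgentsPages(&datasync.ListAgentsInput{MaxResults: aws.Int64(10)},
		func(page *datasync.ListAgentsOutput, lastPage bool) bool {
			for _, a := range page.Agents {
				fmt.Println(aws.StringValue(a.AgentArn), aws.StringValue(a.Status))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}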

", "ListDiscoveryJobs": "

Provides a list of the existing discovery jobs in the Amazon Web Services Region and Amazon Web Services account where you're using DataSync Discovery.

", "ListLocations": "

Returns a list of source and destination locations.

If you have more locations than are returned in a response (that is, the response returns only a truncated list of your locations), the response contains a token that you can specify in your next request to fetch the next page of locations.

", @@ -1688,7 +1688,7 @@ "base": "

Specifies the level of detail for a particular aspect of your DataSync task report.

", "refs": { "ReportOverrides$Transferred": "

Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer.

", - "ReportOverrides$Verified": "

Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. This only applies if you configure your task to verify data during and after the transfer (which DataSync does by default).

", + "ReportOverrides$Verified": "

Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer.

", "ReportOverrides$Deleted": "

Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. This only applies if you configure your task to delete data in the destination that isn't in the source.

", "ReportOverrides$Skipped": "

Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer.

" } diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 8dddd4c54b2..603d20def65 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -6501,7 +6501,8 @@ "Domain":{"shape":"String"}, "Task":{"shape":"String"}, "SamplePayloadUrl":{"shape":"S3Uri"}, - "AdditionalInferenceSpecifications":{"shape":"AdditionalInferenceSpecifications"} + "AdditionalInferenceSpecifications":{"shape":"AdditionalInferenceSpecifications"}, + "SkipModelValidation":{"shape":"SkipModelValidation"} } }, "CreateModelPackageOutput":{ @@ -8987,7 +8988,8 @@ "Domain":{"shape":"String"}, "Task":{"shape":"String"}, "SamplePayloadUrl":{"shape":"String"}, - "AdditionalInferenceSpecifications":{"shape":"AdditionalInferenceSpecifications"} + "AdditionalInferenceSpecifications":{"shape":"AdditionalInferenceSpecifications"}, + "SkipModelValidation":{"shape":"SkipModelValidation"} } }, "DescribeModelQualityJobDefinitionRequest":{ @@ -15237,7 +15239,8 @@ "AdditionalInferenceSpecifications":{"shape":"AdditionalInferenceSpecifications"}, "Tags":{"shape":"TagList"}, "CustomerMetadataProperties":{"shape":"CustomerMetadataMap"}, - "DriftCheckBaselines":{"shape":"DriftCheckBaselines"} + "DriftCheckBaselines":{"shape":"DriftCheckBaselines"}, + "SkipModelValidation":{"shape":"SkipModelValidation"} } }, "ModelPackageArn":{ @@ -18892,6 +18895,13 @@ "type":"string", "pattern":"UserName" }, + "SkipModelValidation":{ + "type":"string", + "enum":[ + "All", + "None" + ] + }, "SnsTopicArn":{ "type":"string", "max":2048, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index efaef0292b2..8b61bec1d21 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -1423,7 +1423,7 @@ "Model$EnableNetworkIsolation": "

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

", "ModelDashboardIndicatorAction$Enabled": "

Indicates whether the alert action is turned on.

", "MonitoringCsvDatasetFormat$Header": "

Indicates if the CSV data has a header.

", - "MonitoringJsonDatasetFormat$Line": "

Indicates if the file should be read as a json object per line.

", + "MonitoringJsonDatasetFormat$Line": "

Indicates if the file should be read as a JSON object per line.

", "MonitoringNetworkConfig$EnableInterContainerTrafficEncryption": "

Whether to encrypt all communications between the instances used for the monitoring jobs. Choose True to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer.

", "MonitoringNetworkConfig$EnableNetworkIsolation": "

Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job.

", "NetworkConfig$EnableInterContainerTrafficEncryption": "

Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

", @@ -1575,7 +1575,7 @@ } }, "CaptureContentTypeHeader": { - "base": "

Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data.

", + "base": "

Configuration specifying how to treat different headers. If no headers are specified Amazon SageMaker will by default base64 encode when capturing the data.

", "refs": { "DataCaptureConfig$CaptureContentTypeHeader": "

Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data.
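A minimal aws-sdk-go sketch of a data capture configuration that sets these headers (illustrative only; the bucket path is a placeholder) might look like:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Pass this as the DataCaptureConfig field of CreateEndpointConfigInput.
	cfg := &sagemaker.DataCaptureConfig{
		EnableCapture:             aws.Bool(true),
		InitialSamplingPercentage: aws.Int64(100),
		DestinationS3Uri:          aws.String("s3://example-bucket/data-capture/"), // placeholder
		CaptureOptions: []*sagemaker.CaptureOption{
			{CaptureMode: aws.String("Input")},
			{CaptureMode: aws.String("Output")},
		},
		// Content types listed here are treated as CSV/JSON; anything else is
		// base64 encoded by default, as the documentation above notes.
		CaptureContentTypeHeader: &sagemaker.CaptureContentTypeHeader{
			CsvContentTypes:  []*string{aws.String("text/csv")},
			JsonContentTypes: []*string{aws.String("application/json")},
		},
	}
	fmt.Println(cfg)
}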

", "InferenceExperimentDataStorageConfig$ContentType": null @@ -2871,7 +2871,7 @@ "CsvContentTypes": { "base": null, "refs": { - "CaptureContentTypeHeader$CsvContentTypes": "

The list of all content type headers that SageMaker will treat as CSV and capture accordingly.

" + "CaptureContentTypeHeader$CsvContentTypes": "

The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly.

" } }, "CustomImage": { @@ -7155,7 +7155,7 @@ "CreateEndpointConfigInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use Nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any Nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", "CreateInferenceExperimentRequest$KmsKey": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKey can be any of the following formats:

If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", "CreateNotebookInstanceInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide.

", - "DataCaptureConfig$KmsKeyId": "

The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.

The KmsKeyId can be any of the following formats:

", + "DataCaptureConfig$KmsKeyId": "

The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.

The KmsKeyId can be any of the following formats:

", "DataCaptureConfigSummary$KmsKeyId": "

The KMS key being used to encrypt the data in Amazon S3.

", "DescribeDomainResponse$HomeEfsFileSystemKmsKeyId": "

Use KmsKeyId.

", "DescribeDomainResponse$KmsKeyId": "

The Amazon Web Services KMS customer managed key used to encrypt the EFS volume attached to the domain.

", @@ -7170,8 +7170,8 @@ "LabelingJobOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service ID of the key used to encrypt the output data, if any.

If you provide your own KMS key ID, you must add the required permissions to your KMS key described in Encrypt Output Data and Storage Volume with Amazon Web Services KMS.

If you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon Web Services KMS key for Amazon S3 for your role's account to encrypt your output data.

If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

", "LabelingJobResourceConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training and inference jobs used for automated data labeling.

You can only specify a VolumeKmsKeyId when you create a labeling job with automated data labeling enabled using the API operation CreateLabelingJob. You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for automated data labeling model training and inference when you create a labeling job using the console. To learn more, see Output Data and Storage Volume Encryption.

The VolumeKmsKeyId can be any of the following formats:

", "ModelCardSecurityConfig$KmsKeyId": "

A Key Management Service key ID to use for encrypting a model card.

", - "MonitoringClusterConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", - "MonitoringOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", + "MonitoringClusterConfig$VolumeKmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", + "MonitoringOutputConfig$KmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", "OnlineStoreSecurityConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (KMS) key ARN that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either user or IAM role) of CreateFeatureGroup must have the following permissions to the OnlineStore KmsKeyId:

The caller (either user or IAM role) to all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", "OutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KmsKeyId can be any of the following formats:

", "OutputDataConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", @@ -9272,7 +9272,7 @@ } }, "ModelQualityJobInput": { - "base": "

The input for the model quality monitoring job. Currently endponts are supported for input for model quality monitoring jobs.

", + "base": "

The input for the model quality monitoring job. Currently endpoints are supported for input for model quality monitoring jobs.

", "refs": { "CreateModelQualityJobDefinitionRequest$ModelQualityJobInput": "

A list of the inputs that are monitored. Currently endpoints are supported.

", "DescribeModelQualityJobDefinitionResponse$ModelQualityJobInput": "

Inputs for the model quality job.

" @@ -9498,7 +9498,7 @@ "MonitoringExecutionSortKey": { "base": null, "refs": { - "ListMonitoringExecutionsRequest$SortBy": "

Whether to sort results by Status, CreationTime, ScheduledTime field. The default is CreationTime.

" + "ListMonitoringExecutionsRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" } }, "MonitoringExecutionSummary": { @@ -9648,7 +9648,7 @@ "DescribeModelBiasJobDefinitionResponse$ModelBiasJobOutputConfig": null, "DescribeModelExplainabilityJobDefinitionResponse$ModelExplainabilityJobOutputConfig": null, "DescribeModelQualityJobDefinitionResponse$ModelQualityJobOutputConfig": null, - "MonitoringJobDefinition$MonitoringOutputConfig": "

The array of outputs from the monitoring job to be uploaded to Amazon Simple Storage Service (Amazon S3).

" + "MonitoringJobDefinition$MonitoringOutputConfig": "

The array of outputs from the monitoring job to be uploaded to Amazon S3.

" } }, "MonitoringOutputs": { @@ -9756,7 +9756,7 @@ "MonitoringScheduleSortKey": { "base": null, "refs": { - "ListMonitoringSchedulesRequest$SortBy": "

Whether to sort results by Status, CreationTime, ScheduledTime field. The default is CreationTime.

" + "ListMonitoringSchedulesRequest$SortBy": "

Whether to sort the results by the Status, CreationTime, or ScheduledTime field. The default is CreationTime.

" } }, "MonitoringScheduleSummary": { @@ -9989,7 +9989,7 @@ "ListLineageGroupsRequest$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of algorithms, use it in the subsequent request.

", "ListLineageGroupsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of algorithms, use it in the subsequent request.

", "ListModelBiasJobDefinitionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListModelBiasJobDefinitionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", + "ListModelBiasJobDefinitionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", "ListModelCardExportJobsRequest$NextToken": "

If the response to a previous ListModelCardExportJobs request was truncated, the response includes a NextToken. To retrieve the next set of model card export jobs, use the token in the next request.

", "ListModelCardExportJobsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of model card export jobs, use it in the subsequent request.

", "ListModelCardVersionsRequest$NextToken": "

If the response to a previous ListModelCardVersions request was truncated, the response includes a NextToken. To retrieve the next set of model card versions, use the token in the next request.

", @@ -9997,7 +9997,7 @@ "ListModelCardsRequest$NextToken": "

If the response to a previous ListModelCards request was truncated, the response includes a NextToken. To retrieve the next set of model cards, use the token in the next request.

", "ListModelCardsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of model cards, use it in the subsequent request.

", "ListModelExplainabilityJobDefinitionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListModelExplainabilityJobDefinitionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", + "ListModelExplainabilityJobDefinitionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", "ListModelMetadataRequest$NextToken": "

If the response to a previous ListModelMetadataResponse request was truncated, the response includes a NextToken. To retrieve the next set of model metadata, use the token in the next request.

", "ListModelMetadataResponse$NextToken": "

A token for getting the next set of recommendations, if there are any.

", "ListModelPackageGroupsInput$NextToken": "

If the result of the previous ListModelPackageGroups request was truncated, the response includes a NextToken. To retrieve the next set of model groups, use the token in the next request.

", @@ -10011,9 +10011,9 @@ "ListMonitoringAlertsRequest$NextToken": "

If the result of the previous ListMonitoringAlerts request was truncated, the response includes a NextToken. To retrieve the next set of alerts in the history, use the token in the next request.

", "ListMonitoringAlertsResponse$NextToken": "

If the response is truncated, SageMaker returns this token. To retrieve the next set of alerts, use it in the subsequent request.

", "ListMonitoringExecutionsRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListMonitoringExecutionsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent reques

", + "ListMonitoringExecutionsResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", "ListMonitoringSchedulesRequest$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", - "ListMonitoringSchedulesResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.

", + "ListMonitoringSchedulesResponse$NextToken": "

The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.

", "ListNotebookInstanceLifecycleConfigsInput$NextToken": "

If the result of a ListNotebookInstanceLifecycleConfigs request was truncated, the response includes a NextToken. To get the next set of lifecycle configurations, use the token in the next request.

", "ListNotebookInstanceLifecycleConfigsOutput$NextToken": "

If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request.

", "ListNotebookInstancesInput$NextToken": "

If the previous call to the ListNotebookInstances is truncated, the response includes a NextToken. You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.

You might specify a filter or a sort order in your request. When the response is truncated, you must use the same values for the filter and sort order in the next request.

", @@ -11023,7 +11023,7 @@ "base": null, "refs": { "BatchTransformInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated

", - "EndpointInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated

", + "EndpointInput$S3DataDistributionType": "

Whether input data distributed in Amazon S3 is fully replicated or sharded by an Amazon S3 key. Defaults to FullyReplicated

", "ProcessingS3Input$S3DataDistributionType": "

Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.

" } }, @@ -12078,8 +12078,8 @@ "DescribeInferenceExperimentResponse$RoleArn": "

The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage Amazon SageMaker Inference endpoints for model deployment.

", "DescribeInferenceRecommendationsJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role you provided when you initiated the job.

", "DescribeLabelingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf during data labeling.

", - "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", - "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", "DescribeModelOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that you specified for the model.

", "DescribeModelQualityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "DescribeNotebookInstanceOutput$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role associated with the instance.

", @@ -12207,7 +12207,7 @@ "CheckpointConfig$S3Uri": "

Identifies the S3 path where you want SageMaker to store checkpoints. For example, s3://bucket-name/key-name-prefix.

", "CreateLabelingJobRequest$LabelCategoryConfigS3Uri": "

The S3 URI of the file, referred to as a label category configuration file, that defines the categories used to label the data objects.

For 3D point cloud and video frame task types, you can add label category attributes and frame attributes to your label category configuration file. To learn how, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For named entity recognition jobs, in addition to \"labels\", you must provide worker instructions in the label category configuration file using the \"instructions\" parameter: \"instructions\": {\"shortInstruction\":\"<h1>Add header</h1><p>Add Instructions</p>\", \"fullInstruction\":\"<p>Add additional instructions.</p>\"}. For details and an example, see Create a Named Entity Recognition Labeling Job (API) .

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\",

\"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\": \"label_n\"}]

}

Note the following about the label category configuration file:

", "CreateModelPackageInput$SamplePayloadUrl": "

The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint call.

", - "DataQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "DataQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "DataQualityAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "DebugHookConfig$S3OutputPath": "

Path to Amazon S3 storage location for metrics and tensors.

", "DebugRuleConfiguration$S3OutputPath": "

Path to Amazon S3 storage location for rules.

", @@ -12226,10 +12226,10 @@ "ModelBiasAppSpecification$ConfigUri": "

JSON formatted S3 file that defines bias parameters. For more information on this JSON configuration file, see Configure bias parameters.

", "ModelCardExportArtifacts$S3ExportArtifacts": "

The Amazon S3 URI of the exported model artifacts.

", "ModelCardExportOutputConfig$S3OutputPath": "

The Amazon S3 output path to export your model card PDF.

", - "ModelExplainabilityAppSpecification$ConfigUri": "

JSON formatted S3 file that defines explainability parameters. For more information on this JSON configuration file, see Configure model explainability parameters.

", - "ModelQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "ModelExplainabilityAppSpecification$ConfigUri": "

JSON formatted Amazon S3 file that defines explainability parameters. For more information on this JSON configuration file, see Configure model explainability parameters.

", + "ModelQualityAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "ModelQualityAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", - "MonitoringAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", + "MonitoringAppSpecification$RecordPreprocessorSourceUri": "

An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flattened JSON so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers.

", "MonitoringAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "MonitoringConstraintsResource$S3Uri": "

The Amazon S3 URI for the constraints resource.

", "MonitoringStatisticsResource$S3Uri": "

The Amazon S3 URI for the statistics resource.

", @@ -12578,6 +12578,14 @@ "DescribeUserProfileResponse$SingleSignOnUserIdentifier": "

The IAM Identity Center user identifier.

" } }, + "SkipModelValidation": { + "base": null, + "refs": { + "CreateModelPackageInput$SkipModelValidation": "

Indicates if you want to skip model validation.

", + "DescribeModelPackageOutput$SkipModelValidation": "

Indicates if you want to skip model validation.

", + "ModelPackage$SkipModelValidation": "

Indicates if you want to skip model validation.

" + } + }, "SnsTopicArn": { "base": null, "refs": { @@ -12649,7 +12657,7 @@ "ListAssociationsRequest$SortOrder": "

The sort order. The default value is Descending.

", "ListCompilationJobsRequest$SortOrder": "

The sort order for results. The default is Ascending.

", "ListContextsRequest$SortOrder": "

The sort order. The default value is Descending.

", - "ListDataQualityJobDefinitionsRequest$SortOrder": "

The sort order for results. The default is Descending.

", + "ListDataQualityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListDeviceFleetsRequest$SortOrder": "

What direction to sort in.

", "ListEdgeDeploymentPlansRequest$SortOrder": "

The direction of the sorting (ascending or descending).

", "ListEdgePackagingJobsRequest$SortOrder": "

What direction to sort by.

", @@ -12669,7 +12677,7 @@ "ListModelExplainabilityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListModelPackageGroupsInput$SortOrder": "

The sort order for results. The default is Ascending.

", "ListModelPackagesInput$SortOrder": "

The sort order for the results. The default is Ascending.

", - "ListModelQualityJobDefinitionsRequest$SortOrder": "

The sort order for results. The default is Descending.

", + "ListModelQualityJobDefinitionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListMonitoringAlertHistoryRequest$SortOrder": "

The sort order, whether Ascending or Descending, of the alert history. The default is Descending.

", "ListMonitoringExecutionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListMonitoringSchedulesRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", @@ -13376,7 +13384,7 @@ "CreateCodeRepositoryInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateCompilationJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateContextRequest$Tags": "

A list of tags to apply to the context.

", - "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateDeviceFleetRequest$Tags": "

Creates tags for the specified fleet.

", "CreateDomainRequest$Tags": "

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

Tags that you specify for the Domain are also added to all Apps that the Domain launches.

", "CreateEdgeDeploymentPlanRequest$Tags": "

List of tags with which to tag the edge deployment plan.

", @@ -13393,13 +13401,13 @@ "CreateInferenceExperimentRequest$Tags": "

Array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging your Amazon Web Services Resources.

", "CreateInferenceRecommendationsJobRequest$Tags": "

The metadata that you apply to Amazon Web Services resources to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference.

", "CreateLabelingJobRequest$Tags": "

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", - "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateModelCardRequest$Tags": "

Key-value pairs used to manage metadata for model cards.

", - "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateModelInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateModelPackageGroupInput$Tags": "

A list of key value pairs associated with the model group. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", "CreateModelPackageInput$Tags": "

A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

If you supply ModelPackageGroupName, your model package belongs to the model group you specify and uses the tags associated with the model group. In this case, you cannot supply a tag argument.

", - "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateMonitoringScheduleRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateNotebookInstanceInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreatePipelineRequest$Tags": "

A list of tags to apply to the created pipeline.

", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 4217c18bbdf..ddaa415a1c8 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -3911,6 +3911,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -3928,12 +3929,15 @@ "hostname" : "controltower-fips.ca-central-1.amazonaws.com" }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { diff --git a/service/appstream/api.go b/service/appstream/api.go index 2e9cee2902e..d772a16d7ff 100644 --- a/service/appstream/api.go +++ b/service/appstream/api.go @@ -8321,15 +8321,9 @@ type ComputeCapacity struct { _ struct{} `type:"structure"` // The desired number of streaming instances. - DesiredInstances *int64 `type:"integer"` - - // The desired number of user sessions for a multi-session fleet. This is not - // allowed for single-session fleets. // - // When you create a fleet, you must set either the DesiredSessions or DesiredInstances - // attribute, based on the type of fleet you create. You can’t define both - // attributes or leave both attributes blank. - DesiredSessions *int64 `type:"integer"` + // DesiredInstances is a required field + DesiredInstances *int64 `type:"integer" required:"true"` } // String returns the string representation. @@ -8350,58 +8344,37 @@ func (s ComputeCapacity) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComputeCapacity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComputeCapacity"} + if s.DesiredInstances == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDesiredInstances sets the DesiredInstances field's value. func (s *ComputeCapacity) SetDesiredInstances(v int64) *ComputeCapacity { s.DesiredInstances = &v return s } -// SetDesiredSessions sets the DesiredSessions field's value. -func (s *ComputeCapacity) SetDesiredSessions(v int64) *ComputeCapacity { - s.DesiredSessions = &v - return s -} - // Describes the capacity status for a fleet. type ComputeCapacityStatus struct { _ struct{} `type:"structure"` - // The number of user sessions currently being used for streaming sessions. - // This only applies to multi-session fleets. - ActiveUserSessions *int64 `type:"integer"` - - // The total number of session slots that are available for streaming or are - // currently streaming. - // - // ActualUserSessionCapacity = AvailableUserSessionCapacity + ActiveUserSessions - // - // This only applies to multi-session fleets. - ActualUserSessions *int64 `type:"integer"` - // The number of currently available instances that can be used to stream sessions. Available *int64 `type:"integer"` - // The number of idle session slots currently available for user sessions. - // - // AvailableUserSessionCapacity = ActualUserSessionCapacity - ActiveUserSessions - // - // This only applies to multi-session fleets. - AvailableUserSessions *int64 `type:"integer"` - // The desired number of streaming instances. 
// // Desired is a required field Desired *int64 `type:"integer" required:"true"` - // The total number of sessions slots that are either running or pending. This - // represents the total number of concurrent streaming sessions your fleet can - // support in a steady state. - // - // DesiredUserSessionCapacity = ActualUserSessionCapacity + PendingUserSessionCapacity - // - // This only applies to multi-session fleets. - DesiredUserSessions *int64 `type:"integer"` - // The number of instances in use for streaming. InUse *int64 `type:"integer"` @@ -8427,42 +8400,18 @@ func (s ComputeCapacityStatus) GoString() string { return s.String() } -// SetActiveUserSessions sets the ActiveUserSessions field's value. -func (s *ComputeCapacityStatus) SetActiveUserSessions(v int64) *ComputeCapacityStatus { - s.ActiveUserSessions = &v - return s -} - -// SetActualUserSessions sets the ActualUserSessions field's value. -func (s *ComputeCapacityStatus) SetActualUserSessions(v int64) *ComputeCapacityStatus { - s.ActualUserSessions = &v - return s -} - // SetAvailable sets the Available field's value. func (s *ComputeCapacityStatus) SetAvailable(v int64) *ComputeCapacityStatus { s.Available = &v return s } -// SetAvailableUserSessions sets the AvailableUserSessions field's value. -func (s *ComputeCapacityStatus) SetAvailableUserSessions(v int64) *ComputeCapacityStatus { - s.AvailableUserSessions = &v - return s -} - // SetDesired sets the Desired field's value. func (s *ComputeCapacityStatus) SetDesired(v int64) *ComputeCapacityStatus { s.Desired = &v return s } -// SetDesiredUserSessions sets the DesiredUserSessions field's value. -func (s *ComputeCapacityStatus) SetDesiredUserSessions(v int64) *ComputeCapacityStatus { - s.DesiredUserSessions = &v - return s -} - // SetInUse sets the InUse field's value. func (s *ComputeCapacityStatus) SetInUse(v int64) *ComputeCapacityStatus { s.InUse = &v @@ -9804,17 +9753,13 @@ type CreateFleetInput struct { // Elastic fleets, and not allowed for other fleet types. MaxConcurrentSessions *int64 `type:"integer"` - // The maximum number of user sessions on an instance. This only applies to - // multi-session fleets. - MaxSessionsPerInstance *int64 `type:"integer"` - // The maximum amount of time that a streaming session can remain active, in // seconds. If users are still connected to a streaming instance five minutes // before this limit is reached, they are prompted to save any open documents // before being disconnected. After this time elapses, the instance is terminated // and replaced by a new instance. // - // Specify a value between 600 and 432000. + // Specify a value between 600 and 360000. MaxUserDurationInSeconds *int64 `type:"integer"` // A unique name for the fleet. @@ -9897,6 +9842,11 @@ func (s *CreateFleetInput) Validate() error { if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.ComputeCapacity != nil { + if err := s.ComputeCapacity.Validate(); err != nil { + invalidParams.AddNested("ComputeCapacity", err.(request.ErrInvalidParams)) + } + } if s.SessionScriptS3Location != nil { if err := s.SessionScriptS3Location.Validate(); err != nil { invalidParams.AddNested("SessionScriptS3Location", err.(request.ErrInvalidParams)) @@ -9987,12 +9937,6 @@ func (s *CreateFleetInput) SetMaxConcurrentSessions(v int64) *CreateFleetInput { return s } -// SetMaxSessionsPerInstance sets the MaxSessionsPerInstance field's value. 
-func (s *CreateFleetInput) SetMaxSessionsPerInstance(v int64) *CreateFleetInput { - s.MaxSessionsPerInstance = &v - return s -} - // SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. func (s *CreateFleetInput) SetMaxUserDurationInSeconds(v int64) *CreateFleetInput { s.MaxUserDurationInSeconds = &v @@ -13323,10 +13267,7 @@ type DescribeSessionsInput struct { // The name of the fleet. This value is case-sensitive. // // FleetName is a required field - FleetName *string `type:"string" required:"true"` - - // The identifier for the instance hosting the session. - InstanceId *string `min:"1" type:"string"` + FleetName *string `min:"1" type:"string" required:"true"` // The size of each page of results. The default value is 20 and the maximum // value is 50. @@ -13339,7 +13280,7 @@ type DescribeSessionsInput struct { // The name of the stack. This value is case-sensitive. // // StackName is a required field - StackName *string `type:"string" required:"true"` + StackName *string `min:"1" type:"string" required:"true"` // The user identifier (ID). If you specify a user ID, you must also specify // the authentication type. @@ -13370,8 +13311,8 @@ func (s *DescribeSessionsInput) Validate() error { if s.FleetName == nil { invalidParams.Add(request.NewErrParamRequired("FleetName")) } - if s.InstanceId != nil && len(*s.InstanceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) @@ -13379,6 +13320,9 @@ func (s *DescribeSessionsInput) Validate() error { if s.StackName == nil { invalidParams.Add(request.NewErrParamRequired("StackName")) } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } if s.UserId != nil && len(*s.UserId) < 2 { invalidParams.Add(request.NewErrParamMinLen("UserId", 2)) } @@ -13401,12 +13345,6 @@ func (s *DescribeSessionsInput) SetFleetName(v string) *DescribeSessionsInput { return s } -// SetInstanceId sets the InstanceId field's value. -func (s *DescribeSessionsInput) SetInstanceId(v string) *DescribeSessionsInput { - s.InstanceId = &v - return s -} - // SetLimit sets the Limit field's value. func (s *DescribeSessionsInput) SetLimit(v int64) *DescribeSessionsInput { s.Limit = &v @@ -15201,10 +15139,6 @@ type Fleet struct { // The maximum number of concurrent sessions for the fleet. MaxConcurrentSessions *int64 `type:"integer"` - // The maximum number of user sessions on an instance. This only applies to - // multi-session fleets. - MaxSessionsPerInstance *int64 `type:"integer"` - // The maximum amount of time that a streaming session can remain active, in // seconds. If users are still connected to a streaming instance five minutes // before this limit is reached, they are prompted to save any open documents @@ -15360,12 +15294,6 @@ func (s *Fleet) SetMaxConcurrentSessions(v int64) *Fleet { return s } -// SetMaxSessionsPerInstance sets the MaxSessionsPerInstance field's value. -func (s *Fleet) SetMaxSessionsPerInstance(v int64) *Fleet { - s.MaxSessionsPerInstance = &v - return s -} - // SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. 
func (s *Fleet) SetMaxUserDurationInSeconds(v int64) *Fleet { s.MaxUserDurationInSeconds = &v @@ -17558,9 +17486,6 @@ type Session struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // The identifier for the instance hosting the session. - InstanceId *string `min:"1" type:"string"` - // The time when the streaming session is set to expire. This time is based // on the MaxUserDurationinSeconds value, which determines the maximum length // of time that a streaming session can run. A streaming session might end earlier @@ -17634,12 +17559,6 @@ func (s *Session) SetId(v string) *Session { return s } -// SetInstanceId sets the InstanceId field's value. -func (s *Session) SetInstanceId(v string) *Session { - s.InstanceId = &v - return s -} - // SetMaxExpirationTime sets the MaxExpirationTime field's value. func (s *Session) SetMaxExpirationTime(v time.Time) *Session { s.MaxExpirationTime = &v @@ -19449,10 +19368,6 @@ type UpdateFleetInput struct { // The maximum number of concurrent sessions for a fleet. MaxConcurrentSessions *int64 `type:"integer"` - // The maximum number of user sessions on an instance. This only applies to - // multi-session fleets. - MaxSessionsPerInstance *int64 `type:"integer"` - // The maximum amount of time that a streaming session can remain active, in // seconds. If users are still connected to a streaming instance five minutes // before this limit is reached, they are prompted to save any open documents @@ -19463,7 +19378,7 @@ type UpdateFleetInput struct { MaxUserDurationInSeconds *int64 `type:"integer"` // A unique name for the fleet. - Name *string `type:"string"` + Name *string `min:"1" type:"string"` // The platform of the fleet. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported // for Elastic fleets. @@ -19519,6 +19434,14 @@ func (s *UpdateFleetInput) Validate() error { if s.InstanceType != nil && len(*s.InstanceType) < 1 { invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ComputeCapacity != nil { + if err := s.ComputeCapacity.Validate(); err != nil { + invalidParams.AddNested("ComputeCapacity", err.(request.ErrInvalidParams)) + } + } if s.SessionScriptS3Location != nil { if err := s.SessionScriptS3Location.Validate(); err != nil { invalidParams.AddNested("SessionScriptS3Location", err.(request.ErrInvalidParams)) @@ -19615,12 +19538,6 @@ func (s *UpdateFleetInput) SetMaxConcurrentSessions(v int64) *UpdateFleetInput { return s } -// SetMaxSessionsPerInstance sets the MaxSessionsPerInstance field's value. -func (s *UpdateFleetInput) SetMaxSessionsPerInstance(v int64) *UpdateFleetInput { - s.MaxSessionsPerInstance = &v - return s -} - // SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. 
func (s *UpdateFleetInput) SetMaxUserDurationInSeconds(v int64) *UpdateFleetInput { s.MaxUserDurationInSeconds = &v @@ -20703,9 +20620,6 @@ const ( // FleetAttributeSessionScriptS3Location is a FleetAttribute enum value FleetAttributeSessionScriptS3Location = "SESSION_SCRIPT_S3_LOCATION" - - // FleetAttributeMaxSessionsPerInstance is a FleetAttribute enum value - FleetAttributeMaxSessionsPerInstance = "MAX_SESSIONS_PER_INSTANCE" ) // FleetAttribute_Values returns all elements of the FleetAttribute enum @@ -20717,7 +20631,6 @@ func FleetAttribute_Values() []string { FleetAttributeIamRoleArn, FleetAttributeUsbDeviceFilterStrings, FleetAttributeSessionScriptS3Location, - FleetAttributeMaxSessionsPerInstance, } } diff --git a/service/appstream/waiters.go b/service/appstream/waiters.go index 1f9f17f2dca..90ec459f0fb 100644 --- a/service/appstream/waiters.go +++ b/service/appstream/waiters.go @@ -34,17 +34,17 @@ func (c *AppStream) WaitUntilFleetStartedWithContext(ctx aws.Context, input *Des { State: request.SuccessWaiterState, Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", - Expected: "ACTIVE", + Expected: "RUNNING", }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", - Expected: "PENDING_DEACTIVATE", + Expected: "STOPPING", }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", - Expected: "INACTIVE", + Expected: "STOPPED", }, }, Logger: c.Config.Logger, @@ -90,17 +90,17 @@ func (c *AppStream) WaitUntilFleetStoppedWithContext(ctx aws.Context, input *Des { State: request.SuccessWaiterState, Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", - Expected: "INACTIVE", + Expected: "STOPPED", }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", - Expected: "PENDING_ACTIVATE", + Expected: "STARTING", }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", - Expected: "ACTIVE", + Expected: "RUNNING", }, }, Logger: c.Config.Logger, diff --git a/service/connect/api.go b/service/connect/api.go index f8aa0054f0e..5aa204b3811 100644 --- a/service/connect/api.go +++ b/service/connect/api.go @@ -64741,6 +64741,9 @@ const ( // EventSourceNameOnContactEvaluationSubmit is a EventSourceName enum value EventSourceNameOnContactEvaluationSubmit = "OnContactEvaluationSubmit" + + // EventSourceNameOnMetricDataUpdate is a EventSourceName enum value + EventSourceNameOnMetricDataUpdate = "OnMetricDataUpdate" ) // EventSourceName_Values returns all elements of the EventSourceName enum @@ -64753,6 +64756,7 @@ func EventSourceName_Values() []string { EventSourceNameOnZendeskTicketStatusUpdate, EventSourceNameOnSalesforceCaseCreate, EventSourceNameOnContactEvaluationSubmit, + EventSourceNameOnMetricDataUpdate, } } diff --git a/service/datasync/api.go b/service/datasync/api.go index 20946118bb2..1945966ec20 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -3241,10 +3241,6 @@ func (c *DataSync) GenerateRecommendationsRequest(input *GenerateRecommendations // (https://docs.aws.amazon.com/datasync/latest/userguide/API_DescribeStorageSystemResources.html) // operation. // -// If your discovery job completes successfully (https://docs.aws.amazon.com/datasync/latest/userguide/discovery-job-statuses.html#discovery-job-statuses-table), -// you don't need to use this operation. DataSync Discovery generates the recommendations -// for you automatically. 
-// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -14700,9 +14696,7 @@ type ReportOverrides struct { Transferred *ReportOverride `type:"structure"` // Specifies the level of reporting for the files, objects, and directories - // that DataSync attempted to verify at the end of your transfer. This only - // applies if you configure your task (https://docs.aws.amazon.com/datasync/latest/userguide/configure-data-verification-options.html) - // to verify data during and after the transfer (which DataSync does by default). + // that DataSync attempted to verify at the end of your transfer. Verified *ReportOverride `type:"structure"` } diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index 96d72027dbd..5945c7952de 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -34550,12 +34550,13 @@ func (s *CapacitySize) SetValue(v int64) *CapacitySize { } // Configuration specifying how to treat different headers. If no headers are -// specified SageMaker will by default base64 encode when capturing the data. +// specified Amazon SageMaker will by default base64 encode when capturing the +// data. type CaptureContentTypeHeader struct { _ struct{} `type:"structure"` - // The list of all content type headers that SageMaker will treat as CSV and - // capture accordingly. + // The list of all content type headers that Amazon SageMaker will treat as + // CSV and capture accordingly. CsvContentTypes []*string `min:"1" type:"list"` // The list of all content type headers that SageMaker will treat as JSON and @@ -43615,6 +43616,9 @@ type CreateModelPackageInput struct { // call. SamplePayloadUrl *string `type:"string"` + // Indicates if you want to skip model validation. + SkipModelValidation *string `type:"string" enum:"SkipModelValidation"` + // Details about the algorithm that was used to create the model package. SourceAlgorithmSpecification *SourceAlgorithmSpecification `type:"structure"` @@ -43813,6 +43817,12 @@ func (s *CreateModelPackageInput) SetSamplePayloadUrl(v string) *CreateModelPack return s } +// SetSkipModelValidation sets the SkipModelValidation field's value. +func (s *CreateModelPackageInput) SetSkipModelValidation(v string) *CreateModelPackageInput { + s.SkipModelValidation = &v + return s +} + // SetSourceAlgorithmSpecification sets the SourceAlgorithmSpecification field's value. func (s *CreateModelPackageInput) SetSourceAlgorithmSpecification(v *SourceAlgorithmSpecification) *CreateModelPackageInput { s.SourceAlgorithmSpecification = v @@ -47522,9 +47532,8 @@ type DataCaptureConfig struct { // InitialSamplingPercentage is a required field InitialSamplingPercentage *int64 `type:"integer" required:"true"` - // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service - // key that SageMaker uses to encrypt the captured data at rest using Amazon - // S3 server-side encryption. + // The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker + // uses to encrypt the captured data at rest using Amazon S3 server-side encryption. // // The KmsKeyId can be any of the following formats: // @@ -47903,9 +47912,9 @@ type DataQualityAppSpecification struct { PostAnalyticsProcessorSourceUri *string `type:"string"` // An Amazon S3 URI to a script that is called per row prior to running analysis. 
- // It can base64 decode the payload and convert it into a flatted json so that - // the built-in container can use the converted data. Applicable only for the - // built-in (first party) containers. + // It can base64 decode the payload and convert it into a flattened JSON so + // that the built-in container can use the converted data. Applicable only for + // the built-in (first party) containers. RecordPreprocessorSourceUri *string `type:"string"` } @@ -59292,9 +59301,9 @@ type DescribeModelBiasJobDefinitionOutput struct { // Networking options for a model bias job. NetworkConfig *MonitoringNetworkConfig `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access - // Management (IAM) role that has read permission to the input data location - // and write permission to the output data location in Amazon S3. + // The Amazon Resource Name (ARN) of the IAM role that has read permission to + // the input data location and write permission to the output data location + // in Amazon S3. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -59890,9 +59899,9 @@ type DescribeModelExplainabilityJobDefinitionOutput struct { // Networking options for a model explainability job. NetworkConfig *MonitoringNetworkConfig `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access - // Management (IAM) role that has read permission to the input data location - // and write permission to the output data location in Amazon S3. + // The Amazon Resource Name (ARN) of the IAM role that has read permission to + // the input data location and write permission to the output data location + // in Amazon S3. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -60437,6 +60446,9 @@ type DescribeModelPackageOutput struct { // suffix). SamplePayloadUrl *string `type:"string"` + // Indicates if you want to skip model validation. + SkipModelValidation *string `type:"string" enum:"SkipModelValidation"` + // Details about the algorithm that was used to create the model package. SourceAlgorithmSpecification *SourceAlgorithmSpecification `type:"structure"` @@ -60599,6 +60611,12 @@ func (s *DescribeModelPackageOutput) SetSamplePayloadUrl(v string) *DescribeMode return s } +// SetSkipModelValidation sets the SkipModelValidation field's value. +func (s *DescribeModelPackageOutput) SetSkipModelValidation(v string) *DescribeModelPackageOutput { + s.SkipModelValidation = &v + return s +} + // SetSourceAlgorithmSpecification sets the SourceAlgorithmSpecification field's value. func (s *DescribeModelPackageOutput) SetSourceAlgorithmSpecification(v *SourceAlgorithmSpecification) *DescribeModelPackageOutput { s.SourceAlgorithmSpecification = v @@ -66948,7 +66966,7 @@ type EndpointInput struct { ProbabilityThresholdAttribute *float64 `type:"double"` // Whether input data distributed in Amazon S3 is fully replicated or sharded - // by an S3 key. Defaults to FullyReplicated + // by an Amazon S3 key. Defaults to FullyReplicated S3DataDistributionType *string `type:"string" enum:"ProcessingS3DataDistributionType"` // Whether the Pipe or File is used as the input mode for transferring data @@ -74382,7 +74400,8 @@ type InferenceExperimentDataStorageConfig struct { _ struct{} `type:"structure"` // Configuration specifying how to treat different headers. If no headers are - // specified SageMaker will by default base64 encode when capturing the data. 
+ // specified Amazon SageMaker will by default base64 encode when capturing the + // data. ContentType *CaptureContentTypeHeader `type:"structure"` // The Amazon S3 bucket where the inference request and response data is stored. @@ -78941,7 +78960,8 @@ type ListDataQualityJobDefinitionsInput struct { // The field to sort results by. The default is CreationTime. SortBy *string `type:"string" enum:"MonitoringJobDefinitionSortKey"` - // The sort order for results. The default is Descending. + // Whether to sort the results in Ascending or Descending order. The default + // is Descending. SortOrder *string `type:"string" enum:"SortOrder"` } @@ -82775,8 +82795,8 @@ type ListModelBiasJobDefinitionsOutput struct { // JobDefinitionSummaries is a required field JobDefinitionSummaries []*MonitoringJobDefinitionSummary `type:"list" required:"true"` - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of jobs, use it in the subsequent request. + // The token returned if the response is truncated. To retrieve the next set + // of job executions, use it in the next request. NextToken *string `type:"string"` } @@ -83431,8 +83451,8 @@ type ListModelExplainabilityJobDefinitionsOutput struct { // JobDefinitionSummaries is a required field JobDefinitionSummaries []*MonitoringJobDefinitionSummary `type:"list" required:"true"` - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of jobs, use it in the subsequent request. + // The token returned if the response is truncated. To retrieve the next set + // of job executions, use it in the next request. NextToken *string `type:"string"` } @@ -83940,7 +83960,8 @@ type ListModelQualityJobDefinitionsInput struct { // The field to sort results by. The default is CreationTime. SortBy *string `type:"string" enum:"MonitoringJobDefinitionSortKey"` - // The sort order for results. The default is Descending. + // Whether to sort the results in Ascending or Descending order. The default + // is Descending. SortOrder *string `type:"string" enum:"SortOrder"` } @@ -84533,8 +84554,8 @@ type ListMonitoringExecutionsInput struct { // Filter for jobs scheduled before a specified time. ScheduledTimeBefore *time.Time `type:"timestamp"` - // Whether to sort results by Status, CreationTime, ScheduledTime field. The - // default is CreationTime. + // Whether to sort the results by the Status, CreationTime, or ScheduledTime + // field. The default is CreationTime. SortBy *string `type:"string" enum:"MonitoringExecutionSortKey"` // Whether to sort the results in Ascending or Descending order. The default @@ -84680,8 +84701,8 @@ type ListMonitoringExecutionsOutput struct { // MonitoringExecutionSummaries is a required field MonitoringExecutionSummaries []*MonitoringExecutionSummary `type:"list" required:"true"` - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of jobs, use it in the subsequent reques + // The token returned if the response is truncated. To retrieve the next set + // of job executions, use it in the next request. NextToken *string `type:"string"` } @@ -84756,8 +84777,8 @@ type ListMonitoringSchedulesInput struct { // of job executions, use it in the next request. NextToken *string `type:"string"` - // Whether to sort results by Status, CreationTime, ScheduledTime field. The - // default is CreationTime. + // Whether to sort the results by the Status, CreationTime, or ScheduledTime + // field. The default is CreationTime. 
SortBy *string `type:"string" enum:"MonitoringScheduleSortKey"` // Whether to sort the results in Ascending or Descending order. The default @@ -84889,8 +84910,8 @@ type ListMonitoringSchedulesOutput struct { // MonitoringScheduleSummaries is a required field MonitoringScheduleSummaries []*MonitoringScheduleSummary `type:"list" required:"true"` - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of jobs, use it in the subsequent request. + // The token returned if the response is truncated. To retrieve the next set + // of job executions, use it in the next request. NextToken *string `type:"string"` } @@ -90326,9 +90347,9 @@ func (s *ModelDigests) SetArtifactDigest(v string) *ModelDigests { type ModelExplainabilityAppSpecification struct { _ struct{} `type:"structure"` - // JSON formatted S3 file that defines explainability parameters. For more information - // on this JSON configuration file, see Configure model explainability parameters - // (https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-config-json-monitor-model-explainability-parameters.html). + // JSON formatted Amazon S3 file that defines explainability parameters. For + // more information on this JSON configuration file, see Configure model explainability + // parameters (https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-config-json-monitor-model-explainability-parameters.html). // // ConfigUri is a required field ConfigUri *string `type:"string" required:"true"` @@ -91051,6 +91072,9 @@ type ModelPackage struct { // This path must point to a single gzip compressed tar archive (.tar.gz suffix). SamplePayloadUrl *string `type:"string"` + // Indicates if you want to skip model validation. + SkipModelValidation *string `type:"string" enum:"SkipModelValidation"` + // A list of algorithms that were used to create a model package. SourceAlgorithmSpecification *SourceAlgorithmSpecification `type:"structure"` @@ -91218,6 +91242,12 @@ func (s *ModelPackage) SetSamplePayloadUrl(v string) *ModelPackage { return s } +// SetSkipModelValidation sets the SkipModelValidation field's value. +func (s *ModelPackage) SetSkipModelValidation(v string) *ModelPackage { + s.SkipModelValidation = &v + return s +} + // SetSourceAlgorithmSpecification sets the SourceAlgorithmSpecification field's value. func (s *ModelPackage) SetSourceAlgorithmSpecification(v *SourceAlgorithmSpecification) *ModelPackage { s.SourceAlgorithmSpecification = v @@ -92019,9 +92049,9 @@ type ModelQualityAppSpecification struct { ProblemType *string `type:"string" enum:"MonitoringProblemType"` // An Amazon S3 URI to a script that is called per row prior to running analysis. - // It can base64 decode the payload and convert it into a flatted json so that - // the built-in container can use the converted data. Applicable only for the - // built-in (first party) containers. + // It can base64 decode the payload and convert it into a flattened JSON so + // that the built-in container can use the converted data. Applicable only for + // the built-in (first party) containers. RecordPreprocessorSourceUri *string `type:"string"` } @@ -92160,7 +92190,7 @@ func (s *ModelQualityBaselineConfig) SetConstraintsResource(v *MonitoringConstra return s } -// The input for the model quality monitoring job. Currently endponts are supported +// The input for the model quality monitoring job. Currently endpoints are supported // for input for model quality monitoring jobs. 
type ModelQualityJobInput struct { _ struct{} `type:"structure"` @@ -92770,9 +92800,9 @@ type MonitoringAppSpecification struct { PostAnalyticsProcessorSourceUri *string `type:"string"` // An Amazon S3 URI to a script that is called per row prior to running analysis. - // It can base64 decode the payload and convert it into a flatted json so that - // the built-in container can use the converted data. Applicable only for the - // built-in (first party) containers. + // It can base64 decode the payload and convert it into a flattened JSON so + // that the built-in container can use the converted data. Applicable only for + // the built-in (first party) containers. RecordPreprocessorSourceUri *string `type:"string"` } @@ -92926,9 +92956,9 @@ type MonitoringClusterConfig struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"ProcessingInstanceType"` - // The Amazon Web Services Key Management Service (Amazon Web Services KMS) - // key that Amazon SageMaker uses to encrypt data on the storage volume attached - // to the ML compute instance(s) that run the model monitoring job. + // The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt + // data on the storage volume attached to the ML compute instance(s) that run + // the model monitoring job. VolumeKmsKeyId *string `type:"string"` // The size of the ML storage volume, in gigabytes, that you want to provision. @@ -93357,8 +93387,7 @@ type MonitoringJobDefinition struct { // MonitoringInputs is a required field MonitoringInputs []*MonitoringInput `min:"1" type:"list" required:"true"` - // The array of outputs from the monitoring job to be uploaded to Amazon Simple - // Storage Service (Amazon S3). + // The array of outputs from the monitoring job to be uploaded to Amazon S3. // // MonitoringOutputConfig is a required field MonitoringOutputConfig *MonitoringOutputConfig `type:"structure" required:"true"` @@ -93597,7 +93626,7 @@ func (s *MonitoringJobDefinitionSummary) SetMonitoringJobDefinitionName(v string type MonitoringJsonDatasetFormat struct { _ struct{} `type:"structure"` - // Indicates if the file should be read as a json object per line. + // Indicates if the file should be read as a JSON object per line. Line *bool `type:"boolean"` } @@ -93754,9 +93783,8 @@ func (s *MonitoringOutput) SetS3Output(v *MonitoringS3Output) *MonitoringOutput type MonitoringOutputConfig struct { _ struct{} `type:"structure"` - // The Amazon Web Services Key Management Service (Amazon Web Services KMS) - // key that Amazon SageMaker uses to encrypt the model artifacts at rest using - // Amazon S3 server-side encryption. + // The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt + // the model artifacts at rest using Amazon S3 server-side encryption. KmsKeyId *string `type:"string"` // Monitoring outputs for monitoring jobs. This is where the output of the periodic @@ -123850,6 +123878,22 @@ func SecondaryStatus_Values() []string { } } +const ( + // SkipModelValidationAll is a SkipModelValidation enum value + SkipModelValidationAll = "All" + + // SkipModelValidationNone is a SkipModelValidation enum value + SkipModelValidationNone = "None" +) + +// SkipModelValidation_Values returns all elements of the SkipModelValidation enum +func SkipModelValidation_Values() []string { + return []string{ + SkipModelValidationAll, + SkipModelValidationNone, + } +} + const ( // SortActionsByName is a SortActionsBy enum value SortActionsByName = "Name"
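The SkipModelValidation enum values added above pair with the new CreateModelPackageInput SkipModelValidation field. As a minimal sketch of how the field might be exercised with this SDK version, the Go program below registers a model package with validation skipped; the model package group name and description are placeholder values, and package details that a real call would need (for example an InferenceSpecification) are omitted for brevity.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Region and credentials are taken from the shared config / environment.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := sagemaker.New(sess)

	// "example-model-group" is a hypothetical model package group name.
	input := &sagemaker.CreateModelPackageInput{
		ModelPackageGroupName:   aws.String("example-model-group"),
		ModelPackageDescription: aws.String("Version registered with validation skipped"),
		// New in this release: opt out of the model validation step.
		SkipModelValidation: aws.String(sagemaker.SkipModelValidationAll),
		// An InferenceSpecification or SourceAlgorithmSpecification would
		// normally be supplied as well; it is left out of this sketch.
	}

	out, err := svc.CreateModelPackage(input)
	if err != nil {
		log.Fatalf("CreateModelPackage failed: %v", err)
	}
	fmt.Println("created model package:", aws.StringValue(out.ModelPackageArn))
}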