diff --git a/CHANGELOG.md b/CHANGELOG.md index 48d7f311a2f..581e29fe448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,25 @@ +Release v1.45.15 (2023-09-22) +=== + +### Service Client Updates +* `service/braket`: Updates service API and documentation +* `service/dms`: Updates service API, documentation, and examples + * new vendors for DMS CSF: MongoDB, MariaDB, DocumentDB, and Redshift +* `service/ec2`: Updates service API + * EC2 M2 Pro Mac instances are powered by Apple M2 Pro Mac Mini computers featuring a 12-core CPU, 19-core GPU, 32 GiB of memory, and a 16-core Apple Neural Engine, and are uniquely enabled by the AWS Nitro System through high-speed Thunderbolt connections. +* `service/elasticfilesystem`: Updates service documentation + * Documentation updates for Elastic File System +* `service/events`: Updates service API and documentation + * Adds the sensitive trait to various shapes in the Jetstream Connections API model. +* `service/guardduty`: Updates service API and documentation + * Add `EKS_CLUSTER_NAME` to the filter and sort keys. +* `service/mediaconvert`: Updates service API and documentation + * This release supports the creation of audio-only tracks in CMAF output groups. + +### SDK Bugs +* `aws/session`: Removed typed literal parsing for config, everything is now treated as a string until a numeric value is needed. + * This resolves an issue where the contents of a profile would silently be dropped with certain values. + Release v1.45.14 (2023-09-20) === diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 02e2fecc7e2..8a1927a39ca 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,5 +3,3 @@ ### SDK Enhancements ### SDK Bugs -* `aws/session`: Removed typed literal parsing for config, everything is now treated as a string until a numeric value is needed. - * This resolves an issue where the contents of a profile would silently be dropped with certain values. 
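The `aws/session` fix above changes how shared-config profiles are parsed. A minimal sketch of the caller-side path it affects, assuming a hypothetical profile named `analytics` in `~/.aws/config`; with v1.45.15, values are kept as plain strings until a numeric value is actually needed, so such a profile is no longer silently dropped:

```go
// Sketch: load a shared-config profile through aws/session.
// The profile name is a placeholder, not a value from this diff.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "analytics", // hypothetical profile name
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		panic(err)
	}
	// Before the fix, certain literal values in the profile could cause
	// its contents to be dropped; now they survive parsing as strings.
	fmt.Println("region:", aws.StringValue(sess.Config.Region))
}
```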
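The Braket model changes below add opt-in queue visibility to `GetQuantumTask` and `GetJob`. A hedged sketch of requesting it through the v1 SDK — the task ARN is a placeholder, and the generated constant name for the `QueueInfo` enum value is an assumption based on the SDK's usual codegen pattern:

```go
// Sketch: request the new QueueInfo attribute on a Braket quantum task.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/braket"
)

func main() {
	svc := braket.New(session.Must(session.NewSession()))

	out, err := svc.GetQuantumTask(&braket.GetQuantumTaskInput{
		// Placeholder ARN for illustration only.
		QuantumTaskArn: aws.String("arn:aws:braket:us-east-1:123456789012:quantum-task/example"),
		// QueueInfo is only returned when explicitly requested.
		AdditionalAttributeNames: aws.StringSlice([]string{braket.QuantumTaskAdditionalAttributeNameQueueInfo}),
	})
	if err != nil {
		panic(err)
	}
	if qi := out.QueueInfo; qi != nil {
		fmt.Printf("queue=%s position=%s priority=%s\n",
			aws.StringValue(qi.Queue), aws.StringValue(qi.Position), aws.StringValue(qi.QueuePriority))
	}
}
```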
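The DMS update adds MongoDB, MariaDB, DocumentDB, and Redshift members to the `DataProviderSettings` union. A sketch of registering a MongoDB data provider; the provider name, engine string, and connection details are illustrative assumptions, not values from this diff:

```go
// Sketch: create a DMS data provider using one of the new union members.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := dms.New(session.Must(session.NewSession()))

	_, err := svc.CreateDataProvider(&dms.CreateDataProviderInput{
		DataProviderName: aws.String("example-mongodb"), // hypothetical name
		Engine:           aws.String("mongodb"),         // assumed engine identifier
		Settings: &dms.DataProviderSettings{
			// MongoDbSettings is one of the members added in this model update.
			MongoDbSettings: &dms.MongoDbDataProviderSettings{
				ServerName: aws.String("mongo.example.internal"), // placeholder host
				Port:       aws.Int64(27017),
				AuthType:   aws.String("password"),
				SslMode:    aws.String("none"),
			},
		},
	})
	if err != nil {
		panic(err)
	}
}
```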
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 5ea860c6b11..5d3516cb4bf 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -20361,6 +20361,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -21468,6 +21476,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -33225,6 +33241,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -33263,6 +33299,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 5d06b42d9c4..8fe3a6efea9 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.14" +const SDKVersion = "1.45.15" diff --git a/models/apis/braket/2019-09-01/api-2.json b/models/apis/braket/2019-09-01/api-2.json index ac4f3557386..4d49ca107af 100644 --- a/models/apis/braket/2019-09-01/api-2.json +++ b/models/apis/braket/2019-09-01/api-2.json @@ -472,6 +472,22 @@ }, "exception":true }, + "DeviceQueueInfo":{ + "type":"structure", + "required":[ + "queue", + "queueSize" + ], + "members":{ + "queue":{"shape":"QueueName"}, + "queuePriority":{"shape":"QueuePriority"}, + "queueSize":{"shape":"String"} + } + }, + "DeviceQueueInfoList":{ + "type":"list", + "member":{"shape":"DeviceQueueInfo"} + }, "DeviceRetiredException":{ "type":"structure", "members":{ @@ -547,6 +563,7 @@ "jsonvalue":true }, "deviceName":{"shape":"String"}, + "deviceQueueInfo":{"shape":"DeviceQueueInfoList"}, "deviceStatus":{"shape":"DeviceStatus"}, "deviceType":{"shape":"DeviceType"}, "providerName":{"shape":"String"} @@ -556,6 +573,11 @@ "type":"structure", "required":["jobArn"], "members":{ + "additionalAttributeNames":{ + "shape":"HybridJobAdditionalAttributeNamesList", + "location":"querystring", + "locationName":"additionalAttributeNames" + }, "jobArn":{ "shape":"JobArn", "location":"uri", @@ -590,6 +612,7 @@ "jobArn":{"shape":"JobArn"}, "jobName":{"shape":"GetJobResponseJobNameString"}, "outputDataConfig":{"shape":"JobOutputDataConfig"}, + 
"queueInfo":{"shape":"HybridJobQueueInfo"}, "roleArn":{"shape":"RoleArn"}, "startedAt":{"shape":"SyntheticTimestamp_date_time"}, "status":{"shape":"JobPrimaryStatus"}, @@ -607,6 +630,11 @@ "type":"structure", "required":["quantumTaskArn"], "members":{ + "additionalAttributeNames":{ + "shape":"QuantumTaskAdditionalAttributeNamesList", + "location":"querystring", + "locationName":"additionalAttributeNames" + }, "quantumTaskArn":{ "shape":"QuantumTaskArn", "location":"uri", @@ -639,11 +667,32 @@ "outputS3Bucket":{"shape":"String"}, "outputS3Directory":{"shape":"String"}, "quantumTaskArn":{"shape":"QuantumTaskArn"}, + "queueInfo":{"shape":"QuantumTaskQueueInfo"}, "shots":{"shape":"Long"}, "status":{"shape":"QuantumTaskStatus"}, "tags":{"shape":"TagsMap"} } }, + "HybridJobAdditionalAttributeName":{ + "type":"string", + "enum":["QueueInfo"] + }, + "HybridJobAdditionalAttributeNamesList":{ + "type":"list", + "member":{"shape":"HybridJobAdditionalAttributeName"} + }, + "HybridJobQueueInfo":{ + "type":"structure", + "required":[ + "position", + "queue" + ], + "members":{ + "message":{"shape":"String"}, + "position":{"shape":"String"}, + "queue":{"shape":"QueueName"} + } + }, "HyperParameters":{ "type":"map", "key":{"shape":"String256"}, @@ -886,10 +935,31 @@ "type":"long", "box":true }, + "QuantumTaskAdditionalAttributeName":{ + "type":"string", + "enum":["QueueInfo"] + }, + "QuantumTaskAdditionalAttributeNamesList":{ + "type":"list", + "member":{"shape":"QuantumTaskAdditionalAttributeName"} + }, "QuantumTaskArn":{ "type":"string", "max":256, - "min":1 + "min":0 + }, + "QuantumTaskQueueInfo":{ + "type":"structure", + "required":[ + "position", + "queue" + ], + "members":{ + "message":{"shape":"String"}, + "position":{"shape":"String"}, + "queue":{"shape":"QueueName"}, + "queuePriority":{"shape":"QueuePriority"} + } }, "QuantumTaskStatus":{ "type":"string", @@ -930,6 +1000,20 @@ "type":"list", "member":{"shape":"QuantumTaskSummary"} }, + "QueueName":{ + "type":"string", + "enum":[ + "QUANTUM_TASKS_QUEUE", + "JOBS_QUEUE" + ] + }, + "QueuePriority":{ + "type":"string", + "enum":[ + "Normal", + "Priority" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ diff --git a/models/apis/braket/2019-09-01/docs-2.json b/models/apis/braket/2019-09-01/docs-2.json index c660f68027d..4d39d65ee59 100644 --- a/models/apis/braket/2019-09-01/docs-2.json +++ b/models/apis/braket/2019-09-01/docs-2.json @@ -158,6 +158,18 @@ "refs": { } }, + "DeviceQueueInfo": { + "base": "

Information about tasks and jobs queued on a device.

", + "refs": { + "DeviceQueueInfoList$member": null + } + }, + "DeviceQueueInfoList": { + "base": null, + "refs": { + "GetDeviceResponse$deviceQueueInfo": "

List of information about tasks and jobs queued on a device.

" + } + }, "DeviceRetiredException": { "base": "

The specified device has been retired.

", "refs": { @@ -225,6 +237,24 @@ "refs": { } }, + "HybridJobAdditionalAttributeName": { + "base": null, + "refs": { + "HybridJobAdditionalAttributeNamesList$member": null + } + }, + "HybridJobAdditionalAttributeNamesList": { + "base": null, + "refs": { + "GetJobRequest$additionalAttributeNames": "

A list of attributes to return information for.

" + } + }, + "HybridJobQueueInfo": { + "base": "

Information about the queue for a specified job.

", + "refs": { + "GetJobResponse$queueInfo": "

Queue information for the requested job. Only returned if QueueInfo is specified in the additionalAttributeNames field in the GetJob API request.

" + } + }, "HyperParameters": { "base": null, "refs": { @@ -406,6 +436,18 @@ "QuantumTaskSummary$shots": "

The shots used for the task.

" } }, + "QuantumTaskAdditionalAttributeName": { + "base": null, + "refs": { + "QuantumTaskAdditionalAttributeNamesList$member": null + } + }, + "QuantumTaskAdditionalAttributeNamesList": { + "base": null, + "refs": { + "GetQuantumTaskRequest$additionalAttributeNames": "

A list of attributes to return information for.

" + } + }, "QuantumTaskArn": { "base": null, "refs": { @@ -417,6 +459,12 @@ "QuantumTaskSummary$quantumTaskArn": "

The ARN of the task.

" } }, + "QuantumTaskQueueInfo": { + "base": "

Information about the queue for the specified quantum task.

", + "refs": { + "GetQuantumTaskResponse$queueInfo": "

Queue information for the requested quantum task. Only returned if QueueInfo is specified in the additionalAttributeNames field in the GetQuantumTask API request.

" + } + }, "QuantumTaskStatus": { "base": null, "refs": { @@ -436,6 +484,21 @@ "SearchQuantumTasksResponse$quantumTasks": "

An array of QuantumTaskSummary objects for tasks that match the specified filters.

" } }, + "QueueName": { + "base": null, + "refs": { + "DeviceQueueInfo$queue": "

The name of the queue.

", + "HybridJobQueueInfo$queue": "

The name of the queue.

", + "QuantumTaskQueueInfo$queue": "

The name of the queue.

" + } + }, + "QueuePriority": { + "base": null, + "refs": { + "DeviceQueueInfo$queuePriority": "

Optional. Specifies the priority of the queue. Tasks in a priority queue are processed before the tasks in a normal queue.

", + "QuantumTaskQueueInfo$queuePriority": "

Optional. Specifies the priority of the queue. Quantum tasks in a priority queue are processed before the tasks in a normal queue.

" + } + }, "ResourceNotFoundException": { "base": "

The specified resource was not found.

", "refs": { @@ -600,6 +663,7 @@ "AccessDeniedException$message": null, "ConflictException$message": null, "DeviceOfflineException$message": null, + "DeviceQueueInfo$queueSize": "

The number of jobs or tasks in the queue for a given device.

", "DeviceRetiredException$message": null, "DeviceSummary$deviceName": "

The name of the device.

", "DeviceSummary$providerName": "

The provider of the device.

", @@ -608,9 +672,13 @@ "GetQuantumTaskResponse$failureReason": "

The reason that a task failed.

", "GetQuantumTaskResponse$outputS3Bucket": "

The S3 bucket where task results are stored.

", "GetQuantumTaskResponse$outputS3Directory": "

The folder in the S3 bucket where task results are stored.

", + "HybridJobQueueInfo$message": "

Optional. Provides more information about the queue position. For example, if the job is complete and no longer in the queue, the message field contains that information.

", + "HybridJobQueueInfo$position": "

Current position of the job in the jobs queue.

", "InternalServiceException$message": null, "JobSummary$jobName": "

The name of the Amazon Braket job.

", "ListTagsForResourceRequest$resourceArn": "

Specify the resourceArn for the resource whose tags you want to display.

", + "QuantumTaskQueueInfo$message": "

Optional. Provides more information about the queue position. For example, if the task is complete and no longer in the queue, the message field contains that information.

", + "QuantumTaskQueueInfo$position": "

Current position of the task in the quantum tasks queue.

", "QuantumTaskSummary$outputS3Bucket": "

The S3 bucket where the task result file is stored.

", "QuantumTaskSummary$outputS3Directory": "

The folder in the S3 bucket where the task result file is stored.

", "ResourceNotFoundException$message": null, diff --git a/models/apis/braket/2019-09-01/endpoint-rule-set-1.json b/models/apis/braket/2019-09-01/endpoint-rule-set-1.json new file mode 100644 index 00000000000..7a57ae60375 --- /dev/null +++ b/models/apis/braket/2019-09-01/endpoint-rule-set-1.json @@ -0,0 +1,314 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://braket-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + 
], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://braket-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://braket.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://braket.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] +} \ No newline at end of file diff --git a/models/apis/braket/2019-09-01/endpoint-tests-1.json b/models/apis/braket/2019-09-01/endpoint-tests-1.json new file mode 100644 index 00000000000..653d446a497 --- /dev/null +++ b/models/apis/braket/2019-09-01/endpoint-tests-1.json @@ -0,0 +1,353 @@ +{ + "testCases": [ + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://braket.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or 
both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://braket.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/dms/2016-01-01/api-2.json b/models/apis/dms/2016-01-01/api-2.json index 4dd02c65c56..0571f70e3f3 100644 --- a/models/apis/dms/2016-01-01/api-2.json +++ b/models/apis/dms/2016-01-01/api-2.json @@ -2073,10 +2073,14 @@ "DataProviderSettings":{ "type":"structure", "members":{ + "RedshiftSettings":{"shape":"RedshiftDataProviderSettings"}, "PostgreSqlSettings":{"shape":"PostgreSqlDataProviderSettings"}, "MySqlSettings":{"shape":"MySqlDataProviderSettings"}, "OracleSettings":{"shape":"OracleDataProviderSettings"}, - "MicrosoftSqlServerSettings":{"shape":"MicrosoftSqlServerDataProviderSettings"} + "MicrosoftSqlServerSettings":{"shape":"MicrosoftSqlServerDataProviderSettings"}, + "DocDbSettings":{"shape":"DocDbDataProviderSettings"}, + "MariaDbSettings":{"shape":"MariaDbDataProviderSettings"}, + "MongoDbSettings":{"shape":"MongoDbDataProviderSettings"} }, "union":true }, @@ -3014,6 +3018,16 @@ "BucketName":{"shape":"String"} } }, + "DocDbDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "SslMode":{"shape":"DmsSslModeValue"}, + 
"CertificateArn":{"shape":"String"} + } + }, "DocDbSettings":{ "type":"structure", "members":{ @@ -3586,6 +3600,15 @@ "nclob" ] }, + "MariaDbDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "SslMode":{"shape":"DmsSslModeValue"}, + "CertificateArn":{"shape":"String"} + } + }, "MessageFormatValue":{ "type":"string", "enum":[ @@ -3875,6 +3898,19 @@ "ReplicationTask":{"shape":"ReplicationTask"} } }, + "MongoDbDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "SslMode":{"shape":"DmsSslModeValue"}, + "CertificateArn":{"shape":"String"}, + "AuthType":{"shape":"AuthTypeValue"}, + "AuthSource":{"shape":"String"}, + "AuthMechanism":{"shape":"AuthMechanismValue"} + } + }, "MongoDbSettings":{ "type":"structure", "members":{ @@ -4241,6 +4277,14 @@ "SslCaCertificateArn":{"shape":"String"} } }, + "RedshiftDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"} + } + }, "RedshiftSettings":{ "type":"structure", "members":{ diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index efbfbc19c44..b98fb9809d3 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -13,7 +13,7 @@ "CreateInstanceProfile": "

Creates the instance profile using the specified parameters.

", "CreateMigrationProject": "

Creates the migration project using the specified parameters.

You can run this action only after you create an instance profile and data providers using CreateInstanceProfile and CreateDataProvider.

", "CreateReplicationConfig": "

Creates a configuration that you can later provide to configure and start a DMS Serverless replication. You can also provide options to validate the configuration inputs before you start the replication.

", - "CreateReplicationInstance": "

Creates the replication instance using the specified parameters.

DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the CLI and DMS API. For information on the required permissions, see IAM Permissions Needed to Use DMS.

", + "CreateReplicationInstance": "

Creates the replication instance using the specified parameters.

DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the CLI and DMS API. For information on the required permissions, see IAM Permissions Needed to Use DMS.

If you don't specify a version when creating a replication instance, DMS will create the instance using the default engine version. For information about the default engine version, see Release Notes.

", "CreateReplicationSubnetGroup": "

Creates a replication subnet group given a list of the subnet IDs in a VPC.

The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs exception.

If a replication subnet group exists in your Amazon Web Services account, the CreateReplicationSubnetGroup action returns the following error message: The Replication Subnet Group already exists. In this case, delete the existing replication subnet group. To do so, use the DeleteReplicationSubnetGroup action. Optionally, choose Subnet groups in the DMS console, then choose your subnet group. Next, choose Delete from Actions.

", "CreateReplicationTask": "

Creates a replication task using the specified parameters.

", "DeleteCertificate": "

Deletes the specified certificate.

", @@ -107,7 +107,7 @@ "StopReplication": "

For a given DMS Serverless replication configuration, DMS stops any and all ongoing DMS Serverless replications. This command doesn't deprovision the stopped replications.

", "StopReplicationTask": "

Stops the replication task.

", "TestConnection": "

Tests the connection between the replication instance and the endpoint.

", - "UpdateSubscriptionsToEventBridge": "

Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts them to corresponding Amazon EventBridge rules. By default, this operation migrates subscriptions only when all your replication instance versions are 3.4.6 or higher. If any replication instances are from versions earlier than 3.4.6, the operation raises an error and tells you to upgrade these instances to version 3.4.6 or higher. To enable migration regardless of version, set the Force option to true. However, if you don't upgrade instances earlier than version 3.4.6, some types of events might not be available when you use Amazon EventBridge.

To call this operation, make sure that you have certain permissions added to your user account. For more information, see Migrating event subscriptions to Amazon EventBridge in the Amazon Web Services Database Migration Service User Guide.

" + "UpdateSubscriptionsToEventBridge": "

Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts them to corresponding Amazon EventBridge rules. By default, this operation migrates subscriptions only when all your replication instance versions are 3.4.5 or higher. If any replication instances are from versions earlier than 3.4.5, the operation raises an error and tells you to upgrade these instances to version 3.4.5 or higher. To enable migration regardless of version, set the Force option to true. However, if you don't upgrade instances earlier than version 3.4.5, some types of events might not be available when you use Amazon EventBridge.

To call this operation, make sure that you have certain permissions added to your user account. For more information, see Migrating event subscriptions to Amazon EventBridge in the Amazon Web Services Database Migration Service User Guide.

" }, "shapes": { "AccessDeniedFault": { @@ -168,12 +168,14 @@ "AuthMechanismValue": { "base": null, "refs": { + "MongoDbDataProviderSettings$AuthMechanism": "

The authentication method for connecting to the data provider. Valid values are DEFAULT, MONGODB_CR, or SCRAM_SHA_1.

", "MongoDbSettings$AuthMechanism": "

The authentication mechanism you use to access the MongoDB source endpoint.

For the default value, in MongoDB version 2.x, \"default\" is \"mongodb_cr\". For MongoDB version 3.x or later, \"default\" is \"scram_sha_1\". This setting isn't used when AuthType is set to \"no\".

" } }, "AuthTypeValue": { "base": null, "refs": { + "MongoDbDataProviderSettings$AuthType": "

The authentication type for the database connection. Valid values are PASSWORD or NO.

", "MongoDbSettings$AuthType": "

The authentication type you use to access the MongoDB source endpoint.

When set to \"no\", user name and password parameters are not used and can be empty.

" } }, @@ -241,7 +243,7 @@ "CreateEventSubscriptionMessage$Enabled": "

A Boolean value; set to true to activate the subscription, or set to false to create the subscription but not activate it.

", "CreateInstanceProfileMessage$PubliclyAccessible": "

Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.

", "CreateReplicationInstanceMessage$MultiAZ": "

Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

", - "CreateReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This parameter defaults to true.

Default: true

When AutoMinorVersionUpgrade is enabled, DMS uses the current default engine version when you create a replication instance. For example, if you set EngineVersion to a lower version number than the current default version, DMS uses the default version.

If AutoMinorVersionUpgrade isn’t enabled when you create a replication instance, DMS uses the engine version specified by the EngineVersion parameter.

", + "CreateReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This parameter defaults to true.

Default: true

", "CreateReplicationInstanceMessage$PubliclyAccessible": "

Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.

", "DescribeReplicationTasksMessage$WithoutSettings": "

An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default).

", "DocDbSettings$ExtractDocId": "

Specifies the document ID. Use this setting when NestingLevel is set to \"none\".

Default value is \"false\".

", @@ -270,14 +272,14 @@ "MicrosoftSQLServerSettings$ReadBackupOnly": "

When this attribute is set to Y, DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.

", "MicrosoftSQLServerSettings$UseBcpFullLoad": "

Use this attribute to transfer data for full-load operations using BCP. When the target table contains an identity column that does not exist in the source table, you must disable the use BCP for loading table option.

", "MicrosoftSQLServerSettings$UseThirdPartyBackupDevice": "

When this attribute is set to Y, DMS processes third-party transaction log backups if they are created in native format.

", - "MicrosoftSQLServerSettings$TrimSpaceInChar": "

Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration. The default value is true.

", + "MicrosoftSQLServerSettings$TrimSpaceInChar": "

Use the TrimSpaceInChar source endpoint setting to right-trim data on CHAR and NCHAR data types during migration. Setting TrimSpaceInChar does not left-trim data. The default value is true.

", "MicrosoftSQLServerSettings$ForceLobLookup": "

Forces LOB lookup on inline LOB.

", "ModifyDataProviderMessage$ExactSettings": "

If this attribute is Y, the current call to ModifyDataProvider replaces all existing data provider settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyDataProvider does two things:

", "ModifyEndpointMessage$ExactSettings": "

If this attribute is Y, the current call to ModifyEndpoint replaces all existing endpoint settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyEndpoint does two things:

For example, if you call create-endpoint ... --endpoint-settings '{\"a\":1}' ..., the endpoint has the following endpoint settings: '{\"a\":1}'. If you then call modify-endpoint ... --endpoint-settings '{\"b\":2}' ... for the same endpoint, the endpoint has the following settings: '{\"a\":1,\"b\":2}'.

However, suppose that you follow this with a call to modify-endpoint ... --endpoint-settings '{\"b\":2}' --exact-settings ... for that same endpoint again. Then the endpoint has the following settings: '{\"b\":2}'. All existing settings are replaced with the exact settings that you specify.

", "ModifyEventSubscriptionMessage$Enabled": "

A Boolean value; set to true to activate the subscription.

", "ModifyInstanceProfileMessage$PubliclyAccessible": "

Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.

", "ModifyReplicationInstanceMessage$MultiAZ": "

Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

", - "ModifyReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.

An outage does result if these factors apply:

When AutoMinorVersionUpgrade is enabled, DMS uses the current default engine version when you modify a replication instance. For example, if you set EngineVersion to a lower version number than the current default version, DMS uses the default version.

If AutoMinorVersionUpgrade isn’t enabled when you modify a replication instance, DMS uses the engine version specified by the EngineVersion parameter.

", + "ModifyReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.

An outage does result if these factors apply:

", "MongoDbSettings$UseUpdateLookUp": "

If true, DMS retrieves the entire document from the MongoDB source during migration. This may cause a migration failure if the server response exceeds bandwidth limits. To fetch only updates and deletes during migration, set this parameter to false.

", "MongoDbSettings$ReplicateShardCollections": "

If true, DMS replicates data to shard collections. DMS only uses this setting if the target endpoint is a DocumentDB elastic cluster.

When this setting is true, note the following:

", "MySQLSettings$CleanSourceMetadataOnMismatch": "

Cleans and recreates table metadata information on the replication instance when a mismatch occurs. For example, in a situation where running an alter DDL on the table could result in different information about the table cached in the replication instance.

", @@ -302,7 +304,7 @@ "PostgreSQLSettings$FailTasksOnLobTruncation": "

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

", "PostgreSQLSettings$HeartbeatEnable": "

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.

", "PostgreSQLSettings$TrimSpaceInChar": "

Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration. The default value is true.

", - "PostgreSQLSettings$MapBooleanAsBoolean": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5).

", + "PostgreSQLSettings$MapBooleanAsBoolean": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

", "PostgreSQLSettings$MapJsonbAsClob": "

When true, DMS migrates JSONB values as CLOB.

", "RebootReplicationInstanceMessage$ForceFailover": "

If this parameter is true, the reboot is conducted through a Multi-AZ failover. If the instance isn't configured for Multi-AZ, then you can't specify true. ( --force-planned-failover and --force-failover can't both be set to true.)

", "RebootReplicationInstanceMessage$ForcePlannedFailover": "

If this parameter is true, the reboot is conducted through a planned Multi-AZ failover where resources are released and cleaned up prior to conducting the failover. If the instance isn't configured for Multi-AZ, then you can't specify true. ( --force-planned-failover and --force-failover can't both be set to true.)

", @@ -315,7 +317,7 @@ "RedshiftSettings$RemoveQuotes": "

A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters within the quotation marks, including delimiters, are retained. Choose true to remove quotation marks. The default is false.

", "RedshiftSettings$TrimBlanks": "

A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter applies only to columns with a VARCHAR data type. Choose true to remove unneeded white space. The default is false.

", "RedshiftSettings$TruncateColumns": "

A value that specifies to truncate data in columns to the appropriate number of characters, so that the data fits in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a size of 4 MB or less. Choose true to truncate data. The default is false.

", - "RedshiftSettings$MapBooleanAsBoolean": "

When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as varchar(1).

", + "RedshiftSettings$MapBooleanAsBoolean": "

When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as varchar(1). You must set this setting on both the source and target endpoints for it to take effect.

", "ReplicationPendingModifiedValues$MultiAZ": "

Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true.

", "S3Settings$EnableStatistics": "

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for .parquet file format only.

", "S3Settings$IncludeOpForFullLoad": "

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet output files only to indicate how the rows were added to the source database.

DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.

", @@ -334,7 +336,7 @@ "TableStatistics$FullLoadReloaded": "

A value that indicates if the table was reloaded (true) or loaded as part of a new full load operation (false).

", "TimestreamSettings$CdcInsertsAndUpdates": "

Set this attribute to true to specify that DMS only applies inserts and updates, and not deletes. Amazon Timestream does not allow deleting records, so if this value is false, DMS nulls out the corresponding record in the Timestream database rather than deleting it.

", "TimestreamSettings$EnableMagneticStoreWrites": "

Set this attribute to true to enable memory store writes. When this value is false, DMS does not write records that are older in days than the value specified in MagneticDuration, because Amazon Timestream does not allow memory writes by default. For more information, see Storage in the Amazon Timestream Developer Guide.

", - "UpdateSubscriptionsToEventBridgeMessage$ForceMove": "

When set to true, this operation migrates DMS subscriptions for Amazon SNS notifications no matter what your replication instance version is. If not set or set to false, this operation runs only when all your replication instances are from DMS version 3.4.6 or higher.

" + "UpdateSubscriptionsToEventBridgeMessage$ForceMove": "

When set to true, this operation migrates DMS subscriptions for Amazon SNS notifications no matter what your replication instance version is. If not set or set to false, this operation runs only when all your replication instances are from DMS version 3.4.5 or higher.

" } }, "CancelReplicationTaskAssessmentRunMessage": { @@ -1223,9 +1225,12 @@ "base": null, "refs": { "CreateEndpointMessage$SslMode": "

The Secure Sockets Layer (SSL) mode to use for the SSL connection. The default is none.

", + "DocDbDataProviderSettings$SslMode": "

The SSL mode used to connect to the DocumentDB data provider. The default value is none.

", "Endpoint$SslMode": "

The SSL mode used to connect to the endpoint. The default value is none.

", + "MariaDbDataProviderSettings$SslMode": "

The SSL mode used to connect to the MariaDB data provider. The default value is none.

", "MicrosoftSqlServerDataProviderSettings$SslMode": "

The SSL mode used to connect to the Microsoft SQL Server data provider. The default value is none.

", "ModifyEndpointMessage$SslMode": "

The SSL mode used to connect to the endpoint. The default value is none.

", + "MongoDbDataProviderSettings$SslMode": "

The SSL mode used to connect to the MongoDB data provider. The default value is none.

", "MySqlDataProviderSettings$SslMode": "

The SSL mode used to connect to the MySQL data provider. The default value is none.

", "OracleDataProviderSettings$SslMode": "

The SSL mode used to connect to the Oracle data provider. The default value is none.

", "PostgreSqlDataProviderSettings$SslMode": "

The SSL mode used to connect to the PostgreSQL data provider. The default value is none.

" @@ -1239,6 +1244,12 @@ "ModifyEndpointMessage$DmsTransferSettings": "

The settings in JSON format for the DMS transfer type of source endpoint.

Attributes include the following:

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\"}

" } }, + "DocDbDataProviderSettings": { + "base": "

Provides information that defines a DocumentDB data provider.

", + "refs": { + "DataProviderSettings$DocDbSettings": null + } + }, "DocDbSettings": { "base": "

Provides information that defines a DocumentDB endpoint.

", "refs": { @@ -1624,8 +1635,8 @@ "base": null, "refs": { "Certificate$KeyLength": "

The key length of the cryptographic algorithm being used.

", - "ComputeConfig$MaxCapacityUnits": "

Specifies the maximum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 2 DCUs as the minimum value allowed. The list of valid DCU values includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value that you can specify for DMS Serverless is 384. The MaxCapacityUnits parameter is the only DCU parameter you are required to specify.

", - "ComputeConfig$MinCapacityUnits": "

Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 2 DCUs as the minimum value allowed. The list of valid DCU values includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU value that you can specify for DMS Serverless is 2. You don't have to specify a value for the MinCapacityUnits parameter. If you don't set this value, DMS scans the current activity of available source tables to identify an optimum setting for this parameter. If there is no current source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to the minimum DCU value allowed, 2.

", + "ComputeConfig$MaxCapacityUnits": "

Specifies the maximum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value that you can specify for DMS Serverless is 384. The MaxCapacityUnits parameter is the only DCU parameter you are required to specify.

", + "ComputeConfig$MinCapacityUnits": "

Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU value that you can specify for DMS Serverless is 1. You don't have to specify a value for the MinCapacityUnits parameter. If you don't set this value, DMS scans the current activity of available source tables to identify an optimum setting for this parameter. If there is no current source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to the minimum DCU value allowed, 1.

", "CreateEndpointMessage$Port": "

The port used by the endpoint database.

", "CreateReplicationInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be initially allocated for the replication instance.

", "DatabaseInstanceSoftwareDetailsResponse$OsArchitecture": "

The operating system architecture of the database.

", @@ -1669,6 +1680,7 @@ "DescribeReplicationsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

", "DescribeSchemasMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeTableStatisticsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 500.

", + "DocDbDataProviderSettings$Port": "

The port value for the DocumentDB data provider.

", "DocDbSettings$Port": "

The port value for the DocumentDB source endpoint.

", "DocDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

", "ElasticsearchSettings$FullLoadErrorPercentage": "

The maximum percentage of records that can fail to be written before a full load operation stops.

To avoid early failure, this counter is only effective after 1000 records are transferred. OpenSearch also has the concept of error monitoring during the last 10 minutes of an Observation Window. If the transfer of all records fails in the last 10 minutes, the full load operation stops.

", @@ -1685,11 +1697,13 @@ "InventoryData$NumberOfDatabases": "

The number of databases in the Fleet Advisor collector inventory.

", "InventoryData$NumberOfSchemas": "

The number of schemas in the Fleet Advisor collector inventory.

", "KafkaSettings$MessageMaxBytes": "

The maximum size in bytes for records created on the endpoint. The default is 1,000,000.

", + "MariaDbDataProviderSettings$Port": "

The port value for the MariaDB data provider.

", "MicrosoftSQLServerSettings$Port": "

Endpoint TCP port.

", "MicrosoftSQLServerSettings$BcpPacketSize": "

The maximum size of the packets (in bytes) used to transfer data using BCP.

", "MicrosoftSqlServerDataProviderSettings$Port": "

The port value for the Microsoft SQL Server data provider.

", "ModifyEndpointMessage$Port": "

The port used by the endpoint database.

", "ModifyReplicationInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be allocated for the replication instance.

", + "MongoDbDataProviderSettings$Port": "

The port value for the MongoDB data provider.

", "MongoDbSettings$Port": "

The port value for the MongoDB source endpoint.

", "MySQLSettings$EventsPollInterval": "

Specifies how often to check the binary log for new changes/events when the database is idle. The default is five seconds.

Example: eventsPollInterval=5;

In the example, DMS checks for changes in the binary logs every five seconds.

", "MySQLSettings$MaxFileSize": "

Specifies the maximum size (in KB) of any .csv file used to transfer data to a MySQL-compatible database.

Example: maxFileSize=512

", @@ -1718,6 +1732,7 @@ "RdsConfiguration$StorageIops": "

Describes the number of I/O operations completed each second (IOPS) on the recommended Amazon RDS DB instance that meets your requirements.

", "RdsRequirements$StorageSize": "

The required Amazon RDS DB instance storage size.

", "RdsRequirements$StorageIops": "

The required number of I/O operations completed each second (IOPS) on your Amazon RDS DB instance.

", + "RedshiftDataProviderSettings$Port": "

The port value for the Amazon Redshift data provider.

", "RedshiftSettings$ConnectionTimeout": "

A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

", "RedshiftSettings$FileTransferUploadStreams": "

The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.

The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For more information, see Multipart upload overview.

FileTransferUploadStreams accepts a value from 1 through 64. It defaults to 10.
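A brief sketch of tuning these Redshift upload settings on the typed struct; the values are illustrative, not recommendations.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// redshiftUploadTuning widens the parallel S3 multipart streams used per
// .csv file and raises the connection timeout.
func redshiftUploadTuning() *dms.RedshiftSettings {
	return &dms.RedshiftSettings{
		ConnectionTimeout:         aws.Int64(60000), // wait up to 60s (value in milliseconds) to establish a connection
		FileTransferUploadStreams: aws.Int64(32),    // accepts 1 through 64; defaults to 10
	}
}
```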

", "RedshiftSettings$LoadTimeout": "

The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.

", @@ -1909,6 +1924,12 @@ "PostgreSQLSettings$MapLongVarcharAs": "

When true, DMS migrates LONG values as VARCHAR.

" } }, + "MariaDbDataProviderSettings": { + "base": "

Provides information that defines a MariaDB data provider.

", + "refs": { + "DataProviderSettings$MariaDbSettings": "

Provides information that defines a MariaDB data provider.

" + } + }, "MessageFormatValue": { "base": null, "refs": { @@ -2058,6 +2079,12 @@ "refs": { } }, + "MongoDbDataProviderSettings": { + "base": "

Provides information that defines a MongoDB data provider.

", + "refs": { + "DataProviderSettings$MongoDbSettings": "

Provides information that defines a MongoDB data provider.
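Since this release introduces the data-provider shapes, a hedged sketch of creating one through aws-sdk-go follows. The host and database names are placeholders, and the "mongodb" engine value is an assumption patterned on the "sqlserver" and "postgres" values in the bundled examples.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// createMongoProvider registers a MongoDB data provider using only the
// fields documented in this model update. Values are placeholders.
func createMongoProvider(svc *dms.DatabaseMigrationService) (*dms.CreateDataProviderOutput, error) {
	return svc.CreateDataProvider(&dms.CreateDataProviderInput{
		DataProviderName: aws.String("my-mongodb-provider"),
		Engine:           aws.String("mongodb"), // assumed engine string
		Settings: &dms.DataProviderSettings{
			MongoDbSettings: &dms.MongoDbDataProviderSettings{
				ServerName:   aws.String("mongodb.example.com"),
				Port:         aws.Int64(27017),
				DatabaseName: aws.String("sales"),
				AuthSource:   aws.String("admin"), // the default authentication database
			},
		},
	})
}
```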

" + } + }, "MongoDbSettings": { "base": "

Provides information that defines a MongoDB endpoint.

", "refs": { @@ -2256,6 +2283,12 @@ "ModifyEndpointMessage$RedisSettings": "

Settings in JSON format for the Redis target endpoint.

" } }, + "RedshiftDataProviderSettings": { + "base": "

Provides information that defines an Amazon Redshift data provider.

", + "refs": { + "DataProviderSettings$RedshiftSettings": null + } + }, "RedshiftSettings": { "base": "

Provides information that defines an Amazon Redshift endpoint.

", "refs": { @@ -2937,7 +2970,7 @@ "CreateReplicationTaskMessage$TableMappings": "

The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the Database Migration Service User Guide.

", "CreateReplicationTaskMessage$ReplicationTaskSettings": "

Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for Database Migration Service Tasks in the Database Migration Service User Guide.

", "CreateReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

", - "CreateReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12”

", + "CreateReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12”
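A sketch of the same start/stop parameters passed through aws-sdk-go rather than the CLI. All ARNs are placeholders, the dates are illustrative, and the empty rule set is a simplification; real tasks need selection rules in TableMappings. Use either CdcStartPosition or CdcStartTime, never both.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// createCdcTask starts CDC at a date position and stops at a commit-time
// position. ARNs and dates are placeholders.
func createCdcTask(svc *dms.DatabaseMigrationService) error {
	_, err := svc.CreateReplicationTask(&dms.CreateReplicationTaskInput{
		ReplicationTaskIdentifier: aws.String("example-cdc-task"),
		MigrationType:             aws.String("cdc"),
		ReplicationInstanceArn:    aws.String("arn:aws:dms:us-east-1:012345678901:rep:EXAMPLE"),
		SourceEndpointArn:         aws.String("arn:aws:dms:us-east-1:012345678901:endpoint:SRC"),
		TargetEndpointArn:         aws.String("arn:aws:dms:us-east-1:012345678901:endpoint:TGT"),
		TableMappings:             aws.String(`{"rules": []}`), // real tasks need selection rules here
		CdcStartPosition:          aws.String("2018-03-08T12:12:12"),             // date format
		CdcStopPosition:           aws.String("commit_time:2018-03-09T12:12:12"), // no space after the colon
	})
	return err
}
```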

", "CreateReplicationTaskMessage$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

", "CreateReplicationTaskMessage$ResourceIdentifier": "

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for the end of EndpointArn.

", "DataProvider$DataProviderName": "

The name of the data provider.

", @@ -3088,6 +3121,9 @@ "DescribeTableStatisticsResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DmsTransferSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole action.

", "DmsTransferSettings$BucketName": "

The name of the S3 bucket to use.

", + "DocDbDataProviderSettings$ServerName": "

The name of the source DocumentDB server.

", + "DocDbDataProviderSettings$DatabaseName": "

The database name on the DocumentDB data provider.

", + "DocDbDataProviderSettings$CertificateArn": "

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

", "DocDbSettings$Username": "

The user name you use to access the DocumentDB source endpoint.

", "DocDbSettings$ServerName": "

The name of the server on the DocumentDB source endpoint.

", "DocDbSettings$DatabaseName": "

The database name on the DocumentDB source endpoint.

", @@ -3181,6 +3217,8 @@ "Limitation$Impact": "

The impact of the limitation. You can use this parameter to prioritize limitations that you want to address. Valid values include \"Blocker\", \"High\", \"Medium\", and \"Low\".

", "Limitation$Type": "

The type of the limitation, such as action required, upgrade required, and limited feature.

", "ListTagsForResourceMessage$ResourceArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the DMS resource to list tags for. This returns a list of keys (names of tags) created for the resource and their associated tag values.

", + "MariaDbDataProviderSettings$ServerName": "

The name of the MariaDB server.

", + "MariaDbDataProviderSettings$CertificateArn": "

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

", "MicrosoftSQLServerSettings$DatabaseName": "

Database name for the endpoint.

", "MicrosoftSQLServerSettings$ControlTablesFileGroup": "

Specifies a file group for the DMS internal tables. When the replication task starts, all the internal DMS control tables (awsdms_apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group.

", "MicrosoftSQLServerSettings$ServerName": "

Fully qualified domain name of the endpoint. For an Amazon RDS SQL Server instance, this is the output of DescribeDBInstances, in the Endpoint.Address field.

", @@ -3248,10 +3286,14 @@ "ModifyReplicationTaskMessage$TableMappings": "

When using the CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://. For example, --table-mappings file://mappingfile.json. When working with the DMS API, provide the JSON as the parameter value.
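With the API there is no file:// shorthand, so a caller reads the JSON and passes it as the parameter value; a sketch with the file name used in the example above.

```go
package example

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// modifyTaskMappings reads the same JSON file the CLI would receive via
// file:// and passes its contents as the TableMappings value.
func modifyTaskMappings(svc *dms.DatabaseMigrationService, taskArn string) error {
	mappings, err := os.ReadFile("mappingfile.json")
	if err != nil {
		return err
	}
	_, err = svc.ModifyReplicationTask(&dms.ModifyReplicationTaskInput{
		ReplicationTaskArn: aws.String(taskArn),
		TableMappings:      aws.String(string(mappings)),
	})
	return err
}
```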

", "ModifyReplicationTaskMessage$ReplicationTaskSettings": "

JSON file that contains settings for the task, such as task metadata settings.

", "ModifyReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

", - "ModifyReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” replaced below; original: --cdc-stop-position “commit_time: 2018-02-09T12:12:12”

", + "ModifyReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12”

", "ModifyReplicationTaskMessage$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

", + "MongoDbDataProviderSettings$ServerName": "

The name of the MongoDB server.

", + "MongoDbDataProviderSettings$DatabaseName": "

The database name on the MongoDB data provider.

", + "MongoDbDataProviderSettings$CertificateArn": "

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

", + "MongoDbDataProviderSettings$AuthSource": "

The MongoDB database name. This setting isn't used when AuthType is set to \"no\".

The default is \"admin\".

", "MongoDbSettings$Username": "

The user name you use to access the MongoDB source endpoint.

", - "MongoDbSettings$ServerName": "

The name of the server on the MongoDB source endpoint.

", + "MongoDbSettings$ServerName": "

The name of the server on the MongoDB source endpoint. For MongoDB Atlas, provide the server name for any of the servers in the replica set.

", "MongoDbSettings$DatabaseName": "

The database name on the MongoDB source endpoint.

", "MongoDbSettings$ExtractDocId": "

Specifies the document ID. Use this setting when NestingLevel is set to \"none\".

Default value is \"false\".

", "MongoDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

", @@ -3292,7 +3334,7 @@ "OracleSettings$Username": "

Endpoint connection user name.

", "OracleSettings$SecretsManagerAccessRoleArn": "

The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret has the value of the Amazon Web Services Secrets Manager secret that allows access to the Oracle endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId. Or you can specify clear-text values for UserName, Password, ServerName, and Port. You can't specify both. For more information on creating this SecretsManagerSecret and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
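To make the two mutually exclusive credential styles concrete, a sketch of both on OracleSettings; the ARNs, host, and credentials are placeholders, and only one of the two functions' results should be used on a given endpoint.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// Option 1: reference a Secrets Manager secret instead of clear-text
// credentials. ARNs are placeholders.
func oracleViaSecrets() *dms.OracleSettings {
	return &dms.OracleSettings{
		SecretsManagerAccessRoleArn: aws.String("arn:aws:iam::012345678901:role/dms-secret-access"),
		SecretsManagerSecretId:      aws.String("arn:aws:secretsmanager:us-east-1:012345678901:secret:oracle-EXAMPLE"),
	}
}

// Option 2: clear-text values. Don't combine these with option 1.
func oracleClearText() *dms.OracleSettings {
	return &dms.OracleSettings{
		Username:   aws.String("admin"),
		Password:   aws.String("example-password"),
		ServerName: aws.String("oracle.example.com"),
		Port:       aws.Int64(1521),
	}
}
```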

", "OracleSettings$SecretsManagerSecretId": "

The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the Oracle endpoint connection details.

", - "OracleSettings$SecretsManagerOracleAsmAccessRoleArn": "

Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret. This SecretsManagerOracleAsmSecret has the secret value that allows access to the Oracle ASM of the endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId. Or you can specify clear-text values for AsmUserName, AsmPassword, and AsmServerName. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

", + "OracleSettings$SecretsManagerOracleAsmAccessRoleArn": "

Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret. This SecretsManagerOracleAsmSecret has the secret value that allows access to the Oracle ASM of the endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId. Or you can specify clear-text values for AsmUser, AsmPassword, and AsmServerName. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

", "OracleSettings$SecretsManagerOracleAsmSecretId": "

Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN, partial ARN, or friendly name of the SecretsManagerOracleAsmSecret that contains the Oracle ASM connection details for the Oracle endpoint.

", "OrderableReplicationInstance$EngineVersion": "

The version of the replication engine.

", "OrderableReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", @@ -3333,6 +3375,8 @@ "RedisSettings$ServerName": "

Fully qualified domain name of the endpoint.

", "RedisSettings$AuthUserName": "

The user name provided with the auth-role option of the AuthType setting for a Redis target endpoint.

", "RedisSettings$SslCaCertificateArn": "

The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

", + "RedshiftDataProviderSettings$ServerName": "

The name of the Amazon Redshift server.

", + "RedshiftDataProviderSettings$DatabaseName": "

The database name on the Amazon Redshift data provider.

", "RedshiftSettings$AfterConnectScript": "

Code to run after connecting. This parameter should contain the code itself, not the name of a file containing the code.

", "RedshiftSettings$BucketFolder": "

An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift cluster.

For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID path. DMS uses the Redshift COPY command to upload the .csv files to the target table. The files are deleted once the COPY operation has finished. For more information, see COPY in the Amazon Redshift Database Developer Guide.

For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this BucketFolder/NetChangesTableID path.

", "RedshiftSettings$BucketName": "

The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift.

", @@ -3409,7 +3453,7 @@ "ReplicationTask$LastFailureMessage": "

The last error (failure) message generated for the replication task.

", "ReplicationTask$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", "ReplicationTask$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want the CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

", - "ReplicationTask$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12”

", + "ReplicationTask$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12”

", "ReplicationTask$RecoveryCheckpoint": "

Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.

", "ReplicationTask$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", "ReplicationTask$TaskData": "

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.

", @@ -3447,7 +3491,7 @@ "S3Settings$TimestampColumnName": "

A value that, when nonblank, causes DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

", "S3Settings$CsvNoSupValue": "

This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of the UseCsvNoSupValue setting.

This setting is supported in DMS versions 3.4.1 and later.

", "S3Settings$CdcPath": "

Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data changes to the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that you have set this parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder and BucketName.

For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.

If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, DMS creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.

For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target.

This setting is supported in DMS versions 3.4.2 and later.

", - "S3Settings$CsvNullValue": "

An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this parameter to pass a user-defined string as null when writing to the target. For example, when target columns are not nullable, you can use this option to differentiate between the empty string value and the null value. So, if you set this parameter value to the empty string (\"\" or ''), DMS treats the empty string as the null value instead of NULL.

The default value is NULL. Valid values include any valid string.

", + "S3Settings$CsvNullValue": "

An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this parameter to pass a user-defined string as null when writing to the target. For example, when target columns are nullable, you can use this option to differentiate between the empty string value and the null value. So, if you set this parameter value to the empty string (\"\" or ''), DMS treats the empty string as the null value instead of NULL.

The default value is NULL. Valid values include any valid string.

", "S3Settings$DatePartitionTimezone": "

When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in the following example.

s3-settings='{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"Asia/Seoul\", \"BucketName\": \"dms-nattarat-test\"}'
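The same settings as the CLI string above, expressed as the typed struct; the bucket name is swapped for a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

// datePartitionedS3Target mirrors the s3-settings example above.
func datePartitionedS3Target() *dms.S3Settings {
	return &dms.S3Settings{
		DatePartitionEnabled:   aws.Bool(true),
		DatePartitionSequence:  aws.String("YYYYMMDDHH"),
		DatePartitionDelimiter: aws.String("SLASH"),
		DatePartitionTimezone:  aws.String("Asia/Seoul"), // Area/Location format
		BucketName:             aws.String("dms-example-bucket"),
	}
}
```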

", "S3Settings$ExpectedBucketOwner": "

To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.

Example: --s3-settings='{\"ExpectedBucketOwner\": \"AWS_Account_ID\"}'

When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket owner against the specified parameter.

", "SCApplicationAttributes$S3BucketPath": "

The path for the Amazon S3 bucket that the application uses for exporting assessment reports.

", @@ -3502,7 +3546,7 @@ "StartReplicationTaskAssessmentRunMessage$AssessmentRunName": "

Unique name to identify the assessment run.

", "StartReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task to be started.

", "StartReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.

", - "StartReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12”

", + "StartReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12”

", "StopReplicationMessage$ReplicationConfigArn": "

The Amazon Resource Name of the replication to stop.

", "StopReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task to be stopped.

", "StringList$member": null, diff --git a/models/apis/dms/2016-01-01/examples-1.json b/models/apis/dms/2016-01-01/examples-1.json index f9e8c4e5dcb..f0e2ba56124 100644 --- a/models/apis/dms/2016-01-01/examples-1.json +++ b/models/apis/dms/2016-01-01/examples-1.json @@ -27,6 +27,54 @@ "title": "Add tags to resource" } ], + "CreateDataProvider": [ + { + "input": { + "DataProviderName": "sqlServer-dev", + "Description": "description", + "Engine": "sqlserver", + "Settings": { + "MicrosoftSqlServerSettings": { + "DatabaseName": "DatabaseName", + "Port": 11112, + "ServerName": "ServerName2", + "SslMode": "none" + } + }, + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ] + }, + "output": { + "DataProvider": { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-dataprovider", + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-dataprovider", + "Engine": "postgres", + "Settings": { + "PostgreSqlSettings": { + "DatabaseName": "target", + "Port": 5432, + "ServerName": "postrgesql.a1b2c3d4e5f6.us-east-1.rds.amazonaws.com", + "SslMode": "none" + } + } + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Creates the data provider with the specified parameters.", + "id": "create-data-provider-1689726511871", + "title": "Create Data Provider" + } + ], "CreateEndpoint": [ { "input": { @@ -73,6 +121,119 @@ "title": "Create endpoint" } ], + "CreateInstanceProfile": [ + { + "input": { + "Description": "Description", + "InstanceProfileName": "my-instance-profile", + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "NetworkType": "DUAL", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "my-subnet-group", + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ] + }, + "output": { + "InstanceProfile": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8", + "VpcSecurityGroups": [ + "sg-0123456" + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Creates the instance profile using the specified parameters.", + "id": "create-instance-profile-1689716070633", + "title": "Create Instance Profile" + } + ], + "CreateMigrationProject": [ + { + "input": { + "Description": "description", + "InstanceProfileIdentifier": "ip-au-17", + "MigrationProjectName": "my-migration-project", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "arn:aws:s3:::mylogin-bucket", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/Admin" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/ALL.SOURCE.ORACLE_12-A1B2C3" + } + ], + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderIdentifier": 
"arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/TARGET.postgresql-A1B2C3" + } + ], + "TransformationRules": "{\"key0\":\"value0\",\"key1\":\"value1\",\"key2\":\"value2\"}" + }, + "output": { + "MigrationProject": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "MigrationProjectName": "my-migration-project", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "source-oracle-12", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/ALL.SOURCE.ORACLE_12-0123456" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "target-dataprovider-3", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/dmytbon-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/TARGET.postgresql-0123456" + } + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Creates the migration project with the specified parameters.", + "id": "create-migration-project-1689716672685", + "title": "Create Migration Project" + } + ], "CreateReplicationInstance": [ { "input": { @@ -271,6 +432,38 @@ "title": "Delete Connection" } ], + "DeleteDataProvider": [ + { + "input": { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "DataProvider": { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-data-provider", + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-data-provider", + "Engine": "postgres", + "Settings": { + "PostgreSqlSettings": { + "DatabaseName": "target", + "Port": 5432, + "ServerName": "postrgesql.0a1b2c3d4e5f.us-east-1.rds.amazonaws.com", + "SslMode": "none" + } + } + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Deletes the specified data provider.", + "id": "delete-data-provider-1689724476356", + "title": "Delete Data Provider" + } + ], "DeleteEndpoint": [ { "input": { @@ -300,6 +493,81 @@ "title": "Delete Endpoint" } ], + "DeleteInstanceProfile": [ + { + "input": { + "InstanceProfileIdentifier": "arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "InstanceProfile": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "KmsKeyArn": 
"arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8", + "VpcSecurityGroups": [ + "sg-0123456" + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Deletes the specified instance profile.", + "id": "delete-instance-profile-1689716924105", + "title": "Delete Instance Profile" + } + ], + "DeleteMigrationProject": [ + { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "MigrationProject": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "MigrationProjectName": "my-migration-project", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "all-source-oracle-12", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/ALL.SOURCE.ORACLE_12-0123456" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "sde-obilyns-dataprovider-3", + "SecretsManagerAccessRoleArn": "arn:aws:iam::437223687239:role/dmytbon-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/TARGET.postgresql-0123456" + } + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Deletes the specified migration project.", + "id": "delete-migration-project-1689717217454", + "title": "Delete Migration Project" + } + ], "DeleteReplicationInstance": [ { "input": { @@ -518,6 +786,70 @@ "title": "Describe connections" } ], + "DescribeConversionConfiguration": [ + { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "ConversionConfiguration": "{\"Common project settings\":{\"ShowSeverityLevelInSql\":\"CRITICAL\"},\"ORACLE_TO_POSTGRESQL\" : {\"ToTimeZone\":false,\"LastDayBuiltinFunctionOracle\":false, \"NextDayBuiltinFunctionOracle\":false,\"ConvertProceduresToFunction\":false,\"NvlBuiltinFunctionOracle\":false,\"DbmsAssertBuiltinFunctionOracle\":false}}", + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns configuration parameters for a schema conversion project.", + "id": "describe-conversion-configuration-1689717690907", + "title": "Describe Conversion Configuration" + } + ], + "DescribeDataProviders": [ + { + "input": { + "Filters": [ + { + "Name": "data-provider-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + 
], + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "MaxRecords": 20 + }, + "output": { + "DataProviders": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-data-provider", + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-data-provider", + "Engine": "postgres", + "Settings": { + "PostgreSqlSettings": { + "DatabaseName": "target", + "Port": 5432, + "ServerName": "postrgesql.0a1b2c3d4e5f.us-east-1.rds.amazonaws.com", + "SslMode": "none" + } + } + } + ], + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "", + "id": "describe-data-providers-1689725897156", + "title": "Describe Data Providers" + } + ], "DescribeEndpointTypes": [ { "input": { @@ -582,6 +914,318 @@ "title": "Describe endpoints" } ], + "DescribeExtensionPackAssociations": [ + { + "input": { + "Filters": [ + { + "Name": "instance-profile-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20, + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of extension pack associations for the specified migration project.", + "id": "describe-extension-pack-associations-1689718322580", + "title": "Describe Extension Pack Associations" + } + ], + "DescribeInstanceProfiles": [ + { + "input": { + "Filters": [ + { + "Name": "instance-profile-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "InstanceProfiles": [ + { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8" + } + ], + "Marker": "0123456789abcdefghijklmnopqrs" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of instance profiles for your account in the current region.", + "id": "describe-instance-profiles-1689718406840", + "title": "Describe Instance Profiles" + } + ], + "DescribeMetadataModelAssessments": [ + { + "input": { + "Filters": [ + { + "Name": "my-migration-project", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20, + "MigrationProjectIdentifier": "" + }, + "output": { + "Marker": "ASDLKJASDJKHDFHGDNBGDASKJHGFK", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": 
"01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of metadata model assessments for your account in the current region.", + "id": "describe-metadata-model-assessments-1689718702303", + "title": "Describe Metadata Model Assessments" + } + ], + "DescribeMetadataModelConversions": [ + { + "input": { + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "MaxRecords": 123, + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of metadata model conversions for a migration project.", + "id": "describe-metadata-model-conversions-1689719021495", + "title": "Describe Metadata Model Conversions" + } + ], + "DescribeMetadataModelExportsAsScript": [ + { + "input": { + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20, + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of metadata model exports.", + "id": "describe-metadata-model-exports-as-script-1689719253938", + "title": "Describe Metadata Model Exports As Script" + } + ], + "DescribeMetadataModelExportsToTarget": [ + { + "input": { + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20, + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of metadata model exports.", + "id": "describe-metadata-model-exports-to-target-1689719484750", + "title": "Describe Metadata Model Exports To Target" + } + ], + "DescribeMetadataModelImports": [ + { + "input": { + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20, + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + 
"Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "Status": "SUCCESS" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of metadata model imports.", + "id": "describe-metadata-model-imports-1689719771322", + "title": "Describe Metadata Model Imports" + } + ], + "DescribeMigrationProjects": [ + { + "input": { + "Filters": [ + { + "Name": "migration-project-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901" + ] + } + ], + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "MaxRecords": 20 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "MigrationProjects": [ + { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "MigrationProjectName": "my-migration-project", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "all-source-oracle-12", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/ALL.SOURCE.ORACLE_12-012345" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "my-data-provider", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/dmytbon-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/TARGET.postgresql-012345" + } + ] + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Returns a paginated list of migration projects for your account in the current region.", + "id": "describe-migration-projects-1689719912075", + "title": "Describe Migration Projects" + } + ], "DescribeOrderableReplicationInstances": [ { "input": { @@ -770,6 +1414,37 @@ "title": "Describe table statistics" } ], + "ExportMetadataModelAssessment": [ + { + "input": { + "AssessmentReportTypes": [ + "pdf" + ], + "FileName": "file", + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "CsvReport": { + "ObjectURL": "url", + "S3ObjectKey": "object-name" + }, + "PdfReport": { + "ObjectURL": "url", + "S3ObjectKey": "object-name" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Saves a copy of a database migration 
assessment report to your S3 bucket. DMS can save your assessment report as a comma-separated value (CSV) or a PDF file.", + "id": "export-metadata-model-assessment-1689720309558", + "title": "Export Metadata Model Assessment" + } + ], "ImportCertificate": [ { "input": { @@ -812,6 +1487,69 @@ "title": "List tags for resource" } ], + "ModifyConversionConfiguration": [ + { + "input": { + "ConversionConfiguration": "{\"Common project settings\":{\"ShowSeverityLevelInSql\":\"CRITICAL\"},\"ORACLE_TO_POSTGRESQL\" : {\"ToTimeZone\":false,\"LastDayBuiltinFunctionOracle\":false, \"NextDayBuiltinFunctionOracle\":false,\"ConvertProceduresToFunction\":false,\"NvlBuiltinFunctionOracle\":false,\"DbmsAssertBuiltinFunctionOracle\":false}}", + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Modifies the specified schema conversion configuration using the provided parameters.", + "id": "modify-conversion-configuration-1689720529855", + "title": "Modify Conversion Configuration" + } + ], + "ModifyDataProvider": [ + { + "input": { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "DataProviderName": "new-name", + "Description": "description", + "Engine": "sqlserver", + "Settings": { + "MicrosoftSqlServerSettings": { + "DatabaseName": "DatabaseName", + "Port": 11112, + "ServerName": "ServerName2", + "SslMode": "none" + } + } + }, + "output": { + "DataProvider": { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-data-provider", + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-data-provider", + "Engine": "postgres", + "Settings": { + "PostgreSqlSettings": { + "DatabaseName": "target", + "Port": 5432, + "ServerName": "postrgesql.0a1b2c3d4e5f.us-east-1.rds.amazonaws.com", + "SslMode": "none" + } + } + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Modifies the specified data provider using the provided settings.", + "id": "modify-data-provider-1689720700567", + "title": "Modify Data Provider" + } + ], "ModifyEndpoint": [ { "input": { @@ -843,6 +1581,112 @@ "title": "Modify endpoint" } ], + "ModifyInstanceProfile": [ + { + "input": { + "AvailabilityZone": "", + "Description": "", + "InstanceProfileIdentifier": "", + "InstanceProfileName": "", + "KmsKeyArn": "", + "NetworkType": "", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "", + "VpcSecurityGroups": [ + + ] + }, + "output": { + "InstanceProfile": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "PubliclyAccessible": true, + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8", + "VpcSecurityGroups": [ + "sg-0123456" + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Modifies the specified instance profile using the provided parameters.", + "id": "modify-instance-profile-1689724223329", + "title": "Modify Instance Profile" + } + ], + 
"ModifyMigrationProject": [ + { + "input": { + "Description": "description", + "InstanceProfileIdentifier": "my-instance-profile", + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "MigrationProjectName": "new-name", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "arn:aws:s3:::myuser-bucket", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/Admin" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/myuser/ALL.SOURCE.ORACLE_12-A1B2C3" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/myuser/TARGET.postgresql-A1B2C3" + } + ] + }, + "output": { + "MigrationProject": { + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "MigrationProjectName": "my-migration-project", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "all-source-oracle-12", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/ALL.SOURCE.ORACLE_12-TP5rA9" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "DataProviderName": "my-dataprovider", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/TARGET.postgresql-mysecret" + } + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Modifies the specified migration project using the provided parameters.", + "id": "modify-migration-project-1689721117475", + "title": "Modify Migration Project" + } + ], "ModifyReplicationInstance": [ { "input": { @@ -987,6 +1831,130 @@ "title": "Remove tags from resource" } ], + "StartExtensionPackAssociation": [ + { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Applies the extension pack to your target database.", + "id": "start-extension-pack-association-1689721897266", + "title": "Start Extension Pack Association" + } + ], + "StartMetadataModelAssessment": [ 
+ { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Creates a database migration assessment report by assessing the migration complexity for \n your source database.", + "id": "start-metadata-model-assessment-1689722322596", + "title": "Start Metadata Model Assessment" + } + ], + "StartMetadataModelConversion": [ + { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Converts your source database objects to a format compatible with the target database. ", + "id": "start-metadata-model-conversion-1689722427798", + "title": "Start Metadata Model Conversion" + } + ], + "StartMetadataModelExportAsScript": [ + { + "input": { + "FileName": "FILE", + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Origin": "SOURCE", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Saves your converted code to a file as a SQL script, and stores this file on your S3 bucket.", + "id": "start-metadata-model-export-as-script-1689722681469", + "title": "Start Metadata Model Export As Script" + } + ], + "StartMetadataModelExportToTarget": [ + { + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "OverwriteExtensionPack": true, + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Applies converted database objects to your target database.", + "id": "start-metadata-model-export-to-target-1689783666835", + "title": "Start Metadata Model Export To Target" + } + ], + "StartMetadataModelImport": [ + { + "input": { + "MigrationProjectIdentifier": 
"arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Origin": "SOURCE", + "Refresh": false, + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "Loads the metadata for all the dependent database objects of the parent object.", + "id": "start-metadata-model-import-1689723124259", + "title": "Start Metadata Model Import" + } + ], "StartReplicationTask": [ { "input": { diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 7793ea285e7..ac43546a296 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -27496,7 +27496,8 @@ "c7i.12xlarge", "c7i.16xlarge", "c7i.24xlarge", - "c7i.48xlarge" + "c7i.48xlarge", + "mac2-m2pro.metal" ] }, "InstanceTypeHypervisor":{ diff --git a/models/apis/elasticfilesystem/2015-02-01/docs-2.json b/models/apis/elasticfilesystem/2015-02-01/docs-2.json index ef22fe4a1d9..8e036afb448 100644 --- a/models/apis/elasticfilesystem/2015-02-01/docs-2.json +++ b/models/apis/elasticfilesystem/2015-02-01/docs-2.json @@ -5,16 +5,16 @@ "CreateAccessPoint": "

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

", "CreateFileSystem": "

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with the same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport-level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

For more information, see Creating a file system in the Amazon EFS User Guide.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

You can set the throughput mode for the file system using the ThroughputMode parameter.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

File systems can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

", "CreateMountTarget": "

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

You can create only one mount target for an EFS file system using One Zone storage classes. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabilityZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

For more information, see Amazon EFS: How it Works.

To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, provide the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating. You can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

", - "CreateReplicationConfiguration": "

Creates a replication configuration that replicates an existing EFS file system to a new, read-only file system. For more information, see Amazon EFS replication in the Amazon EFS User Guide. The replication configuration specifies the following:

The following properties are set by default:

The following properties are turned off by default:

For more information, see Amazon EFS replication in the Amazon EFS User Guide.

", + "CreateReplicationConfiguration": "

Creates a replication configuration that replicates an existing EFS file system to a new, read-only file system. For more information, see Amazon EFS replication in the Amazon EFS User Guide. The replication configuration specifies the following:

The following properties are set by default:

The following properties are turned off by default:

For more information, see Amazon EFS replication in the Amazon EFS User Guide.

", "CreateTags": "

DEPRECATED - CreateTags is deprecated and not maintained. To create tags for EFS resources, use the TagResource API action.

Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the Name tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems operation.

This operation requires permission for the elasticfilesystem:CreateTags action.

", "DeleteAccessPoint": "

Deletes the specified access point. After deletion is complete, new clients can no longer connect to the access point. Clients connected to the access point at the time of deletion will continue to function until they terminate their connection.

This operation requires permissions for the elasticfilesystem:DeleteAccessPoint action.

", "DeleteFileSystem": "

Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system.

You need to manually delete mount targets attached to a file system before you can delete an EFS file system. This step is performed for you when you use the Amazon Web Services console to delete a file system.

You cannot delete a file system that is part of an EFS Replication configuration. You need to delete the replication configuration first.

You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. For more information, see DescribeMountTargets and DeleteMountTarget.

The DeleteFileSystem call returns while the file system state is still deleting. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID or creation token for the deleted file system, DescribeFileSystems returns a 404 FileSystemNotFound error.

This operation requires permissions for the elasticfilesystem:DeleteFileSystem action.

", "DeleteFileSystemPolicy": "

Deletes the FileSystemPolicy for the specified file system. The default FileSystemPolicy goes into effect once the existing policy is deleted. For more information about the default file system policy, see Using Resource-based Policies with EFS.

This operation requires permissions for the elasticfilesystem:DeleteFileSystemPolicy action.

", "DeleteMountTarget": "

Deletes the specified mount target.

This operation forcibly breaks any mounts of the file system by using the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes might be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount the file system on an EC2 instance in your VPC by using another mount target.

This operation requires permissions for the following action on the file system:

The DeleteMountTarget call returns while the mount target state is still deleting. You can check the mount target deletion status by calling the DescribeMountTargets operation, which returns a list of mount target descriptions for the given file system.

The operation also requires permissions for the following Amazon EC2 action on the mount target's network interface:

", - "DeleteReplicationConfiguration": "

Deletes an existing replication configuration. To delete a replication configuration, you must make the request from the Amazon Web Services Region in which the destination file system is located. Deleting a replication configuration ends the replication process. After a replication configuration is deleted, the destination file system is no longer read-only. You can write to the destination file system after its status becomes Writeable.

", + "DeleteReplicationConfiguration": "

Deletes an existing replication configuration. Deleting a replication configuration ends the replication process. After a replication configuration is deleted, the destination file system is no longer read-only. You can write to the destination file system after its status becomes Writeable.

", "DeleteTags": "

DEPRECATED - DeleteTags is deprecated and not maintained. To remove tags from EFS resources, use the UntagResource API action.

Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag restrictions in the Billing and Cost Management User Guide.

This operation requires permissions for the elasticfilesystem:DeleteTags action.

", "DescribeAccessPoints": "

Returns the description of a specific Amazon EFS access point if the AccessPointId is provided. If you provide an EFS FileSystemId, it returns descriptions of all access points for that file system. You can provide either an AccessPointId or a FileSystemId in the request, but not both.

This operation requires permissions for the elasticfilesystem:DescribeAccessPoints action.

", - "DescribeAccountPreferences": "

Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region. For more information, see Managing Amazon EFS resource IDs.

", + "DescribeAccountPreferences": "

Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region.

", "DescribeBackupPolicy": "

Returns the backup policy for the specified EFS file system.

", "DescribeFileSystemPolicy": "

Returns the FileSystemPolicy for the specified EFS file system.

This operation requires permissions for the elasticfilesystem:DescribeFileSystemPolicy action.

", "DescribeFileSystems": "

Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. This number is automatically set to 100. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker.

To retrieve a list of your file system descriptions, use this operation iteratively: call DescribeFileSystems first without the Marker, and then continue calling it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker.

The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.

", @@ -703,7 +703,7 @@ "PerformanceMode": { "base": null, "refs": { - "CreateFileSystemRequest$PerformanceMode": "

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

The maxIO mode is not supported on file systems using One Zone storage classes.

", + "CreateFileSystemRequest$PerformanceMode": "

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

The maxIO mode is not supported on file systems using One Zone storage classes.

Default is generalPurpose.

", "FileSystemDescription$PerformanceMode": "

The performance mode of the file system.

" } }, @@ -735,9 +735,9 @@ "ProvisionedThroughputInMibps": { "base": null, "refs": { - "CreateFileSystemRequest$ProvisionedThroughputInMibps": "

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

", - "FileSystemDescription$ProvisionedThroughputInMibps": "

The amount of provisioned throughput, measured in MiB/s, for the file system. Valid for file systems using ThroughputMode set to provisioned.

", - "UpdateFileSystemRequest$ProvisionedThroughputInMibps": "

(Optional) Sets the amount of provisioned throughput, in MiB/s, for the file system. Valid values are 1-1024. If you are changing the throughput mode to provisioned, you must also provide the amount of provisioned throughput. Required if ThroughputMode is changed to provisioned on update.

" + "CreateFileSystemRequest$ProvisionedThroughputInMibps": "

The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

", + "FileSystemDescription$ProvisionedThroughputInMibps": "

The amount of provisioned throughput, measured in MiBps, for the file system. Valid for file systems using ThroughputMode set to provisioned.

", + "UpdateFileSystemRequest$ProvisionedThroughputInMibps": "

(Optional) The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're updating. Required if ThroughputMode is changed to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

" } }, "PutAccountPreferencesRequest": { @@ -774,7 +774,7 @@ } }, "ReplicationConfigurationDescription": { - "base": null, + "base": "

Describes the replication configuration for a specific file system.

", "refs": { "ReplicationConfigurationDescriptions$member": null } diff --git a/models/apis/elasticfilesystem/2015-02-01/endpoint-rule-set-1.json b/models/apis/elasticfilesystem/2015-02-01/endpoint-rule-set-1.json index e2a751df21f..2501aa43f99 100644 --- a/models/apis/elasticfilesystem/2015-02-01/endpoint-rule-set-1.json +++ b/models/apis/elasticfilesystem/2015-02-01/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - 
} - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/events/2015-10-07/api-2.json b/models/apis/events/2015-10-07/api-2.json index 8c20eb8b720..a405dfb7b30 100644 --- a/models/apis/events/2015-10-07/api-2.json +++ b/models/apis/events/2015-10-07/api-2.json @@ -874,6 +874,13 @@ "min":1, "pattern":"^[ \\t]*[^\\x00-\\x1F:\\x7F]+([ \\t]+[^\\x00-\\x1F:\\x7F]+)*[ \\t]*$" }, + "AuthHeaderParametersSensitive":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^[ \\t]*[^\\x00-\\x1F:\\x7F]+([ \\t]+[^\\x00-\\x1F:\\x7F]+)*[ \\t]*$", + "sensitive":true + }, "AwsVpcConfiguration":{ "type":"structure", "required":["Subnets"], @@ -1024,7 +1031,7 @@ "type":"structure", "members":{ "Key":{"shape":"String"}, - "Value":{"shape":"String"}, + 
"Value":{"shape":"SensitiveString"}, "IsValueSecret":{"shape":"Boolean"} } }, @@ -1043,7 +1050,7 @@ "type":"structure", "members":{ "Key":{"shape":"HeaderKey"}, - "Value":{"shape":"HeaderValue"}, + "Value":{"shape":"HeaderValueSensitive"}, "IsValueSecret":{"shape":"Boolean"} } }, @@ -1094,7 +1101,7 @@ "type":"structure", "members":{ "Key":{"shape":"QueryStringKey"}, - "Value":{"shape":"QueryStringValue"}, + "Value":{"shape":"QueryStringValueSensitive"}, "IsValueSecret":{"shape":"Boolean"} } }, @@ -1182,7 +1189,7 @@ ], "members":{ "ApiKeyName":{"shape":"AuthHeaderParameters"}, - "ApiKeyValue":{"shape":"AuthHeaderParameters"} + "ApiKeyValue":{"shape":"AuthHeaderParametersSensitive"} } }, "CreateConnectionAuthRequestParameters":{ @@ -1202,7 +1209,7 @@ ], "members":{ "Username":{"shape":"AuthHeaderParameters"}, - "Password":{"shape":"AuthHeaderParameters"} + "Password":{"shape":"AuthHeaderParametersSensitive"} } }, "CreateConnectionOAuthClientRequestParameters":{ @@ -1213,7 +1220,7 @@ ], "members":{ "ClientID":{"shape":"AuthHeaderParameters"}, - "ClientSecret":{"shape":"AuthHeaderParameters"} + "ClientSecret":{"shape":"AuthHeaderParametersSensitive"} } }, "CreateConnectionOAuthRequestParameters":{ @@ -1679,6 +1686,12 @@ "max":512, "pattern":"^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$" }, + "HeaderValueSensitive":{ + "type":"string", + "max":512, + "pattern":"^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$", + "sensitive":true + }, "HttpParameters":{ "type":"structure", "members":{ @@ -2270,6 +2283,12 @@ "max":512, "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+" }, + "QueryStringValueSensitive":{ + "type":"string", + "max":512, + "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+", + "sensitive":true + }, "RedshiftDataParameters":{ "type":"structure", "required":[ @@ -2559,6 +2578,10 @@ "min":20, "pattern":"^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]|\\d|\\-)*:([0-9]{12})?:secret:[\\/_+=\\.@\\-A-Za-z0-9]+$" }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "Sql":{ "type":"string", "max":100000, @@ -2812,7 +2835,7 @@ "type":"structure", "members":{ "ApiKeyName":{"shape":"AuthHeaderParameters"}, - "ApiKeyValue":{"shape":"AuthHeaderParameters"} + "ApiKeyValue":{"shape":"AuthHeaderParametersSensitive"} } }, "UpdateConnectionAuthRequestParameters":{ @@ -2828,14 +2851,14 @@ "type":"structure", "members":{ "Username":{"shape":"AuthHeaderParameters"}, - "Password":{"shape":"AuthHeaderParameters"} + "Password":{"shape":"AuthHeaderParametersSensitive"} } }, "UpdateConnectionOAuthClientRequestParameters":{ "type":"structure", "members":{ "ClientID":{"shape":"AuthHeaderParameters"}, - "ClientSecret":{"shape":"AuthHeaderParameters"} + "ClientSecret":{"shape":"AuthHeaderParametersSensitive"} } }, "UpdateConnectionOAuthRequestParameters":{ diff --git a/models/apis/events/2015-10-07/docs-2.json b/models/apis/events/2015-10-07/docs-2.json index 42d89b4a35a..1c1ff0048fe 100644 --- a/models/apis/events/2015-10-07/docs-2.json +++ b/models/apis/events/2015-10-07/docs-2.json @@ -233,16 +233,21 @@ "ConnectionBasicAuthResponseParameters$Username": "

The user name to use for Basic authorization.

", "ConnectionOAuthClientResponseParameters$ClientID": "

The client ID associated with the response to the connection request.

", "CreateConnectionApiKeyAuthRequestParameters$ApiKeyName": "

The name of the API key to use for authorization.

", - "CreateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "

The value for the API key to use for authorization.

", "CreateConnectionBasicAuthRequestParameters$Username": "

The user name to use for Basic authorization.

", - "CreateConnectionBasicAuthRequestParameters$Password": "

The password associated with the user name to use for Basic authorization.

", "CreateConnectionOAuthClientRequestParameters$ClientID": "

The client ID to use for OAuth authorization for the connection.

", - "CreateConnectionOAuthClientRequestParameters$ClientSecret": "

The client secret associated with the client ID to use for OAuth authorization for the connection.

", "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyName": "

The name of the API key to use for authorization.

", - "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "

The value associated with teh API key to use for authorization.

", "UpdateConnectionBasicAuthRequestParameters$Username": "

The user name to use for Basic authorization.

", + "UpdateConnectionOAuthClientRequestParameters$ClientID": "

The client ID to use for OAuth authorization.

" + } + }, + "AuthHeaderParametersSensitive": { + "base": null, + "refs": { + "CreateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "

The value for the API key to use for authorization.

", + "CreateConnectionBasicAuthRequestParameters$Password": "

The password associated with the user name to use for Basic authorization.

", + "CreateConnectionOAuthClientRequestParameters$ClientSecret": "

The client secret associated with the client ID to use for OAuth authorization for the connection.

", + "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "

The value associated with the API key to use for authorization.

", "UpdateConnectionBasicAuthRequestParameters$Password": "

The password associated with the user name to use for Basic authorization.

", - "UpdateConnectionOAuthClientRequestParameters$ClientID": "

The client ID to use for OAuth authorization.

", "UpdateConnectionOAuthClientRequestParameters$ClientSecret": "

The client secret associated with the client ID to use for OAuth authorization.

" } }, @@ -909,10 +914,15 @@ "HeaderValue": { "base": null, "refs": { - "ConnectionHeaderParameter$Value": "

The value associated with the key.

", "HeaderParametersMap$value": null } }, + "HeaderValueSensitive": { + "base": null, + "refs": { + "ConnectionHeaderParameter$Value": "

The value associated with the key.

" + } + }, "HttpParameters": { "base": "

These are custom parameters to be used when the target is an API Gateway REST API or EventBridge ApiDestination. In the latter case, these are merged with any InvocationParameters specified on the Connection, with any values from the Connection taking precedence.

", "refs": { @@ -1451,10 +1461,15 @@ "QueryStringValue": { "base": null, "refs": { - "ConnectionQueryStringParameter$Value": "

The value associated with the key for the query string parameter.

", "QueryStringParametersMap$value": null } }, + "QueryStringValueSensitive": { + "base": null, + "refs": { + "ConnectionQueryStringParameter$Value": "

The value associated with the key for the query string parameter.

" + } + }, "RedshiftDataParameters": { "base": "

These are custom parameters to be used when the target is an Amazon Redshift cluster to invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.

", "refs": { @@ -1749,6 +1764,12 @@ "DescribeConnectionResponse$SecretArn": "

The ARN of the secret created from the authorization parameters specified for the connection.

" } }, + "SensitiveString": { + "base": null, + "refs": { + "ConnectionBodyParameter$Value": "

The value associated with the key.

" + } + }, "Sql": { "base": null, "refs": { @@ -1793,7 +1814,6 @@ "Condition$Key": "

Specifies the key for the condition. Currently the only supported key is aws:PrincipalOrgID.

", "Condition$Value": "

Specifies the value for the key. Currently, this must be the ID of the organization.

", "ConnectionBodyParameter$Key": "

The key for the parameter.

", - "ConnectionBodyParameter$Value": "

The value associated with the key.

", "CreateEventBusResponse$EventBusArn": "

The ARN of the new event bus.

", "CreatePartnerEventSourceResponse$EventSourceArn": "

The ARN of the partner event source.

", "DescribeEventBusResponse$Name": "

The name of the event bus. Currently, this is always default.

", diff --git a/models/apis/guardduty/2017-11-28/api-2.json b/models/apis/guardduty/2017-11-28/api-2.json index 7ab3dc8d679..900325ca445 100644 --- a/models/apis/guardduty/2017-11-28/api-2.json +++ b/models/apis/guardduty/2017-11-28/api-2.json @@ -1621,7 +1621,8 @@ "RESOURCE_TYPE", "COVERAGE_STATUS", "ADDON_VERSION", - "MANAGEMENT_TYPE" + "MANAGEMENT_TYPE", + "EKS_CLUSTER_NAME" ] }, "CoverageFilterCriterionList":{ @@ -1701,7 +1702,8 @@ "COVERAGE_STATUS", "ISSUE", "ADDON_VERSION", - "UPDATED_AT" + "UPDATED_AT", + "EKS_CLUSTER_NAME" ] }, "CoverageStatistics":{ diff --git a/models/apis/guardduty/2017-11-28/docs-2.json b/models/apis/guardduty/2017-11-28/docs-2.json index 14707270a3e..17273920b2c 100644 --- a/models/apis/guardduty/2017-11-28/docs-2.json +++ b/models/apis/guardduty/2017-11-28/docs-2.json @@ -43,7 +43,7 @@ "GetRemainingFreeTrialDays": "

Provides the number of days left for each data source used in the free trial period.

", "GetThreatIntelSet": "

Retrieves the ThreatIntelSet that is specified by the ThreatIntelSet ID.

", "GetUsageStatistics": "

Lists Amazon GuardDuty usage statistics over the last 30 days for the specified detector ID. For newly enabled detectors or data sources, the cost returned will include only the usage so far under 30 days. This may differ from the cost metrics in the console, which project usage over 30 days to provide a monthly cost estimate. For more information, see Understanding How Usage Costs are Calculated.

", - "InviteMembers": "

Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account that invokes this API. If you are using organizations to manager your GuardDuty environment, this step is not needed. For more information, see Managing accounts with organizations.

To invite Amazon Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API to add accounts by invitation. The invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can choose to accept the invitation from only one Amazon Web Services account. For more information, see Managing GuardDuty accounts by invitation.

After the invite has been accepted and you choose to disassociate a member account (by using DisassociateMembers) from your account, the details of the member account obtained by invoking CreateMembers, including the associated email addresses, will be retained. This is done so that you can invoke InviteMembers without the need to invoke CreateMembers again. To remove the details associated with a member account, you must also invoke DeleteMembers.

", + "InviteMembers": "

Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account that invokes this API. If you are using Amazon Web Services Organizations to manage your GuardDuty environment, this step is not needed. For more information, see Managing accounts with organizations.

To invite Amazon Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API to add accounts by invitation. The invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can choose to accept the invitation from only one Amazon Web Services account. For more information, see Managing GuardDuty accounts by invitation.

After the invite has been accepted and you choose to disassociate a member account (by using DisassociateMembers) from your account, the details of the member account obtained by invoking CreateMembers, including the associated email addresses, will be retained. This is done so that you can invoke InviteMembers without the need to invoke CreateMembers again. To remove the details associated with a member account, you must also invoke DeleteMembers.

", "ListCoverage": "

Lists coverage details for your GuardDuty account. If you're a GuardDuty administrator, you can retrieve all resources associated with the active member accounts in your organization.

Make sure the accounts have EKS Runtime Monitoring enabled and the GuardDuty agent running on their EKS nodes.

", "ListDetectors": "

Lists detectorIds of all the existing Amazon GuardDuty detector resources.

", "ListFilters": "

Returns a paginated list of the current filters.

", @@ -51,7 +51,7 @@ "ListIPSets": "

Lists the IPSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the IPSets returned are the IPSets from the associated administrator account.

", "ListInvitations": "

Lists all GuardDuty membership invitations that were sent to the current Amazon Web Services account.

", "ListMembers": "

Lists details about all member accounts for the current GuardDuty administrator account.

", - "ListOrganizationAdminAccounts": "

Lists the accounts configured as GuardDuty delegated administrators. Only the organization's management account can run this API operation.

", + "ListOrganizationAdminAccounts": "

Lists the accounts designated as GuardDuty delegated administrators. Only the organization's management account can run this API operation.

", "ListPublishingDestinations": "

Returns a list of publishing destinations associated with the specified detectorId.

", "ListTagsForResource": "

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, and publishing destinations, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

", "ListThreatIntelSets": "

Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the ThreatIntelSets associated with the administrator account are returned.

", @@ -285,7 +285,7 @@ "ThreatDetectedByName$Shortened": "

Flag to determine if the finding contains every single infected file-path and/or every threat.

", "UpdateDetectorRequest$Enable": "

Specifies whether the detector is enabled or disabled.

", "UpdateIPSetRequest$Activate": "

The updated Boolean value that specifies whether the IPSet is active or not.

", - "UpdateOrganizationConfigurationRequest$AutoEnable": "

Indicates whether to automatically enable member accounts in the organization.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve the similar results. You must provide the value for either autoEnableOrganizationMembers or autoEnable.

", + "UpdateOrganizationConfigurationRequest$AutoEnable": "

Represents whether or not to automatically enable member accounts in the organization.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable.

", "UpdateThreatIntelSetRequest$Activate": "

The updated Boolean value that specifies whether the ThreatIntelSet is active or not.

" } }, @@ -400,7 +400,7 @@ "CoverageFilterCriterionKey": { "base": null, "refs": { - "CoverageFilterCriterion$CriterionKey": "

An enum value representing possible filter fields.

" + "CoverageFilterCriterion$CriterionKey": "

An enum value representing possible filter fields.

Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has been deprecated.

" } }, "CoverageFilterCriterionList": { @@ -436,7 +436,7 @@ "CoverageSortKey": { "base": null, "refs": { - "CoverageSortCriteria$AttributeName": "

Represents the field name used to sort the coverage details.

" + "CoverageSortCriteria$AttributeName": "

Represents the field name used to sort the coverage details.

Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has been deprecated.

" } }, "CoverageStatistics": { @@ -543,7 +543,7 @@ "CriterionKey": { "base": null, "refs": { - "FilterCriterion$CriterionKey": "

An enum value representing possible scan properties to match with given scan entries.

" + "FilterCriterion$CriterionKey": "

An enum value representing possible scan properties to match with given scan entries.

Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has been deprecated.

" } }, "DNSLogsConfigurationResult": { @@ -1129,7 +1129,7 @@ } }, "Finding": { - "base": "

Contains information about the finding, which is generated when abnormal or suspicious activity is detected.

", + "base": "

Contains information about the finding that is generated when abnormal or suspicious activity is detected.

", "refs": { "Findings$member": null } @@ -1989,7 +1989,7 @@ "refs": { "OrganizationAdditionalConfiguration$AutoEnable": "

The status of the additional configuration that will be configured for the organization. Use one of the following values to configure the feature status for the entire organization:

", "OrganizationAdditionalConfigurationResult$AutoEnable": "

Describes the status of the additional configuration that is configured for the member accounts within the organization. One of the following values is the status for the entire organization:

", - "OrganizationFeatureConfiguration$AutoEnable": "

The status of the feature that will be configured for the organization. Use one of the following values to configure the feature status for the entire organization:

", + "OrganizationFeatureConfiguration$AutoEnable": "

Describes the status of the feature that is configured for the member accounts within the organization. One of the following values is the status for the entire organization:

", "OrganizationFeatureConfigurationResult$AutoEnable": "

Describes the status of the feature that is configured for the member accounts within the organization.

" } }, @@ -2333,7 +2333,7 @@ } }, "ScanConditionPair": { - "base": "

Represents key, value pair to be matched against given resource property.

", + "base": "

Represents the key:value pair to be matched against the given resource property.

", "refs": { "MapEquals$member": null } @@ -2600,7 +2600,7 @@ "EksClusterDetails$Arn": "

EKS cluster ARN.

", "EksClusterDetails$VpcId": "

The VPC ID to which the EKS cluster is attached.

", "EksClusterDetails$Status": "

The EKS cluster status.

", - "EnableOrganizationAdminAccountRequest$AdminAccountId": "

The Amazon Web Services Account ID for the organization account to be enabled as a GuardDuty delegated administrator.

", + "EnableOrganizationAdminAccountRequest$AdminAccountId": "

The Amazon Web Services account ID for the organization account to be enabled as a GuardDuty delegated administrator.

", "Eq$member": null, "Equals$member": null, "Finding$AccountId": "

The ID of the account in which the finding was generated.

", @@ -2823,7 +2823,7 @@ "TagKey": { "base": null, "refs": { - "ScanConditionPair$Key": "

Represents key in the map condition.

", + "ScanConditionPair$Key": "

Represents the key in the map condition.

", "TagKeyList$member": null, "TagMap$key": null } @@ -2862,7 +2862,7 @@ "TagValue": { "base": null, "refs": { - "ScanConditionPair$Value": "

Represents optional value in the map condition. If not specified, only key will be matched.

", + "ScanConditionPair$Value": "

Represents the optional value in the map condition. If not specified, only the key will be matched.

", "TagMap$value": null } }, diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index df8e0a4a2a4..601d826a5be 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -2785,7 +2785,8 @@ "enum": [ "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT", "ALTERNATE_AUDIO_AUTO_SELECT", - "ALTERNATE_AUDIO_NOT_AUTO_SELECT" + "ALTERNATE_AUDIO_NOT_AUTO_SELECT", + "AUDIO_ONLY_VARIANT_STREAM" ] }, "CmfcDescriptiveVideoServiceFlag": { @@ -6679,6 +6680,10 @@ "shape": "ExtendedDataServices", "locationName": "extendedDataServices" }, + "FollowInputIndex": { + "shape": "__integerMin0Max149", + "locationName": "followInputIndex" + }, "Inputs": { "shape": "__listOfInput", "locationName": "inputs" @@ -6811,6 +6816,10 @@ "shape": "ExtendedDataServices", "locationName": "extendedDataServices" }, + "FollowInputIndex": { + "shape": "__integerMin0Max149", + "locationName": "followInputIndex" + }, "Inputs": { "shape": "__listOfInputTemplate", "locationName": "inputs" @@ -9489,7 +9498,7 @@ "locationName": "systemIds" }, "Url": { - "shape": "__stringPatternHttps", + "shape": "__stringPatternHttpsD", "locationName": "url" } } @@ -9514,7 +9523,7 @@ "locationName": "resourceId" }, "Url": { - "shape": "__stringPatternHttps", + "shape": "__stringPatternHttpsD", "locationName": "url" } } @@ -10971,6 +10980,11 @@ "min": 0, "max": 1466400000 }, + "__integerMin0Max149": { + "type": "integer", + "min": 0, + "max": 149 + }, "__integerMin0Max15": { "type": "integer", "min": 0, @@ -11875,6 +11889,10 @@ "type": "string", "pattern": "^https:\\/\\/" }, + "__stringPatternHttpsD": { + "type": "string", + "pattern": "^https:\\/\\/[^:@\\/]*(:\\d*)?(\\/.*)?$" + }, "__stringPatternHttpsKantarmedia": { "type": "string", "pattern": "^https:\\/\\/.*.kantarmedia.*$" diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 3f605b88e90..5aa01a350f3 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -795,9 +795,9 @@ } }, "CmfcAudioTrackType": { - "base": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", + "base": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. 
Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for any variant that you want to prohibit the client from playing with video. This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF in the HLS manifest. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", "refs": { - "CmfcSettings$AudioTrackType": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." + "CmfcSettings$AudioTrackType": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for any variant that you want to prohibit the client from playing with video. This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF in the HLS manifest. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. 
When there is more than one variant in your output group, you must explicitly choose a value for this setting." } }, "CmfcDescriptiveVideoServiceFlag": { @@ -3351,9 +3351,9 @@ } }, "S3StorageClass": { - "base": "Specify the S3 storage class to use for this destination.", + "base": "Specify the S3 storage class to use for this output. To use your destination's default storage class: Keep the default value, Not set. For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html", "refs": { - "S3DestinationSettings$StorageClass": "Specify the S3 storage class to use for this destination." + "S3DestinationSettings$StorageClass": "Specify the S3 storage class to use for this output. To use your destination's default storage class: Keep the default value, Not set. For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html" } }, "SampleRangeConversion": { @@ -4127,6 +4127,13 @@ "H265Settings$HrdBufferSize": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." } }, + "__integerMin0Max149": { + "base": null, + "refs": { + "JobSettings$FollowInputIndex": "Specifies which input metadata to use for the default \"Follow input\" option for the following settings: resolution, frame rate, and pixel aspect ratio. In the simplest case, specify which input is used based on its index in the job. For example if you specify 3, then the fourth input will be used from each input. If the job does not have a fourth input, then the first input will be used. If no followInputIndex is specified, then 0 will be chosen automatically.", + "JobTemplateSettings$FollowInputIndex": "Specifies which input metadata to use for the default \"Follow input\" option for the following settings: resolution, frame rate, and pixel aspect ratio. In the simplest case, specify which input is used based on its index in the job. For example if you specify 3, then the fourth input will be used from each input. If the job does not have a fourth input, then the first input will be used. If no followInputIndex is specified, then 0 will be chosen automatically." + } + }, "__integerMin0Max15": { "base": null, "refs": { @@ -5509,7 +5516,12 @@ "__stringPatternHttps": { "base": null, "refs": { - "NielsenNonLinearWatermarkSettings$TicServerUrl": "Specify the endpoint for the TIC server that you have deployed and configured in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert can't connect directly to a TIC server. Instead, you must use API Gateway to provide a RESTful interface between MediaConvert and a TIC server that you deploy in your AWS account. For more information on deploying a TIC server in your AWS account and the required API Gateway, contact Nielsen support.", + "NielsenNonLinearWatermarkSettings$TicServerUrl": "Specify the endpoint for the TIC server that you have deployed and configured in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert can't connect directly to a TIC server. Instead, you must use API Gateway to provide a RESTful interface between MediaConvert and a TIC server that you deploy in your AWS account. For more information on deploying a TIC server in your AWS account and the required API Gateway, contact Nielsen support." 
+ } + }, + "__stringPatternHttpsD": { + "base": null, + "refs": { "SpekeKeyProvider$Url": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.", "SpekeKeyProviderCmaf$Url": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content." } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 5249f64a2ba..83b37c96818 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -11711,6 +11711,12 @@ }, "hostname" : "oidc.eu-central-1.amazonaws.com" }, + "eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "hostname" : "oidc.eu-central-2.amazonaws.com" + }, "eu-north-1" : { "credentialScope" : { "region" : "eu-north-1" @@ -12344,6 +12350,12 @@ }, "hostname" : "portal.sso.eu-central-1.amazonaws.com" }, + "eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "hostname" : "portal.sso.eu-central-2.amazonaws.com" + }, "eu-north-1" : { "credentialScope" : { "region" : "eu-north-1" @@ -19248,6 +19260,22 @@ "cn-northwest-1" : { } } }, + "oidc" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "oidc.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "oidc.cn-northwest-1.amazonaws.com.cn" + } + } + }, "organizations" : { "endpoints" : { "aws-cn-global" : { @@ -19276,6 +19304,22 @@ "cn-northwest-1" : { } } }, + "portal.sso" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "portal.sso.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "portal.sso.cn-northwest-1.amazonaws.com.cn" + } + } + }, "ram" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/braket/api.go b/service/braket/api.go index 23c66f7419e..cbbadae77c0 100644 --- a/service/braket/api.go +++ b/service/braket/api.go @@ -1610,7 +1610,7 @@ type CancelQuantumTaskInput struct { // The ARN of the task to cancel. // // QuantumTaskArn is a required field - QuantumTaskArn *string `location:"uri" locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `location:"uri" locationName:"quantumTaskArn" type:"string" required:"true"` } // String returns the string representation. @@ -1673,7 +1673,7 @@ type CancelQuantumTaskOutput struct { // The ARN of the task. // // QuantumTaskArn is a required field - QuantumTaskArn *string `locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `locationName:"quantumTaskArn" type:"string" required:"true"` } // String returns the string representation. @@ -2241,7 +2241,7 @@ type CreateQuantumTaskOutput struct { // The ARN of the task created by the request. // // QuantumTaskArn is a required field - QuantumTaskArn *string `locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `locationName:"quantumTaskArn" type:"string" required:"true"` } // String returns the string representation. @@ -2437,6 +2437,61 @@ func (s *DeviceOfflineException) RequestID() string { return s.RespMetadata.RequestID } +// Information about tasks and jobs queued on a device. +type DeviceQueueInfo struct { + _ struct{} `type:"structure"` + + // The name of the queue. 
+ // + // Queue is a required field + Queue *string `locationName:"queue" type:"string" required:"true" enum:"QueueName"` + + // Optional. Specifies the priority of the queue. Tasks in a priority queue + // are processed before the tasks in a normal queue. + QueuePriority *string `locationName:"queuePriority" type:"string" enum:"QueuePriority"` + + // The number of jobs or tasks in the queue for a given device. + // + // QueueSize is a required field + QueueSize *string `locationName:"queueSize" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeviceQueueInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeviceQueueInfo) GoString() string { + return s.String() +} + +// SetQueue sets the Queue field's value. +func (s *DeviceQueueInfo) SetQueue(v string) *DeviceQueueInfo { + s.Queue = &v + return s +} + +// SetQueuePriority sets the QueuePriority field's value. +func (s *DeviceQueueInfo) SetQueuePriority(v string) *DeviceQueueInfo { + s.QueuePriority = &v + return s +} + +// SetQueueSize sets the QueueSize field's value. +func (s *DeviceQueueInfo) SetQueueSize(v string) *DeviceQueueInfo { + s.QueueSize = &v + return s +} + // The specified device has been retired. type DeviceRetiredException struct { _ struct{} `type:"structure"` @@ -2646,6 +2701,9 @@ type GetDeviceOutput struct { // DeviceName is a required field DeviceName *string `locationName:"deviceName" type:"string" required:"true"` + // List of information about tasks and jobs queued on a device. + DeviceQueueInfo []*DeviceQueueInfo `locationName:"deviceQueueInfo" type:"list"` + // The status of the device. // // DeviceStatus is a required field @@ -2698,6 +2756,12 @@ func (s *GetDeviceOutput) SetDeviceName(v string) *GetDeviceOutput { return s } +// SetDeviceQueueInfo sets the DeviceQueueInfo field's value. +func (s *GetDeviceOutput) SetDeviceQueueInfo(v []*DeviceQueueInfo) *GetDeviceOutput { + s.DeviceQueueInfo = v + return s +} + // SetDeviceStatus sets the DeviceStatus field's value. func (s *GetDeviceOutput) SetDeviceStatus(v string) *GetDeviceOutput { s.DeviceStatus = &v @@ -2719,6 +2783,9 @@ func (s *GetDeviceOutput) SetProviderName(v string) *GetDeviceOutput { type GetJobInput struct { _ struct{} `type:"structure" nopayload:"true"` + // A list of attributes to return information for. + AdditionalAttributeNames []*string `location:"querystring" locationName:"additionalAttributeNames" type:"list" enum:"HybridJobAdditionalAttributeName"` + // The ARN of the job to retrieve. // // JobArn is a required field @@ -2759,6 +2826,12 @@ func (s *GetJobInput) Validate() error { return nil } +// SetAdditionalAttributeNames sets the AdditionalAttributeNames field's value. +func (s *GetJobInput) SetAdditionalAttributeNames(v []*string) *GetJobInput { + s.AdditionalAttributeNames = v + return s +} + // SetJobArn sets the JobArn field's value. 
func (s *GetJobInput) SetJobArn(v string) *GetJobInput { s.JobArn = &v @@ -2831,6 +2904,10 @@ type GetJobOutput struct { // OutputDataConfig is a required field OutputDataConfig *JobOutputDataConfig `locationName:"outputDataConfig" type:"structure" required:"true"` + // Queue information for the requested job. Only returned if QueueInfo is specified + // in the additionalAttributeNames field in the GetJob API request. + QueueInfo *HybridJobQueueInfo `locationName:"queueInfo" type:"structure"` + // The Amazon Resource Name (ARN) of an IAM role that Amazon Braket can assume // to perform tasks on behalf of a user. It can access user resources, run an // Amazon Braket job container on behalf of the user, and output resources to the @@ -2957,6 +3034,12 @@ func (s *GetJobOutput) SetOutputDataConfig(v *JobOutputDataConfig) *GetJobOutput return s } +// SetQueueInfo sets the QueueInfo field's value. +func (s *GetJobOutput) SetQueueInfo(v *HybridJobQueueInfo) *GetJobOutput { + s.QueueInfo = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *GetJobOutput) SetRoleArn(v string) *GetJobOutput { s.RoleArn = &v @@ -2990,10 +3073,13 @@ func (s *GetJobOutput) SetTags(v map[string]*string) *GetJobOutput { type GetQuantumTaskInput struct { _ struct{} `type:"structure" nopayload:"true"` + // A list of attributes to return information for. + AdditionalAttributeNames []*string `location:"querystring" locationName:"additionalAttributeNames" type:"list" enum:"QuantumTaskAdditionalAttributeName"` + // The ARN of the task to retrieve. // // QuantumTaskArn is a required field - QuantumTaskArn *string `location:"uri" locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `location:"uri" locationName:"quantumTaskArn" type:"string" required:"true"` } // String returns the string representation. @@ -3030,6 +3116,12 @@ func (s *GetQuantumTaskInput) Validate() error { return nil } +// SetAdditionalAttributeNames sets the AdditionalAttributeNames field's value. +func (s *GetQuantumTaskInput) SetAdditionalAttributeNames(v []*string) *GetQuantumTaskInput { + s.AdditionalAttributeNames = v + return s +} + // SetQuantumTaskArn sets the QuantumTaskArn field's value. func (s *GetQuantumTaskInput) SetQuantumTaskArn(v string) *GetQuantumTaskInput { s.QuantumTaskArn = &v @@ -3076,7 +3168,12 @@ type GetQuantumTaskOutput struct { // The ARN of the task. // // QuantumTaskArn is a required field - QuantumTaskArn *string `locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `locationName:"quantumTaskArn" type:"string" required:"true"` + + // Queue information for the requested quantum task. Only returned if QueueInfo + // is specified in the additionalAttributeNames field in the GetQuantumTask + // API request. + QueueInfo *QuantumTaskQueueInfo `locationName:"queueInfo" type:"structure"` // The number of shots used in the task. // @@ -3164,6 +3261,12 @@ func (s *GetQuantumTaskOutput) SetQuantumTaskArn(v string) *GetQuantumTaskOutput return s } +// SetQueueInfo sets the QueueInfo field's value. +func (s *GetQuantumTaskOutput) SetQueueInfo(v *QuantumTaskQueueInfo) *GetQuantumTaskOutput { + s.QueueInfo = v + return s +} + // SetShots sets the Shots field's value. func (s *GetQuantumTaskOutput) SetShots(v int64) *GetQuantumTaskOutput { s.Shots = &v @@ -3182,6 +3285,62 @@ func (s *GetQuantumTaskOutput) SetTags(v map[string]*string) *GetQuantumTaskOutp return s } +// Information about the queue for a specified job.
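The Braket queue-information additions above are opt-in: `QueueInfo` on `GetJobOutput` and `GetQuantumTaskOutput` is populated only when the request names it in `additionalAttributeNames`. A minimal sketch of the call shape, assuming default session wiring and a hypothetical task ARN (neither comes from this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/braket"
)

func main() {
	svc := braket.New(session.Must(session.NewSession()))

	// Opt in to queue information with the new additional attribute name.
	out, err := svc.GetQuantumTask(&braket.GetQuantumTaskInput{
		QuantumTaskArn: aws.String("arn:aws:braket:us-east-1:012345678901:quantum-task/example"), // hypothetical ARN
		AdditionalAttributeNames: []*string{
			aws.String(braket.QuantumTaskAdditionalAttributeNameQueueInfo),
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// QueueInfo is nil unless it was requested above.
	if qi := out.QueueInfo; qi != nil {
		fmt.Printf("queue=%s position=%s priority=%s\n",
			aws.StringValue(qi.Queue),
			aws.StringValue(qi.Position),
			aws.StringValue(qi.QueuePriority))
	}
}
```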
+type HybridJobQueueInfo struct { + _ struct{} `type:"structure"` + + // Optional. Provides more information about the queue position. For example, + // if the job is complete and no longer in the queue, the message field contains + // that information. + Message *string `locationName:"message" type:"string"` + + // Current position of the job in the jobs queue. + // + // Position is a required field + Position *string `locationName:"position" type:"string" required:"true"` + + // The name of the queue. + // + // Queue is a required field + Queue *string `locationName:"queue" type:"string" required:"true" enum:"QueueName"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HybridJobQueueInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HybridJobQueueInfo) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *HybridJobQueueInfo) SetMessage(v string) *HybridJobQueueInfo { + s.Message = &v + return s +} + +// SetPosition sets the Position field's value. +func (s *HybridJobQueueInfo) SetPosition(v string) *HybridJobQueueInfo { + s.Position = &v + return s +} + +// SetQueue sets the Queue field's value. +func (s *HybridJobQueueInfo) SetQueue(v string) *HybridJobQueueInfo { + s.Queue = &v + return s +} + // A list of parameters that specify the input channels, type of input data, // and where it is located. type InputFileConfig struct { @@ -3815,6 +3974,72 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe return s } +// Information about the queue for the specified quantum task. +type QuantumTaskQueueInfo struct { + _ struct{} `type:"structure"` + + // Optional. Provides more information about the queue position. For example, + // if the task is complete and no longer in the queue, the message field contains + // that information. + Message *string `locationName:"message" type:"string"` + + // Current position of the task in the quantum tasks queue. + // + // Position is a required field + Position *string `locationName:"position" type:"string" required:"true"` + + // The name of the queue. + // + // Queue is a required field + Queue *string `locationName:"queue" type:"string" required:"true" enum:"QueueName"` + + // Optional. Specifies the priority of the queue. Quantum tasks in a priority + // queue are processed before the tasks in a normal queue. + QueuePriority *string `locationName:"queuePriority" type:"string" enum:"QueuePriority"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QuantumTaskQueueInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s QuantumTaskQueueInfo) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *QuantumTaskQueueInfo) SetMessage(v string) *QuantumTaskQueueInfo { + s.Message = &v + return s +} + +// SetPosition sets the Position field's value. +func (s *QuantumTaskQueueInfo) SetPosition(v string) *QuantumTaskQueueInfo { + s.Position = &v + return s +} + +// SetQueue sets the Queue field's value. +func (s *QuantumTaskQueueInfo) SetQueue(v string) *QuantumTaskQueueInfo { + s.Queue = &v + return s +} + +// SetQueuePriority sets the QueuePriority field's value. +func (s *QuantumTaskQueueInfo) SetQueuePriority(v string) *QuantumTaskQueueInfo { + s.QueuePriority = &v + return s +} + // Includes information about a quantum task. type QuantumTaskSummary struct { _ struct{} `type:"structure"` @@ -3845,7 +4070,7 @@ type QuantumTaskSummary struct { // The ARN of the task. // // QuantumTaskArn is a required field - QuantumTaskArn *string `locationName:"quantumTaskArn" min:"1" type:"string" required:"true"` + QuantumTaskArn *string `locationName:"quantumTaskArn" type:"string" required:"true"` // The shots used for the task. // @@ -5150,6 +5375,18 @@ func DeviceType_Values() []string { } } +const ( + // HybridJobAdditionalAttributeNameQueueInfo is a HybridJobAdditionalAttributeName enum value + HybridJobAdditionalAttributeNameQueueInfo = "QueueInfo" +) + +// HybridJobAdditionalAttributeName_Values returns all elements of the HybridJobAdditionalAttributeName enum +func HybridJobAdditionalAttributeName_Values() []string { + return []string{ + HybridJobAdditionalAttributeNameQueueInfo, + } +} + const ( // InstanceTypeMlM4Xlarge is a InstanceType enum value InstanceTypeMlM4Xlarge = "ml.m4.xlarge" @@ -5398,6 +5635,18 @@ func JobPrimaryStatus_Values() []string { } } +const ( + // QuantumTaskAdditionalAttributeNameQueueInfo is a QuantumTaskAdditionalAttributeName enum value + QuantumTaskAdditionalAttributeNameQueueInfo = "QueueInfo" +) + +// QuantumTaskAdditionalAttributeName_Values returns all elements of the QuantumTaskAdditionalAttributeName enum +func QuantumTaskAdditionalAttributeName_Values() []string { + return []string{ + QuantumTaskAdditionalAttributeNameQueueInfo, + } +} + const ( // QuantumTaskStatusCreated is a QuantumTaskStatus enum value QuantumTaskStatusCreated = "CREATED" @@ -5434,6 +5683,38 @@ func QuantumTaskStatus_Values() []string { } } +const ( + // QueueNameQuantumTasksQueue is a QueueName enum value + QueueNameQuantumTasksQueue = "QUANTUM_TASKS_QUEUE" + + // QueueNameJobsQueue is a QueueName enum value + QueueNameJobsQueue = "JOBS_QUEUE" +) + +// QueueName_Values returns all elements of the QueueName enum +func QueueName_Values() []string { + return []string{ + QueueNameQuantumTasksQueue, + QueueNameJobsQueue, + } +} + +const ( + // QueuePriorityNormal is a QueuePriority enum value + QueuePriorityNormal = "Normal" + + // QueuePriorityPriority is a QueuePriority enum value + QueuePriorityPriority = "Priority" +) + +// QueuePriority_Values returns all elements of the QueuePriority enum +func QueuePriority_Values() []string { + return []string{ + QueuePriorityNormal, + QueuePriorityPriority, + } +} + const ( // SearchJobsFilterOperatorLt is a SearchJobsFilterOperator enum value SearchJobsFilterOperatorLt = "LT" diff --git a/service/cloudwatchevents/api.go b/service/cloudwatchevents/api.go index 7e562a7dc4c..6de0fc7b642 100644 --- a/service/cloudwatchevents/api.go +++ b/service/cloudwatchevents/api.go @@ -5857,7 +5857,11 @@ type ConnectionBodyParameter 
struct { Key *string `type:"string"` // The value associated with the key. - Value *string `type:"string"` + // + // Value is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ConnectionBodyParameter's + // String and GoString methods. + Value *string `type:"string" sensitive:"true"` } // String returns the string representation. @@ -5908,7 +5912,11 @@ type ConnectionHeaderParameter struct { Key *string `type:"string"` // The value associated with the key. - Value *string `type:"string"` + // + // Value is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ConnectionHeaderParameter's + // String and GoString methods. + Value *string `type:"string" sensitive:"true"` } // String returns the string representation. @@ -6105,7 +6113,11 @@ type ConnectionQueryStringParameter struct { Key *string `type:"string"` // The value associated with the key for the query string parameter. - Value *string `type:"string"` + // + // Value is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ConnectionQueryStringParameter's + // String and GoString methods. + Value *string `type:"string" sensitive:"true"` } // String returns the string representation. @@ -6485,8 +6497,12 @@ type CreateConnectionApiKeyAuthRequestParameters struct { // The value for the API key to use for authorization. // + // ApiKeyValue is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateConnectionApiKeyAuthRequestParameters's + // String and GoString methods. + // // ApiKeyValue is a required field - ApiKeyValue *string `min:"1" type:"string" required:"true"` + ApiKeyValue *string `min:"1" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -6637,8 +6653,12 @@ type CreateConnectionBasicAuthRequestParameters struct { // The password associated with the user name to use for Basic authorization. // + // Password is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateConnectionBasicAuthRequestParameters's + // String and GoString methods. + // // Password is a required field - Password *string `min:"1" type:"string" required:"true"` + Password *string `min:"1" type:"string" required:"true" sensitive:"true"` // The user name to use for Basic authorization. // @@ -6802,8 +6822,12 @@ type CreateConnectionOAuthClientRequestParameters struct { // The client secret associated with the client ID to use for OAuth authorization // for the connection. // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateConnectionOAuthClientRequestParameters's + // String and GoString methods. + // // ClientSecret is a required field - ClientSecret *string `min:"1" type:"string" required:"true"` + ClientSecret *string `min:"1" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -15380,7 +15404,11 @@ type UpdateConnectionApiKeyAuthRequestParameters struct { ApiKeyName *string `min:"1" type:"string"` // The value associated with the API key to use for authorization. - ApiKeyValue *string `min:"1" type:"string"` + // + // ApiKeyValue is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UpdateConnectionApiKeyAuthRequestParameters's + // String and GoString methods.
+ ApiKeyValue *string `min:"1" type:"string" sensitive:"true"` } // String returns the string representation. @@ -15522,7 +15550,11 @@ type UpdateConnectionBasicAuthRequestParameters struct { _ struct{} `type:"structure"` // The password associated with the user name to use for Basic authorization. - Password *string `min:"1" type:"string"` + // + // Password is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UpdateConnectionBasicAuthRequestParameters's + // String and GoString methods. + Password *string `min:"1" type:"string" sensitive:"true"` // The user name to use for Basic authorization. Username *string `min:"1" type:"string"` @@ -15663,7 +15695,11 @@ type UpdateConnectionOAuthClientRequestParameters struct { ClientID *string `min:"1" type:"string"` // The client secret associated with the client ID to use for OAuth authorization. - ClientSecret *string `min:"1" type:"string"` + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UpdateConnectionOAuthClientRequestParameters's + // String and GoString methods. + ClientSecret *string `min:"1" type:"string" sensitive:"true"` } // String returns the string representation. diff --git a/service/databasemigrationservice/api.go b/service/databasemigrationservice/api.go index 9fcdb0ef7bd..e165d6ba4b1 100644 --- a/service/databasemigrationservice/api.go +++ b/service/databasemigrationservice/api.go @@ -1118,6 +1118,10 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // For information on the required permissions, see IAM Permissions Needed to // Use DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.IAMPermissions). // +// If you don't specify a version when creating a replication instance, DMS +// will create the instance using the default engine version. For information +// about the default engine version, see Release Notes (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReleaseNotes.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -11497,11 +11501,11 @@ func (c *DatabaseMigrationService) UpdateSubscriptionsToEventBridgeRequest(input // Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts // them to corresponding Amazon EventBridge rules. By default, this operation // migrates subscriptions only when all your replication instance versions are -// 3.4.6 or higher. If any replication instances are from versions earlier than -// 3.4.6, the operation raises an error and tells you to upgrade these instances -// to version 3.4.6 or higher. To enable migration regardless of version, set +// 3.4.5 or higher. If any replication instances are from versions earlier than -// 3.4.5, the operation raises an error and tells you to upgrade these instances +// to version 3.4.5 or higher. To enable migration regardless of version, set // the Force option to true. However, if you don't upgrade instances earlier -// than version 3.4.6, some types of events might not be available when you +// than version 3.4.5, some types of events might not be available when you // use Amazon EventBridge.
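As the revised doc comment above notes, the version floor for migrating event subscriptions is now 3.4.5, and the Force option bypasses the check. A hedged sketch using the `ForceMove` input field (shown later in this diff); the session setup is illustrative, not part of the change:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := dms.New(session.Must(session.NewSession()))

	// Migrate DMS event subscriptions to EventBridge regardless of the
	// replication instance versions; the 3.4.5 minimum-version check is
	// skipped when ForceMove is true.
	out, err := svc.UpdateSubscriptionsToEventBridge(&dms.UpdateSubscriptionsToEventBridgeInput{
		ForceMove: aws.Bool(true),
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out)
}
```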
// // To call this operation, make sure that you have certain permissions added @@ -12572,22 +12576,22 @@ type ComputeConfig struct { // Specifies the maximum value of the DMS capacity units (DCUs) for which a // given DMS Serverless replication can be provisioned. A single DCU is 2GB - // of RAM, with 2 DCUs as the minimum value allowed. The list of valid DCU values - // includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value - // that you can specify for DMS Serverless is 384. The MaxCapacityUnits parameter - // is the only DCU parameter you are required to specify. + // of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values + // includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum + // value that you can specify for DMS Serverless is 384. The MaxCapacityUnits + // parameter is the only DCU parameter you are required to specify. MaxCapacityUnits *int64 `type:"integer"` // Specifies the minimum value of the DMS capacity units (DCUs) for which a // given DMS Serverless replication can be provisioned. A single DCU is 2GB - // of RAM, with 2 DCUs as the minimum value allowed. The list of valid DCU values - // includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU - // value that you can specify for DMS Serverless is 2. You don't have to specify - // a value for the MinCapacityUnits parameter. If you don't set this value, - // DMS scans the current activity of available source tables to identify an - // optimum setting for this parameter. If there is no current source activity + // of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values + // includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum + // DCU value that you can specify for DMS Serverless is 1. You don't have to + // specify a value for the MinCapacityUnits parameter. If you don't set this + // value, DMS scans the current activity of available source tables to identify + // an optimum setting for this parameter. If there is no current source activity // or DMS can't otherwise identify a more appropriate value, it sets this parameter - // to the minimum DCU value allowed, 2. + // to the minimum DCU value allowed, 1. MinCapacityUnits *int64 `type:"integer"` // Specifies whether the DMS Serverless replication is a Multi-AZ deployment. @@ -14267,14 +14271,6 @@ type CreateReplicationInstanceInput struct { // defaults to true. // // Default: true - // - // When AutoMinorVersionUpgrade is enabled, DMS uses the current default engine - // version when you create a replication instance. For example, if you set EngineVersion - // to a lower version number than the current default version, DMS uses the - // default version. - // - // If AutoMinorVersionUpgrade isn’t enabled when you create a replication - // instance, DMS uses the engine version specified by the EngineVersion parameter. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone where the replication instance will be created. The @@ -14698,7 +14694,7 @@ type CreateReplicationTaskInput struct { // // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + // Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“ CdcStopPosition *string `type:"string"` // The migration type. 
Valid values: full-load | cdc | full-load-and-cdc @@ -15133,9 +15129,18 @@ func (s *DataProviderDescriptorDefinition) SetSecretsManagerSecretId(v string) * type DataProviderSettings struct { _ struct{} `type:"structure"` + // Provides information that defines a DocumentDB data provider. + DocDbSettings *DocDbDataProviderSettings `type:"structure"` + + // Provides information that defines a MariaDB data provider. + MariaDbSettings *MariaDbDataProviderSettings `type:"structure"` + // Provides information that defines a Microsoft SQL Server data provider. MicrosoftSqlServerSettings *MicrosoftSqlServerDataProviderSettings `type:"structure"` + // Provides information that defines a MongoDB data provider. + MongoDbSettings *MongoDbDataProviderSettings `type:"structure"` + // Provides information that defines a MySQL data provider. MySqlSettings *MySqlDataProviderSettings `type:"structure"` @@ -15144,6 +15149,9 @@ type DataProviderSettings struct { // Provides information that defines a PostgreSQL data provider. PostgreSqlSettings *PostgreSqlDataProviderSettings `type:"structure"` + + // Provides information that defines an Amazon Redshift data provider. + RedshiftSettings *RedshiftDataProviderSettings `type:"structure"` } // String returns the string representation. @@ -15164,12 +15172,30 @@ func (s DataProviderSettings) GoString() string { return s.String() } +// SetDocDbSettings sets the DocDbSettings field's value. +func (s *DataProviderSettings) SetDocDbSettings(v *DocDbDataProviderSettings) *DataProviderSettings { + s.DocDbSettings = v + return s +} + +// SetMariaDbSettings sets the MariaDbSettings field's value. +func (s *DataProviderSettings) SetMariaDbSettings(v *MariaDbDataProviderSettings) *DataProviderSettings { + s.MariaDbSettings = v + return s +} + // SetMicrosoftSqlServerSettings sets the MicrosoftSqlServerSettings field's value. func (s *DataProviderSettings) SetMicrosoftSqlServerSettings(v *MicrosoftSqlServerDataProviderSettings) *DataProviderSettings { s.MicrosoftSqlServerSettings = v return s } +// SetMongoDbSettings sets the MongoDbSettings field's value. +func (s *DataProviderSettings) SetMongoDbSettings(v *MongoDbDataProviderSettings) *DataProviderSettings { + s.MongoDbSettings = v + return s +} + // SetMySqlSettings sets the MySqlSettings field's value. func (s *DataProviderSettings) SetMySqlSettings(v *MySqlDataProviderSettings) *DataProviderSettings { s.MySqlSettings = v @@ -15188,6 +15214,12 @@ func (s *DataProviderSettings) SetPostgreSqlSettings(v *PostgreSqlDataProviderSe return s } +// SetRedshiftSettings sets the RedshiftSettings field's value. +func (s *DataProviderSettings) SetRedshiftSettings(v *RedshiftDataProviderSettings) *DataProviderSettings { + s.RedshiftSettings = v + return s +} + // Describes an inventory database instance for a Fleet Advisor collector. type DatabaseInstanceSoftwareDetailsResponse struct { _ struct{} `type:"structure"` @@ -21785,6 +21817,75 @@ func (s *DmsTransferSettings) SetServiceAccessRoleArn(v string) *DmsTransferSett return s } +// Provides information that defines a DocumentDB data provider. +type DocDbDataProviderSettings struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate used for SSL connection. + CertificateArn *string `type:"string"` + + // The database name on the DocumentDB data provider. + DatabaseName *string `type:"string"` + + // The port value for the DocumentDB data provider. + Port *int64 `type:"integer"` + + // The name of the source DocumentDB server. 
+ ServerName *string `type:"string"` + + // The SSL mode used to connect to the DocumentDB data provider. The default + // value is none. + SslMode *string `type:"string" enum:"DmsSslModeValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DocDbDataProviderSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DocDbDataProviderSettings) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *DocDbDataProviderSettings) SetCertificateArn(v string) *DocDbDataProviderSettings { + s.CertificateArn = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *DocDbDataProviderSettings) SetDatabaseName(v string) *DocDbDataProviderSettings { + s.DatabaseName = &v + return s +} + +// SetPort sets the Port field's value. +func (s *DocDbDataProviderSettings) SetPort(v int64) *DocDbDataProviderSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *DocDbDataProviderSettings) SetServerName(v string) *DocDbDataProviderSettings { + s.ServerName = &v + return s +} + +// SetSslMode sets the SslMode field's value. +func (s *DocDbDataProviderSettings) SetSslMode(v string) *DocDbDataProviderSettings { + s.SslMode = &v + return s +} + // Provides information that defines a DocumentDB endpoint. type DocDbSettings struct { _ struct{} `type:"structure"` @@ -25312,6 +25413,66 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut return s } +// Provides information that defines a MariaDB data provider. +type MariaDbDataProviderSettings struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate used for SSL connection. + CertificateArn *string `type:"string"` + + // The port value for the MariaDB data provider + Port *int64 `type:"integer"` + + // The name of the MariaDB server. + ServerName *string `type:"string"` + + // The SSL mode used to connect to the MariaDB data provider. The default value + // is none. + SslMode *string `type:"string" enum:"DmsSslModeValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MariaDbDataProviderSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MariaDbDataProviderSettings) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *MariaDbDataProviderSettings) SetCertificateArn(v string) *MariaDbDataProviderSettings { + s.CertificateArn = &v + return s +} + +// SetPort sets the Port field's value. 
+func (s *MariaDbDataProviderSettings) SetPort(v int64) *MariaDbDataProviderSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MariaDbDataProviderSettings) SetServerName(v string) *MariaDbDataProviderSettings { + s.ServerName = &v + return s +} + +// SetSslMode sets the SslMode field's value. +func (s *MariaDbDataProviderSettings) SetSslMode(v string) *MariaDbDataProviderSettings { + s.SslMode = &v + return s +} + // Provides information that defines a Microsoft SQL Server endpoint. type MicrosoftSQLServerSettings struct { _ struct{} `type:"structure"` @@ -25400,8 +25561,9 @@ type MicrosoftSQLServerSettings struct { // Indicates the mode used to fetch CDC data. TlogAccessMode *string `type:"string" enum:"TlogAccessMode"` - // Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and - // NCHAR data types during migration. The default value is true. + // Use the TrimSpaceInChar source endpoint setting to right-trim data on CHAR + // and NCHAR data types during migration. Setting TrimSpaceInChar does not left-trim + // data. The default value is true. TrimSpaceInChar *bool `type:"boolean"` // Use this attribute to transfer data for full-load operations using BCP. @@ -27083,14 +27245,6 @@ type ModifyReplicationInstanceInput struct { // * A newer minor version is available. // // * DMS has enabled automatic patching for the given engine version. - // - // When AutoMinorVersionUpgrade is enabled, DMS uses the current default engine - // version when you modify a replication instance. For example, if you set EngineVersion - // to a lower version number than the current default version, DMS uses the - // default version. - // - // If AutoMinorVersionUpgrade isn’t enabled when you modify a replication - // instance, DMS uses the engine version specified by the EngineVersion parameter. AutoMinorVersionUpgrade *bool `type:"boolean"` // The engine version number of the replication instance. @@ -27417,7 +27571,7 @@ type ModifyReplicationTaskInput struct { // // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + // Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“ CdcStopPosition *string `type:"string"` // The migration type. Valid values: full-load | cdc | full-load-and-cdc @@ -27571,6 +27725,107 @@ func (s *ModifyReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *Mo return s } +// Provides information that defines a MongoDB data provider. +type MongoDbDataProviderSettings struct { + _ struct{} `type:"structure"` + + // The authentication method for connecting to the data provider. Valid values + // are DEFAULT, MONGODB_CR, or SCRAM_SHA_1. + AuthMechanism *string `type:"string" enum:"AuthMechanismValue"` + + // The MongoDB database name. This setting isn't used when AuthType is set to + // "no". + // + // The default is "admin". + AuthSource *string `type:"string"` + + // The authentication type for the database connection. Valid values are PASSWORD + // or NO. + AuthType *string `type:"string" enum:"AuthTypeValue"` + + // The Amazon Resource Name (ARN) of the certificate used for SSL connection. + CertificateArn *string `type:"string"` + + // The database name on the MongoDB data provider. + DatabaseName *string `type:"string"` + + // The port value for the MongoDB data provider. + Port *int64 `type:"integer"` + + // The name of the MongoDB server.
+ ServerName *string `type:"string"` + + // The SSL mode used to connect to the MongoDB data provider. The default value + // is none. + SslMode *string `type:"string" enum:"DmsSslModeValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MongoDbDataProviderSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MongoDbDataProviderSettings) GoString() string { + return s.String() +} + +// SetAuthMechanism sets the AuthMechanism field's value. +func (s *MongoDbDataProviderSettings) SetAuthMechanism(v string) *MongoDbDataProviderSettings { + s.AuthMechanism = &v + return s +} + +// SetAuthSource sets the AuthSource field's value. +func (s *MongoDbDataProviderSettings) SetAuthSource(v string) *MongoDbDataProviderSettings { + s.AuthSource = &v + return s +} + +// SetAuthType sets the AuthType field's value. +func (s *MongoDbDataProviderSettings) SetAuthType(v string) *MongoDbDataProviderSettings { + s.AuthType = &v + return s +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *MongoDbDataProviderSettings) SetCertificateArn(v string) *MongoDbDataProviderSettings { + s.CertificateArn = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MongoDbDataProviderSettings) SetDatabaseName(v string) *MongoDbDataProviderSettings { + s.DatabaseName = &v + return s +} + +// SetPort sets the Port field's value. +func (s *MongoDbDataProviderSettings) SetPort(v int64) *MongoDbDataProviderSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MongoDbDataProviderSettings) SetServerName(v string) *MongoDbDataProviderSettings { + s.ServerName = &v + return s +} + +// SetSslMode sets the SslMode field's value. +func (s *MongoDbDataProviderSettings) SetSslMode(v string) *MongoDbDataProviderSettings { + s.SslMode = &v + return s +} + // Provides information that defines a MongoDB endpoint. type MongoDbSettings struct { _ struct{} `type:"structure"` @@ -27661,7 +27916,8 @@ type MongoDbSettings struct { // contains the MongoDB endpoint connection details. SecretsManagerSecretId *string `type:"string"` - // The name of the server on the MongoDB source endpoint. + // The name of the server on the MongoDB source endpoint. For MongoDB Atlas, + // provide the server name for any of the servers in the replication set. ServerName *string `type:"string"` // If true, DMS retrieves the entire document from the MongoDB source during @@ -28607,7 +28863,7 @@ type OracleSettings struct { // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerOracleAsmSecretId. - // Or you can specify clear-text values for AsmUserName, AsmPassword, and AsmServerName. + // Or you can specify clear-text values for AsmUser, AsmPassword, and AsmServerName. // You can't specify both. 
For more information on creating this SecretsManagerOracleAsmSecret // and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId // required to access it, see Using secrets to access Database Migration Service @@ -29255,7 +29511,8 @@ type PostgreSQLSettings struct { HeartbeatSchema *string `type:"string"` // When true, lets PostgreSQL migrate the boolean type as boolean. By default, - // PostgreSQL migrates booleans as varchar(5). + // PostgreSQL migrates booleans as varchar(5). You must set this setting on + // both the source and target endpoints for it to take effect. MapBooleanAsBoolean *bool `type:"boolean"` // When true, DMS migrates JSONB values as CLOB. @@ -30325,6 +30582,56 @@ func (s *RedisSettings) SetSslSecurityProtocol(v string) *RedisSettings { return s } +// Provides information that defines an Amazon Redshift data provider. +type RedshiftDataProviderSettings struct { + _ struct{} `type:"structure"` + + // The database name on the Amazon Redshift data provider. + DatabaseName *string `type:"string"` + + // The port value for the Amazon Redshift data provider. + Port *int64 `type:"integer"` + + // The name of the Amazon Redshift server. + ServerName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftDataProviderSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftDataProviderSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *RedshiftDataProviderSettings) SetDatabaseName(v string) *RedshiftDataProviderSettings { + s.DatabaseName = &v + return s +} + +// SetPort sets the Port field's value. +func (s *RedshiftDataProviderSettings) SetPort(v int64) *RedshiftDataProviderSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *RedshiftDataProviderSettings) SetServerName(v string) *RedshiftDataProviderSettings { + s.ServerName = &v + return s +} + // Provides information that defines an Amazon Redshift endpoint. type RedshiftSettings struct { _ struct{} `type:"structure"` @@ -30430,7 +30737,8 @@ type RedshiftSettings struct { LoadTimeout *int64 `type:"integer"` // When true, lets Redshift migrate the boolean type as boolean. By default, - // Redshift migrates booleans as varchar(1). + // Redshift migrates booleans as varchar(1). You must set this setting on both + // the source and target endpoints for it to take effect. MapBooleanAsBoolean *bool `type:"boolean"` // The maximum size (in KB) of any .csv file used to load data on an S3 bucket @@ -32291,7 +32599,7 @@ type ReplicationTask struct { // // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + // Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“ CdcStopPosition *string `type:"string"` // The last error (failure) message generated for the replication task. 
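The new `DataProviderSettings` members introduced above (DocumentDB, MariaDB, MongoDB, Redshift) plug into the existing `CreateDataProvider` operation; a SQL Server variant appears in the shared examples further below. A sketch of the MongoDB case, where the engine string, host, and resource names are assumptions for illustration only:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := dms.New(session.Must(session.NewSession()))

	// Create a data provider backed by the new MongoDbDataProviderSettings.
	out, err := svc.CreateDataProvider(&dms.CreateDataProviderInput{
		DataProviderName: aws.String("mongodb-dev"), // hypothetical name
		Engine:           aws.String("mongodb"),     // engine identifier assumed for the new MongoDB vendor
		Settings: &dms.DataProviderSettings{
			MongoDbSettings: &dms.MongoDbDataProviderSettings{
				ServerName:   aws.String("mongodb.example.com"), // hypothetical host
				Port:         aws.Int64(27017),
				DatabaseName: aws.String("sales"),
				AuthType:     aws.String(dms.AuthTypeValuePassword),
				AuthSource:   aws.String("admin"),
				SslMode:      aws.String(dms.DmsSslModeValueNone),
			},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out)
}
```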
@@ -33682,9 +33990,9 @@ type S3Settings struct { // An optional parameter that specifies how DMS treats null values. While handling // the null value, you can use this parameter to pass a user-defined string // as null when writing to the target. For example, when target columns are - // not nullable, you can use this option to differentiate between the empty - // string value and the null value. So, if you set this parameter value to the - // empty string ("" or ''), DMS treats the empty string as the null value instead + // nullable, you can use this option to differentiate between the empty string + // value and the null value. So, if you set this parameter value to the empty + // string ("" or ''), DMS treats the empty string as the null value instead // of NULL. // // The default value is NULL. Valid values include any valid string. @@ -35869,7 +36177,7 @@ type StartReplicationTaskInput struct { // // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + // Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“ CdcStopPosition *string `type:"string"` // The Amazon Resource Name (ARN) of the replication task to be started. @@ -37109,7 +37417,7 @@ type UpdateSubscriptionsToEventBridgeInput struct { // When set to true, this operation migrates DMS subscriptions for Amazon SNS // notifications no matter what your replication instance version is. If not // set or set to false, this operation runs only when all your replication instances - // are from DMS version 3.4.6 or higher. + // are from DMS version 3.4.5 or higher. ForceMove *bool `type:"boolean"` } diff --git a/service/databasemigrationservice/examples_test.go b/service/databasemigrationservice/examples_test.go index 99ddbb9d8ce..987bb9d496e 100644 --- a/service/databasemigrationservice/examples_test.go +++ b/service/databasemigrationservice/examples_test.go @@ -62,6 +62,54 @@ func ExampleDatabaseMigrationService_AddTagsToResource_shared00() { fmt.Println(result) } +// Create Data Provider +// Creates the data provider with the specified parameters. 
+func ExampleDatabaseMigrationService_CreateDataProvider_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.CreateDataProviderInput{ + DataProviderName: aws.String("sqlServer-dev"), + Description: aws.String("description"), + Engine: aws.String("sqlserver"), + Settings: &databasemigrationservice.DataProviderSettings{ + MicrosoftSqlServerSettings: &databasemigrationservice.MicrosoftSqlServerDataProviderSettings{ + DatabaseName: aws.String("DatabaseName"), + Port: aws.Int64(11112), + ServerName: aws.String("ServerName2"), + SslMode: aws.String("none"), + }, + }, + Tags: []*databasemigrationservice.Tag{ + { + Key: aws.String("access"), + Value: aws.String("authorizedusers"), + }, + }, + } + + result, err := svc.CreateDataProvider(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // Create endpoint // Creates an endpoint using the provided settings. func ExampleDatabaseMigrationService_CreateEndpoint_shared00() { @@ -119,6 +167,124 @@ func ExampleDatabaseMigrationService_CreateEndpoint_shared00() { fmt.Println(result) } +// Create Instance Profile +// Creates the instance profile using the specified parameters. 
+func ExampleDatabaseMigrationService_CreateInstanceProfile_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.CreateInstanceProfileInput{ + Description: aws.String("Description"), + InstanceProfileName: aws.String("my-instance-profile"), + KmsKeyArn: aws.String("arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef"), + NetworkType: aws.String("DUAL"), + PubliclyAccessible: aws.Bool(true), + SubnetGroupIdentifier: aws.String("my-subnet-group"), + Tags: []*databasemigrationservice.Tag{ + { + Key: aws.String("access"), + Value: aws.String("authorizedusers"), + }, + }, + } + + result, err := svc.CreateInstanceProfile(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Create Migration Project +// Creates the migration project with the specified parameters. 
+func ExampleDatabaseMigrationService_CreateMigrationProject_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.CreateMigrationProjectInput{ + Description: aws.String("description"), + InstanceProfileIdentifier: aws.String("ip-au-17"), + MigrationProjectName: aws.String("my-migration-project"), + SchemaConversionApplicationAttributes: &databasemigrationservice.SCApplicationAttributes{ + S3BucketPath: aws.String("arn:aws:s3:::mylogin-bucket"), + S3BucketRoleArn: aws.String("arn:aws:iam::012345678901:role/Admin"), + }, + SourceDataProviderDescriptors: []*databasemigrationservice.DataProviderDescriptorDefinition{ + { + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + SecretsManagerAccessRoleArn: aws.String("arn:aws:iam::012345678901:role/myuser-admin-access"), + SecretsManagerSecretId: aws.String("arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/ALL.SOURCE.ORACLE_12-A1B2C3"), + }, + }, + Tags: []*databasemigrationservice.Tag{ + { + Key: aws.String("access"), + Value: aws.String("authorizedusers"), + }, + }, + TargetDataProviderDescriptors: []*databasemigrationservice.DataProviderDescriptorDefinition{ + { + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + SecretsManagerAccessRoleArn: aws.String("arn:aws:iam::012345678901:role/myuser-admin-access"), + SecretsManagerSecretId: aws.String("arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/TARGET.postgresql-A1B2C3"), + }, + }, + TransformationRules: aws.String("{\"key0\":\"value0\",\"key1\":\"value1\",\"key2\":\"value2\"}"), + } + + result, err := svc.CreateMigrationProject(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // Create replication instance // Creates the replication instance using the specified parameters. func ExampleDatabaseMigrationService_CreateReplicationInstance_shared00() { @@ -344,6 +510,38 @@ func ExampleDatabaseMigrationService_DeleteConnection_shared00() { fmt.Println(result) } +// Delete Data Provider +// Deletes the specified data provider. 
+func ExampleDatabaseMigrationService_DeleteDataProvider_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.DeleteDataProviderInput{ + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + } + + result, err := svc.DeleteDataProvider(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // Delete Endpoint // Deletes the specified endpoint. All tasks associated with the endpoint must be deleted // before you can delete the endpoint. @@ -375,6 +573,70 @@ func ExampleDatabaseMigrationService_DeleteEndpoint_shared00() { fmt.Println(result) } +// Delete Instance Profile +// Deletes the specified instance profile. +func ExampleDatabaseMigrationService_DeleteInstanceProfile_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.DeleteInstanceProfileInput{ + InstanceProfileIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + } + + result, err := svc.DeleteInstanceProfile(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Delete Migration Project +// Deletes the specified migration project. 
+func ExampleDatabaseMigrationService_DeleteMigrationProject_shared00() {
+	svc := databasemigrationservice.New(session.New())
+	input := &databasemigrationservice.DeleteMigrationProjectInput{
+		MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"),
+	}
+
+	result, err := svc.DeleteMigrationProject(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok {
+			switch aerr.Code() {
+			case databasemigrationservice.ErrCodeAccessDeniedFault:
+				fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error())
+			case databasemigrationservice.ErrCodeResourceNotFoundFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error())
+			case databasemigrationservice.ErrCodeInvalidResourceStateFault:
+				fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error())
+			default:
+				fmt.Println(aerr.Error())
+			}
+		} else {
+			// Print the error, cast err to awserr.Error to get the Code and
+			// Message from an error.
+			fmt.Println(err.Error())
+		}
+		return
+	}
+
+	fmt.Println(result)
+}
+
 // Delete Replication Instance
 // Deletes the specified replication instance. You must delete any migration tasks that
 // are associated with the replication instance before you can delete it.
@@ -570,6 +832,74 @@ func ExampleDatabaseMigrationService_DescribeConnections_shared00() {
 	fmt.Println(result)
 }
 
+// Describe Conversion Configuration
+// Returns configuration parameters for a schema conversion project.
+func ExampleDatabaseMigrationService_DescribeConversionConfiguration_shared00() {
+	svc := databasemigrationservice.New(session.New())
+	input := &databasemigrationservice.DescribeConversionConfigurationInput{
+		MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"),
+	}
+
+	result, err := svc.DescribeConversionConfiguration(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok {
+			switch aerr.Code() {
+			case databasemigrationservice.ErrCodeResourceNotFoundFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error())
+			default:
+				fmt.Println(aerr.Error())
+			}
+		} else {
+			// Print the error, cast err to awserr.Error to get the Code and
+			// Message from an error.
+			fmt.Println(err.Error())
+		}
+		return
+	}
+
+	fmt.Println(result)
+}
+
+// Describe Data Providers
+// Returns a paginated list of data providers for your account in the current region.
+func ExampleDatabaseMigrationService_DescribeDataProviders_shared00() {
+	svc := databasemigrationservice.New(session.New())
+	input := &databasemigrationservice.DescribeDataProvidersInput{
+		Filters: []*databasemigrationservice.Filter{
+			{
+				Name: aws.String("data-provider-identifier"),
+				Values: []*string{
+					aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"),
+				},
+			},
+		},
+		Marker:     aws.String("EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"),
+		MaxRecords: aws.Int64(20),
+	}
+
+	result, err := svc.DescribeDataProviders(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok {
+			switch aerr.Code() {
+			case databasemigrationservice.ErrCodeResourceNotFoundFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error())
+			case databasemigrationservice.ErrCodeAccessDeniedFault:
+				fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error())
+			default:
+				fmt.Println(aerr.Error())
+			}
+		} else {
+			// Print the error, cast err to awserr.Error to get the Code and
+			// Message from an error.
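+			// An error that does not implement awserr.Error is usually a
+			// transport or request-marshaling failure rather than a service
+			// fault.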
+ fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // Describe endpoint types // Returns information about the type of endpoints available. func ExampleDatabaseMigrationService_DescribeEndpointTypes_shared00() { @@ -644,17 +974,26 @@ func ExampleDatabaseMigrationService_DescribeEndpoints_shared00() { fmt.Println(result) } -// Describe orderable replication instances -// Returns information about the replication instance types that can be created in the -// specified region. -func ExampleDatabaseMigrationService_DescribeOrderableReplicationInstances_shared00() { +// Describe Extension Pack Associations +// Returns a paginated list of extension pack associations for the specified migration +// project. +func ExampleDatabaseMigrationService_DescribeExtensionPackAssociations_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeOrderableReplicationInstancesInput{ - Marker: aws.String(""), - MaxRecords: aws.Int64(123), + input := &databasemigrationservice.DescribeExtensionPackAssociationsInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("instance-profile-identifier"), + Values: []*string{ + aws.String("arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + }, + }, + }, + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), } - result, err := svc.DescribeOrderableReplicationInstances(input) + result, err := svc.DescribeExtensionPackAssociations(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -672,22 +1011,31 @@ func ExampleDatabaseMigrationService_DescribeOrderableReplicationInstances_share fmt.Println(result) } -// Describe refresh schema status -// Returns the status of the refresh-schemas operation. -func ExampleDatabaseMigrationService_DescribeRefreshSchemasStatus_shared00() { +// Describe Instance Profiles +// Returns a paginated list of instance profiles for your account in the current region. 
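+//
+// A minimal pagination sketch (illustrative, not one of the generated
+// examples), assuming the usual Marker/MaxRecords paging contract: the Marker
+// from each response is fed back into the next request until no Marker is
+// returned.
+//
+//	input := &databasemigrationservice.DescribeInstanceProfilesInput{MaxRecords: aws.Int64(20)}
+//	for {
+//		out, err := svc.DescribeInstanceProfiles(input)
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(out.InstanceProfiles)
+//		if out.Marker == nil {
+//			break
+//		}
+//		input.Marker = out.Marker
+//	}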
+func ExampleDatabaseMigrationService_DescribeInstanceProfiles_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeRefreshSchemasStatusInput{ - EndpointArn: aws.String(""), + input := &databasemigrationservice.DescribeInstanceProfilesInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("instance-profile-identifier"), + Values: []*string{ + aws.String("arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + }, + }, + }, + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), } - result, err := svc.DescribeRefreshSchemasStatus(input) + result, err := svc.DescribeInstanceProfiles(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeInvalidResourceStateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -702,25 +1050,26 @@ func ExampleDatabaseMigrationService_DescribeRefreshSchemasStatus_shared00() { fmt.Println(result) } -// Describe replication instances -// Returns the status of the refresh-schemas operation. -func ExampleDatabaseMigrationService_DescribeReplicationInstances_shared00() { +// Describe Metadata Model Assessments +// Returns a paginated list of metadata model assessments for your account in the current +// region. +func ExampleDatabaseMigrationService_DescribeMetadataModelAssessments_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeReplicationInstancesInput{ + input := &databasemigrationservice.DescribeMetadataModelAssessmentsInput{ Filters: []*databasemigrationservice.Filter{ { - Name: aws.String("string"), + Name: aws.String("my-migration-project"), Values: []*string{ - aws.String("string"), - aws.String("string"), + aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), }, }, }, - Marker: aws.String(""), - MaxRecords: aws.Int64(123), + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), + MigrationProjectIdentifier: aws.String(""), } - result, err := svc.DescribeReplicationInstances(input) + result, err := svc.DescribeMetadataModelAssessments(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -740,25 +1089,25 @@ func ExampleDatabaseMigrationService_DescribeReplicationInstances_shared00() { fmt.Println(result) } -// Describe replication subnet groups -// Returns information about the replication subnet groups. -func ExampleDatabaseMigrationService_DescribeReplicationSubnetGroups_shared00() { +// Describe Metadata Model Conversions +// Returns a paginated list of metadata model conversions for a migration project. 
+func ExampleDatabaseMigrationService_DescribeMetadataModelConversions_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeReplicationSubnetGroupsInput{ + input := &databasemigrationservice.DescribeMetadataModelConversionsInput{ Filters: []*databasemigrationservice.Filter{ { - Name: aws.String("string"), + Name: aws.String("request-id"), Values: []*string{ - aws.String("string"), - aws.String("string"), + aws.String("01234567-89ab-cdef-0123-456789abcdef"), }, }, }, - Marker: aws.String(""), - MaxRecords: aws.Int64(123), + Marker: aws.String("EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456"), + MaxRecords: aws.Int64(123), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), } - result, err := svc.DescribeReplicationSubnetGroups(input) + result, err := svc.DescribeMetadataModelConversions(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -778,25 +1127,25 @@ func ExampleDatabaseMigrationService_DescribeReplicationSubnetGroups_shared00() fmt.Println(result) } -// Describe replication tasks -// Returns information about replication tasks for your account in the current region. -func ExampleDatabaseMigrationService_DescribeReplicationTasks_shared00() { +// Describe Metadata Model Exports As Script +// Returns a paginated list of metadata model exports. +func ExampleDatabaseMigrationService_DescribeMetadataModelExportsAsScript_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeReplicationTasksInput{ + input := &databasemigrationservice.DescribeMetadataModelExportsAsScriptInput{ Filters: []*databasemigrationservice.Filter{ { - Name: aws.String("string"), + Name: aws.String("request-id"), Values: []*string{ - aws.String("string"), - aws.String("string"), + aws.String("01234567-89ab-cdef-0123-456789abcdef"), }, }, }, - Marker: aws.String(""), - MaxRecords: aws.Int64(123), + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), } - result, err := svc.DescribeReplicationTasks(input) + result, err := svc.DescribeMetadataModelExportsAsScript(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -816,22 +1165,28 @@ func ExampleDatabaseMigrationService_DescribeReplicationTasks_shared00() { fmt.Println(result) } -// Describe schemas -// Returns information about the schema for the specified endpoint. -func ExampleDatabaseMigrationService_DescribeSchemas_shared00() { +// Describe Metadata Model Exports To Target +// Returns a paginated list of metadata model exports. 
+func ExampleDatabaseMigrationService_DescribeMetadataModelExportsToTarget_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeSchemasInput{ - EndpointArn: aws.String(""), - Marker: aws.String(""), - MaxRecords: aws.Int64(123), + input := &databasemigrationservice.DescribeMetadataModelExportsToTargetInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("request-id"), + Values: []*string{ + aws.String("01234567-89ab-cdef-0123-456789abcdef"), + }, + }, + }, + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), } - result, err := svc.DescribeSchemas(input) + result, err := svc.DescribeMetadataModelExportsToTarget(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeInvalidResourceStateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) default: @@ -848,25 +1203,30 @@ func ExampleDatabaseMigrationService_DescribeSchemas_shared00() { fmt.Println(result) } -// Describe table statistics -// Returns table statistics on the database migration task, including table name, rows -// inserted, rows updated, and rows deleted. -func ExampleDatabaseMigrationService_DescribeTableStatistics_shared00() { +// Describe Metadata Model Imports +// Returns a paginated list of metadata model imports. +func ExampleDatabaseMigrationService_DescribeMetadataModelImports_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.DescribeTableStatisticsInput{ - Marker: aws.String(""), - MaxRecords: aws.Int64(123), - ReplicationTaskArn: aws.String(""), + input := &databasemigrationservice.DescribeMetadataModelImportsInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("request-id"), + Values: []*string{ + aws.String("01234567-89ab-cdef-0123-456789abcdef"), + }, + }, + }, + Marker: aws.String("0123456789abcdefghijklmnopqrs"), + MaxRecords: aws.Int64(20), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), } - result, err := svc.DescribeTableStatistics(input) + result, err := svc.DescribeMetadataModelImports(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) - case databasemigrationservice.ErrCodeInvalidResourceStateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -881,25 +1241,31 @@ func ExampleDatabaseMigrationService_DescribeTableStatistics_shared00() { fmt.Println(result) } -// Import certificate -// Uploads the specified certificate. -func ExampleDatabaseMigrationService_ImportCertificate_shared00() { +// Describe Migration Projects +// Returns a paginated list of migration projects for your account in the current region. 
+func ExampleDatabaseMigrationService_DescribeMigrationProjects_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.ImportCertificateInput{ - CertificateIdentifier: aws.String(""), - CertificatePem: aws.String(""), + input := &databasemigrationservice.DescribeMigrationProjectsInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("migration-project-identifier"), + Values: []*string{ + aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901"), + }, + }, + }, + Marker: aws.String("EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456"), + MaxRecords: aws.Int64(20), } - result, err := svc.ImportCertificate(input) + result, err := svc.DescribeMigrationProjects(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: - fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) - case databasemigrationservice.ErrCodeInvalidCertificateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidCertificateFault, aerr.Error()) - case databasemigrationservice.ErrCodeResourceQuotaExceededFault: - fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -914,20 +1280,20 @@ func ExampleDatabaseMigrationService_ImportCertificate_shared00() { fmt.Println(result) } -// List tags for resource -// Lists all tags for an AWS DMS resource. -func ExampleDatabaseMigrationService_ListTagsForResource_shared00() { +// Describe orderable replication instances +// Returns information about the replication instance types that can be created in the +// specified region. +func ExampleDatabaseMigrationService_DescribeOrderableReplicationInstances_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.ListTagsForResourceInput{ - ResourceArn: aws.String(""), + input := &databasemigrationservice.DescribeOrderableReplicationInstancesInput{ + Marker: aws.String(""), + MaxRecords: aws.Int64(123), } - result, err := svc.ListTagsForResource(input) + result, err := svc.DescribeOrderableReplicationInstances(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeResourceNotFoundFault: - fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -942,26 +1308,15 @@ func ExampleDatabaseMigrationService_ListTagsForResource_shared00() { fmt.Println(result) } -// Modify endpoint -// Modifies the specified endpoint. -func ExampleDatabaseMigrationService_ModifyEndpoint_shared00() { +// Describe refresh schema status +// Returns the status of the refresh-schemas operation. 
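+//
+// Since refresh-schemas runs asynchronously, callers typically poll this
+// operation after RefreshSchemas until the status leaves "refreshing". A rough
+// sketch (the 30-second interval is arbitrary, and the standard time package
+// is assumed):
+//
+//	for {
+//		out, err := svc.DescribeRefreshSchemasStatus(input)
+//		if err != nil || out.RefreshSchemasStatus == nil {
+//			break
+//		}
+//		if aws.StringValue(out.RefreshSchemasStatus.Status) != "refreshing" {
+//			break
+//		}
+//		time.Sleep(30 * time.Second)
+//	}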
+func ExampleDatabaseMigrationService_DescribeRefreshSchemasStatus_shared00() {
 	svc := databasemigrationservice.New(session.New())
-	input := &databasemigrationservice.ModifyEndpointInput{
-		CertificateArn:            aws.String(""),
-		DatabaseName:              aws.String(""),
-		EndpointArn:               aws.String(""),
-		EndpointIdentifier:        aws.String(""),
-		EndpointType:              aws.String("source"),
-		EngineName:                aws.String(""),
-		ExtraConnectionAttributes: aws.String(""),
-		Password:                  aws.String(""),
-		Port:                      aws.Int64(123),
-		ServerName:                aws.String(""),
-		SslMode:                   aws.String("require"),
-		Username:                  aws.String(""),
+	input := &databasemigrationservice.DescribeRefreshSchemasStatusInput{
+		EndpointArn: aws.String(""),
 	}
 
-	result, err := svc.ModifyEndpoint(input)
+	result, err := svc.DescribeRefreshSchemasStatus(input)
 	if err != nil {
 		if aerr, ok := err.(awserr.Error); ok {
 			switch aerr.Code() {
@@ -969,12 +1324,6 @@
 				fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error())
 			case databasemigrationservice.ErrCodeResourceNotFoundFault:
 				fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error())
-			case databasemigrationservice.ErrCodeResourceAlreadyExistsFault:
-				fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error())
-			case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault:
-				fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())
-			case databasemigrationservice.ErrCodeAccessDeniedFault:
-				fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error())
 			default:
 				fmt.Println(aerr.Error())
 			}
@@ -989,43 +1338,30 @@ func ExampleDatabaseMigrationService_ModifyEndpoint_shared00() {
 	fmt.Println(result)
 }
 
-// Modify replication instance
-// Modifies the replication instance to apply new settings. You can change one or more
-// parameters by specifying these parameters and the new values in the request. Some
-// settings are applied during the maintenance window.
-func ExampleDatabaseMigrationService_ModifyReplicationInstance_shared00() {
+// Describe replication instances
+// Returns information about replication instances for your account in the current region.
+func ExampleDatabaseMigrationService_DescribeReplicationInstances_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.ModifyReplicationInstanceInput{ - AllocatedStorage: aws.Int64(123), - AllowMajorVersionUpgrade: aws.Bool(true), - ApplyImmediately: aws.Bool(true), - AutoMinorVersionUpgrade: aws.Bool(true), - EngineVersion: aws.String("1.5.0"), - MultiAZ: aws.Bool(true), - PreferredMaintenanceWindow: aws.String("sun:06:00-sun:14:00"), - ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:6UTDJGBOUS3VI3SUWA66XFJCJQ"), - ReplicationInstanceClass: aws.String("dms.t2.micro"), - ReplicationInstanceIdentifier: aws.String("test-rep-1"), + input := &databasemigrationservice.DescribeReplicationInstancesInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("string"), + Values: []*string{ + aws.String("string"), + aws.String("string"), + }, + }, + }, + Marker: aws.String(""), + MaxRecords: aws.Int64(123), } - result, err := svc.ModifyReplicationInstance(input) + result, err := svc.DescribeReplicationInstances(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeAccessDeniedFault: - fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) - case databasemigrationservice.ErrCodeInvalidResourceStateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) - case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: - fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) - case databasemigrationservice.ErrCodeInsufficientResourceCapacityFault: - fmt.Println(databasemigrationservice.ErrCodeInsufficientResourceCapacityFault, aerr.Error()) - case databasemigrationservice.ErrCodeStorageQuotaExceededFault: - fmt.Println(databasemigrationservice.ErrCodeStorageQuotaExceededFault, aerr.Error()) - case databasemigrationservice.ErrCodeUpgradeDependencyFailureFault: - fmt.Println(databasemigrationservice.ErrCodeUpgradeDependencyFailureFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1040,31 +1376,30 @@ func ExampleDatabaseMigrationService_ModifyReplicationInstance_shared00() { fmt.Println(result) } -// Modify replication subnet group -// Modifies the settings for the specified replication subnet group. -func ExampleDatabaseMigrationService_ModifyReplicationSubnetGroup_shared00() { +// Describe replication subnet groups +// Returns information about the replication subnet groups. 
+func ExampleDatabaseMigrationService_DescribeReplicationSubnetGroups_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.ModifyReplicationSubnetGroupInput{ - ReplicationSubnetGroupDescription: aws.String(""), - ReplicationSubnetGroupIdentifier: aws.String(""), + input := &databasemigrationservice.DescribeReplicationSubnetGroupsInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("string"), + Values: []*string{ + aws.String("string"), + aws.String("string"), + }, + }, + }, + Marker: aws.String(""), + MaxRecords: aws.Int64(123), } - result, err := svc.ModifyReplicationSubnetGroup(input) + result, err := svc.DescribeReplicationSubnetGroups(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeAccessDeniedFault: - fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) - case databasemigrationservice.ErrCodeResourceQuotaExceededFault: - fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) - case databasemigrationservice.ErrCodeSubnetAlreadyInUse: - fmt.Println(databasemigrationservice.ErrCodeSubnetAlreadyInUse, aerr.Error()) - case databasemigrationservice.ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs: - fmt.Println(databasemigrationservice.ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs, aerr.Error()) - case databasemigrationservice.ErrCodeInvalidSubnet: - fmt.Println(databasemigrationservice.ErrCodeInvalidSubnet, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1079,29 +1414,540 @@ func ExampleDatabaseMigrationService_ModifyReplicationSubnetGroup_shared00() { fmt.Println(result) } -// Refresh schema -// Populates the schema for the specified endpoint. This is an asynchronous operation -// and can take several minutes. You can check the status of this operation by calling -// the describe-refresh-schemas-status operation. -func ExampleDatabaseMigrationService_RefreshSchemas_shared00() { +// Describe replication tasks +// Returns information about replication tasks for your account in the current region. 
+func ExampleDatabaseMigrationService_DescribeReplicationTasks_shared00() { svc := databasemigrationservice.New(session.New()) - input := &databasemigrationservice.RefreshSchemasInput{ - EndpointArn: aws.String(""), - ReplicationInstanceArn: aws.String(""), + input := &databasemigrationservice.DescribeReplicationTasksInput{ + Filters: []*databasemigrationservice.Filter{ + { + Name: aws.String("string"), + Values: []*string{ + aws.String("string"), + aws.String("string"), + }, + }, + }, + Marker: aws.String(""), + MaxRecords: aws.Int64(123), } - result, err := svc.RefreshSchemas(input) + result, err := svc.DescribeReplicationTasks(input) if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case databasemigrationservice.ErrCodeInvalidResourceStateFault: - fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) case databasemigrationservice.ErrCodeResourceNotFoundFault: fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) - case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: - fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) - case databasemigrationservice.ErrCodeResourceQuotaExceededFault: - fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Describe schemas +// Returns information about the schema for the specified endpoint. +func ExampleDatabaseMigrationService_DescribeSchemas_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.DescribeSchemasInput{ + EndpointArn: aws.String(""), + Marker: aws.String(""), + MaxRecords: aws.Int64(123), + } + + result, err := svc.DescribeSchemas(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Describe table statistics +// Returns table statistics on the database migration task, including table name, rows +// inserted, rows updated, and rows deleted. 
+func ExampleDatabaseMigrationService_DescribeTableStatistics_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.DescribeTableStatisticsInput{ + Marker: aws.String(""), + MaxRecords: aws.Int64(123), + ReplicationTaskArn: aws.String(""), + } + + result, err := svc.DescribeTableStatistics(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Export Metadata Model Assessment +// Saves a copy of a database migration assessment report to your S3 bucket. DMS can +// save your assessment report as a comma-separated value (CSV) or a PDF file. +func ExampleDatabaseMigrationService_ExportMetadataModelAssessment_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ExportMetadataModelAssessmentInput{ + AssessmentReportTypes: []*string{ + aws.String("pdf"), + }, + FileName: aws.String("file"), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.ExportMetadataModelAssessment(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Import certificate +// Uploads the specified certificate. +func ExampleDatabaseMigrationService_ImportCertificate_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ImportCertificateInput{ + CertificateIdentifier: aws.String(""), + CertificatePem: aws.String(""), + } + + result, err := svc.ImportCertificate(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidCertificateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidCertificateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// List tags for resource +// Lists all tags for an AWS DMS resource. +func ExampleDatabaseMigrationService_ListTagsForResource_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ListTagsForResourceInput{ + ResourceArn: aws.String(""), + } + + result, err := svc.ListTagsForResource(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify Conversion Configuration +// Modifies the specified schema conversion configuration using the provided parameters. +func ExampleDatabaseMigrationService_ModifyConversionConfiguration_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyConversionConfigurationInput{ + ConversionConfiguration: aws.String("{\"Common project settings\":{\"ShowSeverityLevelInSql\":\"CRITICAL\"},\"ORACLE_TO_POSTGRESQL\" : {\"ToTimeZone\":false,\"LastDayBuiltinFunctionOracle\":false, \"NextDayBuiltinFunctionOracle\":false,\"ConvertProceduresToFunction\":false,\"NvlBuiltinFunctionOracle\":false,\"DbmsAssertBuiltinFunctionOracle\":false}}"), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + } + + result, err := svc.ModifyConversionConfiguration(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify Data Provider +// Modifies the specified data provider using the provided settings. 
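+// DataProviderSettings is a union-style structure: set only the member that
+// matches the Engine value (MicrosoftSqlServerSettings for "sqlserver" in the
+// example below).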
+func ExampleDatabaseMigrationService_ModifyDataProvider_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyDataProviderInput{ + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + DataProviderName: aws.String("new-name"), + Description: aws.String("description"), + Engine: aws.String("sqlserver"), + Settings: &databasemigrationservice.DataProviderSettings{ + MicrosoftSqlServerSettings: &databasemigrationservice.MicrosoftSqlServerDataProviderSettings{ + DatabaseName: aws.String("DatabaseName"), + Port: aws.Int64(11112), + ServerName: aws.String("ServerName2"), + SslMode: aws.String("none"), + }, + }, + } + + result, err := svc.ModifyDataProvider(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify endpoint +// Modifies the specified endpoint. +func ExampleDatabaseMigrationService_ModifyEndpoint_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyEndpointInput{ + CertificateArn: aws.String(""), + DatabaseName: aws.String(""), + EndpointArn: aws.String(""), + EndpointIdentifier: aws.String(""), + EndpointType: aws.String("source"), + EngineName: aws.String(""), + ExtraConnectionAttributes: aws.String(""), + Password: aws.String(""), + Port: aws.Int64(123), + ServerName: aws.String(""), + SslMode: aws.String("require"), + Username: aws.String(""), + } + + result, err := svc.ModifyEndpoint(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify Instance Profile +// Modifies the specified instance profile using the provided parameters. 
+func ExampleDatabaseMigrationService_ModifyInstanceProfile_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyInstanceProfileInput{ + AvailabilityZone: aws.String(""), + Description: aws.String(""), + InstanceProfileIdentifier: aws.String(""), + InstanceProfileName: aws.String(""), + KmsKeyArn: aws.String(""), + NetworkType: aws.String(""), + PubliclyAccessible: aws.Bool(true), + SubnetGroupIdentifier: aws.String(""), + } + + result, err := svc.ModifyInstanceProfile(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify Migration Project +// Modifies the specified migration project using the provided parameters. 
+func ExampleDatabaseMigrationService_ModifyMigrationProject_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyMigrationProjectInput{ + Description: aws.String("description"), + InstanceProfileIdentifier: aws.String("my-instance-profile"), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + MigrationProjectName: aws.String("new-name"), + SchemaConversionApplicationAttributes: &databasemigrationservice.SCApplicationAttributes{ + S3BucketPath: aws.String("arn:aws:s3:::myuser-bucket"), + S3BucketRoleArn: aws.String("arn:aws:iam::012345678901:role/Admin"), + }, + SourceDataProviderDescriptors: []*databasemigrationservice.DataProviderDescriptorDefinition{ + { + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + SecretsManagerAccessRoleArn: aws.String("arn:aws:iam::012345678901:role/myuser-admin-access"), + SecretsManagerSecretId: aws.String("arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/myuser/ALL.SOURCE.ORACLE_12-A1B2C3"), + }, + }, + TargetDataProviderDescriptors: []*databasemigrationservice.DataProviderDescriptorDefinition{ + { + DataProviderIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + SecretsManagerAccessRoleArn: aws.String("arn:aws:iam::012345678901:role/myuser-admin-access"), + SecretsManagerSecretId: aws.String("arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/myuser/TARGET.postgresql-A1B2C3"), + }, + }, + } + + result, err := svc.ModifyMigrationProject(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify replication instance +// Modifies the replication instance to apply new settings. You can change one or more +// parameters by specifying these parameters and the new values in the request. Some +// settings are applied during the maintenance window. 
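+// Setting ApplyImmediately to true, as in the example below, applies the
+// changes right away rather than waiting for the next maintenance window.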
+func ExampleDatabaseMigrationService_ModifyReplicationInstance_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyReplicationInstanceInput{ + AllocatedStorage: aws.Int64(123), + AllowMajorVersionUpgrade: aws.Bool(true), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + EngineVersion: aws.String("1.5.0"), + MultiAZ: aws.Bool(true), + PreferredMaintenanceWindow: aws.String("sun:06:00-sun:14:00"), + ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:6UTDJGBOUS3VI3SUWA66XFJCJQ"), + ReplicationInstanceClass: aws.String("dms.t2.micro"), + ReplicationInstanceIdentifier: aws.String("test-rep-1"), + } + + result, err := svc.ModifyReplicationInstance(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeInsufficientResourceCapacityFault: + fmt.Println(databasemigrationservice.ErrCodeInsufficientResourceCapacityFault, aerr.Error()) + case databasemigrationservice.ErrCodeStorageQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeStorageQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeUpgradeDependencyFailureFault: + fmt.Println(databasemigrationservice.ErrCodeUpgradeDependencyFailureFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Modify replication subnet group +// Modifies the settings for the specified replication subnet group. 
+func ExampleDatabaseMigrationService_ModifyReplicationSubnetGroup_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.ModifyReplicationSubnetGroupInput{ + ReplicationSubnetGroupDescription: aws.String(""), + ReplicationSubnetGroupIdentifier: aws.String(""), + } + + result, err := svc.ModifyReplicationSubnetGroup(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeSubnetAlreadyInUse: + fmt.Println(databasemigrationservice.ErrCodeSubnetAlreadyInUse, aerr.Error()) + case databasemigrationservice.ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs: + fmt.Println(databasemigrationservice.ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidSubnet: + fmt.Println(databasemigrationservice.ErrCodeInvalidSubnet, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Refresh schema +// Populates the schema for the specified endpoint. This is an asynchronous operation +// and can take several minutes. You can check the status of this operation by calling +// the describe-refresh-schemas-status operation. +func ExampleDatabaseMigrationService_RefreshSchemas_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.RefreshSchemasInput{ + EndpointArn: aws.String(""), + ReplicationInstanceArn: aws.String(""), + } + + result, err := svc.RefreshSchemas(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1144,6 +1990,272 @@ func ExampleDatabaseMigrationService_RemoveTagsFromResource_shared00() { fmt.Println(result) } +// Start Extension Pack Association +// Applies the extension pack to your target database. 
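+// The call runs asynchronously; the response carries a RequestIdentifier that
+// can be used to track the association (see DescribeExtensionPackAssociations
+// above).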
+func ExampleDatabaseMigrationService_StartExtensionPackAssociation_shared00() {
+	svc := databasemigrationservice.New(session.New())
+	input := &databasemigrationservice.StartExtensionPackAssociationInput{
+		MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"),
+	}
+
+	result, err := svc.StartExtensionPackAssociation(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok {
+			switch aerr.Code() {
+			case databasemigrationservice.ErrCodeAccessDeniedFault:
+				fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error())
+			case databasemigrationservice.ErrCodeInvalidResourceStateFault:
+				fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error())
+			case databasemigrationservice.ErrCodeResourceAlreadyExistsFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error())
+			case databasemigrationservice.ErrCodeResourceNotFoundFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error())
+			case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault:
+				fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())
+			case databasemigrationservice.ErrCodeResourceQuotaExceededFault:
+				fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error())
+			case databasemigrationservice.ErrCodeS3ResourceNotFoundFault:
+				fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error())
+			case databasemigrationservice.ErrCodeS3AccessDeniedFault:
+				fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error())
+			default:
+				fmt.Println(aerr.Error())
+			}
+		} else {
+			// Print the error, cast err to awserr.Error to get the Code and
+			// Message from an error.
+			fmt.Println(err.Error())
+		}
+		return
+	}
+
+	fmt.Println(result)
+}
+
+// Start Metadata Model Assessment
+// Creates a database migration assessment report by assessing the migration complexity
+// for your source database.
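+// The finished report can then be saved to the project's S3 bucket as a CSV
+// or PDF file with ExportMetadataModelAssessment (shown earlier).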
+func ExampleDatabaseMigrationService_StartMetadataModelAssessment_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.StartMetadataModelAssessmentInput{ + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.StartMetadataModelAssessment(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Start Metadata Model Conversion +// Converts your source database objects to a format compatible with the target database. 
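+//
+// The conversion runs asynchronously. An illustrative sketch of tracking it,
+// assuming the returned RequestIdentifier matches the request-id filter used
+// by the DescribeMetadataModelConversions example above:
+//
+//	out, err := svc.StartMetadataModelConversion(input)
+//	if err == nil {
+//		// Feed the request identifier into DescribeMetadataModelConversionsInput.Filters.
+//		filter := &databasemigrationservice.Filter{
+//			Name:   aws.String("request-id"),
+//			Values: []*string{out.RequestIdentifier},
+//		}
+//		_ = filter
+//	}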
+func ExampleDatabaseMigrationService_StartMetadataModelConversion_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.StartMetadataModelConversionInput{ + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.StartMetadataModelConversion(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Start Metadata Model Export As Script +// Saves your converted code to a file as a SQL script, and stores this file on your +// S3 bucket. 
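+// The script lands in the S3 bucket configured on the migration project via
+// SchemaConversionApplicationAttributes (S3BucketPath), as in the
+// CreateMigrationProject example above.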
+func ExampleDatabaseMigrationService_StartMetadataModelExportAsScript_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.StartMetadataModelExportAsScriptInput{ + FileName: aws.String("FILE"), + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + Origin: aws.String("SOURCE"), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.StartMetadataModelExportAsScript(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Start Metadata Model Export To Target +// Applies converted database objects to your target database. 
+func ExampleDatabaseMigrationService_StartMetadataModelExportToTarget_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.StartMetadataModelExportToTargetInput{ + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345"), + OverwriteExtensionPack: aws.Bool(true), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.StartMetadataModelExportToTarget(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + +// Start Metadata Model Import +// Loads the metadata for all the dependent database objects of the parent object. 
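+// Origin selects which side of the migration project to read from (SOURCE in
+// the example below); Refresh is false here, so metadata that was imported
+// previously is not reloaded from the database.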
+func ExampleDatabaseMigrationService_StartMetadataModelImport_shared00() { + svc := databasemigrationservice.New(session.New()) + input := &databasemigrationservice.StartMetadataModelImportInput{ + MigrationProjectIdentifier: aws.String("arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012"), + Origin: aws.String("SOURCE"), + Refresh: aws.Bool(false), + SelectionRules: aws.String("{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}"), + } + + result, err := svc.StartMetadataModelImport(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case databasemigrationservice.ErrCodeAccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeAccessDeniedFault, aerr.Error()) + case databasemigrationservice.ErrCodeInvalidResourceStateFault: + fmt.Println(databasemigrationservice.ErrCodeInvalidResourceStateFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceAlreadyExistsFault: + fmt.Println(databasemigrationservice.ErrCodeResourceAlreadyExistsFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault: + fmt.Println(databasemigrationservice.ErrCodeKMSKeyNotAccessibleFault, aerr.Error()) + case databasemigrationservice.ErrCodeResourceQuotaExceededFault: + fmt.Println(databasemigrationservice.ErrCodeResourceQuotaExceededFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3ResourceNotFoundFault: + fmt.Println(databasemigrationservice.ErrCodeS3ResourceNotFoundFault, aerr.Error()) + case databasemigrationservice.ErrCodeS3AccessDeniedFault: + fmt.Println(databasemigrationservice.ErrCodeS3AccessDeniedFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // Start replication task // Starts the replication task. func ExampleDatabaseMigrationService_StartReplicationTask_shared00() { diff --git a/service/ec2/api.go b/service/ec2/api.go index 310584a5318..bab95287a87 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -187925,6 +187925,9 @@ const ( // InstanceTypeC7i48xlarge is a InstanceType enum value InstanceTypeC7i48xlarge = "c7i.48xlarge" + + // InstanceTypeMac2M2proMetal is a InstanceType enum value + InstanceTypeMac2M2proMetal = "mac2-m2pro.metal" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -188670,6 +188673,7 @@ func InstanceType_Values() []string { InstanceTypeC7i16xlarge, InstanceTypeC7i24xlarge, InstanceTypeC7i48xlarge, + InstanceTypeMac2M2proMetal, } } diff --git a/service/efs/api.go b/service/efs/api.go index fde9cfdcc44..54f6dab746a 100644 --- a/service/efs/api.go +++ b/service/efs/api.go @@ -593,11 +593,13 @@ func (c *EFS) CreateReplicationConfigurationRequest(input *CreateReplicationConf // The destination file system configuration consists of the following properties: // Amazon Web Services Region - The Amazon Web Services Region in which the // destination file system is created. 
Amazon EFS replication is available -// in all Amazon Web Services Regions that Amazon EFS is available in, except -// Africa (Cape Town), Asia Pacific (Hong Kong), Asia Pacific (Jakarta), -// Europe (Milan), and Middle East (Bahrain). Availability Zone - If you -// want the destination file system to use EFS One Zone availability and -// durability, you must specify the Availability Zone to create the file +// in all Amazon Web Services Regions in which EFS is available. To use EFS +// replication in a Region that is disabled by default, you must first opt +// in to the Region. For more information, see Managing Amazon Web Services +// Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) +// in the Amazon Web Services General Reference Guide. Availability +// Zone - If you want the destination file system to use EFS One Zone availability +// and durability, you must specify the Availability Zone to create the file // system in. For more information about EFS storage classes, see Amazon // EFS storage classes (https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) // in the Amazon EFS User Guide. Encryption - All destination file systems @@ -625,7 +627,7 @@ func (c *EFS) CreateReplicationConfigurationRequest(input *CreateReplicationConf // file system is created, you can enable EFS lifecycle management and EFS // Intelligent-Tiering. // -// - Automatic backups - Automatic daily backups not enabled on the destination +// - Automatic backups - Automatic daily backups are enabled on the destination // file system. After the file system is created, you can change this setting. // // For more information, see Amazon EFS replication (https://docs.aws.amazon.com/efs/latest/ug/efs-replication.html) @@ -1273,9 +1275,7 @@ func (c *EFS) DeleteReplicationConfigurationRequest(input *DeleteReplicationConf // DeleteReplicationConfiguration API operation for Amazon Elastic File System. // -// Deletes an existing replication configuration. To delete a replication configuration, -// you must make the request from the Amazon Web Services Region in which the -// destination file system is located. Deleting a replication configuration +// Deletes an existing replication configuration. Deleting a replication configuration // ends the replication process. After a replication configuration is deleted, // the destination file system is no longer read-only. You can write to the // destination file system after its status becomes Writeable. @@ -1631,7 +1631,7 @@ func (c *EFS) DescribeAccountPreferencesRequest(input *DescribeAccountPreference // // Returns the account preferences settings for the Amazon Web Services account // associated with the user making the request, in the current Amazon Web Services -// Region. For more information, see Managing Amazon EFS resource IDs (efs/latest/ug/manage-efs-resource-ids.html). +// Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4443,14 +4443,17 @@ type CreateFileSystemInput struct { // performance mode can't be changed after the file system has been created. // // The maxIO mode is not supported on file systems using One Zone storage classes. + // + // Default is generalPurpose. PerformanceMode *string `type:"string" enum:"PerformanceMode"` - // The throughput, measured in MiB/s, that you want to provision for a file - // system that you're creating. Valid values are 1-1024.
Required if ThroughputMode - // is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase - // this limit, contact Amazon Web Services Support. For more information, see - // Amazon EFS quotas that you can increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) - // in the Amazon EFS User Guide. + // The throughput, measured in mebibytes per second (MiBps), that you want to + // provision for a file system that you're creating. Required if ThroughputMode + // is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit + // depending on Region. To increase this limit, contact Amazon Web Services + // Support. For more information, see Amazon EFS quotas that you can increase + // (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) in the + // Amazon EFS User Guide. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // Use to create one or more tags associated with the file system. Each tag @@ -4736,6 +4739,7 @@ func (s *CreateReplicationConfigurationInput) SetSourceFileSystemId(v string) *C return s } +// Describes the replication configuration for a specific file system. type CreateReplicationConfigurationOutput struct { _ struct{} `type:"structure"` @@ -6929,7 +6933,7 @@ type FileSystemDescription struct { // PerformanceMode is a required field PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` - // The amount of provisioned throughput, measured in MiB/s, for the file system. + // The amount of provisioned throughput, measured in MiBps, for the file system. // Valid for file systems using ThroughputMode set to provisioned. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` @@ -9114,6 +9118,7 @@ func (s *PutLifecycleConfigurationOutput) SetLifecyclePolicies(v []*LifecyclePol return s } +// Describes the replication configuration for a specific file system. type ReplicationConfigurationDescription struct { _ struct{} `type:"structure"` @@ -10198,10 +10203,13 @@ type UpdateFileSystemInput struct { // FileSystemId is a required field FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` - // (Optional) Sets the amount of provisioned throughput, in MiB/s, for the file - // system. Valid values are 1-1024. If you are changing the throughput mode - // to provisioned, you must also provide the amount of provisioned throughput. - // Required if ThroughputMode is changed to provisioned on update. + // (Optional) The throughput, measured in mebibytes per second (MiBps), that + // you want to provision for a file system that you're updating. Required if + // ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with + // the upper limit depending on Region. To increase this limit, contact Amazon + // Web Services Support. For more information, see Amazon EFS quotas that you + // can increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) + // in the Amazon EFS User Guide. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // (Optional) Updates the file system's throughput mode. If you're not updating @@ -10333,7 +10341,7 @@ type UpdateFileSystemOutput struct { // PerformanceMode is a required field PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` - // The amount of provisioned throughput, measured in MiB/s, for the file system. + // The amount of provisioned throughput, measured in MiBps, for the file system.
// Valid for file systems using ThroughputMode set to provisioned. ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` diff --git a/service/guardduty/api.go b/service/guardduty/api.go index 74e5a1bb71e..ecea007fc3b 100644 --- a/service/guardduty/api.go +++ b/service/guardduty/api.go @@ -3763,9 +3763,9 @@ func (c *GuardDuty) InviteMembersRequest(input *InviteMembersInput) (req *reques // // Invites Amazon Web Services accounts to become members of an organization // administered by the Amazon Web Services account that invokes this API. If -// you are using organizations to manager your GuardDuty environment, this step -// is not needed. For more information, see Managing accounts with organizations -// (https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_organizations.html). +// you are using Amazon Web Services Organizations to manage your GuardDuty +// environment, this step is not needed. For more information, see Managing +// accounts with organizations (https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_organizations.html). // // To invite Amazon Web Services accounts, the first step is to ensure that // GuardDuty has been enabled in the potential member accounts. You can now @@ -4853,7 +4853,7 @@ func (c *GuardDuty) ListOrganizationAdminAccountsRequest(input *ListOrganization // ListOrganizationAdminAccounts API operation for Amazon GuardDuty. // -// Lists the accounts configured as GuardDuty delegated administrators. Only +// Lists the accounts designated as GuardDuty delegated administrators. Only // the organization's management account can run this API operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8336,6 +8336,9 @@ type CoverageFilterCriterion struct { _ struct{} `type:"structure"` // An enum value representing possible filter fields. + // + // Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has + // been deprecated. CriterionKey *string `locationName:"criterionKey" type:"string" enum:"CoverageFilterCriterionKey"` // Contains information about the condition. @@ -8505,6 +8508,9 @@ type CoverageSortCriteria struct { _ struct{} `type:"structure"` // Represents the field name used to sort the coverage details. + // + // Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has + // been deprecated. AttributeName *string `locationName:"attributeName" type:"string" enum:"CoverageSortKey"` // The order in which the sorted findings are to be displayed. @@ -12409,7 +12415,7 @@ func (s *EksClusterDetails) SetVpcId(v string) *EksClusterDetails { type EnableOrganizationAdminAccountInput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Account ID for the organization account to be enabled + // The Amazon Web Services account ID for the organization account to be enabled // as a GuardDuty delegated administrator. // // AdminAccountId is a required field @@ -12635,6 +12641,9 @@ type FilterCriterion struct { // An enum value representing possible scan properties to match with given scan // entries. + // + // Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has + // been deprecated. CriterionKey *string `locationName:"criterionKey" type:"string" enum:"CriterionKey"` // Contains information about the condition. 
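The new `EKS_CLUSTER_NAME` key appears in both the coverage filter and sort enums added further below. As a minimal sketch (not part of the release itself), a caller could filter and sort runtime coverage by EKS cluster name roughly as follows; the detector ID and cluster name are placeholders, and the surrounding `ListCoverage` shapes (`CoverageFilterCriteria`, `CoverageFilterCondition`, `CoverageSortCriteria`) are assumed to keep their existing field names:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	svc := guardduty.New(session.New())

	// Filter coverage to a single EKS cluster and sort by cluster name,
	// using the EKS_CLUSTER_NAME key that replaces the deprecated
	// CLUSTER_NAME value. Both identifiers below are placeholders.
	input := &guardduty.ListCoverageInput{
		DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"),
		FilterCriteria: &guardduty.CoverageFilterCriteria{
			FilterCriterion: []*guardduty.CoverageFilterCriterion{{
				CriterionKey: aws.String(guardduty.CoverageFilterCriterionKeyEksClusterName),
				FilterCondition: &guardduty.CoverageFilterCondition{
					Equals: []*string{aws.String("my-eks-cluster")},
				},
			}},
		},
		SortCriteria: &guardduty.CoverageSortCriteria{
			AttributeName: aws.String(guardduty.CoverageSortKeyEksClusterName),
			OrderBy:       aws.String(guardduty.OrderByAsc),
		},
	}

	result, err := svc.ListCoverage(input)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(result)
}
```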
@@ -12686,8 +12695,8 @@ func (s *FilterCriterion) SetFilterCondition(v *FilterCondition) *FilterCriterio return s } -// Contains information about the finding, which is generated when abnormal -// or suspicious activity is detected. +// Contains information about the finding that is generated when abnormal or +// suspicious activity is detected. type Finding struct { _ struct{} `type:"structure"` @@ -18521,9 +18530,9 @@ type OrganizationFeatureConfiguration struct { // The additional information that will be configured for the organization. AdditionalConfiguration []*OrganizationAdditionalConfiguration `locationName:"additionalConfiguration" type:"list"` - // The status of the feature that will be configured for the organization. Use - // one of the following values to configure the feature status for the entire - // organization: + // Describes the status of the feature that is configured for the member accounts + // within the organization. One of the following values is the status for the + // entire organization: // // * NEW: Indicates that when a new account joins the organization, they // will have the feature enabled automatically. @@ -20626,17 +20635,17 @@ func (s *ScanCondition) SetMapEquals(v []*ScanConditionPair) *ScanCondition { return s } -// Represents key, value pair to be matched against given resource property. +// Represents the key:value pair to be matched against a given resource property. type ScanConditionPair struct { _ struct{} `type:"structure"` - // Represents key in the map condition. + // Represents the key in the map condition. // // Key is a required field Key *string `locationName:"key" min:"1" type:"string" required:"true"` - // Represents optional value in the map condition. If not specified, only key - // will be matched. + // Represents an optional value in the map condition. If not specified, only the + // key will be matched. Value *string `locationName:"value" type:"string"` } @@ -22996,10 +23005,11 @@ func (s *UpdateMemberDetectorsOutput) SetUnprocessedAccounts(v []*UnprocessedAcc type UpdateOrganizationConfigurationInput struct { _ struct{} `type:"structure"` - // Indicates whether to automatically enable member accounts in the organization. + // Represents whether or not to automatically enable member accounts in the + // organization. // // Even though this is still supported, we recommend using AutoEnableOrganizationMembers - // to achieve the similar results. You must provide the value for either autoEnableOrganizationMembers + // to achieve similar results. You must provide a value for either autoEnableOrganizationMembers // or autoEnable.
// // Deprecated: This field is deprecated, use AutoEnableOrganizationMembers instead @@ -23935,6 +23945,9 @@ const ( // CoverageFilterCriterionKeyManagementType is a CoverageFilterCriterionKey enum value CoverageFilterCriterionKeyManagementType = "MANAGEMENT_TYPE" + + // CoverageFilterCriterionKeyEksClusterName is a CoverageFilterCriterionKey enum value + CoverageFilterCriterionKeyEksClusterName = "EKS_CLUSTER_NAME" ) // CoverageFilterCriterionKey_Values returns all elements of the CoverageFilterCriterionKey enum @@ -23946,6 +23959,7 @@ func CoverageFilterCriterionKey_Values() []string { CoverageFilterCriterionKeyCoverageStatus, CoverageFilterCriterionKeyAddonVersion, CoverageFilterCriterionKeyManagementType, + CoverageFilterCriterionKeyEksClusterName, } } @@ -23967,6 +23981,9 @@ const ( // CoverageSortKeyUpdatedAt is a CoverageSortKey enum value CoverageSortKeyUpdatedAt = "UPDATED_AT" + + // CoverageSortKeyEksClusterName is a CoverageSortKey enum value + CoverageSortKeyEksClusterName = "EKS_CLUSTER_NAME" ) // CoverageSortKey_Values returns all elements of the CoverageSortKey enum @@ -23978,6 +23995,7 @@ func CoverageSortKey_Values() []string { CoverageSortKeyIssue, CoverageSortKeyAddonVersion, CoverageSortKeyUpdatedAt, + CoverageSortKeyEksClusterName, } } diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index f1be32c84cd..d393c3a4472 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -7213,10 +7213,13 @@ type CmfcSettings struct { // Use this setting to control the values that MediaConvert puts in your HLS // parent playlist to control how the client player selects which audio track - // to play. The other options for this setting determine the values that MediaConvert - // writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry - // for the audio variant. For more information about these attributes, see the - // Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. + // to play. Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for + // any variant that you want to prohibit the client from playing with video. + // This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF + // in the HLS manifest. The other options for this setting determine the values + // that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the + // EXT-X-MEDIA entry for the audio variant. For more information about these + // attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. // Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. // Choose this value for only one variant in your output group. Choose Alternate // audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose @@ -16638,6 +16641,15 @@ type JobSettings struct { // 05h Content Advisory. ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"` + // Specifies which input metadata to use for the default "Follow input" option + // for the following settings: resolution, frame rate, and pixel aspect ratio. + // In the simplest case, specify which input is used based on its index in the + // job. For example, if you specify 3, then the fourth input in the job will + // be used. If the job does not have a fourth input, then the first input + // will be used. If no followInputIndex is specified, then 0 will be chosen + // automatically. + FollowInputIndex *int64 `locationName:"followInputIndex" type:"integer"` + // Use Inputs to define the source file used in the transcode job. There can be // multiple inputs added in a job. These inputs will be concatenated together // to create the output. @@ -16784,6 +16796,12 @@ func (s *JobSettings) SetExtendedDataServices(v *ExtendedDataServices) *JobSetti return s } +// SetFollowInputIndex sets the FollowInputIndex field's value. +func (s *JobSettings) SetFollowInputIndex(v int64) *JobSettings { + s.FollowInputIndex = &v + return s +} + // SetInputs sets the Inputs field's value. func (s *JobSettings) SetInputs(v []*Input) *JobSettings { s.Inputs = v @@ -17009,6 +17027,15 @@ type JobTemplateSettings struct { // 05h Content Advisory. ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"` + // Specifies which input metadata to use for the default "Follow input" option + // for the following settings: resolution, frame rate, and pixel aspect ratio. + // In the simplest case, specify which input is used based on its index in the + // job. For example, if you specify 3, then the fourth input in the job will + // be used. If the job does not have a fourth input, then the first input + // will be used. If no followInputIndex is specified, then 0 will be chosen + // automatically. + FollowInputIndex *int64 `locationName:"followInputIndex" type:"integer"` + // Use Inputs to define the source file used in the transcode job. There can // only be one input in a job template. Using the API, you can include multiple // inputs when referencing a job template. @@ -17155,6 +17182,12 @@ func (s *JobTemplateSettings) SetExtendedDataServices(v *ExtendedDataServices) * return s } +// SetFollowInputIndex sets the FollowInputIndex field's value. +func (s *JobTemplateSettings) SetFollowInputIndex(v int64) *JobTemplateSettings { + s.FollowInputIndex = &v + return s +} + // SetInputs sets the Inputs field's value. func (s *JobTemplateSettings) SetInputs(v []*InputTemplate) *JobTemplateSettings { s.Inputs = v @@ -23100,7 +23133,9 @@ type S3DestinationSettings struct { // S3. Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"` - // Specify the S3 storage class to use for this destination. + // Specify the S3 storage class to use for this output. To use your destination's + // default storage class: Keep the default value, Not set. For more information + // about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html StorageClass *string `locationName:"storageClass" type:"string" enum:"S3StorageClass"` } @@ -29368,10 +29403,13 @@ func CmfcAudioDuration_Values() []string { // Use this setting to control the values that MediaConvert puts in your HLS // parent playlist to control how the client player selects which audio track -// to play. The other options for this setting determine the values that MediaConvert -// writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry -// for the audio variant. For more information about these attributes, see the -// Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. +// to play.
Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for +// any variant that you want to prohibit the client from playing with video. +// This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF +// in the HLS manifest. The other options for this setting determine the values +// that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the +// EXT-X-MEDIA entry for the audio variant. For more information about these +// attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. // Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. // Choose this value for only one variant in your output group. Choose Alternate // audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose @@ -29388,6 +29426,9 @@ const ( // CmfcAudioTrackTypeAlternateAudioNotAutoSelect is a CmfcAudioTrackType enum value CmfcAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" + + // CmfcAudioTrackTypeAudioOnlyVariantStream is a CmfcAudioTrackType enum value + CmfcAudioTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM" ) // CmfcAudioTrackType_Values returns all elements of the CmfcAudioTrackType enum @@ -29396,6 +29437,7 @@ func CmfcAudioTrackType_Values() []string { CmfcAudioTrackTypeAlternateAudioAutoSelectDefault, CmfcAudioTrackTypeAlternateAudioAutoSelect, CmfcAudioTrackTypeAlternateAudioNotAutoSelect, + CmfcAudioTrackTypeAudioOnlyVariantStream, } } @@ -36663,7 +36705,9 @@ func S3ServerSideEncryptionType_Values() []string { } } -// Specify the S3 storage class to use for this destination. +// Specify the S3 storage class to use for this output. To use your destination's +// default storage class: Keep the default value, Not set. For more information +// about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html const ( // S3StorageClassStandard is a S3StorageClass enum value S3StorageClassStandard = "STANDARD"
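To show how the two MediaConvert additions above fit together, here is a minimal sketch (not from the release itself) that sets the new followInputIndex and marks a CMAF audio rendition as an audio-only variant stream; both fragments would normally sit inside a full CreateJob request, which is omitted here:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Take the "Follow input" defaults (resolution, frame rate, pixel
	// aspect ratio) from the second input (index 1) instead of input 0.
	settings := &mediaconvert.JobSettings{}
	settings.SetFollowInputIndex(1)

	// Mark an audio rendition in a CMAF output group as an audio-only
	// variant stream, so MediaConvert writes it as an EXT-X-STREAM-INF
	// entry that players will not pair with video.
	cmfc := &mediaconvert.CmfcSettings{
		AudioTrackType: aws.String(mediaconvert.CmfcAudioTrackTypeAudioOnlyVariantStream),
	}

	fmt.Println(settings, cmfc)
}
```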