diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d32079530e..6bbeacd77b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.44.239 (2023-04-07) +=== + +### Service Client Updates +* `service/dlm`: Adds new service +* `service/docdb`: Updates service API and documentation + * This release adds a new parameter 'DBClusterParameterGroupName' to 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing restore. +* `service/fsx`: Updates service documentation +* `service/lambda`: Updates service API and documentation + * This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered. +* `service/quicksight`: Updates service API and documentation + * This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions. 
+* `service/redshift-data`: Updates service documentation +* `service/servicecatalog`: Updates service documentation + * Updates description for property + Release v1.44.238 (2023-04-06) === diff --git a/aws/version.go b/aws/version.go index ba51da4856f..216c6d49a15 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.238" +const SDKVersion = "1.44.239" diff --git a/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json b/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json index efc50d4c587..7069e7a3bbb 100644 --- a/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json +++ b/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", 
- "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,179 +111,240 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dlm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://dlm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://dlm.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://dlm.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://dlm-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://dlm-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], 
- "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dlm.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -311,7 +352,7 @@ { "conditions": [], "endpoint": { - "url": "https://dlm.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://dlm.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -320,28 +361,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://dlm.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/dlm/2018-01-12/endpoint-tests-1.json b/models/apis/dlm/2018-01-12/endpoint-tests-1.json index 
e0a57f37a78..53f0e7caa24 100644 --- a/models/apis/dlm/2018-01-12/endpoint-tests-1.json +++ b/models/apis/dlm/2018-01-12/endpoint-tests-1.json @@ -1,159 +1,159 @@ { "testCases": [ { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://dlm.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-iso-east-1.c2s.ic.gov" + "url": "https://dlm.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-3.amazonaws.com" + "url": "https://dlm.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-east-1.amazonaws.com" + "url": "https://dlm.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - 
"documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-1.amazonaws.com" + "url": "https://dlm.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-2.amazonaws.com" + "url": "https://dlm.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-3.amazonaws.com" + "url": "https://dlm.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.me-south-1.amazonaws.com" + "url": "https://dlm.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://dlm.eu-north-1.amazonaws.com" + "url": "https://dlm.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-east-2.amazonaws.com" + "url": "https://dlm.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.sa-east-1.amazonaws.com" + "url": "https://dlm.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-east-1.amazonaws.com" + "url": "https://dlm.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -164,74 +164,100 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-central-1.amazonaws.com" + "url": 
"https://dlm.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-1.amazonaws.com" + "url": "https://dlm.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-2.amazonaws.com" + "url": "https://dlm.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-3.amazonaws.com" + "url": "https://dlm.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ca-central-1.amazonaws.com" + "url": "https://dlm.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled 
and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -242,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -255,152 +281,165 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dlm.af-south-1.amazonaws.com" + "url": "https://dlm-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-south-1.amazonaws.com" + "url": "https://dlm-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": true } }, { - "documentation": "For region 
ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-1.amazonaws.com" + "url": "https://dlm.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-2.amazonaws.com" + "url": "https://dlm.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-east-1.api.aws" + "url": "https://dlm-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-east-1.amazonaws.com" + "url": "https://dlm-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://dlm.us-east-1.api.aws" + "url": "https://dlm.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-west-1.amazonaws.com" + "url": "https://dlm.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-west-1.amazonaws.com" + "url": "https://dlm.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-east-1.amazonaws.com" + "url": "https://dlm.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-east-1.amazonaws.com" + "url": "https://dlm.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": 
"us-gov-east-1" + "UseDualStack": false } }, { @@ -411,9 +450,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -424,113 +463,131 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm-fips.us-isob-east-1.sc2s.sgov.gov" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-isob-east-1.sc2s.sgov.gov" + "url": "https://dlm-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-isob-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.cn-north-1.amazonaws.com.cn" + "url": "https://dlm.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.cn-north-1.amazonaws.com.cn" + "url": "https://dlm-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://dlm.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-north-1" + 
"UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -540,9 +597,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -552,11 +609,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/docdb/2014-10-31/api-2.json b/models/apis/docdb/2014-10-31/api-2.json index 6b7ddaa68bd..7edf9e0180c 100644 --- a/models/apis/docdb/2014-10-31/api-2.json +++ b/models/apis/docdb/2014-10-31/api-2.json @@ -2846,7 +2846,8 @@ "Tags":{"shape":"TagList"}, "KmsKeyId":{"shape":"String"}, "EnableCloudwatchLogsExports":{"shape":"LogTypeList"}, - "DeletionProtection":{"shape":"BooleanOptional"} + "DeletionProtection":{"shape":"BooleanOptional"}, + "DBClusterParameterGroupName":{"shape":"String"} } }, "RestoreDBClusterFromSnapshotResult":{ diff --git a/models/apis/docdb/2014-10-31/docs-2.json 
b/models/apis/docdb/2014-10-31/docs-2.json index 14581e865af..66a42d9a046 100644 --- a/models/apis/docdb/2014-10-31/docs-2.json +++ b/models/apis/docdb/2014-10-31/docs-2.json @@ -1571,7 +1571,7 @@ "FailoverDBClusterMessage$TargetDBInstanceIdentifier": "

The name of the instance to promote to the primary instance.

You must specify the instance identifier for an Amazon DocumentDB replica in the cluster. For example, mydbcluster-replica1.

", "Filter$Name": "

The name of the filter. Filter names are case sensitive.

", "FilterValueList$member": null, - "GlobalCluster$GlobalClusterResourceId": "

The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS customer master key (CMK) for the cluster is accessed.

", + "GlobalCluster$GlobalClusterResourceId": "

The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in CloudTrail log entries whenever the KMS customer master key (CMK) for the cluster is accessed.

", "GlobalCluster$GlobalClusterArn": "

The Amazon Resource Name (ARN) for the global cluster.

", "GlobalCluster$Status": "

Specifies the current state of this global cluster.

", "GlobalCluster$Engine": "

The Amazon DocumentDB database engine used by the global cluster.

", @@ -1643,8 +1643,9 @@ "RestoreDBClusterFromSnapshotMessage$EngineVersion": "

The version of the database engine to use for the new cluster.

", "RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "

The name of the subnet group to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

", "RestoreDBClusterFromSnapshotMessage$KmsKeyId": "

The KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

", + "RestoreDBClusterFromSnapshotMessage$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to associate with this DB cluster.

Type: String.       Required: No.

If this argument is omitted, the default DB cluster parameter group is used. If supplied, must match the name of an existing default DB cluster parameter group. The string must consist of from 1 to 255 letters, numbers or hyphens. Its first character must be a letter, and it cannot end with a hyphen or contain two consecutive hyphens.

", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

The name of the new cluster to be created.

Constraints:

", - "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

", + "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

", "RestoreDBClusterToPointInTimeMessage$SourceDBClusterIdentifier": "

The identifier of the source cluster from which to restore.

Constraints:

", "RestoreDBClusterToPointInTimeMessage$DBSubnetGroupName": "

The subnet group name to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

", "RestoreDBClusterToPointInTimeMessage$KmsKeyId": "

The KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

You can restore to a new cluster and encrypt the new cluster with an KMS key that is different from the KMS key used to encrypt the source cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

If DBClusterIdentifier refers to a cluster that is not encrypted, then the restore request is rejected.

", diff --git a/models/apis/docdb/2014-10-31/endpoint-rule-set-1.json b/models/apis/docdb/2014-10-31/endpoint-rule-set-1.json new file mode 100644 index 00000000000..b9aff9f06c8 --- /dev/null +++ b/models/apis/docdb/2014-10-31/endpoint-rule-set-1.json @@ -0,0 +1,375 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + 
} + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": 
"https://rds.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rds.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rds.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/docdb/2014-10-31/endpoint-tests-1.json b/models/apis/docdb/2014-10-31/endpoint-tests-1.json new file mode 100644 index 00000000000..8b0bc663a10 --- /dev/null +++ b/models/apis/docdb/2014-10-31/endpoint-tests-1.json @@ -0,0 +1,691 @@ +{ + "testCases": [ + { + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://rds.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-southeast-2.amazonaws.com" + } + }, + 
"params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": 
false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.me-south-1.amazonaws.com" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://rds-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": 
false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } 
+ }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rds.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-iso-west-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rds-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + 
"documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 94a495a7ea4..5a368d0fbb1 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -6,7 +6,7 @@ "CancelDataRepositoryTask": "

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

", "CopyBackup": "

Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five backup copy requests in progress to a single destination Region per account.

You can use cross-Region backup copies for cross-Region disaster recovery. You can periodically take backups and copy them to another Region so that in the event of a disaster in the primary Region, you can restore from backup and recover availability quickly in the other Region. You can make cross-Region copies only within your Amazon Web Services partition. A partition is a grouping of Regions. Amazon Web Services currently has three partitions: aws (Standard Regions), aws-cn (China Regions), and aws-us-gov (Amazon Web Services GovCloud [US] Regions).

You can also use backup copies to clone your file dataset to another Region or within the same Region.

You can use the SourceRegion parameter to specify the Amazon Web Services Region from which the backup will be copied. For example, if you make the call from the us-west-1 Region and want to copy a backup from the us-east-2 Region, you specify us-east-2 in the SourceRegion parameter to make a cross-Region copy. If you don't specify a Region, the backup copy is created in the same Region where the request is sent from (in-Region copy).

For more information about creating backup copies, see Copying backups in the Amazon FSx for Windows User Guide, Copying backups in the Amazon FSx for Lustre User Guide, and Copying backups in the Amazon FSx for OpenZFS User Guide.

", "CreateBackup": "

Creates a backup of an existing Amazon FSx for Windows File Server file system, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx for OpenZFS file system. We recommend creating regular backups so that you can restore a file system or volume from a backup if an issue arises with the original file system or volume.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems that have the following configuration:

For more information about backups, see the following:

If a backup with the specified client request token exists and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", - "CreateDataRepositoryAssociation": "

Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

", + "CreateDataRepositoryAssociation": "

Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

", "CreateDataRepositoryTask": "

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", "CreateFileCache": "

Creates a new Amazon File Cache resource.

You can use this operation with a client request token in the request that Amazon File Cache uses to ensure idempotent creation. If a cache with the specified client request token exists and the parameters match, CreateFileCache returns the description of the existing cache. If a cache with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file cache with the specified client request token doesn't exist, CreateFileCache does the following:

The CreateFileCache call returns while the cache's lifecycle state is still CREATING. You can check the cache creation status by calling the DescribeFileCaches operation, which returns the cache state along with other information.

", "CreateFileSystem": "

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

", @@ -16,14 +16,14 @@ "CreateVolume": "

Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.

", "CreateVolumeFromBackup": "

Creates a new Amazon FSx for NetApp ONTAP volume from an existing Amazon FSx volume backup.

", "DeleteBackup": "

Deletes an Amazon FSx backup. After deletion, the backup no longer exists, and its data is gone.

The DeleteBackup call returns instantly. The backup won't show up in later DescribeBackups calls.

The data in a deleted backup is also deleted and can't be recovered by any means.

", - "DeleteDataRepositoryAssociation": "

Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

", + "DeleteDataRepositoryAssociation": "

Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

", "DeleteFileCache": "

Deletes an Amazon File Cache resource. After deletion, the cache no longer exists, and its data is gone.

The DeleteFileCache operation returns while the cache has the DELETING status. You can check the cache deletion status by calling the DescribeFileCaches operation, which returns a list of caches in your account. If you pass the cache ID for a deleted cache, the DescribeFileCaches operation returns a FileCacheNotFound error.

The data in a deleted cache is also deleted and can't be recovered by any means.

", "DeleteFileSystem": "

Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

The data in a deleted file system is also deleted and can't be recovered by any means.

", "DeleteSnapshot": "

Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a file system backup.

The DeleteSnapshot operation returns instantly. The snapshot appears with the lifecycle status of DELETING until the deletion is complete.

", "DeleteStorageVirtualMachine": "

Deletes an existing Amazon FSx for ONTAP storage virtual machine (SVM). Prior to deleting an SVM, you must delete all non-root volumes in the SVM, otherwise the operation will fail.

", "DeleteVolume": "

Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

", "DescribeBackups": "

Returns the description of a specific Amazon FSx backup, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of the NextToken value from the last response.

This operation is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextToken value. Then the operation continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken value.

When using this operation, keep the following in mind:

", - "DescribeDataRepositoryAssociations": "

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type and for Amazon File Cache resources.

You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", + "DescribeDataRepositoryAssociations": "

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", "DescribeDataRepositoryTasks": "

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems or caches, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", "DescribeFileCaches": "

Returns the description of a specific Amazon File Cache resource, if a FileCacheIds value is provided for that cache. Otherwise, it returns descriptions of all caches owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all cache descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more cache descriptions remain, the operation returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This operation is used in an iterative process to retrieve a list of your cache descriptions. DescribeFileCaches is called first without a NextToken value. Then the operation continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this operation, keep the following in mind:

", "DescribeFileSystemAliases": "

Returns the DNS aliases that are associated with the specified Amazon FSx for Windows File Server file system. A history of all DNS aliases that have been associated with and disassociated from the file system is available in the list of AdministrativeAction provided in the DescribeFileSystems operation response.

", @@ -37,9 +37,9 @@ "RestoreVolumeFromSnapshot": "

Returns an Amazon FSx for OpenZFS volume to the state saved by the specified snapshot.

", "TagResource": "

Tags an Amazon FSx resource.

", "UntagResource": "

This action removes a tag from an Amazon FSx resource.

", - "UpdateDataRepositoryAssociation": "

Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

", + "UpdateDataRepositoryAssociation": "

Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

", "UpdateFileCache": "

Updates the configuration of an existing Amazon File Cache resource. You can update multiple properties in a single request.

", - "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

For Amazon FSx for Lustre file systems, you can update the following properties:

For Amazon FSx for NetApp ONTAP file systems, you can update the following properties:

For the Amazon FSx for OpenZFS file systems, you can update the following properties:

", + "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

For FSx for Lustre file systems, you can update the following properties:

For FSx for ONTAP file systems, you can update the following properties:

For FSx for OpenZFS file systems, you can update the following properties:

", "UpdateSnapshot": "

Updates the name of an Amazon FSx for OpenZFS snapshot.

", "UpdateStorageVirtualMachine": "

Updates an Amazon FSx for ONTAP storage virtual machine (SVM).

", "UpdateVolume": "

Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

" @@ -51,7 +51,7 @@ "Backup$OwnerId": null, "FileCache$OwnerId": null, "FileCacheCreating$OwnerId": null, - "FileSystem$OwnerId": "

The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.

" + "FileSystem$OwnerId": "

The Amazon Web Services account that created the file system. If the file system was created by a user in IAM Identity Center, the Amazon Web Services account to which the IAM user belongs is the owner.

" } }, "ActiveDirectoryBackupAttributes": { @@ -154,10 +154,10 @@ "ArchivePath": { "base": null, "refs": { - "CompletionReport$Path": "

Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. To learn more about a file system's ExportPath, see .

", + "CompletionReport$Path": "

Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode.

", "CreateDataRepositoryAssociationRequest$DataRepositoryPath": "

The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to.

", - "CreateFileSystemLustreConfiguration$ImportPath": "

(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.

This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.

", - "CreateFileSystemLustreConfiguration$ExportPath": "

(Optional) Available with Scratch and Persistent_1 deployment types. Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z.

The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.

This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.

", + "CreateFileSystemLustreConfiguration$ImportPath": "

(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.

This parameter is not supported for file systems with a data repository association.

", + "CreateFileSystemLustreConfiguration$ExportPath": "

(Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z.

The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.

This parameter is not supported for file systems with a data repository association.

", "DataRepositoryAssociation$DataRepositoryPath": "

The path to the data repository that will be linked to the cache or file system.

", "DataRepositoryConfiguration$ImportPath": "

The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.

", "DataRepositoryConfiguration$ExportPath": "

The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.

", @@ -175,14 +175,14 @@ } }, "AutoExportPolicy": { - "base": "

Describes a data repository association's automatic export policy. The AutoExportPolicy defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.

This AutoExportPolicy is supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type.

", + "base": "

Describes a data repository association's automatic export policy. The AutoExportPolicy defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.

The AutoExportPolicy is only supported on Amazon FSx for Lustre file systems with a data repository association.

", "refs": { "NFSDataRepositoryConfiguration$AutoExportPolicy": "

This parameter is not supported for Amazon File Cache.

", "S3DataRepositoryConfiguration$AutoExportPolicy": "

Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.

" } }, "AutoImportPolicy": { - "base": "

Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.

The AutoImportPolicy is supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type.

", + "base": "

Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.

The AutoImportPolicy is only supported on Amazon FSx for Lustre file systems with a data repository association.

", "refs": { "S3DataRepositoryConfiguration$AutoImportPolicy": "

Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.

" } @@ -190,9 +190,9 @@ "AutoImportPolicyType": { "base": null, "refs": { - "CreateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) Available with Scratch and Persistent_1 deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

For more information, see Automatically import updates from your S3 bucket.

This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.

", + "CreateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

For more information, see Automatically import updates from your S3 bucket.

This parameter is not supported for file systems with a data repository association.

", "DataRepositoryConfiguration$AutoImportPolicy": "

Describes the file system's linked S3 data repository's AutoImportPolicy. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

", - "UpdateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

The AutoImportPolicy parameter is not supported for Lustre file systems with the Persistent_2 deployment type. Instead, use to update a data repository association on your Persistent_2 file system.

" + "UpdateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

This parameter is not supported for file systems with a data repository association.

" } }, "AutomaticBackupRetentionDays": { @@ -319,24 +319,24 @@ } }, "ClientRequestToken": { - "base": "

(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "base": "

(Optional) An idempotency token for resource creation, in a string of up to 63 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", "refs": { "AssociateFileSystemAliasesRequest$ClientRequestToken": null, "CopyBackupRequest$ClientRequestToken": null, - "CreateBackupRequest$ClientRequestToken": "

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "CreateBackupRequest$ClientRequestToken": "

(Optional) A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", "CreateDataRepositoryAssociationRequest$ClientRequestToken": null, "CreateDataRepositoryTaskRequest$ClientRequestToken": null, - "CreateFileCacheRequest$ClientRequestToken": "

An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

By using the idempotent operation, you can retry a CreateFileCache operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.

", - "CreateFileSystemFromBackupRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", - "CreateFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "CreateFileCacheRequest$ClientRequestToken": "

An idempotency token for resource creation, in a string of up to 63 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

By using the idempotent operation, you can retry a CreateFileCache operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.

", + "CreateFileSystemFromBackupRequest$ClientRequestToken": "

A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "CreateFileSystemRequest$ClientRequestToken": "

A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", "CreateSnapshotRequest$ClientRequestToken": null, "CreateStorageVirtualMachineRequest$ClientRequestToken": null, "CreateVolumeFromBackupRequest$ClientRequestToken": null, "CreateVolumeRequest$ClientRequestToken": null, - "DeleteBackupRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.

", + "DeleteBackupRequest$ClientRequestToken": "

A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.

", "DeleteDataRepositoryAssociationRequest$ClientRequestToken": null, "DeleteFileCacheRequest$ClientRequestToken": null, - "DeleteFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This token is automatically filled on your behalf when using the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "DeleteFileSystemRequest$ClientRequestToken": "

A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This token is automatically filled on your behalf when using the Command Line Interface (CLI) or an Amazon Web Services SDK.

", "DeleteSnapshotRequest$ClientRequestToken": null, "DeleteStorageVirtualMachineRequest$ClientRequestToken": null, "DeleteVolumeRequest$ClientRequestToken": null, @@ -346,7 +346,7 @@ "RestoreVolumeFromSnapshotRequest$ClientRequestToken": null, "UpdateDataRepositoryAssociationRequest$ClientRequestToken": null, "UpdateFileCacheRequest$ClientRequestToken": null, - "UpdateFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", + "UpdateFileSystemRequest$ClientRequestToken": "

A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", "UpdateSnapshotRequest$ClientRequestToken": null, "UpdateStorageVirtualMachineRequest$ClientRequestToken": null, "UpdateVolumeRequest$ClientRequestToken": null @@ -445,7 +445,7 @@ } }, "CreateFileSystemLustreConfiguration": { - "base": "

The Lustre configuration for the file system being created.

The following parameters are not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.

", + "base": "

The Lustre configuration for the file system being created.

The following parameters are not supported for file systems with a data repository association created with .

", "refs": { "CreateFileSystemFromBackupRequest$LustreConfiguration": null, "CreateFileSystemRequest$LustreConfiguration": null @@ -597,7 +597,7 @@ } }, "DataRepositoryAssociation": { - "base": "

The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

Data repository associations are supported only for an Amazon FSx for Lustre file system with the Persistent_2 deployment type and for an Amazon File Cache resource.

", + "base": "

The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

", "refs": { "CreateDataRepositoryAssociationResponse$Association": "

The response object returned after the data repository association is created.

", "DataRepositoryAssociations$member": null, @@ -634,7 +634,7 @@ } }, "DataRepositoryConfiguration": { - "base": "

The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation.

This data type is not supported for file systems with the Persistent_2 deployment type. Instead, use .

", + "base": "

The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation.

This data type is not supported on file systems with a data repository association. For file systems with a data repository association, see .

", "refs": { "LustreFileSystemConfiguration$DataRepositoryConfiguration": null } @@ -742,7 +742,7 @@ "base": null, "refs": { "CreateDataRepositoryTaskRequest$Type": "

Specifies the type of data repository task to create.

", - "DataRepositoryTask$Type": "

The type of data repository task.

" + "DataRepositoryTask$Type": "

The type of data repository task.

" } }, "DataRepositoryTasks": { @@ -1388,7 +1388,7 @@ "CreateFileSystemOpenZFSConfiguration$CopyTagsToBackups": "

A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

", "CreateFileSystemOpenZFSConfiguration$CopyTagsToVolumes": "

A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to false. If it's set to true, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.

", "CreateFileSystemWindowsConfiguration$CopyTagsToBackups": "

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

", - "CreateOntapVolumeConfiguration$StorageEfficiencyEnabled": "

Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume.

", + "CreateOntapVolumeConfiguration$StorageEfficiencyEnabled": "

Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume, or set to false to disable them. This parameter is required.

", "CreateOntapVolumeConfiguration$CopyTagsToBackups": "

A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.

", "CreateOpenZFSVolumeConfiguration$CopyTagsToSnapshots": "

A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.

", "DeleteFileSystemLustreConfiguration$SkipFinalBackup": "

Set SkipFinalBackup to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the DeleteFileSystem operation is invoked. (Default = true)

The fsx:CreateBackup permission is required if you set SkipFinalBackup to false in order to delete the file system and take a final backup.

", @@ -1533,14 +1533,14 @@ "IpAddressRange": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

", - "OntapFileSystemConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) The IP address range in which the endpoints to access your file system are created.

The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.

" + "CreateFileSystemOntapConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.

", + "OntapFileSystemConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

" } }, "JunctionPath": { "base": null, "refs": { - "CreateOntapVolumeConfiguration$JunctionPath": "

Specifies the location in the SVM's namespace where the volume is mounted. The JunctionPath must have a leading forward slash, such as /vol3.

", + "CreateOntapVolumeConfiguration$JunctionPath": "

Specifies the location in the SVM's namespace where the volume is mounted. This parameter is required. The JunctionPath must have a leading forward slash, such as /vol3.

", "OntapVolumeConfiguration$JunctionPath": "

Specifies the directory that network-attached storage (NAS) clients use to mount the volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP address. You can create a JunctionPath directly below a parent volume junction or on a directory within a volume. A JunctionPath for a volume named vol3 might be /vol1/vol2/vol3, or /vol1/dir2/vol3, or even /dir1/dir2/vol3.

", "UpdateOntapVolumeConfiguration$JunctionPath": "

Specifies the location in the SVM's namespace where the volume is mounted. The JunctionPath must have a leading forward slash, such as /vol3.

" } @@ -1598,7 +1598,7 @@ "LustreDeploymentType": { "base": null, "refs": { - "CreateFileSystemLustreConfiguration$DeploymentType": "

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

(Default = SCRATCH_1)

", + "CreateFileSystemLustreConfiguration$DeploymentType": "

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

(Default = SCRATCH_1)

", "LustreFileSystemConfiguration$DeploymentType": "

The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 is built on Lustre v2.12 and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

The default is SCRATCH_1.

" } }, @@ -1673,7 +1673,7 @@ "base": null, "refs": { "CreateDataRepositoryAssociationRequest$ImportedFileChunkSize": "

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

", - "CreateFileSystemLustreConfiguration$ImportedFileChunkSize": "

(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.

", + "CreateFileSystemLustreConfiguration$ImportedFileChunkSize": "

(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

This parameter is not supported for file systems with a data repository association.

", "DataRepositoryAssociation$ImportedFileChunkSize": "

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

", "DataRepositoryConfiguration$ImportedFileChunkSize": "

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

", "UpdateDataRepositoryAssociationRequest$ImportedFileChunkSize": "

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

" @@ -2282,7 +2282,7 @@ "FileCache$StorageCapacity": "

The storage capacity of the cache in gibibytes (GiB).

", "FileCacheCreating$StorageCapacity": "

The storage capacity of the cache in gibibytes (GiB).

", "FileSystem$StorageCapacity": "

The storage capacity of the file system in gibibytes (GiB).

", - "UpdateFileSystemRequest$StorageCapacity": "

Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server, Amazon FSx for Lustre, or Amazon FSx for NetApp ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

For Lustre file systems, the storage capacity target value can be the following:

For more information, see Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.

For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

" + "UpdateFileSystemRequest$StorageCapacity": "

Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

For Lustre file systems, the storage capacity target value can be the following:

For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.

For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.

For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

" } }, "StorageType": { @@ -2691,7 +2691,7 @@ "VolumeCapacity": { "base": null, "refs": { - "CreateOntapVolumeConfiguration$SizeInMegabytes": "

Specifies the size of the volume, in megabytes (MB), that you are creating.

", + "CreateOntapVolumeConfiguration$SizeInMegabytes": "

Specifies the size of the volume, in megabytes (MB), that you are creating. Provide any whole number in the range of 20–104857600 to specify the size of the volume.

", "OntapVolumeConfiguration$SizeInMegabytes": "

The configured size of the volume, in megabytes (MBs).

", "UpdateOntapVolumeConfiguration$SizeInMegabytes": "

Specifies the size of the volume in megabytes.

" } diff --git a/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json b/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json index c34da7df90e..b5cc1311213 100644 --- a/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json +++ b/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": 
"booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,307 +111,238 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "Region" } - ] - }, + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-ca-central-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "endpoint": { - "url": "https://fsx-fips.ca-central-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + }, { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-east-1" + "fn": "getAttr", 
+ "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-east-2" + "conditions": [], + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-east-2.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-west-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-west-2" + "conditions": [], + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-west-2.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": 
"error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-gov-east-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-gov-west-1" + "conditions": [], + "endpoint": { + "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, { "conditions": [], - "endpoint": { - "url": "https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/fsx/2018-03-01/endpoint-tests-1.json b/models/apis/fsx/2018-03-01/endpoint-tests-1.json index 7893d02ee7b..283b2995923 100644 --- a/models/apis/fsx/2018-03-01/endpoint-tests-1.json +++ b/models/apis/fsx/2018-03-01/endpoint-tests-1.json @@ -1,1455 +1,558 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS 
enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://fsx.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, 
- "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region 
us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.af-south-1.amazonaws.com" - } - }, - 
"params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 
with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-1.api.aws" - } - }, - "params": { - 
"UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-3" - } - }, - { - 
"documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 
with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.me-south-1.amazonaws.com" + "url": "https://fsx.af-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.sa-east-1.api.aws" + "url": "https://fsx.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": false, + "Region": "ap-east-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack 
disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.sa-east-1.amazonaws.com" + "url": "https://fsx.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "sa-east-1" + "UseFIPS": false, + "Region": "ap-northeast-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.sa-east-1.api.aws" + "url": "https://fsx.ap-northeast-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "sa-east-1" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.sa-east-1.amazonaws.com" + "url": "https://fsx.ap-northeast-3.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-east-1.api.aws" + "url": "https://fsx.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": false, + "Region": "ap-south-1", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-east-1.amazonaws.com" + "url": 
"https://fsx.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": false, + "Region": "ap-southeast-1", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-east-1.api.aws" + "url": "https://fsx.ap-southeast-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-east-1" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-east-1.amazonaws.com" + "url": "https://fsx.ca-central-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://fsx-fips.ca-central-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-north-1.amazonaws.com.cn" + "url": "https://fsx.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseFIPS": false, + "Region": "eu-central-1", + 
"UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://fsx.eu-north-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-north-1.amazonaws.com.cn" + "url": "https://fsx.eu-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.api.aws" + "url": "https://fsx.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": false, + "Region": "eu-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.amazonaws.com" + "url": "https://fsx.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": false, + "Region": "eu-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-gov-west-1.api.aws" + "url": "https://fsx.eu-west-3.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-west-1" + "Region": "eu-west-3", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-gov-west-1.amazonaws.com" + "url": "https://fsx.me-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "Region": "me-south-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-1.api.aws" + "url": "https://fsx.sa-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": false, + "Region": "sa-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-1.amazonaws.com" + "url": "https://fsx.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-1.api.aws" + "url": "https://fsx-fips.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - 
"UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-1.amazonaws.com" + "url": "https://fsx.us-east-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "Region": "us-east-2", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-2.api.aws" + "url": "https://fsx-fips.us-east-2.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "Region": "us-east-2", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-2.amazonaws.com" + "url": "https://fsx.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": false, + "Region": "us-west-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-2.api.aws" + "url": "https://fsx-fips.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": true, + "Region": "us-west-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with 
FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-2.amazonaws.com" + "url": "https://fsx.us-west-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "Region": "us-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://fsx-fips.us-west-2.amazonaws.com" + } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://fsx-fips.us-east-1.api.aws" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://fsx.us-east-1.api.aws" + } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://fsx.us-iso-east-1.c2s.ic.gov" + "url": "https://fsx.cn-north-1.amazonaws.com.cn" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-3.api.aws" + "url": "https://fsx.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": false, + "Region": "cn-northwest-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-3.amazonaws.com" + "url": "https://fsx-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "cn-north-1", + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-3.api.aws" + "url": "https://fsx-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-3.amazonaws.com" + "url": "https://fsx.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { "UseFIPS": 
false, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "cn-north-1", + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-1.api.aws" + "url": "https://fsx.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-1.amazonaws.com" + "url": "https://fsx-fips.us-gov-east-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-east-1.amazonaws.com" + "url": "https://fsx.us-gov-west-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-2.api.aws" + "url": 
"https://fsx-fips.us-gov-west-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-2.amazonaws.com" + "url": "https://fsx-fips.us-gov-east-1.api.aws" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx.us-east-2.amazonaws.com" + "url": "https://fsx.us-gov-east-1.api.aws" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://fsx-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://fsx-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://fsx.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-northwest-1.amazonaws.com.cn" + "url": "https://fsx.us-iso-east-1.c2s.ic.gov" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -1459,8 +562,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -1472,8 +575,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ -1483,8 +586,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -1496,12 +599,26 @@ }, "params": { "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + 
"expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-isob-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1510,7 +627,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1521,8 +637,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1533,10 +649,16 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 663a119585e..2a44cf5aa41 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -603,6 +603,43 @@ ], "deprecated":true }, + "InvokeWithResponseStream":{ + "name":"InvokeWithResponseStream", + "http":{ + "method":"POST", + "requestUri":"/2021-11-15/functions/{FunctionName}/response-streaming-invocations" + }, + "input":{"shape":"InvokeWithResponseStreamRequest"}, + "output":{"shape":"InvokeWithResponseStreamResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestContentException"}, + {"shape":"RequestTooLargeException"}, + {"shape":"UnsupportedMediaTypeException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"EC2UnexpectedException"}, + 
{"shape":"SubnetIPAddressLimitReachedException"}, + {"shape":"ENILimitReachedException"}, + {"shape":"EFSMountConnectivityException"}, + {"shape":"EFSMountFailureException"}, + {"shape":"EFSMountTimeoutException"}, + {"shape":"EFSIOException"}, + {"shape":"EC2ThrottledException"}, + {"shape":"EC2AccessDeniedException"}, + {"shape":"InvalidSubnetIDException"}, + {"shape":"InvalidSecurityGroupIDException"}, + {"shape":"InvalidZipFileException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"InvalidRuntimeException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ResourceNotReadyException"} + ] + }, "ListAliases":{ "name":"ListAliases", "http":{ @@ -1513,7 +1550,8 @@ "locationName":"Qualifier" }, "AuthType":{"shape":"FunctionUrlAuthType"}, - "Cors":{"shape":"Cors"} + "Cors":{"shape":"Cors"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "CreateFunctionUrlConfigResponse":{ @@ -1529,7 +1567,8 @@ "FunctionArn":{"shape":"FunctionArn"}, "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, - "CreationTime":{"shape":"Timestamp"} + "CreationTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "DatabaseName":{ @@ -2091,7 +2130,8 @@ "CreationTime":{"shape":"Timestamp"}, "LastModifiedTime":{"shape":"Timestamp"}, "Cors":{"shape":"Cors"}, - "AuthType":{"shape":"FunctionUrlAuthType"} + "AuthType":{"shape":"FunctionUrlAuthType"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "FunctionUrlConfigList":{ @@ -2295,7 +2335,8 @@ "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} + "LastModifiedTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "GetLayerVersionByArnRequest":{ @@ -2641,6 +2682,94 @@ }, "deprecated":true }, + "InvokeMode":{ + "type":"string", + "enum":[ + "BUFFERED", + "RESPONSE_STREAM" + ] + }, + 
"InvokeResponseStreamUpdate":{ + "type":"structure", + "members":{ + "Payload":{ + "shape":"Blob", + "eventpayload":true + } + }, + "event":true + }, + "InvokeWithResponseStreamCompleteEvent":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"String"}, + "ErrorDetails":{"shape":"String"}, + "LogResult":{"shape":"String"} + }, + "event":true + }, + "InvokeWithResponseStreamRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"NamespacedFunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvocationType":{ + "shape":"ResponseStreamingInvocationType", + "location":"header", + "locationName":"X-Amz-Invocation-Type" + }, + "LogType":{ + "shape":"LogType", + "location":"header", + "locationName":"X-Amz-Log-Type" + }, + "ClientContext":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Client-Context" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + }, + "Payload":{"shape":"Blob"} + }, + "payload":"Payload" + }, + "InvokeWithResponseStreamResponse":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"Integer", + "location":"statusCode" + }, + "ExecutedVersion":{ + "shape":"Version", + "location":"header", + "locationName":"X-Amz-Executed-Version" + }, + "EventStream":{"shape":"InvokeWithResponseStreamResponseEvent"}, + "ResponseStreamContentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"EventStream" + }, + "InvokeWithResponseStreamResponseEvent":{ + "type":"structure", + "members":{ + "PayloadChunk":{"shape":"InvokeResponseStreamUpdate"}, + "InvokeComplete":{"shape":"InvokeWithResponseStreamCompleteEvent"} + }, + "eventstream":true + }, "KMSAccessDeniedException":{ "type":"structure", "members":{ @@ -3674,6 +3803,13 @@ "error":{"httpStatusCode":502}, "exception":true }, + "ResponseStreamingInvocationType":{ + "type":"string", + "enum":[ + 
"RequestResponse", + "DryRun" + ] + }, "RoleArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" @@ -4224,7 +4360,8 @@ "locationName":"Qualifier" }, "AuthType":{"shape":"FunctionUrlAuthType"}, - "Cors":{"shape":"Cors"} + "Cors":{"shape":"Cors"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "UpdateFunctionUrlConfigResponse":{ @@ -4242,7 +4379,8 @@ "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} + "LastModifiedTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "UpdateRuntimeOn":{ diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 131b1602c97..aaf738ca0de 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -6,7 +6,7 @@ "AddPermission": "

Grants an Amazon Web Service, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.

To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.

", "CreateAlias": "

Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.

You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.

", "CreateCodeSigningConfig": "

Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).

", - "CreateEventSourceMapping": "

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for stream sources (DynamoDB and Kinesis):

For information about which configuration parameters apply to each event source, see the following topics.

", + "CreateEventSourceMapping": "

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for stream sources (DynamoDB and Kinesis):

For information about which configuration parameters apply to each event source, see the following topics.

", "CreateFunction": "

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function.

If another Amazon Web Services account or an Amazon Web Service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions.

", "CreateFunctionUrlConfig": "

Creates a Lambda function URL with the specified configuration parameters. A function URL is a dedicated HTTP(S) endpoint that you can use to invoke your function.

", "DeleteAlias": "

Deletes a Lambda function alias.

", @@ -37,6 +37,7 @@ "GetRuntimeManagementConfig": "

Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null is returned for the ARN. For more information, see Runtime updates.

", "Invoke": "

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

", "InvokeAsync": "

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

", + "InvokeWithResponseStream": "

Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

", "ListAliases": "

Returns a list of aliases for a Lambda function.

", "ListCodeSigningConfigs": "

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

", "ListEventSourceMappings": "

Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.

", @@ -62,7 +63,7 @@ "UntagResource": "

Removes tags from a function.

", "UpdateAlias": "

Updates the configuration of a Lambda function alias.

", "UpdateCodeSigningConfig": "

Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

", - "UpdateEventSourceMapping": "

Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for stream sources (DynamoDB and Kinesis):

For information about which configuration parameters apply to each event source, see the following topics.

", + "UpdateEventSourceMapping": "

Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for stream sources (DynamoDB and Kinesis):

For information about which configuration parameters apply to each event source, see the following topics.

", "UpdateFunctionCode": "

Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.

If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.

If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.

The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.

", "UpdateFunctionConfiguration": "

Modify the version-specific settings of a Lambda function.

When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.

These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Service, use AddPermission.

", "UpdateFunctionEventInvokeConfig": "

Updates the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", @@ -203,30 +204,30 @@ "base": null, "refs": { "AddPermissionRequest$SourceArn": "

For Amazon Web Services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

Note that Lambda configures the comparison using the StringLike operator.

", - "CreateEventSourceMappingRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", + "CreateEventSourceMappingRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "EventSourceMappingConfiguration$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "FunctionConfiguration$SigningProfileVersionArn": "

The ARN of the signing profile version.

", "FunctionConfiguration$SigningJobArn": "

The ARN of the signing job.

", "Layer$SigningProfileVersionArn": "

The Amazon Resource Name (ARN) for a signing profile version.

", "Layer$SigningJobArn": "

The Amazon Resource Name (ARN) of a signing job.

", - "ListEventSourceMappingsRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", + "ListEventSourceMappingsRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "SigningProfileVersionArns$member": null } }, "BatchSize": { "base": null, "refs": { - "CreateEventSourceMappingRequest$BatchSize": "

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

", + "CreateEventSourceMappingRequest$BatchSize": "

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

", "EventSourceMappingConfiguration$BatchSize": "

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

", - "UpdateEventSourceMappingRequest$BatchSize": "

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" + "UpdateEventSourceMappingRequest$BatchSize": "

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" } }, "BisectBatchOnFunctionError": { "base": null, "refs": { - "CreateEventSourceMappingRequest$BisectBatchOnFunctionError": "

(Streams only) If the function returns an error, split the batch in two and retry.

", - "EventSourceMappingConfiguration$BisectBatchOnFunctionError": "

(Streams only) If the function returns an error, split the batch in two and retry. The default value is false.

", - "UpdateEventSourceMappingRequest$BisectBatchOnFunctionError": "

(Streams only) If the function returns an error, split the batch in two and retry.

" + "CreateEventSourceMappingRequest$BisectBatchOnFunctionError": "

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

", + "EventSourceMappingConfiguration$BisectBatchOnFunctionError": "

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.

", + "UpdateEventSourceMappingRequest$BisectBatchOnFunctionError": "

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

" } }, "Blob": { @@ -235,6 +236,8 @@ "FunctionCode$ZipFile": "

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.

", "InvocationRequest$Payload": "

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.

", "InvocationResponse$Payload": "

The response from the function, or an error object.

", + "InvokeResponseStreamUpdate$Payload": "

Data returned by your Lambda function.

", + "InvokeWithResponseStreamRequest$Payload": "

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.

", "LayerVersionContentInput$ZipFile": "

The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.

", "UpdateFunctionCodeRequest$ZipFile": "

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.

" } @@ -500,12 +503,12 @@ "DestinationConfig": { "base": "

A configuration object that specifies the destination of an event after Lambda processes it.

", "refs": { - "CreateEventSourceMappingRequest$DestinationConfig": "

(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", - "EventSourceMappingConfiguration$DestinationConfig": "

(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", - "FunctionEventInvokeConfig$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", - "PutFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", - "UpdateEventSourceMappingRequest$DestinationConfig": "

(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", - "UpdateFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

" + "CreateEventSourceMappingRequest$DestinationConfig": "

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

", + "EventSourceMappingConfiguration$DestinationConfig": "

(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", + "FunctionEventInvokeConfig$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", + "PutFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", + "UpdateEventSourceMappingRequest$DestinationConfig": "

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

", + "UpdateFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

" } }, "DocumentDBEventSourceConfig": { @@ -654,8 +657,8 @@ "EventSourcePosition": { "base": null, "refs": { - "CreateEventSourceMappingRequest$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.

", - "EventSourceMappingConfiguration$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.

" + "CreateEventSourceMappingRequest$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams and Amazon DocumentDB.

", + "EventSourceMappingConfiguration$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams and Amazon DocumentDB.

" } }, "EventSourceToken": { @@ -825,9 +828,9 @@ "FunctionResponseTypeList": { "base": null, "refs": { - "CreateEventSourceMappingRequest$FunctionResponseTypes": "

(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.

", - "EventSourceMappingConfiguration$FunctionResponseTypes": "

(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.

", - "UpdateEventSourceMappingRequest$FunctionResponseTypes": "

(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.

" + "CreateEventSourceMappingRequest$FunctionResponseTypes": "

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

", + "EventSourceMappingConfiguration$FunctionResponseTypes": "

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

", + "UpdateEventSourceMappingRequest$FunctionResponseTypes": "

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" } }, "FunctionUrl": { @@ -1064,7 +1067,8 @@ "base": null, "refs": { "AccountLimit$ConcurrentExecutions": "

The maximum number of simultaneous function executions.

", - "InvocationResponse$StatusCode": "

The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.

" + "InvocationResponse$StatusCode": "

The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.

", + "InvokeWithResponseStreamResponse$StatusCode": "

For a successful request, the HTTP status code is in the 200 range. For the RequestResponse invocation type, this status code is 200. For the DryRun invocation type, this status code is 204.

" } }, "InvalidCodeSignatureException": { @@ -1128,6 +1132,45 @@ "refs": { } }, + "InvokeMode": { + "base": null, + "refs": { + "CreateFunctionUrlConfigRequest$InvokeMode": "

Use one of the following options:

", + "CreateFunctionUrlConfigResponse$InvokeMode": "

Use one of the following options:

", + "FunctionUrlConfig$InvokeMode": "

Use one of the following options:

", + "GetFunctionUrlConfigResponse$InvokeMode": "

Use one of the following options:

", + "UpdateFunctionUrlConfigRequest$InvokeMode": "

Use one of the following options:

", + "UpdateFunctionUrlConfigResponse$InvokeMode": "

Use one of the following options:

" + } + }, + "InvokeResponseStreamUpdate": { + "base": "

A chunk of the streamed response payload.

", + "refs": { + "InvokeWithResponseStreamResponseEvent$PayloadChunk": "

A chunk of the streamed response payload.

" + } + }, + "InvokeWithResponseStreamCompleteEvent": { + "base": "

A response confirming that the event stream is complete.

", + "refs": { + "InvokeWithResponseStreamResponseEvent$InvokeComplete": "

An object that's returned when the stream has ended and all the payload chunks have been returned.

" + } + }, + "InvokeWithResponseStreamRequest": { + "base": null, + "refs": { + } + }, + "InvokeWithResponseStreamResponse": { + "base": null, + "refs": { + } + }, + "InvokeWithResponseStreamResponseEvent": { + "base": "

An object that includes a chunk of the response payload. When the stream has ended, Lambda includes an InvokeComplete object.

", + "refs": { + "InvokeWithResponseStreamResponse$EventStream": "

The stream of response payloads.

" + } + }, "KMSAccessDeniedException": { "base": "

Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.

", "refs": { @@ -1426,7 +1469,8 @@ "LogType": { "base": null, "refs": { - "InvocationRequest$LogType": "

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

" + "InvocationRequest$LogType": "

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", + "InvokeWithResponseStreamRequest$LogType": "

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

" } }, "Long": { @@ -1493,9 +1537,9 @@ "MaximumBatchingWindowInSeconds": { "base": null, "refs": { - "CreateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

", - "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

", - "UpdateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" + "CreateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

", + "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

", + "UpdateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" } }, "MaximumConcurrency": { @@ -1515,9 +1559,9 @@ "MaximumRecordAgeInSeconds": { "base": null, "refs": { - "CreateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "

(Streams only) Discard records older than the specified age. The default value is infinite (-1).

", - "EventSourceMappingConfiguration$MaximumRecordAgeInSeconds": "

(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

", - "UpdateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "

(Streams only) Discard records older than the specified age. The default value is infinite (-1).

" + "CreateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

", + "EventSourceMappingConfiguration$MaximumRecordAgeInSeconds": "

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

", + "UpdateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

" } }, "MaximumRetryAttempts": { @@ -1531,9 +1575,9 @@ "MaximumRetryAttemptsEventSourceMapping": { "base": null, "refs": { - "CreateEventSourceMappingRequest$MaximumRetryAttempts": "

(Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

", - "EventSourceMappingConfiguration$MaximumRetryAttempts": "

(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.

", - "UpdateEventSourceMappingRequest$MaximumRetryAttempts": "

(Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" + "CreateEventSourceMappingRequest$MaximumRetryAttempts": "

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

", + "EventSourceMappingConfiguration$MaximumRetryAttempts": "

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.

", + "UpdateEventSourceMappingRequest$MaximumRetryAttempts": "

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" } }, "MemorySize": { @@ -1567,6 +1611,7 @@ "GetRuntimeManagementConfigRequest$FunctionName": "

The name of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "InvocationRequest$FunctionName": "

The name of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "InvokeAsyncRequest$FunctionName": "

The name of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "InvokeWithResponseStreamRequest$FunctionName": "

The name of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "ListVersionsByFunctionRequest$FunctionName": "

The name of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" } }, @@ -1621,9 +1666,9 @@ "ParallelizationFactor": { "base": null, "refs": { - "CreateEventSourceMappingRequest$ParallelizationFactor": "

(Streams only) The number of batches to process from each shard concurrently.

", - "EventSourceMappingConfiguration$ParallelizationFactor": "

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

", - "UpdateEventSourceMappingRequest$ParallelizationFactor": "

(Streams only) The number of batches to process from each shard concurrently.

" + "CreateEventSourceMappingRequest$ParallelizationFactor": "

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

", + "EventSourceMappingConfiguration$ParallelizationFactor": "

(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.

", + "UpdateEventSourceMappingRequest$ParallelizationFactor": "

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

" } }, "Pattern": { @@ -1757,6 +1802,7 @@ "GetProvisionedConcurrencyConfigRequest$Qualifier": "

The version number or alias name.

", "GetRuntimeManagementConfigRequest$Qualifier": "

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", "InvocationRequest$Qualifier": "

Specify a version or alias to invoke a published version of the function.

", + "InvokeWithResponseStreamRequest$Qualifier": "

The alias name.

", "PutFunctionEventInvokeConfigRequest$Qualifier": "

A version number or alias name.

", "PutProvisionedConcurrencyConfigRequest$Qualifier": "

The version number or alias name.

", "PutRuntimeManagementConfigRequest$Qualifier": "

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", @@ -1826,6 +1872,12 @@ "refs": { } }, + "ResponseStreamingInvocationType": { + "base": null, + "refs": { + "InvokeWithResponseStreamRequest$InvocationType": "

Use one of the following options:

" + } + }, "RoleArn": { "base": null, "refs": { @@ -2106,6 +2158,11 @@ "InvocationRequest$ClientContext": "

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", "InvocationResponse$FunctionError": "

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

", "InvocationResponse$LogResult": "

The last 4 KB of the execution log, which is base64-encoded.

", + "InvokeWithResponseStreamCompleteEvent$ErrorCode": "

An error code.

", + "InvokeWithResponseStreamCompleteEvent$ErrorDetails": "

The details of any returned error.

", + "InvokeWithResponseStreamCompleteEvent$LogResult": "

The last 4 KB of the execution log, which is base64-encoded.

", + "InvokeWithResponseStreamRequest$ClientContext": "

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", + "InvokeWithResponseStreamResponse$ResponseStreamContentType": "

The type of data the stream is returning.

", "KMSAccessDeniedException$Type": null, "KMSAccessDeniedException$Message": null, "KMSDisabledException$Type": null, @@ -2319,9 +2376,9 @@ "TumblingWindowInSeconds": { "base": null, "refs": { - "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.

", - "EventSourceMappingConfiguration$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is 1–900 seconds.

", - "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.

" + "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

", + "EventSourceMappingConfiguration$TumblingWindowInSeconds": "

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

", + "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" } }, "URI": { @@ -2408,6 +2465,7 @@ "CreateAliasRequest$FunctionVersion": "

The function version that the alias invokes.

", "FunctionConfiguration$Version": "

The version of the Lambda function.

", "InvocationResponse$ExecutedVersion": "

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", + "InvokeWithResponseStreamResponse$ExecutedVersion": "

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", "ListAliasesRequest$FunctionVersion": "

Specify a function version to only list aliases that invoke that version.

", "UpdateAliasRequest$FunctionVersion": "

The function version that the alias invokes.

" } diff --git a/models/apis/lambda/2015-03-31/endpoint-tests-1.json b/models/apis/lambda/2015-03-31/endpoint-tests-1.json index 06ce0daa036..09d226188e5 100644 --- a/models/apis/lambda/2015-03-31/endpoint-tests-1.json +++ b/models/apis/lambda/2015-03-31/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": true } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - 
"Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": true } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": true } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": true } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": true } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - 
"Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": true } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": true } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": true } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": false } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": true } }, { @@ -424,9 +424,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": true } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": false } }, { 
@@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": true } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -502,9 +502,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -515,9 +515,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -528,9 +528,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -541,9 +541,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": true } }, { @@ -554,9 +554,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -567,9 +567,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -580,9 +580,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": true } }, { @@ -593,9 +593,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -606,9 +606,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -619,9 +619,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": 
"us-west-2" + "Region": "us-west-2", + "UseDualStack": true } }, { @@ -632,9 +632,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -645,9 +645,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -658,9 +658,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -671,9 +671,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": false } }, { @@ -684,9 +684,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": true } }, { @@ -697,9 +697,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -710,9 +710,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -723,9 +723,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -736,9 +736,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -749,9 +749,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -762,9 +762,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -775,9 +775,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-east-1" + "Region": 
"us-gov-east-1", + "UseDualStack": true } }, { @@ -788,9 +788,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -801,9 +801,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -814,9 +814,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-west-1" + "Region": "us-iso-west-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -827,9 +838,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -840,9 +862,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -853,9 +886,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with 
FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -866,9 +910,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -880,8 +924,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -891,9 +935,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "UseFIPS": true, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -903,11 +947,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "UseFIPS": false, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 081cf97fd4b..5619d901f53 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -278,6 +278,25 @@ {"shape":"ResourceUnavailableException"} ] }, + "CreateRefreshSchedule":{ + "name":"CreateRefreshSchedule", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"CreateRefreshScheduleRequest"}, + "output":{"shape":"CreateRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ResourceExistsException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "CreateTemplate":{ "name":"CreateTemplate", "http":{ @@ -441,6 +460,24 @@ {"shape":"InternalFailureException"} ] }, + "DeleteDataSetRefreshProperties":{ + "name":"DeleteDataSetRefreshProperties", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + "input":{"shape":"DeleteDataSetRefreshPropertiesRequest"}, + "output":{"shape":"DeleteDataSetRefreshPropertiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteDataSource":{ "name":"DeleteDataSource", "http":{ @@ -565,6 +602,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "DeleteRefreshSchedule":{ + "name":"DeleteRefreshSchedule", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}" + }, + "input":{"shape":"DeleteRefreshScheduleRequest"}, + "output":{"shape":"DeleteRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteTemplate":{ "name":"DeleteTemplate", "http":{ @@ -857,6 +911,24 @@ {"shape":"InternalFailureException"} ] }, + "DescribeDataSetRefreshProperties":{ + "name":"DescribeDataSetRefreshProperties", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + "input":{"shape":"DescribeDataSetRefreshPropertiesRequest"}, + "output":{"shape":"DescribeDataSetRefreshPropertiesResponse"}, + "errors":[ 
+ {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "DescribeDataSource":{ "name":"DescribeDataSource", "http":{ @@ -1043,6 +1115,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "DescribeRefreshSchedule":{ + "name":"DescribeRefreshSchedule", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}" + }, + "input":{"shape":"DescribeRefreshScheduleRequest"}, + "output":{"shape":"DescribeRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "DescribeTemplate":{ "name":"DescribeTemplate", "http":{ @@ -1490,6 +1579,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "ListRefreshSchedules":{ + "name":"ListRefreshSchedules", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"ListRefreshSchedulesRequest"}, + "output":{"shape":"ListRefreshSchedulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1647,6 +1753,25 @@ {"shape":"ResourceUnavailableException"} ] }, + "PutDataSetRefreshProperties":{ + "name":"PutDataSetRefreshProperties", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + 
"input":{"shape":"PutDataSetRefreshPropertiesRequest"}, + "output":{"shape":"PutDataSetRefreshPropertiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, "RegisterUser":{ "name":"RegisterUser", "http":{ @@ -2124,6 +2249,24 @@ {"shape":"InternalFailureException"} ] }, + "UpdateRefreshSchedule":{ + "name":"UpdateRefreshSchedule", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"UpdateRefreshScheduleRequest"}, + "output":{"shape":"UpdateRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "UpdateTemplate":{ "name":"UpdateTemplate", "http":{ @@ -4198,6 +4341,39 @@ } } }, + "CreateRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "Schedule" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Schedule":{"shape":"RefreshSchedule"} + } + }, + "CreateRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "CreateTemplateAliasRequest":{ "type":"structure", "required":[ @@ -5008,6 +5184,13 @@ "member":{"shape":"DataSetReference"}, "min":1 }, + 
"DataSetRefreshProperties":{ + "type":"structure", + "required":["RefreshConfiguration"], + "members":{ + "RefreshConfiguration":{"shape":"RefreshConfiguration"} + } + }, "DataSetSchema":{ "type":"structure", "members":{ @@ -5351,6 +5534,24 @@ "CustomValue":{"shape":"SensitiveTimestamp"} } }, + "DayOfMonth":{ + "type":"string", + "max":17, + "min":1, + "pattern":"^(?:LAST_DAY_OF_MONTH|1[0-9]|2[0-8]|[12]|[3-9])$" + }, + "DayOfWeek":{ + "type":"string", + "enum":[ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, "DecimalDefaultValueList":{ "type":"list", "member":{"shape":"SensitiveDoubleObject"}, @@ -5581,6 +5782,35 @@ "RequestId":{"shape":"String"} } }, + "DeleteDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DeleteDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, "DeleteDataSetRequest":{ "type":"structure", "required":[ @@ -5853,6 +6083,43 @@ } } }, + "DeleteRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "ScheduleId" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ScheduleId":{ + "shape":"String", + "location":"uri", + "locationName":"ScheduleId" + } + } + }, + "DeleteRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } 
+ }, "DeleteTemplateAliasRequest":{ "type":"structure", "required":[ @@ -6403,6 +6670,36 @@ } } }, + "DescribeDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "DataSetRefreshProperties":{"shape":"DataSetRefreshProperties"} + } + }, "DescribeDataSetRequest":{ "type":"structure", "required":[ @@ -6793,6 +7090,43 @@ } } }, + "DescribeRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "ScheduleId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "ScheduleId":{ + "shape":"String", + "location":"uri", + "locationName":"ScheduleId" + } + } + }, + "DescribeRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "RefreshSchedule":{"shape":"RefreshSchedule"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "DescribeTemplateAliasRequest":{ "type":"structure", "required":[ @@ -8896,6 +9230,13 @@ "error":{"httpStatusCode":403}, "exception":true }, + "IncrementalRefresh":{ + "type":"structure", + "required":["LookbackWindow"], + "members":{ + "LookbackWindow":{"shape":"LookbackWindow"} + } + }, "Ingestion":{ "type":"structure", "required":[ @@ -8969,7 +9310,8 @@ "REFRESH_SUPPRESSED_BY_EDIT", "PERMISSION_NOT_FOUND", "ELASTICSEARCH_CURSOR_NOT_ENABLED", - "CURSOR_NOT_ENABLED" + "CURSOR_NOT_ENABLED", + 
"DUPLICATE_COLUMN_NAMES_FOUND" ] }, "IngestionId":{ @@ -10051,6 +10393,36 @@ } } }, + "ListRefreshSchedulesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "ListRefreshSchedulesResponse":{ + "type":"structure", + "members":{ + "RefreshSchedules":{"shape":"RefreshSchedules"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -10478,6 +10850,27 @@ "max":1800, "min":-1800 }, + "LookbackWindow":{ + "type":"structure", + "required":[ + "ColumnName", + "Size", + "SizeUnit" + ], + "members":{ + "ColumnName":{"shape":"String"}, + "Size":{"shape":"PositiveLong"}, + "SizeUnit":{"shape":"LookbackWindowSizeUnit"} + } + }, + "LookbackWindowSizeUnit":{ + "type":"string", + "enum":[ + "HOUR", + "DAY", + "WEEK" + ] + }, "ManifestFileLocation":{ "type":"structure", "required":[ @@ -11555,6 +11948,10 @@ "type":"integer", "min":1 }, + "PositiveLong":{ + "type":"long", + "min":1 + }, "PostgreSqlParameters":{ "type":"structure", "required":[ @@ -11657,6 +12054,37 @@ "max":2000, "min":1 }, + "PutDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "DataSetRefreshProperties" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "DataSetRefreshProperties":{"shape":"DataSetRefreshProperties"} + } + }, + "PutDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, 
"Query":{ "type":"string", "max":256, @@ -11912,6 +12340,53 @@ "AFTER_CUSTOM_LABEL" ] }, + "RefreshConfiguration":{ + "type":"structure", + "required":["IncrementalRefresh"], + "members":{ + "IncrementalRefresh":{"shape":"IncrementalRefresh"} + } + }, + "RefreshFrequency":{ + "type":"structure", + "required":["Interval"], + "members":{ + "Interval":{"shape":"RefreshInterval"}, + "RefreshOnDay":{"shape":"ScheduleRefreshOnEntity"}, + "Timezone":{"shape":"String"}, + "TimeOfTheDay":{"shape":"String"} + } + }, + "RefreshInterval":{ + "type":"string", + "enum":[ + "MINUTE15", + "MINUTE30", + "HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY" + ] + }, + "RefreshSchedule":{ + "type":"structure", + "required":[ + "ScheduleId", + "ScheduleFrequency", + "RefreshType" + ], + "members":{ + "ScheduleId":{"shape":"String"}, + "ScheduleFrequency":{"shape":"RefreshFrequency"}, + "StartAfterDateTime":{"shape":"Timestamp"}, + "RefreshType":{"shape":"IngestionType"}, + "Arn":{"shape":"Arn"} + } + }, + "RefreshSchedules":{ + "type":"list", + "member":{"shape":"RefreshSchedule"} + }, "RegisterUserRequest":{ "type":"structure", "required":[ @@ -12293,7 +12768,8 @@ "required":["TagRules"], "members":{ "Status":{"shape":"Status"}, - "TagRules":{"shape":"RowLevelPermissionTagRuleList"} + "TagRules":{"shape":"RowLevelPermissionTagRuleList"}, + "TagRuleConfigurations":{"shape":"RowLevelPermissionTagRuleConfigurationList"} } }, "RowLevelPermissionTagDelimiter":{ @@ -12313,6 +12789,18 @@ "MatchAllValue":{"shape":"SessionTagValue"} } }, + "RowLevelPermissionTagRuleConfiguration":{ + "type":"list", + "member":{"shape":"SessionTagKey"}, + "max":50, + "min":1 + }, + "RowLevelPermissionTagRuleConfigurationList":{ + "type":"list", + "member":{"shape":"RowLevelPermissionTagRuleConfiguration"}, + "max":50, + "min":1 + }, "RowLevelPermissionTagRuleList":{ "type":"list", "member":{"shape":"RowLevelPermissionTagRule"}, @@ -12452,6 +12940,13 @@ "ColumnHierarchies":{"shape":"ColumnHierarchyList"} } }, + 
"ScheduleRefreshOnEntity":{ + "type":"structure", + "members":{ + "DayOfWeek":{"shape":"DayOfWeek"}, + "DayOfMonth":{"shape":"DayOfMonth"} + } + }, "ScrollBarOptions":{ "type":"structure", "members":{ @@ -15042,6 +15537,39 @@ } } }, + "UpdateRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "Schedule" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Schedule":{"shape":"RefreshSchedule"} + } + }, + "UpdateRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "UpdateResourcePermissionList":{ "type":"list", "member":{"shape":"ResourcePermission"}, diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 99d7d604d73..181286ff0d4 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -16,6 +16,7 @@ "CreateIAMPolicyAssignment": "

Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy assignment is attached to the specified groups or users of Amazon QuickSight. Assignment names are unique per Amazon Web Services account. To avoid overwriting rules in other namespaces, use assignment names that are unique.

", "CreateIngestion": "

Creates and starts a new SPICE ingestion for a dataset. You can manually refresh datasets in an Enterprise edition account 32 times in a 24-hour period. You can manually refresh datasets in a Standard edition account 8 times in a 24-hour period. Each 24-hour period is measured starting 24 hours before the current date and time.

Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the Amazon Web Services Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

", "CreateNamespace": "

(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.

A namespace allows you to isolate the Amazon QuickSight users and groups that are registered for that namespace. Users that access the namespace can share assets only with other users or groups in the same namespace. They can't see users and groups in other namespaces. You can create a namespace after your Amazon Web Services account is subscribed to Amazon QuickSight. The namespace must be unique within the Amazon Web Services account. By default, there is a limit of 100 namespaces per Amazon Web Services account. To increase your limit, create a ticket with Amazon Web Services Support.

", + "CreateRefreshSchedule": "

Creates a refresh schedule for a dataset. You can create up to 5 different schedules for a single dataset.

", "CreateTemplate": "

Creates a template either from a TemplateDefinition or from an existing Amazon QuickSight analysis or template. You can use the resulting template to create additional dashboards, templates, or analyses.

A template is an entity in Amazon QuickSight that encapsulates the metadata required to create an analysis and that you can use to create s dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

", "CreateTemplateAlias": "

Creates a template alias for a template.

", "CreateTheme": "

Creates a theme.

A theme is set of configuration options for color and layout. Themes apply to analyses and dashboards. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.

", @@ -25,6 +26,7 @@ "DeleteAnalysis": "

Deletes an analysis from Amazon QuickSight. You can optionally include a recovery window during which you can restore the analysis. If you don't specify a recovery window value, the operation defaults to 30 days. Amazon QuickSight attaches a DeletionTime stamp to the response that specifies the end of the recovery window. At the end of the recovery window, Amazon QuickSight deletes the analysis permanently.

At any time before recovery window ends, you can use the RestoreAnalysis API operation to remove the DeletionTime stamp and cancel the deletion of the analysis. The analysis remains visible in the API until it's deleted, so you can describe it but you can't make a template from it.

An analysis that's scheduled for deletion isn't accessible in the Amazon QuickSight console. To access it in the console, restore it. Deleting an analysis doesn't delete the dashboards that you publish from it.

", "DeleteDashboard": "

Deletes a dashboard.

", "DeleteDataSet": "

Deletes a dataset.

", + "DeleteDataSetRefreshProperties": "

Deletes the dataset refresh properties of the dataset.

", "DeleteDataSource": "

Deletes the data source permanently. This operation breaks all the datasets that reference the deleted data source.

", "DeleteFolder": "

Deletes an empty folder.

", "DeleteFolderMembership": "

Removes an asset, such as a dashboard, analysis, or dataset, from a folder.

", @@ -32,6 +34,7 @@ "DeleteGroupMembership": "

Removes a user from a group so that the user is no longer a member of the group.

", "DeleteIAMPolicyAssignment": "

Deletes an existing IAM policy assignment.

", "DeleteNamespace": "

Deletes a namespace and the users and groups that are associated with the namespace. This is an asynchronous process. Assets including dashboards, analyses, datasets and data sources are not deleted. To delete these assets, you use the API operations for the relevant asset.

", + "DeleteRefreshSchedule": "

Deletes a refresh schedule from a dataset.

", "DeleteTemplate": "

Deletes a template.

", "DeleteTemplateAlias": "

Deletes the item that the specified template alias points to. If you provide a specific alias, you delete the version of the template that the alias points to.

", "DeleteTheme": "

Deletes a theme.

", @@ -49,6 +52,7 @@ "DescribeDashboardPermissions": "

Describes read and write permissions for a dashboard.

", "DescribeDataSet": "

Describes a dataset. This operation doesn't support datasets that include uploaded files as a source.

", "DescribeDataSetPermissions": "

Describes the permissions on a dataset.

The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id.

", + "DescribeDataSetRefreshProperties": "

Describes the refresh properties of a dataset.

", "DescribeDataSource": "

Describes a data source.

", "DescribeDataSourcePermissions": "

Describes the resource permissions for a data source.

", "DescribeFolder": "

Describes a folder.

", @@ -60,6 +64,7 @@ "DescribeIngestion": "

Describes a SPICE ingestion.

", "DescribeIpRestriction": "

Provides a summary and status of IP rules.

", "DescribeNamespace": "

Describes the current namespace.

", + "DescribeRefreshSchedule": "

Provides a summary of a refresh schedule.

", "DescribeTemplate": "

Describes a template's metadata.

", "DescribeTemplateAlias": "

Describes the template alias for a template.

", "DescribeTemplateDefinition": "

Provides a detailed description of the definition of a template.

If you do not need to know details about the content of a template, for instance if you are trying to check the status of a recently created or updated template, use the DescribeTemplate instead.

", @@ -85,6 +90,7 @@ "ListIAMPolicyAssignmentsForUser": "

Lists all the IAM policy assignments, including the Amazon Resource Names (ARNs) for the IAM policies assigned to the specified user and group or groups that the user belongs to.

", "ListIngestions": "

Lists the history of SPICE ingestions for a dataset.

", "ListNamespaces": "

Lists the namespaces for the specified Amazon Web Services account. This operation doesn't list deleted namespaces.

", + "ListRefreshSchedules": "

Lists the refresh schedules of a dataset. Each dataset can have up to 5 schedules.

", "ListTagsForResource": "

Lists the tags assigned to a resource.

", "ListTemplateAliases": "

Lists all the aliases of a template.

", "ListTemplateVersions": "

Lists all the versions of the templates in the current Amazon QuickSight account.

", @@ -94,6 +100,7 @@ "ListThemes": "

Lists all the themes in the current Amazon Web Services account.

", "ListUserGroups": "

Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.

", "ListUsers": "

Returns a list of all of the Amazon QuickSight users belonging to this account.

", + "PutDataSetRefreshProperties": "

Creates or updates the dataset refresh properties for the dataset.

", "RegisterUser": "

Creates an Amazon QuickSight user whose identity is associated with the Identity and Access Management (IAM) identity or role specified in the request. When you register a new user from the Amazon QuickSight API, Amazon QuickSight generates a registration URL. The user accesses this registration URL to create their account. Amazon QuickSight doesn't send a registration email to users who are registered from the Amazon QuickSight API. If you want new users to receive a registration email, then add those users in the Amazon QuickSight console. For more information on registering a new user in the Amazon QuickSight console, see Inviting users to access Amazon QuickSight.

", "RestoreAnalysis": "

Restores an analysis.

", "SearchAnalyses": "

Searches for analyses that belong to the user specified in the filter.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

", @@ -121,6 +128,7 @@ "UpdateIAMPolicyAssignment": "

Updates an existing IAM policy assignment. This operation updates only the optional parameter or parameters that are specified in the request. This overwrites all of the users included in Identities.

", "UpdateIpRestriction": "

Updates the content and status of IP rules. To use this operation, you need to provide the entire map of rules. You can use the DescribeIpRestriction operation to get the current rule map.

", "UpdatePublicSharingSettings": "

Use the UpdatePublicSharingSettings operation to turn on or turn off the public sharing settings of an Amazon QuickSight dashboard.

To use this operation, turn on session capacity pricing for your Amazon QuickSight account.

Before you can turn on public sharing on your account, make sure to give public sharing permissions to an administrative user in the Identity and Access Management (IAM) console. For more information on using IAM with Amazon QuickSight, see Using Amazon QuickSight with IAM in the Amazon QuickSight User Guide.

", + "UpdateRefreshSchedule": "

Updates a refresh schedule for a dataset.

", "UpdateTemplate": "

Updates a template from an existing Amazon QuickSight analysis or another template.

", "UpdateTemplateAlias": "

Updates the template alias of a template.

", "UpdateTemplatePermissions": "

Updates the resource permissions for a template.

", @@ -438,6 +446,7 @@ "CreateIAMPolicyAssignmentResponse$PolicyArn": "

The ARN for the IAM policy that is applied to the Amazon QuickSight users and groups specified in this assignment.

", "CreateIngestionResponse$Arn": "

The Amazon Resource Name (ARN) for the data ingestion.

", "CreateNamespaceResponse$Arn": "

The ARN of the Amazon QuickSight namespace you created.

", + "CreateRefreshScheduleResponse$Arn": "

The Amazon Resource Name (ARN) for the refresh schedule.

", "CreateTemplateResponse$Arn": "

The ARN for the template.

", "CreateTemplateResponse$VersionArn": "

The ARN for the template, including the version information of the first version.

", "CreateThemeResponse$Arn": "

The Amazon Resource Name (ARN) for the theme.

", @@ -463,6 +472,7 @@ "DeleteDataSetResponse$Arn": "

The Amazon Resource Name (ARN) of the dataset.

", "DeleteDataSourceResponse$Arn": "

The Amazon Resource Name (ARN) of the data source that you deleted.

", "DeleteFolderResponse$Arn": "

The Amazon Resource Name of the deleted folder.

", + "DeleteRefreshScheduleResponse$Arn": "

The Amazon Resource Name (ARN) for the refresh schedule.

", "DeleteTemplateAliasResponse$Arn": "

The Amazon Resource Name (ARN) of the template you want to delete.

", "DeleteTemplateResponse$Arn": "

The Amazon Resource Name (ARN) of the resource.

", "DeleteThemeAliasResponse$Arn": "

The Amazon Resource Name (ARN) of the theme resource using the deleted alias.

", @@ -476,6 +486,7 @@ "DescribeDataSourcePermissionsResponse$DataSourceArn": "

The Amazon Resource Name (ARN) of the data source.

", "DescribeFolderPermissionsResponse$Arn": "

The Amazon Resource Name (ARN) for the folder.

", "DescribeFolderResolvedPermissionsResponse$Arn": "

The Amazon Resource Name (ARN) of the folder.

", + "DescribeRefreshScheduleResponse$Arn": "

The Amazon Resource Name (ARN) for the refresh schedule.

", "DescribeTemplateDefinitionResponse$ThemeArn": "

The ARN of the theme of the template.

", "DescribeTemplatePermissionsResponse$TemplateArn": "

The Amazon Resource Name (ARN) of the template.

", "DescribeThemePermissionsResponse$ThemeArn": "

The Amazon Resource Name (ARN) of the theme.

", @@ -494,6 +505,7 @@ "MemberIdArnPair$MemberArn": "

The Amazon Resource Name (ARN) of the member.

", "NamespaceInfoV2$Arn": "

The namespace ARN.

", "Path$member": null, + "RefreshSchedule$Arn": "

The Amazon Resource Name (ARN) for the refresh schedule.

", "RelationalTable$DataSourceArn": "

The Amazon Resource Name (ARN) for the data source.

", "RestoreAnalysisResponse$Arn": "

The Amazon Resource Name (ARN) of the analysis that you're restoring.

", "RowLevelPermissionDataSet$Arn": "

The Amazon Resource Name (ARN) of the dataset that contains permissions for RLS.

", @@ -531,6 +543,7 @@ "UpdateFolderResponse$Arn": "

The Amazon Resource Name (ARN) of the folder.

", "UpdateIAMPolicyAssignmentRequest$PolicyArn": "

The ARN for the IAM policy to apply to the Amazon QuickSight users and groups specified in this assignment.

", "UpdateIAMPolicyAssignmentResponse$PolicyArn": "

The ARN for the IAM policy applied to the Amazon QuickSight users and groups specified in this assignment.

", + "UpdateRefreshScheduleResponse$Arn": "

The Amazon Resource Name (ARN) for the refresh schedule.

", "UpdateTemplatePermissionsResponse$TemplateArn": "

The Amazon Resource Name (ARN) of the template.

", "UpdateTemplateResponse$Arn": "

The Amazon Resource Name (ARN) for the template.

", "UpdateTemplateResponse$VersionArn": "

The ARN for the template, including the version information of the first version.

", @@ -601,6 +614,7 @@ "CreateIAMPolicyAssignmentRequest$AwsAccountId": "

The ID of the Amazon Web Services account where you want to assign an IAM policy to Amazon QuickSight users or groups.

", "CreateIngestionRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "CreateNamespaceRequest$AwsAccountId": "

The ID for the Amazon Web Services account that you want to create the Amazon QuickSight namespace in.

", + "CreateRefreshScheduleRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "CreateTemplateAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template that you creating an alias for.

", "CreateTemplateRequest$AwsAccountId": "

The ID for the Amazon Web Services account that the group is in. You use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "CreateThemeAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the theme for the new theme alias.

", @@ -609,6 +623,7 @@ "DeleteAccountSubscriptionRequest$AwsAccountId": "

The Amazon Web Services account ID of the account that you want to delete.

", "DeleteAnalysisRequest$AwsAccountId": "

The ID of the Amazon Web Services account where you want to delete an analysis.

", "DeleteDashboardRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the dashboard that you're deleting.

", + "DeleteDataSetRefreshPropertiesRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DeleteDataSetRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DeleteDataSourceRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DeleteFolderMembershipRequest$AwsAccountId": "

The ID for the Amazon Web Services account that contains the folder.

", @@ -617,6 +632,7 @@ "DeleteGroupRequest$AwsAccountId": "

The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "DeleteIAMPolicyAssignmentRequest$AwsAccountId": "

The Amazon Web Services account ID where you want to delete the IAM policy assignment.

", "DeleteNamespaceRequest$AwsAccountId": "

The ID for the Amazon Web Services account that you want to delete the Amazon QuickSight namespace from.

", + "DeleteRefreshScheduleRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DeleteTemplateAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the item to delete.

", "DeleteTemplateRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template that you're deleting.

", "DeleteThemeAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the theme alias to delete.

", @@ -634,6 +650,7 @@ "DescribeDashboardPermissionsRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the dashboard that you're describing permissions for.

", "DescribeDashboardRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the dashboard that you're describing.

", "DescribeDataSetPermissionsRequest$AwsAccountId": "

The Amazon Web Services account ID.

", + "DescribeDataSetRefreshPropertiesRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DescribeDataSetRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DescribeDataSourcePermissionsRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DescribeDataSourceRequest$AwsAccountId": "

The Amazon Web Services account ID.

", @@ -647,6 +664,7 @@ "DescribeIpRestrictionRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the IP rules.

", "DescribeIpRestrictionResponse$AwsAccountId": "

The ID of the Amazon Web Services account that contains the IP rules.

", "DescribeNamespaceRequest$AwsAccountId": "

The ID for the Amazon Web Services account that contains the Amazon QuickSight namespace that you want to describe.

", + "DescribeRefreshScheduleRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "DescribeTemplateAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template alias that you're describing.

", "DescribeTemplateDefinitionRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template. You must be using the Amazon Web Services account that the template is in.

", "DescribeTemplatePermissionsRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template that you're describing.

", @@ -672,6 +690,7 @@ "ListIAMPolicyAssignmentsRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains these IAM policy assignments.

", "ListIngestionsRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "ListNamespacesRequest$AwsAccountId": "

The ID for the Amazon Web Services account that contains the Amazon QuickSight namespaces that you want to list.

", + "ListRefreshSchedulesRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "ListTemplateAliasesRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template aliases that you're listing.

", "ListTemplateVersionsRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the templates that you're listing.

", "ListTemplatesRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the templates that you're listing.

", @@ -680,6 +699,7 @@ "ListThemesRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the themes that you're listing.

", "ListUserGroupsRequest$AwsAccountId": "

The Amazon Web Services account ID that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "ListUsersRequest$AwsAccountId": "

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", + "PutDataSetRefreshPropertiesRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "RegisterUserRequest$AwsAccountId": "

The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.

", "RestoreAnalysisRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the analysis.

", "SearchAnalysesRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the analyses that you're searching for.

", @@ -707,6 +727,7 @@ "UpdateIpRestrictionRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the IP rules.

", "UpdateIpRestrictionResponse$AwsAccountId": "

The ID of the Amazon Web Services account that contains the IP rules.

", "UpdatePublicSharingSettingsRequest$AwsAccountId": "

The Amazon Web Services account ID associated with your Amazon QuickSight subscription.

", + "UpdateRefreshScheduleRequest$AwsAccountId": "

The Amazon Web Services account ID.

", "UpdateTemplateAliasRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template alias that you're updating.

", "UpdateTemplatePermissionsRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template.

", "UpdateTemplateRequest$AwsAccountId": "

The ID of the Amazon Web Services account that contains the template that you're updating.

", @@ -1798,6 +1819,16 @@ "refs": { } }, + "CreateRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "CreateRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "CreateTemplateAliasRequest": { "base": null, "refs": { @@ -2321,6 +2352,13 @@ "TemplateSourceAnalysis$DataSetReferences": "

A structure containing information about the dataset references used as placeholders in the template.

" } }, + "DataSetRefreshProperties": { + "base": "

The refresh properties of a dataset.

", + "refs": { + "DescribeDataSetRefreshPropertiesResponse$DataSetRefreshProperties": "

The dataset refresh properties.

", + "PutDataSetRefreshPropertiesRequest$DataSetRefreshProperties": "

The dataset refresh properties.

" + } + }, "DataSetSchema": { "base": "

Dataset schema.

", "refs": { @@ -2559,6 +2597,18 @@ "DateTimeParameterDeclaration$ValueWhenUnset": "

The configuration that defines the default value of a DateTime parameter when a value has not been set.

" } }, + "DayOfMonth": { + "base": null, + "refs": { + "ScheduleRefreshOnEntity$DayOfMonth": "

The day of the month that you want to schedule refresh on.

" + } + }, + "DayOfWeek": { + "base": null, + "refs": { + "ScheduleRefreshOnEntity$DayOfWeek": "

The day of the week that you want to schedule a refresh on.

" + } + }, "DecimalDefaultValueList": { "base": null, "refs": { @@ -2686,6 +2736,16 @@ "refs": { } }, + "DeleteDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "DeleteDataSetRequest": { "base": null, "refs": { @@ -2766,6 +2826,16 @@ "refs": { } }, + "DeleteRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "DeleteRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "DeleteTemplateAliasRequest": { "base": null, "refs": { @@ -2932,6 +3002,16 @@ "refs": { } }, + "DescribeDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "DescribeDataSetRequest": { "base": null, "refs": { @@ -3052,6 +3132,16 @@ "refs": { } }, + "DescribeRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "DescribeRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "DescribeTemplateAliasRequest": { "base": null, "refs": { @@ -4665,6 +4755,12 @@ "refs": { } }, + "IncrementalRefresh": { + "base": "

The incremental refresh configuration for a dataset.

", + "refs": { + "RefreshConfiguration$IncrementalRefresh": "

The incremental refresh for the dataset.

" + } + }, "Ingestion": { "base": "

Information about the SPICE ingestion for a dataset.

", "refs": { @@ -4717,7 +4813,8 @@ "IngestionType": { "base": "This defines the type of ingestion user wants to trigger. This is part of create ingestion request.", "refs": { - "CreateIngestionRequest$IngestionType": "

The type of ingestion that you want to create.

" + "CreateIngestionRequest$IngestionType": "

The type of ingestion that you want to create.

", + "RefreshSchedule$RefreshType": "

The type of refresh that a dataset undergoes. Valid values are as follows:

For more information on full and incremental refreshes, see Refreshing SPICE data in the Amazon QuickSight User Guide.

" } }, "Ingestions": { @@ -5282,6 +5379,16 @@ "refs": { } }, + "ListRefreshSchedulesRequest": { + "base": null, + "refs": { + } + }, + "ListRefreshSchedulesResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -5456,6 +5563,18 @@ "GeospatialCoordinateBounds$East": "

The longitude of the east bound of the geospatial coordinate bounds.

" } }, + "LookbackWindow": { + "base": "

The lookback window setup of an incremental refresh configuration.

", + "refs": { + "IncrementalRefresh$LookbackWindow": "

The lookback window setup for an incremental refresh configuration.

" + } + }, + "LookbackWindowSizeUnit": { + "base": null, + "refs": { + "LookbackWindow$SizeUnit": "

The size unit that is used for the lookback window column. Valid values for this structure are HOUR, DAY, and WEEK.

" + } + }, "ManifestFileLocation": { "base": "

Amazon S3 manifest file location.

", "refs": { @@ -6409,6 +6528,12 @@ "UploadSettings$StartFromRow": "

A row number to start reading data from.

" } }, + "PositiveLong": { + "base": null, + "refs": { + "LookbackWindow$Size": "

The lookback window column size.

" + } + }, "PostgreSqlParameters": { "base": "

The parameters for PostgreSQL.

", "refs": { @@ -6490,6 +6615,16 @@ "ProjectOperation$ProjectedColumns": "

Projected columns.

" } }, + "PutDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "PutDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "Query": { "base": null, "refs": { @@ -6684,6 +6819,39 @@ "ReferenceLineValueLabelConfiguration$RelativePosition": "

The relative position of the value label. Choose one of the following options:

" } }, + "RefreshConfiguration": { + "base": "

The refresh configuration of a dataset.

", + "refs": { + "DataSetRefreshProperties$RefreshConfiguration": "

The refresh configuration for a dataset.

" + } + }, + "RefreshFrequency": { + "base": "

Specifies the interval between each scheduled refresh of a dataset.

", + "refs": { + "RefreshSchedule$ScheduleFrequency": "

The frequency for the refresh schedule.

" + } + }, + "RefreshInterval": { + "base": null, + "refs": { + "RefreshFrequency$Interval": "

The interval between scheduled refreshes. Valid values are as follows:

" + } + }, + "RefreshSchedule": { + "base": "

The refresh schedule of a dataset.

", + "refs": { + "CreateRefreshScheduleRequest$Schedule": "

The refresh schedule.

", + "DescribeRefreshScheduleResponse$RefreshSchedule": "

The refresh schedule.

", + "RefreshSchedules$member": "

A list of RefreshSchedule objects.

", + "UpdateRefreshScheduleRequest$Schedule": "

The refresh schedule.

" + } + }, + "RefreshSchedules": { + "base": null, + "refs": { + "ListRefreshSchedulesResponse$RefreshSchedules": "

The list of refresh schedules for the dataset.

" + } + }, "RegisterUserRequest": { "base": null, "refs": { @@ -6809,20 +6977,27 @@ "CreateDataSetResponse$IngestionId": "

The ID of the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.

", "CreateDataSourceRequest$DataSourceId": "

An ID for the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "CreateDataSourceResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "CreateRefreshScheduleRequest$DataSetId": "

The ID of the dataset.

", "DataSet$DataSetId": "

The ID of the dataset.

", "DataSetSummary$DataSetId": "

The ID of the dataset.

", "DataSource$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DataSourceSummary$DataSourceId": "

The unique ID of the data source.

", + "DeleteDataSetRefreshPropertiesRequest$DataSetId": "

The ID of the dataset.

", "DeleteDataSetRequest$DataSetId": "

The ID for the dataset that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DeleteDataSetResponse$DataSetId": "

The ID for the dataset that you want to delete. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DeleteDataSourceRequest$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DeleteDataSourceResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "DeleteRefreshScheduleRequest$DataSetId": "

The ID of the dataset.

", "DescribeDataSetPermissionsRequest$DataSetId": "

The ID for the dataset that you want to describe permissions for. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DescribeDataSetPermissionsResponse$DataSetId": "

The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "DescribeDataSetRefreshPropertiesRequest$DataSetId": "

The ID of the dataset.

", "DescribeDataSetRequest$DataSetId": "

The ID for the dataset that you want to describe. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DescribeDataSourcePermissionsRequest$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DescribeDataSourcePermissionsResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "DescribeDataSourceRequest$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "DescribeRefreshScheduleRequest$DataSetId": "

The ID of the dataset.

", + "ListRefreshSchedulesRequest$DataSetId": "

The ID of the dataset.

", + "PutDataSetRefreshPropertiesRequest$DataSetId": "

The ID of the dataset.

", "UpdateDataSetPermissionsRequest$DataSetId": "

The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "UpdateDataSetPermissionsResponse$DataSetId": "

The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "UpdateDataSetRequest$DataSetId": "

The ID for the dataset that you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", @@ -6831,7 +7006,8 @@ "UpdateDataSourcePermissionsRequest$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "UpdateDataSourcePermissionsResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", "UpdateDataSourceRequest$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", - "UpdateDataSourceResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

" + "UpdateDataSourceResponse$DataSourceId": "

The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.

", + "UpdateRefreshScheduleRequest$DataSetId": "

The ID of the dataset.

" } }, "ResourceName": { @@ -7051,6 +7227,18 @@ "RowLevelPermissionTagRuleList$member": null } }, + "RowLevelPermissionTagRuleConfiguration": { + "base": null, + "refs": { + "RowLevelPermissionTagRuleConfigurationList$member": null + } + }, + "RowLevelPermissionTagRuleConfigurationList": { + "base": null, + "refs": { + "RowLevelPermissionTagConfiguration$TagRuleConfigurations": "

A list of tag configuration rules to apply to a dataset. All tag configurations have the OR condition. Tags within each tile will be joined (AND). At least one rule in this structure must have all tag values assigned to it to apply Row-level security (RLS) to the dataset.

" + } + }, "RowLevelPermissionTagRuleList": { "base": null, "refs": { @@ -7153,6 +7341,12 @@ "Visual$ScatterPlotVisual": "

A scatter plot.

For more information, see Using scatter plots in the Amazon QuickSight User Guide.

" } }, + "ScheduleRefreshOnEntity": { + "base": "

The refresh on entity for weekly or monthly schedules.

", + "refs": { + "RefreshFrequency$RefreshOnDay": "

The day of the week that you want to schedule the refresh on. This value is required for weekly and monthly refresh intervals.

" + } + }, "ScrollBarOptions": { "base": "

The visual display options for a data zoom scroll bar.

", "refs": { @@ -7434,6 +7628,7 @@ "base": null, "refs": { "RowLevelPermissionTagRule$TagKey": "

The unique key for a tag.

", + "RowLevelPermissionTagRuleConfiguration$member": null, "SessionTag$Key": "

The key for the tag.

" } }, @@ -7961,6 +8156,7 @@ "CreateIAMPolicyAssignmentResponse$Status": "

The HTTP status of the request.

", "CreateIngestionResponse$Status": "

The HTTP status of the request.

", "CreateNamespaceResponse$Status": "

The HTTP status of the request.

", + "CreateRefreshScheduleResponse$Status": "

The HTTP status of the request.

", "CreateTemplateAliasResponse$Status": "

The HTTP status of the request.

", "CreateTemplateResponse$Status": "

The HTTP status of the request.

", "CreateThemeAliasResponse$Status": "

The HTTP status of the request.

", @@ -7969,6 +8165,7 @@ "DeleteAccountSubscriptionResponse$Status": "

The HTTP status of the request.

", "DeleteAnalysisResponse$Status": "

The HTTP status of the request.

", "DeleteDashboardResponse$Status": "

The HTTP status of the request.

", + "DeleteDataSetRefreshPropertiesResponse$Status": "

The HTTP status of the request.

", "DeleteDataSetResponse$Status": "

The HTTP status of the request.

", "DeleteDataSourceResponse$Status": "

The HTTP status of the request.

", "DeleteFolderMembershipResponse$Status": "

The HTTP status of the request.

", @@ -7977,6 +8174,7 @@ "DeleteGroupResponse$Status": "

The HTTP status of the request.

", "DeleteIAMPolicyAssignmentResponse$Status": "

The HTTP status of the request.

", "DeleteNamespaceResponse$Status": "

The HTTP status of the request.

", + "DeleteRefreshScheduleResponse$Status": "

The HTTP status of the request.

", "DeleteTemplateAliasResponse$Status": "

The HTTP status of the request.

", "DeleteTemplateResponse$Status": "

The HTTP status of the request.

", "DeleteThemeAliasResponse$Status": "

The HTTP status of the request.

", @@ -7993,6 +8191,7 @@ "DescribeDashboardPermissionsResponse$Status": "

The HTTP status of the request.

", "DescribeDashboardResponse$Status": "

The HTTP status of this request.

", "DescribeDataSetPermissionsResponse$Status": "

The HTTP status of the request.

", + "DescribeDataSetRefreshPropertiesResponse$Status": "

The HTTP status of the request.

", "DescribeDataSetResponse$Status": "

The HTTP status of the request.

", "DescribeDataSourcePermissionsResponse$Status": "

The HTTP status of the request.

", "DescribeDataSourceResponse$Status": "

The HTTP status of the request.

", @@ -8005,6 +8204,7 @@ "DescribeIngestionResponse$Status": "

The HTTP status of the request.

", "DescribeIpRestrictionResponse$Status": "

The HTTP status of the request.

", "DescribeNamespaceResponse$Status": "

The HTTP status of the request.

", + "DescribeRefreshScheduleResponse$Status": "

The HTTP status of the request.

", "DescribeTemplateAliasResponse$Status": "

The HTTP status of the request.

", "DescribeTemplateDefinitionResponse$Status": "

The HTTP status of the request.

", "DescribeTemplatePermissionsResponse$Status": "

The HTTP status of the request.

", @@ -8030,6 +8230,7 @@ "ListIAMPolicyAssignmentsResponse$Status": "

The HTTP status of the request.

", "ListIngestionsResponse$Status": "

The HTTP status of the request.

", "ListNamespacesResponse$Status": "

The HTTP status of the request.

", + "ListRefreshSchedulesResponse$Status": "

The HTTP status of the request.

", "ListTagsForResourceResponse$Status": "

The HTTP status of the request.

", "ListTemplateAliasesResponse$Status": "

The HTTP status of the request.

", "ListTemplateVersionsResponse$Status": "

The HTTP status of the request.

", @@ -8039,6 +8240,7 @@ "ListThemesResponse$Status": "

The HTTP status of the request.

", "ListUserGroupsResponse$Status": "

The HTTP status of the request.

", "ListUsersResponse$Status": "

The HTTP status of the request.

", + "PutDataSetRefreshPropertiesResponse$Status": "

The HTTP status of the request.

", "RegisterUserResponse$Status": "

The HTTP status of the request.

", "RestoreAnalysisResponse$Status": "

The HTTP status of the request.

", "SearchAnalysesResponse$Status": "

The HTTP status of the request.

", @@ -8066,6 +8268,7 @@ "UpdateIAMPolicyAssignmentResponse$Status": "

The HTTP status of the request.

", "UpdateIpRestrictionResponse$Status": "

The HTTP status of the request.

", "UpdatePublicSharingSettingsResponse$Status": "

The HTTP status of the request.

", + "UpdateRefreshScheduleResponse$Status": "

The HTTP status of the request.

", "UpdateTemplateAliasResponse$Status": "

The HTTP status of the request.

", "UpdateTemplatePermissionsResponse$Status": "

The HTTP status of the request.

", "UpdateTemplateResponse$Status": "

The HTTP status of the request.

", @@ -8128,6 +8331,8 @@ "CreateIngestionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "CreateNamespaceResponse$CapacityRegion": "

The Amazon Web Services Region that you want to use for the free SPICE capacity for the new namespace. This is set to the region that you run CreateNamespace in.

", "CreateNamespaceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "CreateRefreshScheduleResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "CreateRefreshScheduleResponse$ScheduleId": "

The ID of the refresh schedule.

", "CreateTemplateAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "CreateTemplateResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "CreateThemeAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8141,6 +8346,7 @@ "DeleteAccountSubscriptionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteAnalysisResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteDashboardResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DeleteDataSetRefreshPropertiesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteDataSetResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteDataSourceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteFolderMembershipResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8149,6 +8355,9 @@ "DeleteGroupResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteIAMPolicyAssignmentResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteNamespaceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DeleteRefreshScheduleRequest$ScheduleId": "

The ID of the refresh schedule.

", + "DeleteRefreshScheduleResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DeleteRefreshScheduleResponse$ScheduleId": "

The ID of the refresh schedule.

", "DeleteTemplateAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteTemplateResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DeleteThemeAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8166,6 +8375,7 @@ "DescribeDashboardPermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDashboardResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSetPermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DescribeDataSetRefreshPropertiesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSetResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSourcePermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSourceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8179,6 +8389,8 @@ "DescribeIngestionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeIpRestrictionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeNamespaceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DescribeRefreshScheduleRequest$ScheduleId": "

The ID of the refresh schedule.

", + "DescribeRefreshScheduleResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeTemplateAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeTemplateDefinitionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeTemplatePermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8259,6 +8471,7 @@ "ListNamespacesRequest$NextToken": "

A unique pagination token that can be used in a subsequent request. You will receive a pagination token in the response body of a previous ListNameSpaces API call if there is more data that can be returned. To receive the data, make another ListNamespaces API call with the returned token to retrieve the next page of data. Each token is valid for 24 hours. If you try to make a ListNamespaces API call with an expired token, you will receive a HTTP 400 InvalidNextTokenException error.

", "ListNamespacesResponse$NextToken": "

A unique pagination token that can be used in a subsequent request. Receiving NextToken in your response indicates that there is more data that can be returned. To receive the data, make another ListNamespaces API call with the returned token to retrieve the next page of data. Each token is valid for 24 hours. If you try to make a ListNamespaces API call with an expired token, you will receive a HTTP 400 InvalidNextTokenException error.

", "ListNamespacesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "ListRefreshSchedulesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "ListTagsForResourceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "ListTemplateAliasesRequest$NextToken": "

The token for the next set of results, or null if there are no more results.

", "ListTemplateAliasesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -8284,6 +8497,7 @@ "ListUsersRequest$NextToken": "

A pagination token that can be used in a subsequent request.

", "ListUsersResponse$NextToken": "

A pagination token that can be used in a subsequent request.

", "ListUsersResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "LookbackWindow$ColumnName": "

The name of the lookback window column.

", "MaximumMinimumComputation$Name": "

The name of a computation.

", "MetricComparisonComputation$Name": "

The name of a computation.

", "NamespaceError$Message": "

The message for the error.

", @@ -8296,10 +8510,14 @@ "PreconditionNotMetException$RequestId": "

The Amazon Web Services request ID for this request.

", "PrincipalList$member": null, "ProjectedColumnList$member": null, + "PutDataSetRefreshPropertiesResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "QueueInfo$WaitingOnIngestion": "

The ID of the queued ingestion.

", "QueueInfo$QueuedIngestion": "

The ID of the ongoing ingestion. The queued ingestion is waiting for the ongoing ingestion to complete.

", "QuickSightUserNotFoundException$Message": null, "QuickSightUserNotFoundException$RequestId": "

The Amazon Web Services request ID for this request.

", + "RefreshFrequency$Timezone": "

The timezone that you want the refresh schedule to use. The timezone ID must match a corresponding ID found on java.util.time.getAvailableIDs().

", + "RefreshFrequency$TimeOfTheDay": "

The time of day that you want the dataset to refresh. This value is expressed in HH:MM format. This field is not required for schedules that refresh hourly.

", + "RefreshSchedule$ScheduleId": "

An identifier for the refresh schedule.

", "RegisterUserRequest$Email": "

The email address of the user that you want to register.

", "RegisterUserRequest$IamArn": "

The ARN of the IAM user or role that you are registering with Amazon QuickSight.

", "RegisterUserRequest$ExternalLoginFederationProviderType": "

The type of supported external login provider that provides identity to let a user federate into Amazon QuickSight with an associated Identity and Access Management(IAM) role. The type of supported external login provider can be one of the following.

", @@ -8373,6 +8591,8 @@ "UpdateIAMPolicyAssignmentResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "UpdateIpRestrictionResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "UpdatePublicSharingSettingsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "UpdateRefreshScheduleResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "UpdateRefreshScheduleResponse$ScheduleId": "

The ID of the refresh schedule.

", "UpdateTemplateAliasResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "UpdateTemplatePermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "UpdateTemplateResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -9116,6 +9336,7 @@ "FolderSummary$CreatedTime": "

The time that the folder was created.

", "FolderSummary$LastUpdatedTime": "

The time that the folder was last updated.

", "Ingestion$CreatedTime": "

The time that this ingestion started.

", + "RefreshSchedule$StartAfterDateTime": "

Time after which the refresh schedule can be started, expressed in YYYY-MM-DDTHH:MM:SS format.

", "Template$LastUpdatedTime": "

Time when this was last updated.

", "Template$CreatedTime": "

Time when this was created.

", "TemplateSummary$CreatedTime": "

The last time that this template was created.

", @@ -9560,6 +9781,16 @@ "refs": { } }, + "UpdateRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "UpdateRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "UpdateResourcePermissionList": { "base": null, "refs": { diff --git a/models/apis/quicksight/2018-04-01/endpoint-tests-1.json b/models/apis/quicksight/2018-04-01/endpoint-tests-1.json index 1a754c4abba..726ca1da133 100644 --- a/models/apis/quicksight/2018-04-01/endpoint-tests-1.json +++ b/models/apis/quicksight/2018-04-01/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "api", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, 
- "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "api", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + 
"UseFIPS": false, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -347,8 +347,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -360,8 +371,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -373,8 +395,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -386,8 +419,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + 
"UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +443,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +456,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -425,8 +469,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -437,8 +481,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -449,10 +493,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/redshift-data/2019-12-20/docs-2.json b/models/apis/redshift-data/2019-12-20/docs-2.json index 0d87e3173c7..288ef238ecf 100644 --- a/models/apis/redshift-data/2019-12-20/docs-2.json +++ b/models/apis/redshift-data/2019-12-20/docs-2.json @@ -2,16 +2,16 @@ "version": "2.0", "service": "

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "operations": { - "BatchExecuteStatement": "

Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "BatchExecuteStatement": "

Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "CancelStatement": "

Cancels a running query. To be canceled, a query must be running.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "DescribeStatement": "

Describes the details about a specific instance when a query was run by the Amazon Redshift Data API. The information includes when the query started, when it finished, the query status, the number of rows returned, and the SQL statement.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", - "DescribeTable": "

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", - "ExecuteStatement": "

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "DescribeTable": "

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ExecuteStatement": "

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "GetStatementResult": "

Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", - "ListDatabases": "

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", - "ListSchemas": "

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListDatabases": "

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListSchemas": "

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "ListStatements": "

List of SQL statements. By default, only finished statements are shown. A token is returned to page through the statement list.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", - "ListTables": "

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

" + "ListTables": "

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

" }, "shapes": { "ActiveStatementsExceededException": { @@ -408,7 +408,7 @@ "BatchExecuteStatementException$Message": null, "BatchExecuteStatementException$StatementId": "

Statement identifier of the exception.

", "BatchExecuteStatementInput$Database": "

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

", - "BatchExecuteStatementInput$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "BatchExecuteStatementInput$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "BatchExecuteStatementOutput$Database": "

The name of the database.

", "BatchExecuteStatementOutput$DbUser": "

The database user name.

", "ColumnMetadata$columnDefault": "

The default value of the column.

", @@ -425,7 +425,7 @@ "DescribeStatementResponse$Error": "

The error message from the cluster if the SQL statement encountered an error while running.

", "DescribeTableRequest$ConnectedDatabase": "

A database name. The connected database is specified when you connect with your authentication credentials.

", "DescribeTableRequest$Database": "

The name of the database that contains the tables to be described. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.

", - "DescribeTableRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "DescribeTableRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "DescribeTableRequest$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "DescribeTableRequest$Schema": "

The schema that contains the table. If no schema is specified, then matching tables for all schemas are returned.

", "DescribeTableRequest$Table": "

The table name. If no table is specified, then all tables for all matching schemas are returned. If no table and no schema is specified, then all tables for all schemas in the database are returned

", @@ -434,7 +434,7 @@ "ExecuteStatementException$Message": "

The exception message.

", "ExecuteStatementException$StatementId": "

Statement identifier of the exception.

", "ExecuteStatementInput$Database": "

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

", - "ExecuteStatementInput$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "ExecuteStatementInput$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "ExecuteStatementOutput$Database": "

The name of the database.

", "ExecuteStatementOutput$DbUser": "

The database user name.

", "Field$stringValue": "

A value of the string data type.

", @@ -442,12 +442,12 @@ "GetStatementResultResponse$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "InternalServerException$Message": "

The exception message.

", "ListDatabasesRequest$Database": "

The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

", - "ListDatabasesRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "ListDatabasesRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "ListDatabasesRequest$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "ListDatabasesResponse$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "ListSchemasRequest$ConnectedDatabase": "

A database name. The connected database is specified when you connect with your authentication credentials.

", "ListSchemasRequest$Database": "

The name of the database that contains the schemas to list. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.

", - "ListSchemasRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "ListSchemasRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "ListSchemasRequest$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "ListSchemasRequest$SchemaPattern": "

A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned.

", "ListSchemasResponse$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", @@ -455,7 +455,7 @@ "ListStatementsResponse$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "ListTablesRequest$ConnectedDatabase": "

A database name. The connected database is specified when you connect with your authentication credentials.

", "ListTablesRequest$Database": "

The name of the database that contains the tables to list. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.

", - "ListTablesRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

", + "ListTablesRequest$DbUser": "

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

", "ListTablesRequest$NextToken": "

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

", "ListTablesRequest$SchemaPattern": "

A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned. If SchemaPattern is not specified, then all tables that match TablePattern are returned. If neither SchemaPattern nor TablePattern are specified, then all tables are returned.

", "ListTablesRequest$TablePattern": "

A pattern to filter results by table name. Within a table pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only table name entries matching the search pattern are returned. If TablePattern is not specified, then all tables that match SchemaPatternare returned. If neither SchemaPattern or TablePattern are specified, then all tables are returned.

", diff --git a/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json b/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json index 58c14859d3e..3f2eaaca3aa 100644 --- a/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json +++ b/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -152,8 +152,19 @@ }, "params": { 
"Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -165,8 +176,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -178,8 +200,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -191,8 +224,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -204,8 +248,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +261,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": 
false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -230,8 +274,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -242,8 +286,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -254,10 +298,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/servicecatalog/2015-12-10/docs-2.json b/models/apis/servicecatalog/2015-12-10/docs-2.json index fa40b05b461..87a743adae0 100644 --- a/models/apis/servicecatalog/2015-12-10/docs-2.json +++ b/models/apis/servicecatalog/2015-12-10/docs-2.json @@ -2415,7 +2415,7 @@ "base": null, "refs": { "ProvisioningArtifactDetail$Type": "

The type of provisioning artifact.

", - "ProvisioningArtifactProperties$Type": "

The type of provisioning artifact.

" + "ProvisioningArtifactProperties$Type": "

The type of provisioning artifact.

" } }, "ProvisioningArtifactView": { diff --git a/service/docdb/api.go b/service/docdb/api.go index 81c23375d2a..b4fbe39075c 100644 --- a/service/docdb/api.go +++ b/service/docdb/api.go @@ -12444,8 +12444,8 @@ type GlobalCluster struct { GlobalClusterMembers []*GlobalClusterMember `locationNameList:"GlobalClusterMember" type:"list"` // The Amazon Web Services Region-unique, immutable identifier for the global - // database cluster. This identifier is found in AWS CloudTrail log entries - // whenever the AWS KMS customer master key (CMK) for the cluster is accessed. + // database cluster. This identifier is found in CloudTrail log entries whenever + // the KMS customer master key (CMK) for the cluster is accessed. GlobalClusterResourceId *string `type:"string"` // Specifies the current state of this global cluster. @@ -14781,6 +14781,17 @@ type RestoreDBClusterFromSnapshotInput struct { // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` + // The name of the DB cluster parameter group to associate with this DB cluster. + // + // Type: String. Required: No. + // + // If this argument is omitted, the default DB cluster parameter group is used. + // If supplied, must match the name of an existing default DB cluster parameter + // group. The string must consist of from 1 to 255 letters, numbers or hyphens. + // Its first character must be a letter, and it cannot end with a hyphen or + // contain two consecutive hyphens. + DBClusterParameterGroupName *string `type:"string"` + // The name of the subnet group to use for the new cluster. // // Constraints: If provided, must match the name of an existing DBSubnetGroup. @@ -14905,6 +14916,12 @@ func (s *RestoreDBClusterFromSnapshotInput) SetDBClusterIdentifier(v string) *Re return s } +// SetDBClusterParameterGroupName sets the DBClusterParameterGroupName field's value. 
+func (s *RestoreDBClusterFromSnapshotInput) SetDBClusterParameterGroupName(v string) *RestoreDBClusterFromSnapshotInput { + s.DBClusterParameterGroupName = &v + return s +} + // SetDBSubnetGroupName sets the DBSubnetGroupName field's value. func (s *RestoreDBClusterFromSnapshotInput) SetDBSubnetGroupName(v string) *RestoreDBClusterFromSnapshotInput { s.DBSubnetGroupName = &v @@ -15088,6 +15105,9 @@ type RestoreDBClusterToPointInTimeInput struct { // * copy-on-write - The new DB cluster is restored as a clone of the source // DB cluster. // + // Constraints: You can't specify copy-on-write if the engine version of the + // source DB cluster is earlier than 1.11. + // // If you don't specify a RestoreType value, then the new DB cluster is restored // as a full copy of the source DB cluster. RestoreType *string `type:"string"` diff --git a/service/fsx/api.go b/service/fsx/api.go index 278da89ff02..067ab6926af 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -552,7 +552,7 @@ func (c *FSx) CreateDataRepositoryAssociationRequest(input *CreateDataRepository // repository association is a link between a directory on the file system and // an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository // associations on a file system. Data repository associations are supported -// only for file systems with the Persistent_2 deployment type. +// for all file systems except for Scratch_1 deployment type. // // Each data repository association must have a unique Amazon FSx file system // directory and a unique S3 bucket or prefix associated with it. You can configure @@ -1705,8 +1705,8 @@ func (c *FSx) DeleteDataRepositoryAssociationRequest(input *DeleteDataRepository // Deleting the data repository association unlinks the file system from the // Amazon S3 bucket. When deleting a data repository association, you have the // option of deleting the data in the file system that corresponds to the data -// repository association. 
Data repository associations are supported only for -// file systems with the Persistent_2 deployment type. +// repository association. Data repository associations are supported for all +// file systems except for Scratch_1 deployment type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2475,8 +2475,8 @@ func (c *FSx) DescribeDataRepositoryAssociationsRequest(input *DescribeDataRepos // Returns the description of specific Amazon FSx for Lustre or Amazon File // Cache data repository associations, if one or more AssociationIds values // are provided in the request, or if filters are used in the request. Data -// repository associations are supported only for Amazon FSx for Lustre file -// systems with the Persistent_2 deployment type and for Amazon File Cache resources. +// repository associations are supported on Amazon File Cache resources and +// all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types. // // You can use filters to narrow the response to include just data repository // associations for specific file systems (use the file-system-id filter with @@ -4349,7 +4349,7 @@ func (c *FSx) UpdateDataRepositoryAssociationRequest(input *UpdateDataRepository // // Updates the configuration of an existing data repository association on an // Amazon FSx for Lustre file system. Data repository associations are supported -// only for file systems with the Persistent_2 deployment type. +// for all file systems except for Scratch_1 deployment type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4547,7 +4547,7 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // Use this operation to update the configuration of an existing Amazon FSx // file system. 
You can update multiple properties in a single request. // -// For Amazon FSx for Windows File Server file systems, you can update the following +// For FSx for Windows File Server file systems, you can update the following // properties: // // - AuditLogConfiguration @@ -4564,7 +4564,7 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // - WeeklyMaintenanceStartTime // -// For Amazon FSx for Lustre file systems, you can update the following properties: +// For FSx for Lustre file systems, you can update the following properties: // // - AutoImportPolicy // @@ -4580,8 +4580,9 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // - WeeklyMaintenanceStartTime // -// For Amazon FSx for NetApp ONTAP file systems, you can update the following -// properties: +// For FSx for ONTAP file systems, you can update the following properties: +// +// - AddRouteTableIds // // - AutomaticBackupRetentionDays // @@ -4591,14 +4592,15 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // - FsxAdminPassword // +// - RemoveRouteTableIds +// // - StorageCapacity // // - ThroughputCapacity // // - WeeklyMaintenanceStartTime // -// For the Amazon FSx for OpenZFS file systems, you can update the following -// properties: +// For FSx for OpenZFS file systems, you can update the following properties: // // - AutomaticBackupRetentionDays // @@ -4608,6 +4610,10 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // - DailyAutomaticBackupStartTime // +// - DiskIopsConfiguration +// +// - StorageCapacity +// // - ThroughputCapacity // // - WeeklyMaintenanceStartTime @@ -5352,7 +5358,7 @@ type AssociateFileSystemAliasesInput struct { Aliases []*string `type:"list" required:"true"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. 
This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -5464,8 +5470,8 @@ func (s *AssociateFileSystemAliasesOutput) SetAliases(v []*Alias) *AssociateFile // Amazon FSx for Lustre automatically exports the defined changes asynchronously // once your application finishes modifying the file. // -// This AutoExportPolicy is supported only for Amazon FSx for Lustre file systems -// with the Persistent_2 deployment type. +// The AutoExportPolicy is only supported on Amazon FSx for Lustre file systems +// with a data repository association. type AutoExportPolicy struct { _ struct{} `type:"structure"` @@ -5513,8 +5519,8 @@ func (s *AutoExportPolicy) SetEvents(v []*string) *AutoExportPolicy { // listings up to date by importing changes to your Amazon FSx for Lustre file // system as you modify objects in a linked S3 bucket. // -// The AutoImportPolicy is supported only for Amazon FSx for Lustre file systems -// with the Persistent_2 deployment type. +// The AutoImportPolicy is only supported on Amazon FSx for Lustre file systems +// with a data repository association. type AutoImportPolicy struct { _ struct{} `type:"structure"` @@ -6253,8 +6259,7 @@ type CompletionReport struct { // Path you provide must be located within the file system’s ExportPath. An // example Path value is "s3://myBucket/myExportPath/optionalPrefix". The report // provides the following information for each file in the report: FilePath, - // FileStatus, and ErrorCode. To learn more about a file system's ExportPath, - // see . + // FileStatus, and ErrorCode. Path *string `min:"3" type:"string"` // Required if Enabled is set to true. 
Specifies the scope of the CompletionReport; @@ -6326,7 +6331,7 @@ type CopyBackupInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -6505,7 +6510,7 @@ func (s *CopyBackupOutput) SetBackup(v *Backup) *CopyBackupOutput { type CreateBackupInput struct { _ struct{} `type:"structure"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to + // (Optional) A string of up to 63 ASCII characters that Amazon FSx uses to // ensure idempotent creation. This string is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -6639,7 +6644,7 @@ type CreateDataRepositoryAssociationInput struct { BatchImportMetaDataOnCreate *bool `type:"boolean"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -6842,7 +6847,7 @@ type CreateDataRepositoryTaskInput struct { CapacityToRelease *int64 `min:"1" type:"long"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. 
This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -7024,7 +7029,7 @@ func (s *CreateDataRepositoryTaskOutput) SetDataRepositoryTask(v *DataRepository type CreateFileCacheInput struct { _ struct{} `type:"structure"` - // An idempotency token for resource creation, in a string of up to 64 ASCII + // An idempotency token for resource creation, in a string of up to 63 ASCII // characters. This token is automatically filled on your behalf when you use // the Command Line Interface (CLI) or an Amazon Web Services SDK. // @@ -7386,7 +7391,7 @@ type CreateFileSystemFromBackupInput struct { // BackupId is a required field BackupId *string `min:"12" type:"string" required:"true"` - // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent // creation. This string is automatically filled on your behalf when you use // the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -7420,9 +7425,8 @@ type CreateFileSystemFromBackupInput struct { // The Lustre configuration for the file system being created. // - // The following parameters are not supported for file systems with the Persistent_2 - // deployment type. Instead, use CreateDataRepositoryAssociation to create a - // data repository association to link your Lustre file system to a data repository. + // The following parameters are not supported for file systems with a data repository + // association created with . 
// // * AutoImportPolicy // @@ -7674,7 +7678,7 @@ func (s *CreateFileSystemFromBackupOutput) SetFileSystem(v *FileSystem) *CreateF type CreateFileSystemInput struct { _ struct{} `type:"structure"` - // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent // creation. This string is automatically filled on your behalf when you use // the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -7721,9 +7725,8 @@ type CreateFileSystemInput struct { // The Lustre configuration for the file system being created. // - // The following parameters are not supported for file systems with the Persistent_2 - // deployment type. Instead, use CreateDataRepositoryAssociation to create a - // data repository association to link your Lustre file system to a data repository. + // The following parameters are not supported for file systems with a data repository + // association created with . // // * AutoImportPolicy // @@ -7982,9 +7985,8 @@ func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindo // The Lustre configuration for the file system being created. // -// The following parameters are not supported for file systems with the Persistent_2 -// deployment type. Instead, use CreateDataRepositoryAssociation to create a -// data repository association to link your Lustre file system to a data repository. +// The following parameters are not supported for file systems with a data repository +// association created with . // // - AutoImportPolicy // @@ -7996,11 +7998,10 @@ func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindo type CreateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // (Optional) Available with Scratch and Persistent_1 deployment types. 
When - // you create your file system, your existing S3 objects appear as file and - // directory listings. Use this property to choose how Amazon FSx keeps your - // file and directory listings up to date as you add or modify objects in your - // linked S3 bucket. AutoImportPolicy can have the following values: + // (Optional) When you create your file system, your existing S3 objects appear + // as file and directory listings. Use this parameter to choose how Amazon FSx + // keeps your file and directory listings up to date as you add or modify objects + // in your linked S3 bucket. AutoImportPolicy can have the following values: // // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and // directory listings from the linked S3 bucket when the file system is created. @@ -8024,9 +8025,7 @@ type CreateFileSystemLustreConfiguration struct { // For more information, see Automatically import updates from your S3 bucket // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/older-deployment-types.html#legacy-auto-import-from-s3). // - // This parameter is not supported for file systems with the Persistent_2 deployment - // type. Instead, use CreateDataRepositoryAssociation to create a data repository - // association to link your Lustre file system to a data repository. + // This parameter is not supported for file systems with a data repository association. AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` // The number of days to retain automatic backups. Setting this property to @@ -8091,10 +8090,9 @@ type CreateFileSystemLustreConfiguration struct { // // Encryption of data in transit is automatically turned on when you access // SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances - // that support automatic encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/data- - // protection.html) in the Amazon Web Services Regions where they are available. 
- // For more information about encryption in transit for FSx for Lustre file - // systems, see Encrypting data in transit (https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html) + // that support automatic encryption in the Amazon Web Services Regions where + // they are available. For more information about encryption in transit for + // FSx for Lustre file systems, see Encrypting data in transit (https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html) // in the Amazon FSx for Lustre User Guide. // // (Default = SCRATCH_1) @@ -8109,13 +8107,13 @@ type CreateFileSystemLustreConfiguration struct { // This parameter is required when StorageType is set to HDD. DriveCacheType *string `type:"string" enum:"DriveCacheType"` - // (Optional) Available with Scratch and Persistent_1 deployment types. Specifies - // the path in the Amazon S3 bucket where the root of your Amazon FSx file system - // is exported. The path must use the same Amazon S3 bucket as specified in - // ImportPath. You can provide an optional prefix to which new and changed data - // is to be exported from your Amazon FSx for Lustre file system. If an ExportPath - // value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. - // The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. + // (Optional) Specifies the path in the Amazon S3 bucket where the root of your + // Amazon FSx file system is exported. The path must use the same Amazon S3 + // bucket as specified in ImportPath. You can provide an optional prefix to + // which new and changed data is to be exported from your Amazon FSx for Lustre + // file system. If an ExportPath value is not provided, Amazon FSx sets a default + // export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp + // is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. 
// // The Amazon S3 export bucket must be the same as the import bucket specified // by ImportPath. If you specify only a bucket name, such as s3://import-bucket, @@ -8125,9 +8123,7 @@ type CreateFileSystemLustreConfiguration struct { // Amazon FSx exports the contents of your file system to that export prefix // in the Amazon S3 bucket. // - // This parameter is not supported for file systems with the Persistent_2 deployment - // type. Instead, use CreateDataRepositoryAssociation to create a data repository - // association to link your Lustre file system to a data repository. + // This parameter is not supported for file systems with a data repository association. ExportPath *string `min:"3" type:"string"` // (Optional) The path to the Amazon S3 bucket (including the optional prefix) @@ -8137,9 +8133,7 @@ type CreateFileSystemLustreConfiguration struct { // If you specify a prefix after the Amazon S3 bucket name, only object keys // with that prefix are loaded into the file system. // - // This parameter is not supported for file systems with the Persistent_2 deployment - // type. Instead, use CreateDataRepositoryAssociation to create a data repository - // association to link your Lustre file system to a data repository. + // This parameter is not supported for file systems with a data repository association. ImportPath *string `min:"3" type:"string"` // (Optional) For files imported from a data repository, this value determines @@ -8151,9 +8145,7 @@ type CreateFileSystemLustreConfiguration struct { // The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 // MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. // - // This parameter is not supported for file systems with the Persistent_2 deployment - // type. Instead, use CreateDataRepositoryAssociation to create a data repository - // association to link your Lustre file system to a data repository. 
+ // This parameter is not supported for file systems with a data repository association. ImportedFileChunkSize *int64 `min:"1" type:"integer"` // The Lustre logging configuration used when creating an Amazon FSx for Lustre @@ -8368,7 +8360,8 @@ type CreateFileSystemOntapConfiguration struct { // By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses // from the VPC’s primary CIDR range to use as the endpoint IP address range // for the file system. You can have overlapping endpoint IP addresses for file - // systems deployed in the same VPC/route tables. + // systems deployed in the same VPC/route tables, as long as they don't overlap + // with any subnet. EndpointIpAddressRange *string `min:"9" type:"string"` // The ONTAP administrative password for the fsxadmin user with which you administer @@ -8989,7 +8982,8 @@ type CreateOntapVolumeConfiguration struct { CopyTagsToBackups *bool `type:"boolean"` // Specifies the location in the SVM's namespace where the volume is mounted. - // The JunctionPath must have a leading forward slash, such as /vol3. + // This parameter is required. The JunctionPath must have a leading forward + // slash, such as /vol3. JunctionPath *string `min:"1" type:"string"` // Specifies the type of volume you are creating. Valid values are the following: @@ -9024,6 +9018,8 @@ type CreateOntapVolumeConfiguration struct { SecurityStyle *string `type:"string" enum:"SecurityStyle"` // Specifies the size of the volume, in megabytes (MB), that you are creating. + // Provide any whole number in the range of 20–104857600 to specify the size + // of the volume. // // SizeInMegabytes is a required field SizeInMegabytes *int64 `type:"integer" required:"true"` @@ -9050,7 +9046,8 @@ type CreateOntapVolumeConfiguration struct { SnapshotPolicy *string `min:"1" type:"string"` // Set to true to enable deduplication, compression, and compaction storage - // efficiency features on the volume. 
+ // efficiency features on the volume, or set to false to disable them. This + // parameter is required. StorageEfficiencyEnabled *bool `type:"boolean"` // Specifies the ONTAP SVM in which to create the volume. @@ -9472,7 +9469,7 @@ type CreateSnapshotInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -9611,7 +9608,7 @@ type CreateStorageVirtualMachineInput struct { ActiveDirectoryConfiguration *CreateSvmActiveDirectoryConfiguration `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -9871,7 +9868,7 @@ type CreateVolumeFromBackupInput struct { BackupId *string `min:"12" type:"string" required:"true"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -10014,7 +10011,7 @@ type CreateVolumeInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. 
This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -10181,9 +10178,9 @@ func (s *CreateVolumeOutput) SetVolume(v *Volume) *CreateVolumeOutput { // // - DescribeDataRepositoryAssociations // -// Data repository associations are supported only for an Amazon FSx for Lustre -// file system with the Persistent_2 deployment type and for an Amazon File -// Cache resource. +// Data repository associations are supported on Amazon File Cache resources +// and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment +// types. type DataRepositoryAssociation struct { _ struct{} `type:"structure"` @@ -10510,8 +10507,8 @@ func (s *DataRepositoryAssociationNotFound) RequestID() string { // The data repository configuration object for Lustre file systems returned // in the response of the CreateFileSystem operation. // -// This data type is not supported for file systems with the Persistent_2 deployment -// type. Instead, use . +// This data type is not supported on file systems with a data repository association. +// For file systems with a data repository association, see . type DataRepositoryConfiguration struct { _ struct{} `type:"structure"` @@ -10778,6 +10775,8 @@ type DataRepositoryTask struct { // * AUTO_RELEASE_DATA tasks automatically release files from an Amazon File // Cache resource. // + // * RELEASE_DATA_FROM_FILESYSTEM tasks are not supported. 
+ // // Type is a required field Type *string `type:"string" required:"true" enum:"DataRepositoryTaskType"` } @@ -11255,7 +11254,7 @@ type DeleteBackupInput struct { // BackupId is a required field BackupId *string `min:"12" type:"string" required:"true"` - // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent // deletion. This parameter is automatically filled on your behalf when using // the CLI or SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -11361,7 +11360,7 @@ type DeleteDataRepositoryAssociationInput struct { AssociationId *string `min:"13" type:"string" required:"true"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -11479,7 +11478,7 @@ type DeleteFileCacheInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -11583,7 +11582,7 @@ func (s *DeleteFileCacheOutput) SetLifecycle(v string) *DeleteFileCacheOutput { type DeleteFileSystemInput struct { _ struct{} `type:"structure"` - // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent // deletion. 
This token is automatically filled on your behalf when using the // Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -12112,7 +12111,7 @@ type DeleteSnapshotInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -12216,7 +12215,7 @@ type DeleteStorageVirtualMachineInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -12319,7 +12318,7 @@ type DeleteVolumeInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -13075,7 +13074,7 @@ type DescribeFileSystemAliasesInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. 
This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -13691,7 +13690,7 @@ type DisassociateFileSystemAliasesInput struct { Aliases []*string `type:"list" required:"true"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -14764,8 +14763,8 @@ type FileSystem struct { OpenZFSConfiguration *OpenZFSFileSystemConfiguration `type:"structure"` // The Amazon Web Services account that created the file system. If the file - // system was created by an Identity and Access Management (IAM) user, the Amazon - // Web Services account to which the IAM user belongs is the owner. + // system was created by a user in IAM Identity Center, the Amazon Web Services + // account to which the IAM user belongs is the owner. OwnerId *string `min:"12" type:"string"` // The Amazon Resource Name (ARN) of the file system resource. @@ -16099,8 +16098,8 @@ type LustreFileSystemConfiguration struct { // The data repository configuration object for Lustre file systems returned // in the response of the CreateFileSystem operation. // - // This data type is not supported for file systems with the Persistent_2 deployment - // type. Instead, use . + // This data type is not supported on file systems with a data repository association. + // For file systems with a data repository association, see . DataRepositoryConfiguration *DataRepositoryConfiguration `type:"structure"` // The deployment type of the FSx for Lustre file system. 
Scratch deployment @@ -16843,13 +16842,13 @@ type OntapFileSystemConfiguration struct { // of provisioned IOPS and the provision mode. DiskIopsConfiguration *DiskIopsConfiguration `type:"structure"` - // (Multi-AZ only) The IP address range in which the endpoints to access your - // file system are created. - // - // The Endpoint IP address range you select for your file system must exist - // outside the VPC's CIDR range and must be at least /30 or larger. If you do - // not specify this optional parameter, Amazon FSx will automatically select - // a CIDR block for you. + // (Multi-AZ only) Specifies the IP address range in which the endpoints to + // access your file system will be created. By default in the Amazon FSx API, + // Amazon FSx selects an unused IP address range for you from the 198.19.* range. + // By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses + // from the VPC’s primary CIDR range to use as the endpoint IP address range + // for the file system. You can have overlapping endpoint IP addresses for file + // systems deployed in the same VPC/route tables. EndpointIpAddressRange *string `min:"9" type:"string"` // The Management and Intercluster endpoints that are used to access data or @@ -17861,7 +17860,7 @@ type ReleaseFileSystemNfsV3LocksInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -18095,7 +18094,7 @@ type RestoreVolumeFromSnapshotInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. 
This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -19810,7 +19809,7 @@ type UpdateDataRepositoryAssociationInput struct { AssociationId *string `min:"13" type:"string" required:"true"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -19930,7 +19929,7 @@ type UpdateFileCacheInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -20092,7 +20091,7 @@ func (s *UpdateFileCacheOutput) SetFileCache(v *FileCache) *UpdateFileCacheOutpu type UpdateFileSystemInput struct { _ struct{} `type:"structure"` - // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent // updates. This string is automatically filled on your behalf when you use // the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -20112,20 +20111,14 @@ type UpdateFileSystemInput struct { // The configuration updates for an Amazon FSx for OpenZFS file system. 
OpenZFSConfiguration *UpdateFileSystemOpenZFSConfiguration `type:"structure"` - // Use this parameter to increase the storage capacity of an Amazon FSx for - // Windows File Server, Amazon FSx for Lustre, or Amazon FSx for NetApp ONTAP - // file system. Specifies the storage capacity target value, in GiB, to increase - // the storage capacity for the file system that you're updating. + // Use this parameter to increase the storage capacity of an FSx for Windows + // File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. + // Specifies the storage capacity target value, in GiB, to increase the storage + // capacity for the file system that you're updating. // // You can't make a storage capacity increase request if there is an existing // storage capacity increase request in progress. // - // For Windows file systems, the storage capacity target value must be at least - // 10 percent greater than the current storage capacity value. To increase storage - // capacity, the file system must have at least 16 MBps of throughput capacity. - // For more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) - // in the Amazon FSx for Windows File Server User Guide. - // // For Lustre file systems, the storage capacity target value can be the following: // // * For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, @@ -20140,7 +20133,18 @@ type UpdateFileSystemInput struct { // * For SCRATCH_1 file systems, you can't increase the storage capacity. // // For more information, see Managing storage and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) - // in the Amazon FSx for Lustre User Guide. + // in the FSx for Lustre User Guide. + // + // For FSx for OpenZFS file systems, the storage capacity target value must + // be at least 10 percent greater than the current storage capacity value. 
For + // more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-storage-capacity.html) + // in the FSx for OpenZFS User Guide. + // + // For Windows file systems, the storage capacity target value must be at least + // 10 percent greater than the current storage capacity value. To increase storage + // capacity, the file system must have at least 16 MBps of throughput capacity. + // For more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide. // // For ONTAP file systems, the storage capacity target value must be at least // 10 percent greater than the current storage capacity value. For more information, @@ -20281,9 +20285,7 @@ type UpdateFileSystemLustreConfiguration struct { // any existing objects that are changed in the S3 bucket, and any objects // that were deleted in the S3 bucket. // - // The AutoImportPolicy parameter is not supported for Lustre file systems with - // the Persistent_2 deployment type. Instead, use to update a data repository - // association on your Persistent_2 file system. + // This parameter is not supported for file systems with a data repository association. AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` // The number of days to retain automatic backups. Setting this property to @@ -21138,7 +21140,7 @@ type UpdateSnapshotInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. 
ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -21254,7 +21256,7 @@ type UpdateStorageVirtualMachineInput struct { ActiveDirectoryConfiguration *UpdateSvmActiveDirectoryConfiguration `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -21425,7 +21427,7 @@ type UpdateVolumeInput struct { _ struct{} `type:"structure"` // (Optional) An idempotency token for resource creation, in a string of up - // to 64 ASCII characters. This token is automatically filled on your behalf + // to 63 ASCII characters. This token is automatically filled on your behalf // when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. 
ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` diff --git a/service/lambda/api.go b/service/lambda/api.go index a6f791bea78..f98308a4ab6 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -3,14 +3,21 @@ package lambda import ( + "bytes" "fmt" "io" + "sync" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) @@ -484,6 +491,8 @@ func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMapping // // - Apache Kafka (https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) // +// - Amazon DocumentDB (https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html) +// // The following error handling options are available only for stream sources // (DynamoDB and Kinesis): // @@ -518,6 +527,8 @@ func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMapping // // - Apache Kafka (https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-kafka-parms) // +// - Amazon DocumentDB (https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html#docdb-configuration) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -3517,6 +3528,334 @@ func (c *Lambda) InvokeAsyncWithContext(ctx aws.Context, input *InvokeAsyncInput return out, req.Send() } +const opInvokeWithResponseStream = "InvokeWithResponseStream" + +// InvokeWithResponseStreamRequest generates a "aws/request.Request" representing the +// client's request for the InvokeWithResponseStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See InvokeWithResponseStream for more information on using the InvokeWithResponseStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the InvokeWithResponseStreamRequest method. 
+// req, resp := client.InvokeWithResponseStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvokeWithResponseStream +func (c *Lambda) InvokeWithResponseStreamRequest(input *InvokeWithResponseStreamInput) (req *request.Request, output *InvokeWithResponseStreamOutput) { + op := &request.Operation{ + Name: opInvokeWithResponseStream, + HTTPMethod: "POST", + HTTPPath: "/2021-11-15/functions/{FunctionName}/response-streaming-invocations", + } + + if input == nil { + input = &InvokeWithResponseStreamInput{} + } + + output = &InvokeWithResponseStreamOutput{} + req = c.newRequest(op, input, output) + + es := NewInvokeWithResponseStreamEventStream() + output.eventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// InvokeWithResponseStream API operation for AWS Lambda. +// +// Configure your Lambda functions to stream response payloads back to clients. +// For more information, see Configuring a Lambda function to stream responses +// (https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lambda's +// API operation InvokeWithResponseStream for usage and error information. +// +// Returned Error Types: +// +// - ServiceException +// The Lambda service encountered an internal error. +// +// - ResourceNotFoundException +// The resource specified in the request does not exist. 
+// +// - InvalidRequestContentException +// The request body could not be parsed as JSON. +// +// - RequestTooLargeException +// The request payload exceeded the Invoke request body JSON input quota. For +// more information, see Lambda quotas (https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html). +// +// - UnsupportedMediaTypeException +// The content type of the Invoke request body is not JSON. +// +// - TooManyRequestsException +// The request throughput limit was exceeded. For more information, see Lambda +// quotas (https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html#api-requests). +// +// - InvalidParameterValueException +// One of the parameters in the request is not valid. +// +// - EC2UnexpectedException +// Lambda received an unexpected Amazon EC2 client exception while setting up +// for the Lambda function. +// +// - SubnetIPAddressLimitReachedException +// Lambda couldn't set up VPC access for the Lambda function because one or +// more configured subnets has no available IP addresses. +// +// - ENILimitReachedException +// Lambda couldn't create an elastic network interface in the VPC, specified +// as part of Lambda function configuration, because the limit for network interfaces +// has been reached. For more information, see Lambda quotas (https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html). +// +// - EFSMountConnectivityException +// The Lambda function couldn't make a network connection to the configured +// file system. +// +// - EFSMountFailureException +// The Lambda function couldn't mount the configured file system due to a permission +// or configuration issue. +// +// - EFSMountTimeoutException +// The Lambda function made a network connection to the configured file system, +// but the mount operation timed out. +// +// - EFSIOException +// An error occurred when reading from or writing to a connected file system. 
+// +// - EC2ThrottledException +// Amazon EC2 throttled Lambda during Lambda function initialization using the +// execution role provided for the function. +// +// - EC2AccessDeniedException +// Need additional permissions to configure VPC settings. +// +// - InvalidSubnetIDException +// The subnet ID provided in the Lambda function VPC configuration is not valid. +// +// - InvalidSecurityGroupIDException +// The security group ID provided in the Lambda function VPC configuration is +// not valid. +// +// - InvalidZipFileException +// Lambda could not unzip the deployment package. +// +// - KMSDisabledException +// Lambda couldn't decrypt the environment variables because the KMS key used +// is disabled. Check the Lambda function's KMS key settings. +// +// - KMSInvalidStateException +// Lambda couldn't decrypt the environment variables because the state of the +// KMS key used is not valid for Decrypt. Check the function's KMS key settings. +// +// - KMSAccessDeniedException +// Lambda couldn't decrypt the environment variables because KMS access was +// denied. Check the Lambda function's KMS permissions. +// +// - KMSNotFoundException +// Lambda couldn't decrypt the environment variables because the KMS key was +// not found. Check the function's KMS key settings. +// +// - InvalidRuntimeException +// The runtime or runtime version specified is not supported. +// +// - ResourceConflictException +// The resource already exists, or another operation is in progress. +// +// - ResourceNotReadyException +// The function is inactive and its VPC connection is no longer available. Wait +// for the VPC connection to reestablish and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvokeWithResponseStream +func (c *Lambda) InvokeWithResponseStream(input *InvokeWithResponseStreamInput) (*InvokeWithResponseStreamOutput, error) { + req, out := c.InvokeWithResponseStreamRequest(input) + return out, req.Send() +} + +// InvokeWithResponseStreamWithContext is the same as InvokeWithResponseStream with the addition of +// the ability to pass a context and additional request options. +// +// See InvokeWithResponseStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) InvokeWithResponseStreamWithContext(ctx aws.Context, input *InvokeWithResponseStreamInput, opts ...request.Option) (*InvokeWithResponseStreamOutput, error) { + req, out := c.InvokeWithResponseStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +var _ awserr.Error + +// InvokeWithResponseStreamEventStream provides the event stream handling for the InvokeWithResponseStream. +// +// For testing and mocking the event stream this type should be initialized via +// the NewInvokeWithResponseStreamEventStream constructor function. Using the functional options +// to pass in nested mock behavior. +type InvokeWithResponseStreamEventStream struct { + + // Reader is the EventStream reader for the InvokeWithResponseStreamResponseEvent + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. 
+ Reader InvokeWithResponseStreamResponseEventReader + + outputReader io.ReadCloser + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// NewInvokeWithResponseStreamEventStream initializes an InvokeWithResponseStreamEventStream. +// This function should only be used for testing and mocking the InvokeWithResponseStreamEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// es := NewInvokeWithResponseStreamEventStream(func(o *InvokeWithResponseStreamEventStream){ +// es.Reader = myMockStreamReader +// }) +func NewInvokeWithResponseStreamEventStream(opts ...func(*InvokeWithResponseStreamEventStream)) *InvokeWithResponseStreamEventStream { + es := &InvokeWithResponseStreamEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +func (es *InvokeWithResponseStreamEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *InvokeWithResponseStreamEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. 
+// +// These events are: +// +// - InvokeWithResponseStreamCompleteEvent +// - InvokeResponseStreamUpdate +// - InvokeWithResponseStreamResponseEventUnknownEvent +func (es *InvokeWithResponseStreamEventStream) Events() <-chan InvokeWithResponseStreamResponseEventEvent { + return es.Reader.Events() +} + +func (es *InvokeWithResponseStreamEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForInvokeWithResponseStreamResponseEventEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadInvokeWithResponseStreamResponseEvent(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +func (es *InvokeWithResponseStreamEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *InvokeWithResponseStreamEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. 
+func (es *InvokeWithResponseStreamEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + const opListAliases = "ListAliases" // ListAliasesRequest generates a "aws/request.Request" representing the @@ -6553,6 +6892,8 @@ func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMapping // // - Apache Kafka (https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) // +// - Amazon DocumentDB (https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html) +// // The following error handling options are available only for stream sources // (DynamoDB and Kinesis): // @@ -6587,6 +6928,8 @@ func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMapping // // - Apache Kafka (https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-kafka-parms) // +// - Amazon DocumentDB (https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html#docdb-configuration) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8468,14 +8811,16 @@ type CreateEventSourceMappingInput struct { // * Self-managed Apache Kafka – Default 100. Max 10,000. // // * Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000. + // + // * DocumentDB – Default 100. Max 10,000. BatchSize *int64 `min:"1" type:"integer"` - // (Streams only) If the function returns an error, split the batch in two and - // retry. + // (Kinesis and DynamoDB Streams only) If the function returns an error, split + // the batch in two and retry. BisectBatchOnFunctionError *bool `type:"boolean"` - // (Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded - // records. + // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard + // Amazon SNS topic destination for discarded records. 
DestinationConfig *DestinationConfig `type:"structure"` // Specific configuration settings for a DocumentDB event source. @@ -8498,6 +8843,8 @@ type CreateEventSourceMappingInput struct { // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. // // * Amazon MQ – The ARN of the broker. + // + // * Amazon DocumentDB – The ARN of the DocumentDB change stream. EventSourceArn *string `type:"string"` // An object that defines the filter criteria that determine whether Lambda @@ -8523,8 +8870,8 @@ type CreateEventSourceMappingInput struct { // FunctionName is a required field FunctionName *string `min:"1" type:"string" required:"true"` - // (Streams and Amazon SQS) A list of current response type enums applied to - // the event source mapping. + // (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type + // enums applied to the event source mapping. FunctionResponseTypes []*string `type:"list" enum:"FunctionResponseType"` // The maximum amount of time, in seconds, that Lambda spends gathering records @@ -8532,28 +8879,29 @@ type CreateEventSourceMappingInput struct { // to any value from 0 seconds to 300 seconds in increments of seconds. // // For streams and Amazon SQS event sources, the default batching window is - // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event - // sources, the default batching window is 500 ms. Note that because you can - // only change MaximumBatchingWindowInSeconds in increments of seconds, you - // cannot revert back to the 500 ms default batching window after you have changed - // it. To restore the default batching window, you must create a new event source - // mapping. + // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB + // event sources, the default batching window is 500 ms. 
Note that because you + // can only change MaximumBatchingWindowInSeconds in increments of seconds, + // you cannot revert back to the 500 ms default batching window after you have + // changed it. To restore the default batching window, you must create a new + // event source mapping. // // Related setting: For streams and Amazon SQS event sources, when you set BatchSize // to a value greater than 10, you must set MaximumBatchingWindowInSeconds to // at least 1. MaximumBatchingWindowInSeconds *int64 `type:"integer"` - // (Streams only) Discard records older than the specified age. The default - // value is infinite (-1). + // (Kinesis and DynamoDB Streams only) Discard records older than the specified + // age. The default value is infinite (-1). MaximumRecordAgeInSeconds *int64 `type:"integer"` - // (Streams only) Discard records after the specified number of retries. The - // default value is infinite (-1). When set to infinite (-1), failed records - // are retried until the record expires. + // (Kinesis and DynamoDB Streams only) Discard records after the specified number + // of retries. The default value is infinite (-1). When set to infinite (-1), + // failed records are retried until the record expires. MaximumRetryAttempts *int64 `type:"integer"` - // (Streams only) The number of batches to process from each shard concurrently. + // (Kinesis and DynamoDB Streams only) The number of batches to process from + // each shard concurrently. ParallelizationFactor *int64 `min:"1" type:"integer"` // (MQ) The name of the Amazon MQ broker destination queue to consume. @@ -8576,7 +8924,7 @@ type CreateEventSourceMappingInput struct { // The position in a stream from which to start reading. Required for Amazon // Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is - // supported only for Amazon Kinesis streams. + // supported only for Amazon Kinesis streams and Amazon DocumentDB. 
StartingPosition *string `type:"string" enum:"EventSourcePosition"` // With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. @@ -8585,8 +8933,9 @@ type CreateEventSourceMappingInput struct { // The name of the Kafka topic. Topics []*string `min:"1" type:"list"` - // (Streams only) The duration in seconds of a processing window. The range - // is between 1 second and 900 seconds. + // (Kinesis and DynamoDB Streams only) The duration in seconds of a processing + // window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds + // indicates no tumbling window. TumblingWindowInSeconds *int64 `type:"integer"` } @@ -9178,6 +9527,18 @@ type CreateFunctionUrlConfigInput struct { // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` + // The alias name. Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` } @@ -9240,6 +9601,12 @@ func (s *CreateFunctionUrlConfigInput) SetFunctionName(v string) *CreateFunction return s } +// SetInvokeMode sets the InvokeMode field's value. 
+func (s *CreateFunctionUrlConfigInput) SetInvokeMode(v string) *CreateFunctionUrlConfigInput { + s.InvokeMode = &v + return s +} + // SetQualifier sets the Qualifier field's value. func (s *CreateFunctionUrlConfigInput) SetQualifier(v string) *CreateFunctionUrlConfigInput { s.Qualifier = &v @@ -9276,6 +9643,18 @@ type CreateFunctionUrlConfigOutput struct { // // FunctionUrl is a required field FunctionUrl *string `min:"40" type:"string" required:"true"` + + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` } // String returns the string representation. @@ -9326,6 +9705,12 @@ func (s *CreateFunctionUrlConfigOutput) SetFunctionUrl(v string) *CreateFunction return s } +// SetInvokeMode sets the InvokeMode field's value. +func (s *CreateFunctionUrlConfigOutput) SetInvokeMode(v string) *CreateFunctionUrlConfigOutput { + s.InvokeMode = &v + return s +} + // The dead-letter queue (https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq) // for failed asynchronous invocations. type DeadLetterConfig struct { @@ -11063,12 +11448,12 @@ type EventSourceMappingConfiguration struct { // set MaximumBatchingWindowInSeconds to at least 1. BatchSize *int64 `min:"1" type:"integer"` - // (Streams only) If the function returns an error, split the batch in two and - // retry. The default value is false. 
+ // (Kinesis and DynamoDB Streams only) If the function returns an error, split + // the batch in two and retry. The default value is false. BisectBatchOnFunctionError *bool `type:"boolean"` - // (Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded - // records. + // (Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic + // destination for discarded records. DestinationConfig *DestinationConfig `type:"structure"` // Specific configuration settings for a DocumentDB event source. @@ -11085,8 +11470,8 @@ type EventSourceMappingConfiguration struct { // The ARN of the Lambda function. FunctionArn *string `type:"string"` - // (Streams and Amazon SQS) A list of current response type enums applied to - // the event source mapping. + // (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type + // enums applied to the event source mapping. FunctionResponseTypes []*string `type:"list" enum:"FunctionResponseType"` // The date that the event source mapping was last updated or that its state @@ -11101,31 +11486,31 @@ type EventSourceMappingConfiguration struct { // to any value from 0 seconds to 300 seconds in increments of seconds. // // For streams and Amazon SQS event sources, the default batching window is - // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event - // sources, the default batching window is 500 ms. Note that because you can - // only change MaximumBatchingWindowInSeconds in increments of seconds, you - // cannot revert back to the 500 ms default batching window after you have changed - // it. To restore the default batching window, you must create a new event source - // mapping. + // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB + // event sources, the default batching window is 500 ms. 
Note that because you + // can only change MaximumBatchingWindowInSeconds in increments of seconds, + // you cannot revert back to the 500 ms default batching window after you have + // changed it. To restore the default batching window, you must create a new + // event source mapping. // // Related setting: For streams and Amazon SQS event sources, when you set BatchSize // to a value greater than 10, you must set MaximumBatchingWindowInSeconds to // at least 1. MaximumBatchingWindowInSeconds *int64 `type:"integer"` - // (Streams only) Discard records older than the specified age. The default - // value is -1, which sets the maximum age to infinite. When the value is set - // to infinite, Lambda never discards old records. + // (Kinesis and DynamoDB Streams only) Discard records older than the specified + // age. The default value is -1, which sets the maximum age to infinite. When + // the value is set to infinite, Lambda never discards old records. MaximumRecordAgeInSeconds *int64 `type:"integer"` - // (Streams only) Discard records after the specified number of retries. The - // default value is -1, which sets the maximum number of retries to infinite. - // When MaximumRetryAttempts is infinite, Lambda retries failed records until - // the record expires in the event source. + // (Kinesis and DynamoDB Streams only) Discard records after the specified number + // of retries. The default value is -1, which sets the maximum number of retries + // to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed + // records until the record expires in the event source. MaximumRetryAttempts *int64 `type:"integer"` - // (Streams only) The number of batches to process concurrently from each shard. - // The default value is 1. + // (Kinesis and DynamoDB Streams only) The number of batches to process concurrently + // from each shard. The default value is 1. 
ParallelizationFactor *int64 `min:"1" type:"integer"` // (Amazon MQ) The name of the Amazon MQ broker destination queue to consume. @@ -11148,7 +11533,7 @@ type EventSourceMappingConfiguration struct { // The position in a stream from which to start reading. Required for Amazon // Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is - // supported only for Amazon Kinesis streams. + // supported only for Amazon Kinesis streams and Amazon DocumentDB. StartingPosition *string `type:"string" enum:"EventSourcePosition"` // With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. @@ -11165,8 +11550,9 @@ type EventSourceMappingConfiguration struct { // The name of the Kafka topic. Topics []*string `min:"1" type:"list"` - // (Streams only) The duration in seconds of a processing window. The range - // is 1–900 seconds. + // (Kinesis and DynamoDB Streams only) The duration in seconds of a processing + // window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds + // indicates no tumbling window. TumblingWindowInSeconds *int64 `type:"integer"` // The identifier of the event source mapping. @@ -12001,9 +12387,9 @@ type FunctionEventInvokeConfig struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. // // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig `type:"structure"` @@ -12101,6 +12487,18 @@ type FunctionUrlConfig struct { // FunctionUrl is a required field FunctionUrl *string `min:"40" type:"string" required:"true"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. 
The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` + // When the function URL configuration was last updated, in ISO-8601 format // (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD). // @@ -12156,6 +12554,12 @@ func (s *FunctionUrlConfig) SetFunctionUrl(v string) *FunctionUrlConfig { return s } +// SetInvokeMode sets the InvokeMode field's value. +func (s *FunctionUrlConfig) SetInvokeMode(v string) *FunctionUrlConfig { + s.InvokeMode = &v + return s +} + // SetLastModifiedTime sets the LastModifiedTime field's value. func (s *FunctionUrlConfig) SetLastModifiedTime(v string) *FunctionUrlConfig { s.LastModifiedTime = &v @@ -12794,9 +13198,9 @@ type GetFunctionEventInvokeConfigOutput struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. // // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig `type:"structure"` @@ -13097,6 +13501,18 @@ type GetFunctionUrlConfigOutput struct { // FunctionUrl is a required field FunctionUrl *string `min:"40" type:"string" required:"true"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. The maximum payload size is 6 MB. 
+ // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` + // When the function URL configuration was last updated, in ISO-8601 format // (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD). // @@ -13152,6 +13568,12 @@ func (s *GetFunctionUrlConfigOutput) SetFunctionUrl(v string) *GetFunctionUrlCon return s } +// SetInvokeMode sets the InvokeMode field's value. +func (s *GetFunctionUrlConfigOutput) SetInvokeMode(v string) *GetFunctionUrlConfigOutput { + s.InvokeMode = &v + return s +} + // SetLastModifiedTime sets the LastModifiedTime field's value. func (s *GetFunctionUrlConfigOutput) SetLastModifiedTime(v string) *GetFunctionUrlConfigOutput { s.LastModifiedTime = &v @@ -14897,15 +15319,18 @@ func (s *InvokeOutput) SetStatusCode(v int64) *InvokeOutput { return s } -// Lambda couldn't decrypt the environment variables because KMS access was -// denied. Check the Lambda function's KMS permissions. -type KMSAccessDeniedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` +// A chunk of the streamed response payload. +type InvokeResponseStreamUpdate struct { + _ struct{} `type:"structure" payload:"Payload"` - Type *string `type:"string"` + // Data returned by your Lambda function. + // + // Payload is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by InvokeResponseStreamUpdate's + // String and GoString methods. + // + // Payload is automatically base64 encoded/decoded by the SDK. 
+ Payload []byte `type:"blob" sensitive:"true"` } // String returns the string representation. @@ -14913,7 +15338,7 @@ type KMSAccessDeniedException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s KMSAccessDeniedException) String() string { +func (s InvokeResponseStreamUpdate) String() string { return awsutil.Prettify(s) } @@ -14922,15 +15347,493 @@ func (s KMSAccessDeniedException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s KMSAccessDeniedException) GoString() string { +func (s InvokeResponseStreamUpdate) GoString() string { return s.String() } -func newErrorKMSAccessDeniedException(v protocol.ResponseMetadata) error { - return &KMSAccessDeniedException{ - RespMetadata: v, - } -} +// SetPayload sets the Payload field's value. +func (s *InvokeResponseStreamUpdate) SetPayload(v []byte) *InvokeResponseStreamUpdate { + s.Payload = v + return s +} + +// The InvokeResponseStreamUpdate is and event in the InvokeWithResponseStreamResponseEvent group of events. +func (s *InvokeResponseStreamUpdate) eventInvokeWithResponseStreamResponseEvent() {} + +// UnmarshalEvent unmarshals the EventStream Message into the InvokeResponseStreamUpdate value. +// This method is only used internally within the SDK's EventStream handling. +func (s *InvokeResponseStreamUpdate) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.Payload = make([]byte, len(msg.Payload)) + copy(s.Payload, msg.Payload) + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
+func (s *InvokeResponseStreamUpdate) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + +// A response confirming that the event stream is complete. +type InvokeWithResponseStreamCompleteEvent struct { + _ struct{} `type:"structure"` + + // An error code. + ErrorCode *string `type:"string"` + + // The details of any returned error. + ErrorDetails *string `type:"string"` + + // The last 4 KB of the execution log, which is base64-encoded. + LogResult *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvokeWithResponseStreamCompleteEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvokeWithResponseStreamCompleteEvent) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *InvokeWithResponseStreamCompleteEvent) SetErrorCode(v string) *InvokeWithResponseStreamCompleteEvent { + s.ErrorCode = &v + return s +} + +// SetErrorDetails sets the ErrorDetails field's value. +func (s *InvokeWithResponseStreamCompleteEvent) SetErrorDetails(v string) *InvokeWithResponseStreamCompleteEvent { + s.ErrorDetails = &v + return s +} + +// SetLogResult sets the LogResult field's value. 
+func (s *InvokeWithResponseStreamCompleteEvent) SetLogResult(v string) *InvokeWithResponseStreamCompleteEvent { + s.LogResult = &v + return s +} + +// The InvokeWithResponseStreamCompleteEvent is and event in the InvokeWithResponseStreamResponseEvent group of events. +func (s *InvokeWithResponseStreamCompleteEvent) eventInvokeWithResponseStreamResponseEvent() {} + +// UnmarshalEvent unmarshals the EventStream Message into the InvokeWithResponseStreamCompleteEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *InvokeWithResponseStreamCompleteEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *InvokeWithResponseStreamCompleteEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +type InvokeWithResponseStreamInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Up to 3,583 bytes of base64-encoded data about the invoking client to pass + // to the function in the context object. + ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"` + + // The name of the Lambda function. + // + // Name formats + // + // * Function name – my-function. + // + // * Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. + // + // * Partial ARN – 123456789012:function:my-function. 
+ // + // The length constraint applies only to the full ARN. If you specify only the + // function name, it is limited to 64 characters in length. + // + // FunctionName is a required field + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Use one of the following options: + // + // * RequestResponse (default) – Invoke the function synchronously. Keep + // the connection open until the function returns a response or times out. + // The API operation response includes the function response and additional + // data. + // + // * DryRun – Validate parameter values and verify that the IAM user or + // role has permission to invoke the function. + InvocationType *string `location:"header" locationName:"X-Amz-Invocation-Type" type:"string" enum:"ResponseStreamingInvocationType"` + + // Set to Tail to include the execution log in the response. Applies to synchronously + // invoked functions only. + LogType *string `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"LogType"` + + // The JSON that you want to provide to your Lambda function as input. + // + // You can enter the JSON directly. For example, --payload '{ "key": "value" + // }'. You can also specify a file path. For example, --payload file://payload.json. + // + // Payload is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by InvokeWithResponseStreamInput's + // String and GoString methods. + Payload []byte `type:"blob" sensitive:"true"` + + // The alias name. + Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvokeWithResponseStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvokeWithResponseStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InvokeWithResponseStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InvokeWithResponseStreamInput"} + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + } + if s.Qualifier != nil && len(*s.Qualifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientContext sets the ClientContext field's value. +func (s *InvokeWithResponseStreamInput) SetClientContext(v string) *InvokeWithResponseStreamInput { + s.ClientContext = &v + return s +} + +// SetFunctionName sets the FunctionName field's value. +func (s *InvokeWithResponseStreamInput) SetFunctionName(v string) *InvokeWithResponseStreamInput { + s.FunctionName = &v + return s +} + +// SetInvocationType sets the InvocationType field's value. +func (s *InvokeWithResponseStreamInput) SetInvocationType(v string) *InvokeWithResponseStreamInput { + s.InvocationType = &v + return s +} + +// SetLogType sets the LogType field's value. +func (s *InvokeWithResponseStreamInput) SetLogType(v string) *InvokeWithResponseStreamInput { + s.LogType = &v + return s +} + +// SetPayload sets the Payload field's value. 
+func (s *InvokeWithResponseStreamInput) SetPayload(v []byte) *InvokeWithResponseStreamInput { + s.Payload = v + return s +} + +// SetQualifier sets the Qualifier field's value. +func (s *InvokeWithResponseStreamInput) SetQualifier(v string) *InvokeWithResponseStreamInput { + s.Qualifier = &v + return s +} + +type InvokeWithResponseStreamOutput struct { + _ struct{} `type:"structure" payload:"EventStream"` + + eventStream *InvokeWithResponseStreamEventStream + + // The version of the function that executed. When you invoke a function with + // an alias, this indicates which version the alias resolved to. + ExecutedVersion *string `location:"header" locationName:"X-Amz-Executed-Version" min:"1" type:"string"` + + // The type of data the stream is returning. + ResponseStreamContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // For a successful request, the HTTP status code is in the 200 range. For the + // RequestResponse invocation type, this status code is 200. For the DryRun + // invocation type, this status code is 204. + StatusCode *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvokeWithResponseStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvokeWithResponseStreamOutput) GoString() string { + return s.String() +} + +// SetExecutedVersion sets the ExecutedVersion field's value. 
+func (s *InvokeWithResponseStreamOutput) SetExecutedVersion(v string) *InvokeWithResponseStreamOutput { + s.ExecutedVersion = &v + return s +} + +// SetResponseStreamContentType sets the ResponseStreamContentType field's value. +func (s *InvokeWithResponseStreamOutput) SetResponseStreamContentType(v string) *InvokeWithResponseStreamOutput { + s.ResponseStreamContentType = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *InvokeWithResponseStreamOutput) SetStatusCode(v int64) *InvokeWithResponseStreamOutput { + s.StatusCode = &v + return s +} + +// GetStream returns the type to interact with the event stream. +func (s *InvokeWithResponseStreamOutput) GetStream() *InvokeWithResponseStreamEventStream { + return s.eventStream +} + +// InvokeWithResponseStreamResponseEventEvent groups together all EventStream +// events writes for InvokeWithResponseStreamResponseEvent. +// +// These events are: +// +// - InvokeWithResponseStreamCompleteEvent +// - InvokeResponseStreamUpdate +type InvokeWithResponseStreamResponseEventEvent interface { + eventInvokeWithResponseStreamResponseEvent() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +// InvokeWithResponseStreamResponseEventReader provides the interface for reading to the stream. The +// default implementation for this interface will be InvokeWithResponseStreamResponseEvent. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// - InvokeWithResponseStreamCompleteEvent +// - InvokeResponseStreamUpdate +// - InvokeWithResponseStreamResponseEventUnknownEvent +type InvokeWithResponseStreamResponseEventReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan InvokeWithResponseStreamResponseEventEvent + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. 
+ Err() error +} + +type readInvokeWithResponseStreamResponseEvent struct { + eventReader *eventstreamapi.EventReader + stream chan InvokeWithResponseStreamResponseEventEvent + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func newReadInvokeWithResponseStreamResponseEvent(eventReader *eventstreamapi.EventReader) *readInvokeWithResponseStreamResponseEvent { + r := &readInvokeWithResponseStreamResponseEvent{ + eventReader: eventReader, + stream: make(chan InvokeWithResponseStreamResponseEventEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + go r.readEventStream() + + return r +} + +// Close will close the underlying event stream reader. +func (r *readInvokeWithResponseStreamResponseEvent) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *readInvokeWithResponseStreamResponseEvent) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readInvokeWithResponseStreamResponseEvent) Closed() <-chan struct{} { + return r.done +} + +func (r *readInvokeWithResponseStreamResponseEvent) safeClose() { + close(r.done) +} + +func (r *readInvokeWithResponseStreamResponseEvent) Err() error { + return r.err.Err() +} + +func (r *readInvokeWithResponseStreamResponseEvent) Events() <-chan InvokeWithResponseStreamResponseEventEvent { + return r.stream +} + +func (r *readInvokeWithResponseStreamResponseEvent) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.(InvokeWithResponseStreamResponseEventEvent): + case <-r.done: + return + } + } +} + +type unmarshalerForInvokeWithResponseStreamResponseEventEvent struct { + metadata 
protocol.ResponseMetadata +} + +func (u unmarshalerForInvokeWithResponseStreamResponseEventEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "InvokeComplete": + return &InvokeWithResponseStreamCompleteEvent{}, nil + case "PayloadChunk": + return &InvokeResponseStreamUpdate{}, nil + default: + return &InvokeWithResponseStreamResponseEventUnknownEvent{Type: eventType}, nil + } +} + +// InvokeWithResponseStreamResponseEventUnknownEvent provides a failsafe event for the +// InvokeWithResponseStreamResponseEvent group of events when an unknown event is received. +type InvokeWithResponseStreamResponseEventUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The InvokeWithResponseStreamResponseEventUnknownEvent is and event in the InvokeWithResponseStreamResponseEvent +// group of events. +func (s *InvokeWithResponseStreamResponseEventUnknownEvent) eventInvokeWithResponseStreamResponseEvent() { +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *InvokeWithResponseStreamResponseEventUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the InvokeWithResponseStreamResponseEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (e *InvokeWithResponseStreamResponseEventUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + +// Lambda couldn't decrypt the environment variables because KMS access was +// denied. Check the Lambda function's KMS permissions. 
+type KMSAccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + Type *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KMSAccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KMSAccessDeniedException) GoString() string { + return s.String() +} + +func newErrorKMSAccessDeniedException(v protocol.ResponseMetadata) error { + return &KMSAccessDeniedException{ + RespMetadata: v, + } +} // Code returns the exception type name. func (s *KMSAccessDeniedException) Code() string { @@ -15756,6 +16659,8 @@ type ListEventSourceMappingsInput struct { // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. // // * Amazon MQ – The ARN of the broker. + // + // * Amazon DocumentDB – The ARN of the DocumentDB change stream. EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` // The name of the Lambda function. @@ -17836,9 +18741,9 @@ type PutFunctionEventInvokeConfigInput struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. // // * Event Bus - The ARN of an Amazon EventBridge event bus. 
DestinationConfig *DestinationConfig `type:"structure"` @@ -17949,9 +18854,9 @@ type PutFunctionEventInvokeConfigOutput struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. // // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig `type:"structure"` @@ -20305,14 +21210,16 @@ type UpdateEventSourceMappingInput struct { // * Self-managed Apache Kafka – Default 100. Max 10,000. // // * Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000. + // + // * DocumentDB – Default 100. Max 10,000. BatchSize *int64 `min:"1" type:"integer"` - // (Streams only) If the function returns an error, split the batch in two and - // retry. + // (Kinesis and DynamoDB Streams only) If the function returns an error, split + // the batch in two and retry. BisectBatchOnFunctionError *bool `type:"boolean"` - // (Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded - // records. + // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard + // Amazon SNS topic destination for discarded records. DestinationConfig *DestinationConfig `type:"structure"` // Specific configuration settings for a DocumentDB event source. @@ -20345,8 +21252,8 @@ type UpdateEventSourceMappingInput struct { // function name, it's limited to 64 characters in length. FunctionName *string `min:"1" type:"string"` - // (Streams and Amazon SQS) A list of current response type enums applied to - // the event source mapping. + // (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type + // enums applied to the event source mapping. 
FunctionResponseTypes []*string `type:"list" enum:"FunctionResponseType"` // The maximum amount of time, in seconds, that Lambda spends gathering records @@ -20354,28 +21261,29 @@ type UpdateEventSourceMappingInput struct { // to any value from 0 seconds to 300 seconds in increments of seconds. // // For streams and Amazon SQS event sources, the default batching window is - // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event - // sources, the default batching window is 500 ms. Note that because you can - // only change MaximumBatchingWindowInSeconds in increments of seconds, you - // cannot revert back to the 500 ms default batching window after you have changed - // it. To restore the default batching window, you must create a new event source - // mapping. + // 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB + // event sources, the default batching window is 500 ms. Note that because you + // can only change MaximumBatchingWindowInSeconds in increments of seconds, + // you cannot revert back to the 500 ms default batching window after you have + // changed it. To restore the default batching window, you must create a new + // event source mapping. // // Related setting: For streams and Amazon SQS event sources, when you set BatchSize // to a value greater than 10, you must set MaximumBatchingWindowInSeconds to // at least 1. MaximumBatchingWindowInSeconds *int64 `type:"integer"` - // (Streams only) Discard records older than the specified age. The default - // value is infinite (-1). + // (Kinesis and DynamoDB Streams only) Discard records older than the specified + // age. The default value is infinite (-1). MaximumRecordAgeInSeconds *int64 `type:"integer"` - // (Streams only) Discard records after the specified number of retries. The - // default value is infinite (-1). When set to infinite (-1), failed records - // are retried until the record expires. 
+ // (Kinesis and DynamoDB Streams only) Discard records after the specified number + // of retries. The default value is infinite (-1). When set to infinite (-1), + // failed records are retried until the record expires. MaximumRetryAttempts *int64 `type:"integer"` - // (Streams only) The number of batches to process from each shard concurrently. + // (Kinesis and DynamoDB Streams only) The number of batches to process from + // each shard concurrently. ParallelizationFactor *int64 `min:"1" type:"integer"` // (Amazon SQS only) The scaling configuration for the event source. For more @@ -20387,8 +21295,9 @@ type UpdateEventSourceMappingInput struct { // your event source. SourceAccessConfigurations []*SourceAccessConfiguration `type:"list"` - // (Streams only) The duration in seconds of a processing window. The range - // is between 1 second and 900 seconds. + // (Kinesis and DynamoDB Streams only) The duration in seconds of a processing + // window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds + // indicates no tumbling window. TumblingWindowInSeconds *int64 `type:"integer"` // The identifier of the event source mapping. @@ -21007,9 +21916,9 @@ type UpdateFunctionEventInvokeConfigInput struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. // // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig `type:"structure"` @@ -21120,9 +22029,9 @@ type UpdateFunctionEventInvokeConfigOutput struct { // // * Function - The Amazon Resource Name (ARN) of a Lambda function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of a standard SQS queue. // - // * Topic - The ARN of an SNS topic. + // * Topic - The ARN of a standard SNS topic. 
// // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig `type:"structure"` @@ -21217,6 +22126,18 @@ type UpdateFunctionUrlConfigInput struct { // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` + // The alias name. Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` } @@ -21276,6 +22197,12 @@ func (s *UpdateFunctionUrlConfigInput) SetFunctionName(v string) *UpdateFunction return s } +// SetInvokeMode sets the InvokeMode field's value. +func (s *UpdateFunctionUrlConfigInput) SetInvokeMode(v string) *UpdateFunctionUrlConfigInput { + s.InvokeMode = &v + return s +} + // SetQualifier sets the Qualifier field's value. func (s *UpdateFunctionUrlConfigInput) SetQualifier(v string) *UpdateFunctionUrlConfigInput { s.Qualifier = &v @@ -21313,6 +22240,18 @@ type UpdateFunctionUrlConfigOutput struct { // FunctionUrl is a required field FunctionUrl *string `min:"40" type:"string" required:"true"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. 
The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. The maximum response payload size is 20 MB, however, you + // can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `type:"string" enum:"InvokeMode"` + // When the function URL configuration was last updated, in ISO-8601 format // (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD). // @@ -21368,6 +22307,12 @@ func (s *UpdateFunctionUrlConfigOutput) SetFunctionUrl(v string) *UpdateFunction return s } +// SetInvokeMode sets the InvokeMode field's value. +func (s *UpdateFunctionUrlConfigOutput) SetInvokeMode(v string) *UpdateFunctionUrlConfigOutput { + s.InvokeMode = &v + return s +} + // SetLastModifiedTime sets the LastModifiedTime field's value. func (s *UpdateFunctionUrlConfigOutput) SetLastModifiedTime(v string) *UpdateFunctionUrlConfigOutput { s.LastModifiedTime = &v @@ -21607,6 +22552,22 @@ func InvocationType_Values() []string { } } +const ( + // InvokeModeBuffered is a InvokeMode enum value + InvokeModeBuffered = "BUFFERED" + + // InvokeModeResponseStream is a InvokeMode enum value + InvokeModeResponseStream = "RESPONSE_STREAM" +) + +// InvokeMode_Values returns all elements of the InvokeMode enum +func InvokeMode_Values() []string { + return []string{ + InvokeModeBuffered, + InvokeModeResponseStream, + } +} + const ( // LastUpdateStatusSuccessful is a LastUpdateStatus enum value LastUpdateStatusSuccessful = "Successful" @@ -21771,6 +22732,22 @@ func ProvisionedConcurrencyStatusEnum_Values() []string { } } +const ( + // ResponseStreamingInvocationTypeRequestResponse is a ResponseStreamingInvocationType enum value + ResponseStreamingInvocationTypeRequestResponse = "RequestResponse" + + // ResponseStreamingInvocationTypeDryRun is a 
ResponseStreamingInvocationType enum value + ResponseStreamingInvocationTypeDryRun = "DryRun" +) + +// ResponseStreamingInvocationType_Values returns all elements of the ResponseStreamingInvocationType enum +func ResponseStreamingInvocationType_Values() []string { + return []string{ + ResponseStreamingInvocationTypeRequestResponse, + ResponseStreamingInvocationTypeDryRun, + } +} + const ( // RuntimeNodejs is a Runtime enum value RuntimeNodejs = "nodejs" diff --git a/service/lambda/eventstream_test.go b/service/lambda/eventstream_test.go new file mode 100644 index 00000000000..547b221ba48 --- /dev/null +++ b/service/lambda/eventstream_test.go @@ -0,0 +1,279 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +//go:build go1.16 +// +build go1.16 + +package lambda + +import ( + "bytes" + "context" + "io/ioutil" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamtest" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +var _ time.Time +var _ awserr.Error +var _ context.Context +var _ sync.WaitGroup +var _ strings.Reader + +func TestInvokeWithResponseStream_Read(t *testing.T) { + expectEvents, eventMsgs := mockInvokeWithResponseStreamReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc 
:= New(sess) + resp, err := svc.InvokeWithResponseStream(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func TestInvokeWithResponseStream_ReadClose(t *testing.T) { + _, eventMsgs := mockInvokeWithResponseStreamReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.InvokeWithResponseStream(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + // Assert calling Err before close does not close the stream. + resp.GetStream().Err() + select { + case _, ok := <-resp.GetStream().Events(): + if !ok { + t.Fatalf("expect stream not to be closed, but was") + } + default: + } + + resp.GetStream().Close() + <-resp.GetStream().Events() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func TestInvokeWithResponseStream_ReadUnknownEvent(t *testing.T) { + expectEvents, eventMsgs := mockInvokeWithResponseStreamReadEvents() + var eventOffset int + + unknownEvent := eventstream.Message{ + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("UnknownEventName"), + }, + }, + Payload: []byte("some unknown event"), + } + + eventMsgs = append(eventMsgs[:eventOffset], + append([]eventstream.Message{unknownEvent}, eventMsgs[eventOffset:]...)...) 
+ + expectEvents = append(expectEvents[:eventOffset], + append([]InvokeWithResponseStreamResponseEventEvent{ + &InvokeWithResponseStreamResponseEventUnknownEvent{ + Type: "UnknownEventName", + Message: unknownEvent, + }, + }, + expectEvents[eventOffset:]...)...) + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.InvokeWithResponseStream(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func BenchmarkInvokeWithResponseStream_Read(b *testing.B) { + _, eventMsgs := mockInvokeWithResponseStreamReadEvents() + var buf bytes.Buffer + encoder := eventstream.NewEncoder(&buf) + for _, msg := range eventMsgs { + if err := encoder.Encode(msg); err != nil { + b.Fatalf("failed to encode message, %v", err) + } + } + stream := &loopReader{source: bytes.NewReader(buf.Bytes())} + + sess := unit.Session + svc := New(sess, &aws.Config{ + Endpoint: aws.String("https://example.com"), + DisableParamValidation: aws.Bool(true), + }) + svc.Handlers.Send.Swap(corehandlers.SendHandler.Name, + request.NamedHandler{Name: "mockSend", + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Status: "200 OK", + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(stream), + } + }, + }, + ) + + resp, err := svc.InvokeWithResponseStream(nil) + if err != nil { + b.Fatalf("failed to create request, %v", err) + } + defer resp.GetStream().Close() + 
b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err = resp.GetStream().Err(); err != nil { + b.Fatalf("expect no error, got %v", err) + } + event := <-resp.GetStream().Events() + if event == nil { + b.Fatalf("expect event, got nil, %v, %d", resp.GetStream().Err(), i) + } + } +} + +func mockInvokeWithResponseStreamReadEvents() ( + []InvokeWithResponseStreamResponseEventEvent, + []eventstream.Message, +) { + expectEvents := []InvokeWithResponseStreamResponseEventEvent{ + &InvokeWithResponseStreamCompleteEvent{ + ErrorCode: aws.String("string value goes here"), + ErrorDetails: aws.String("string value goes here"), + LogResult: aws.String("string value goes here"), + }, + &InvokeResponseStreamUpdate{ + Payload: []byte("blob value goes here"), + }, + } + + var marshalers request.HandlerList + marshalers.PushBackNamed(restjson.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + _ = payloadMarshaler + + eventMsgs := []eventstream.Message{ + { + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("InvokeComplete"), + }, + }, + Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, expectEvents[0]), + }, + { + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("PayloadChunk"), + }, + }, + Payload: expectEvents[1].(*InvokeResponseStreamUpdate).Payload, + }, + } + + return expectEvents, eventMsgs +} + +type loopReader struct { + source *bytes.Reader +} + +func (c *loopReader) Read(p []byte) (int, error) { + if c.source.Len() == 0 { + c.source.Seek(0, 0) + } + + return c.source.Read(p) +} diff --git a/service/lambda/lambdaiface/interface.go b/service/lambda/lambdaiface/interface.go index ee4ba952658..45d426f8a1d 100644 --- a/service/lambda/lambdaiface/interface.go +++ b/service/lambda/lambdaiface/interface.go @@ -200,6 +200,10 
@@ type LambdaAPI interface { InvokeAsyncWithContext(aws.Context, *lambda.InvokeAsyncInput, ...request.Option) (*lambda.InvokeAsyncOutput, error) InvokeAsyncRequest(*lambda.InvokeAsyncInput) (*request.Request, *lambda.InvokeAsyncOutput) + InvokeWithResponseStream(*lambda.InvokeWithResponseStreamInput) (*lambda.InvokeWithResponseStreamOutput, error) + InvokeWithResponseStreamWithContext(aws.Context, *lambda.InvokeWithResponseStreamInput, ...request.Option) (*lambda.InvokeWithResponseStreamOutput, error) + InvokeWithResponseStreamRequest(*lambda.InvokeWithResponseStreamInput) (*request.Request, *lambda.InvokeWithResponseStreamOutput) + ListAliases(*lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error) ListAliasesWithContext(aws.Context, *lambda.ListAliasesInput, ...request.Option) (*lambda.ListAliasesOutput, error) ListAliasesRequest(*lambda.ListAliasesInput) (*request.Request, *lambda.ListAliasesOutput) diff --git a/service/lambda/service.go b/service/lambda/service.go index 8937161cf2d..355a67b2c76 100644 --- a/service/lambda/service.go +++ b/service/lambda/service.go @@ -85,6 +85,9 @@ func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), ) + svc.Handlers.BuildStream.PushBackNamed(restjson.BuildHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restjson.UnmarshalHandler) + // Run custom client initialization if present if initClient != nil { initClient(svc.Client) diff --git a/service/quicksight/api.go b/service/quicksight/api.go index 50055a1362e..ed706b6b21c 100644 --- a/service/quicksight/api.go +++ b/service/quicksight/api.go @@ -1569,6 +1569,111 @@ func (c *QuickSight) CreateNamespaceWithContext(ctx aws.Context, input *CreateNa return out, req.Send() } +const opCreateRefreshSchedule = "CreateRefreshSchedule" + +// CreateRefreshScheduleRequest generates a "aws/request.Request" representing the +// client's request for the 
CreateRefreshSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRefreshSchedule for more information on using the CreateRefreshSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateRefreshScheduleRequest method. +// req, resp := client.CreateRefreshScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateRefreshSchedule +func (c *QuickSight) CreateRefreshScheduleRequest(input *CreateRefreshScheduleInput) (req *request.Request, output *CreateRefreshScheduleOutput) { + op := &request.Operation{ + Name: opCreateRefreshSchedule, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules", + } + + if input == nil { + input = &CreateRefreshScheduleInput{} + } + + output = &CreateRefreshScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRefreshSchedule API operation for Amazon QuickSight. +// +// Creates a refresh schedule for a dataset. You can create up to 5 different +// schedules for a single dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation CreateRefreshSchedule for usage and error information. 
+// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. +// +// - ResourceExistsException +// The resource specified already exists. +// +// - PreconditionNotMetException +// One or more preconditions aren't met. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateRefreshSchedule +func (c *QuickSight) CreateRefreshSchedule(input *CreateRefreshScheduleInput) (*CreateRefreshScheduleOutput, error) { + req, out := c.CreateRefreshScheduleRequest(input) + return out, req.Send() +} + +// CreateRefreshScheduleWithContext is the same as CreateRefreshSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRefreshSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QuickSight) CreateRefreshScheduleWithContext(ctx aws.Context, input *CreateRefreshScheduleInput, opts ...request.Option) (*CreateRefreshScheduleOutput, error) { + req, out := c.CreateRefreshScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTemplate = "CreateTemplate" // CreateTemplateRequest generates a "aws/request.Request" representing the @@ -2518,6 +2623,107 @@ func (c *QuickSight) DeleteDataSetWithContext(ctx aws.Context, input *DeleteData return out, req.Send() } +const opDeleteDataSetRefreshProperties = "DeleteDataSetRefreshProperties" + +// DeleteDataSetRefreshPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataSetRefreshProperties operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataSetRefreshProperties for more information on using the DeleteDataSetRefreshProperties +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDataSetRefreshPropertiesRequest method. 
+// req, resp := client.DeleteDataSetRefreshPropertiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSetRefreshProperties +func (c *QuickSight) DeleteDataSetRefreshPropertiesRequest(input *DeleteDataSetRefreshPropertiesInput) (req *request.Request, output *DeleteDataSetRefreshPropertiesOutput) { + op := &request.Operation{ + Name: opDeleteDataSetRefreshProperties, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties", + } + + if input == nil { + input = &DeleteDataSetRefreshPropertiesInput{} + } + + output = &DeleteDataSetRefreshPropertiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteDataSetRefreshProperties API operation for Amazon QuickSight. +// +// Deletes the dataset refresh properties of the dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation DeleteDataSetRefreshProperties for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. 
+// +// - ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSetRefreshProperties +func (c *QuickSight) DeleteDataSetRefreshProperties(input *DeleteDataSetRefreshPropertiesInput) (*DeleteDataSetRefreshPropertiesOutput, error) { + req, out := c.DeleteDataSetRefreshPropertiesRequest(input) + return out, req.Send() +} + +// DeleteDataSetRefreshPropertiesWithContext is the same as DeleteDataSetRefreshProperties with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataSetRefreshProperties for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DeleteDataSetRefreshPropertiesWithContext(ctx aws.Context, input *DeleteDataSetRefreshPropertiesInput, opts ...request.Option) (*DeleteDataSetRefreshPropertiesOutput, error) { + req, out := c.DeleteDataSetRefreshPropertiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDataSource = "DeleteDataSource" // DeleteDataSourceRequest generates a "aws/request.Request" representing the @@ -3231,6 +3437,104 @@ func (c *QuickSight) DeleteNamespaceWithContext(ctx aws.Context, input *DeleteNa return out, req.Send() } +const opDeleteRefreshSchedule = "DeleteRefreshSchedule" + +// DeleteRefreshScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRefreshSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRefreshSchedule for more information on using the DeleteRefreshSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteRefreshScheduleRequest method. +// req, resp := client.DeleteRefreshScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteRefreshSchedule +func (c *QuickSight) DeleteRefreshScheduleRequest(input *DeleteRefreshScheduleInput) (req *request.Request, output *DeleteRefreshScheduleOutput) { + op := &request.Operation{ + Name: opDeleteRefreshSchedule, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}", + } + + if input == nil { + input = &DeleteRefreshScheduleInput{} + } + + output = &DeleteRefreshScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRefreshSchedule API operation for Amazon QuickSight. +// +// Deletes a refresh schedule from a dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation DeleteRefreshSchedule for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. 
Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteRefreshSchedule +func (c *QuickSight) DeleteRefreshSchedule(input *DeleteRefreshScheduleInput) (*DeleteRefreshScheduleOutput, error) { + req, out := c.DeleteRefreshScheduleRequest(input) + return out, req.Send() +} + +// DeleteRefreshScheduleWithContext is the same as DeleteRefreshSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRefreshSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DeleteRefreshScheduleWithContext(ctx aws.Context, input *DeleteRefreshScheduleInput, opts ...request.Option) (*DeleteRefreshScheduleOutput, error) { + req, out := c.DeleteRefreshScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteTemplate = "DeleteTemplate" // DeleteTemplateRequest generates a "aws/request.Request" representing the @@ -4982,57 +5286,57 @@ func (c *QuickSight) DescribeDataSetPermissionsWithContext(ctx aws.Context, inpu return out, req.Send() } -const opDescribeDataSource = "DescribeDataSource" +const opDescribeDataSetRefreshProperties = "DescribeDataSetRefreshProperties" -// DescribeDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSource operation. The "output" return +// DescribeDataSetRefreshPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSetRefreshProperties operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSource for more information on using the DescribeDataSource +// See DescribeDataSetRefreshProperties for more information on using the DescribeDataSetRefreshProperties // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the DescribeDataSourceRequest method. -// req, resp := client.DescribeDataSourceRequest(params) +// // Example sending a request using the DescribeDataSetRefreshPropertiesRequest method. 
+// req, resp := client.DescribeDataSetRefreshPropertiesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource -func (c *QuickSight) DescribeDataSourceRequest(input *DescribeDataSourceInput) (req *request.Request, output *DescribeDataSourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetRefreshProperties +func (c *QuickSight) DescribeDataSetRefreshPropertiesRequest(input *DescribeDataSetRefreshPropertiesInput) (req *request.Request, output *DescribeDataSetRefreshPropertiesOutput) { op := &request.Operation{ - Name: opDescribeDataSource, + Name: opDescribeDataSetRefreshProperties, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties", } if input == nil { - input = &DescribeDataSourceInput{} + input = &DescribeDataSetRefreshPropertiesInput{} } - output = &DescribeDataSourceOutput{} + output = &DescribeDataSetRefreshPropertiesOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSource API operation for Amazon QuickSight. +// DescribeDataSetRefreshProperties API operation for Amazon QuickSight. // -// Describes a data source. +// Describes the refresh properties of a dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSource for usage and error information. +// API operation DescribeDataSetRefreshProperties for usage and error information. 
// // Returned Error Types: // @@ -5046,88 +5350,189 @@ func (c *QuickSight) DescribeDataSourceRequest(input *DescribeDataSourceInput) ( // - InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// - ResourceNotFoundException +// One or more resources can't be found. +// // - ThrottlingException // Access is throttled. // -// - ResourceNotFoundException -// One or more resources can't be found. +// - LimitExceededException +// A limit is exceeded. +// +// - PreconditionNotMetException +// One or more preconditions aren't met. // // - InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource -func (c *QuickSight) DescribeDataSource(input *DescribeDataSourceInput) (*DescribeDataSourceOutput, error) { - req, out := c.DescribeDataSourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetRefreshProperties +func (c *QuickSight) DescribeDataSetRefreshProperties(input *DescribeDataSetRefreshPropertiesInput) (*DescribeDataSetRefreshPropertiesOutput, error) { + req, out := c.DescribeDataSetRefreshPropertiesRequest(input) return out, req.Send() } -// DescribeDataSourceWithContext is the same as DescribeDataSource with the addition of +// DescribeDataSetRefreshPropertiesWithContext is the same as DescribeDataSetRefreshProperties with the addition of // the ability to pass a context and additional request options. // -// See DescribeDataSource for details on how to use this API operation. +// See DescribeDataSetRefreshProperties for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *QuickSight) DescribeDataSourceWithContext(ctx aws.Context, input *DescribeDataSourceInput, opts ...request.Option) (*DescribeDataSourceOutput, error) { - req, out := c.DescribeDataSourceRequest(input) +func (c *QuickSight) DescribeDataSetRefreshPropertiesWithContext(ctx aws.Context, input *DescribeDataSetRefreshPropertiesInput, opts ...request.Option) (*DescribeDataSetRefreshPropertiesOutput, error) { + req, out := c.DescribeDataSetRefreshPropertiesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDataSourcePermissions = "DescribeDataSourcePermissions" +const opDescribeDataSource = "DescribeDataSource" -// DescribeDataSourcePermissionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSourcePermissions operation. The "output" return +// DescribeDataSourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSourcePermissions for more information on using the DescribeDataSourcePermissions +// See DescribeDataSource for more information on using the DescribeDataSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the DescribeDataSourcePermissionsRequest method. -// req, resp := client.DescribeDataSourcePermissionsRequest(params) +// // Example sending a request using the DescribeDataSourceRequest method. 
+// req, resp := client.DescribeDataSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions -func (c *QuickSight) DescribeDataSourcePermissionsRequest(input *DescribeDataSourcePermissionsInput) (req *request.Request, output *DescribeDataSourcePermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource +func (c *QuickSight) DescribeDataSourceRequest(input *DescribeDataSourceInput) (req *request.Request, output *DescribeDataSourceOutput) { op := &request.Operation{ - Name: opDescribeDataSourcePermissions, + Name: opDescribeDataSource, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", } if input == nil { - input = &DescribeDataSourcePermissionsInput{} + input = &DescribeDataSourceInput{} } - output = &DescribeDataSourcePermissionsOutput{} + output = &DescribeDataSourceOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSourcePermissions API operation for Amazon QuickSight. +// DescribeDataSource API operation for Amazon QuickSight. // -// Describes the resource permissions for a data source. +// Describes a data source. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSourcePermissions for usage and error information. +// API operation DescribeDataSource for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. 
Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ThrottlingException +// Access is throttled. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource +func (c *QuickSight) DescribeDataSource(input *DescribeDataSourceInput) (*DescribeDataSourceOutput, error) { + req, out := c.DescribeDataSourceRequest(input) + return out, req.Send() +} + +// DescribeDataSourceWithContext is the same as DescribeDataSource with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataSource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DescribeDataSourceWithContext(ctx aws.Context, input *DescribeDataSourceInput, opts ...request.Option) (*DescribeDataSourceOutput, error) { + req, out := c.DescribeDataSourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataSourcePermissions = "DescribeDataSourcePermissions" + +// DescribeDataSourcePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSourcePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataSourcePermissions for more information on using the DescribeDataSourcePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDataSourcePermissionsRequest method. +// req, resp := client.DescribeDataSourcePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions +func (c *QuickSight) DescribeDataSourcePermissionsRequest(input *DescribeDataSourcePermissionsInput) (req *request.Request, output *DescribeDataSourcePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeDataSourcePermissions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + } + + if input == nil { + input = &DescribeDataSourcePermissionsInput{} + } + + output = &DescribeDataSourcePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataSourcePermissions API operation for Amazon QuickSight. +// +// Describes the resource permissions for a data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation DescribeDataSourcePermissions for usage and error information. 
// // Returned Error Types: // @@ -6071,63 +6476,60 @@ func (c *QuickSight) DescribeNamespaceWithContext(ctx aws.Context, input *Descri return out, req.Send() } -const opDescribeTemplate = "DescribeTemplate" +const opDescribeRefreshSchedule = "DescribeRefreshSchedule" -// DescribeTemplateRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplate operation. The "output" return +// DescribeRefreshScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRefreshSchedule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTemplate for more information on using the DescribeTemplate +// See DescribeRefreshSchedule for more information on using the DescribeRefreshSchedule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the DescribeTemplateRequest method. -// req, resp := client.DescribeTemplateRequest(params) +// // Example sending a request using the DescribeRefreshScheduleRequest method. 
+// req, resp := client.DescribeRefreshScheduleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate -func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req *request.Request, output *DescribeTemplateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeRefreshSchedule +func (c *QuickSight) DescribeRefreshScheduleRequest(input *DescribeRefreshScheduleInput) (req *request.Request, output *DescribeRefreshScheduleOutput) { op := &request.Operation{ - Name: opDescribeTemplate, + Name: opDescribeRefreshSchedule, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}", } if input == nil { - input = &DescribeTemplateInput{} + input = &DescribeRefreshScheduleInput{} } - output = &DescribeTemplateOutput{} + output = &DescribeRefreshScheduleOutput{} req = c.newRequest(op, input, output) return } -// DescribeTemplate API operation for Amazon QuickSight. +// DescribeRefreshSchedule API operation for Amazon QuickSight. // -// Describes a template's metadata. +// Provides a summary of a refresh schedule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplate for usage and error information. +// API operation DescribeRefreshSchedule for usage and error information. // // Returned Error Types: // -// - InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// // - AccessDeniedException // You don't have access to this item. The provided credentials couldn't be // validated. 
You might not be authorized to carry out the request. Make sure @@ -6135,8 +6537,8 @@ func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req // your policies have the correct permissions, and that you are using the correct // credentials. // -// - ResourceExistsException -// The resource specified already exists. +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. // // - ResourceNotFoundException // One or more resources can't be found. @@ -6144,187 +6546,288 @@ func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req // - ThrottlingException // Access is throttled. // -// - ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// -// - UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. -// -// - InternalFailureException -// An internal failure occurred. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate -func (c *QuickSight) DescribeTemplate(input *DescribeTemplateInput) (*DescribeTemplateOutput, error) { - req, out := c.DescribeTemplateRequest(input) - return out, req.Send() -} - -// DescribeTemplateWithContext is the same as DescribeTemplate with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeTemplate for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *QuickSight) DescribeTemplateWithContext(ctx aws.Context, input *DescribeTemplateInput, opts ...request.Option) (*DescribeTemplateOutput, error) { - req, out := c.DescribeTemplateRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeTemplateAlias = "DescribeTemplateAlias" - -// DescribeTemplateAliasRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplateAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeTemplateAlias for more information on using the DescribeTemplateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeTemplateAliasRequest method. -// req, resp := client.DescribeTemplateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias -func (c *QuickSight) DescribeTemplateAliasRequest(input *DescribeTemplateAliasInput) (req *request.Request, output *DescribeTemplateAliasOutput) { - op := &request.Operation{ - Name: opDescribeTemplateAlias, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", - } - - if input == nil { - input = &DescribeTemplateAliasInput{} - } - - output = &DescribeTemplateAliasOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeTemplateAlias API operation for Amazon QuickSight. 
-// -// Describes the template alias for a template. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplateAlias for usage and error information. -// -// Returned Error Types: -// -// - ThrottlingException -// Access is throttled. -// -// - ResourceNotFoundException -// One or more resources can't be found. -// -// - UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// - LimitExceededException +// A limit is exceeded. // // - InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias -func (c *QuickSight) DescribeTemplateAlias(input *DescribeTemplateAliasInput) (*DescribeTemplateAliasOutput, error) { - req, out := c.DescribeTemplateAliasRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeRefreshSchedule +func (c *QuickSight) DescribeRefreshSchedule(input *DescribeRefreshScheduleInput) (*DescribeRefreshScheduleOutput, error) { + req, out := c.DescribeRefreshScheduleRequest(input) return out, req.Send() } -// DescribeTemplateAliasWithContext is the same as DescribeTemplateAlias with the addition of +// DescribeRefreshScheduleWithContext is the same as DescribeRefreshSchedule with the addition of // the ability to pass a context and additional request options. // -// See DescribeTemplateAlias for details on how to use this API operation. 
+// See DescribeRefreshSchedule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeTemplateAliasWithContext(ctx aws.Context, input *DescribeTemplateAliasInput, opts ...request.Option) (*DescribeTemplateAliasOutput, error) { - req, out := c.DescribeTemplateAliasRequest(input) +func (c *QuickSight) DescribeRefreshScheduleWithContext(ctx aws.Context, input *DescribeRefreshScheduleInput, opts ...request.Option) (*DescribeRefreshScheduleOutput, error) { + req, out := c.DescribeRefreshScheduleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeTemplateDefinition = "DescribeTemplateDefinition" +const opDescribeTemplate = "DescribeTemplate" -// DescribeTemplateDefinitionRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplateDefinition operation. The "output" return +// DescribeTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTemplateDefinition for more information on using the DescribeTemplateDefinition +// See DescribeTemplate for more information on using the DescribeTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// // Example sending a request using the DescribeTemplateDefinitionRequest method. -// req, resp := client.DescribeTemplateDefinitionRequest(params) +// // Example sending a request using the DescribeTemplateRequest method. +// req, resp := client.DescribeTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateDefinition -func (c *QuickSight) DescribeTemplateDefinitionRequest(input *DescribeTemplateDefinitionInput) (req *request.Request, output *DescribeTemplateDefinitionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate +func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req *request.Request, output *DescribeTemplateOutput) { op := &request.Operation{ - Name: opDescribeTemplateDefinition, + Name: opDescribeTemplate, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/definition", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", } if input == nil { - input = &DescribeTemplateDefinitionInput{} + input = &DescribeTemplateInput{} } - output = &DescribeTemplateDefinitionOutput{} + output = &DescribeTemplateOutput{} req = c.newRequest(op, input, output) return } -// DescribeTemplateDefinition API operation for Amazon QuickSight. -// -// Provides a detailed description of the definition of a template. +// DescribeTemplate API operation for Amazon QuickSight. // -// If you do not need to know details about the content of a template, for instance -// if you are trying to check the status of a recently created or updated template, -// use the DescribeTemplate (https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeTemplate.html) -// instead. +// Describes a template's metadata. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplateDefinition for usage and error information. +// API operation DescribeTemplate for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - ResourceExistsException +// The resource specified already exists. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// - UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate +func (c *QuickSight) DescribeTemplate(input *DescribeTemplateInput) (*DescribeTemplateOutput, error) { + req, out := c.DescribeTemplateRequest(input) + return out, req.Send() +} + +// DescribeTemplateWithContext is the same as DescribeTemplate with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DescribeTemplateWithContext(ctx aws.Context, input *DescribeTemplateInput, opts ...request.Option) (*DescribeTemplateOutput, error) { + req, out := c.DescribeTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTemplateAlias = "DescribeTemplateAlias" + +// DescribeTemplateAliasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplateAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTemplateAlias for more information on using the DescribeTemplateAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTemplateAliasRequest method. 
+// req, resp := client.DescribeTemplateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias +func (c *QuickSight) DescribeTemplateAliasRequest(input *DescribeTemplateAliasInput) (req *request.Request, output *DescribeTemplateAliasOutput) { + op := &request.Operation{ + Name: opDescribeTemplateAlias, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + } + + if input == nil { + input = &DescribeTemplateAliasInput{} + } + + output = &DescribeTemplateAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTemplateAlias API operation for Amazon QuickSight. +// +// Describes the template alias for a template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation DescribeTemplateAlias for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// Access is throttled. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// - InternalFailureException +// An internal failure occurred. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias +func (c *QuickSight) DescribeTemplateAlias(input *DescribeTemplateAliasInput) (*DescribeTemplateAliasOutput, error) { + req, out := c.DescribeTemplateAliasRequest(input) + return out, req.Send() +} + +// DescribeTemplateAliasWithContext is the same as DescribeTemplateAlias with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTemplateAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DescribeTemplateAliasWithContext(ctx aws.Context, input *DescribeTemplateAliasInput, opts ...request.Option) (*DescribeTemplateAliasOutput, error) { + req, out := c.DescribeTemplateAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTemplateDefinition = "DescribeTemplateDefinition" + +// DescribeTemplateDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplateDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTemplateDefinition for more information on using the DescribeTemplateDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DescribeTemplateDefinitionRequest method. +// req, resp := client.DescribeTemplateDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateDefinition +func (c *QuickSight) DescribeTemplateDefinitionRequest(input *DescribeTemplateDefinitionInput) (req *request.Request, output *DescribeTemplateDefinitionOutput) { + op := &request.Operation{ + Name: opDescribeTemplateDefinition, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/definition", + } + + if input == nil { + input = &DescribeTemplateDefinitionInput{} + } + + output = &DescribeTemplateDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTemplateDefinition API operation for Amazon QuickSight. +// +// Provides a detailed description of the definition of a template. +// +// If you do not need to know details about the content of a template, for instance +// if you are trying to check the status of a recently created or updated template, +// use the DescribeTemplate (https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeTemplate.html) +// instead. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation DescribeTemplateDefinition for usage and error information. // // Returned Error Types: // @@ -9146,6 +9649,104 @@ func (c *QuickSight) ListNamespacesPagesWithContext(ctx aws.Context, input *List return p.Err() } +const opListRefreshSchedules = "ListRefreshSchedules" + +// ListRefreshSchedulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRefreshSchedules operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRefreshSchedules for more information on using the ListRefreshSchedules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListRefreshSchedulesRequest method. +// req, resp := client.ListRefreshSchedulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListRefreshSchedules +func (c *QuickSight) ListRefreshSchedulesRequest(input *ListRefreshSchedulesInput) (req *request.Request, output *ListRefreshSchedulesOutput) { + op := &request.Operation{ + Name: opListRefreshSchedules, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules", + } + + if input == nil { + input = &ListRefreshSchedulesInput{} + } + + output = &ListRefreshSchedulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRefreshSchedules API operation for Amazon QuickSight. +// +// Lists the refresh schedules of a dataset. Each dataset can have up to 5 schedules. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation ListRefreshSchedules for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. 
The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListRefreshSchedules +func (c *QuickSight) ListRefreshSchedules(input *ListRefreshSchedulesInput) (*ListRefreshSchedulesOutput, error) { + req, out := c.ListRefreshSchedulesRequest(input) + return out, req.Send() +} + +// ListRefreshSchedulesWithContext is the same as ListRefreshSchedules with the addition of +// the ability to pass a context and additional request options. +// +// See ListRefreshSchedules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListRefreshSchedulesWithContext(ctx aws.Context, input *ListRefreshSchedulesInput, opts ...request.Option) (*ListRefreshSchedulesOutput, error) { + req, out := c.ListRefreshSchedulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -10329,6 +10930,110 @@ func (c *QuickSight) ListUsersWithContext(ctx aws.Context, input *ListUsersInput return out, req.Send() } +const opPutDataSetRefreshProperties = "PutDataSetRefreshProperties" + +// PutDataSetRefreshPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the PutDataSetRefreshProperties operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDataSetRefreshProperties for more information on using the PutDataSetRefreshProperties +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDataSetRefreshPropertiesRequest method. 
+// req, resp := client.PutDataSetRefreshPropertiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/PutDataSetRefreshProperties +func (c *QuickSight) PutDataSetRefreshPropertiesRequest(input *PutDataSetRefreshPropertiesInput) (req *request.Request, output *PutDataSetRefreshPropertiesOutput) { + op := &request.Operation{ + Name: opPutDataSetRefreshProperties, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties", + } + + if input == nil { + input = &PutDataSetRefreshPropertiesInput{} + } + + output = &PutDataSetRefreshPropertiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDataSetRefreshProperties API operation for Amazon QuickSight. +// +// Creates or updates the dataset refresh properties for the dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation PutDataSetRefreshProperties for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. 
+// +// - PreconditionNotMetException +// One or more preconditions aren't met. +// +// - ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/PutDataSetRefreshProperties +func (c *QuickSight) PutDataSetRefreshProperties(input *PutDataSetRefreshPropertiesInput) (*PutDataSetRefreshPropertiesOutput, error) { + req, out := c.PutDataSetRefreshPropertiesRequest(input) + return out, req.Send() +} + +// PutDataSetRefreshPropertiesWithContext is the same as PutDataSetRefreshProperties with the addition of +// the ability to pass a context and additional request options. +// +// See PutDataSetRefreshProperties for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) PutDataSetRefreshPropertiesWithContext(ctx aws.Context, input *PutDataSetRefreshPropertiesInput, opts ...request.Option) (*PutDataSetRefreshPropertiesOutput, error) { + req, out := c.PutDataSetRefreshPropertiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterUser = "RegisterUser" // RegisterUserRequest generates a "aws/request.Request" representing the @@ -13341,6 +14046,107 @@ func (c *QuickSight) UpdatePublicSharingSettingsWithContext(ctx aws.Context, inp return out, req.Send() } +const opUpdateRefreshSchedule = "UpdateRefreshSchedule" + +// UpdateRefreshScheduleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRefreshSchedule operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRefreshSchedule for more information on using the UpdateRefreshSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateRefreshScheduleRequest method. +// req, resp := client.UpdateRefreshScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateRefreshSchedule +func (c *QuickSight) UpdateRefreshScheduleRequest(input *UpdateRefreshScheduleInput) (req *request.Request, output *UpdateRefreshScheduleOutput) { + op := &request.Operation{ + Name: opUpdateRefreshSchedule, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules", + } + + if input == nil { + input = &UpdateRefreshScheduleInput{} + } + + output = &UpdateRefreshScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRefreshSchedule API operation for Amazon QuickSight. +// +// Updates a refresh schedule for a dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateRefreshSchedule for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. 
You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// credentials. +// +// - InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// - ResourceNotFoundException +// One or more resources can't be found. +// +// - ThrottlingException +// Access is throttled. +// +// - LimitExceededException +// A limit is exceeded. +// +// - PreconditionNotMetException +// One or more preconditions aren't met. +// +// - InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateRefreshSchedule +func (c *QuickSight) UpdateRefreshSchedule(input *UpdateRefreshScheduleInput) (*UpdateRefreshScheduleOutput, error) { + req, out := c.UpdateRefreshScheduleRequest(input) + return out, req.Send() +} + +// UpdateRefreshScheduleWithContext is the same as UpdateRefreshSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRefreshSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateRefreshScheduleWithContext(ctx aws.Context, input *UpdateRefreshScheduleInput, opts ...request.Option) (*UpdateRefreshScheduleOutput, error) { + req, out := c.UpdateRefreshScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUpdateTemplate = "UpdateTemplate" // UpdateTemplateRequest generates a "aws/request.Request" representing the @@ -24630,6 +25436,149 @@ func (s *CreateNamespaceOutput) SetStatus(v int64) *CreateNamespaceOutput { return s } +type CreateRefreshScheduleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The refresh schedule. + // + // Schedule is a required field + Schedule *RefreshSchedule `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateRefreshScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateRefreshScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateRefreshScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRefreshScheduleInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateRefreshScheduleInput) SetAwsAccountId(v string) *CreateRefreshScheduleInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *CreateRefreshScheduleInput) SetDataSetId(v string) *CreateRefreshScheduleInput { + s.DataSetId = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *CreateRefreshScheduleInput) SetSchedule(v *RefreshSchedule) *CreateRefreshScheduleInput { + s.Schedule = v + return s +} + +type CreateRefreshScheduleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the refresh schedule. + Arn *string `type:"string"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The ID of the refresh schedule. + ScheduleId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateRefreshScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateRefreshScheduleOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateRefreshScheduleOutput) SetArn(v string) *CreateRefreshScheduleOutput { + s.Arn = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateRefreshScheduleOutput) SetRequestId(v string) *CreateRefreshScheduleOutput { + s.RequestId = &v + return s +} + +// SetScheduleId sets the ScheduleId field's value. +func (s *CreateRefreshScheduleOutput) SetScheduleId(v string) *CreateRefreshScheduleOutput { + s.ScheduleId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateRefreshScheduleOutput) SetStatus(v int64) *CreateRefreshScheduleOutput { + s.Status = &v + return s +} + type CreateTemplateAliasInput struct { _ struct{} `type:"structure"` @@ -28866,6 +29815,58 @@ func (s *DataSetReference) SetDataSetPlaceholder(v string) *DataSetReference { return s } +// The refresh properties of a dataset. +type DataSetRefreshProperties struct { + _ struct{} `type:"structure"` + + // The refresh configuration for a dataset. + // + // RefreshConfiguration is a required field + RefreshConfiguration *RefreshConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataSetRefreshProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataSetRefreshProperties) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSetRefreshProperties) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSetRefreshProperties"} + if s.RefreshConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RefreshConfiguration")) + } + if s.RefreshConfiguration != nil { + if err := s.RefreshConfiguration.Validate(); err != nil { + invalidParams.AddNested("RefreshConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRefreshConfiguration sets the RefreshConfiguration field's value. +func (s *DataSetRefreshProperties) SetRefreshConfiguration(v *RefreshConfiguration) *DataSetRefreshProperties { + s.RefreshConfiguration = v + return s +} + // Dataset schema. type DataSetSchema struct { _ struct{} `type:"structure"` @@ -32012,6 +33013,112 @@ func (s *DeleteDataSetOutput) SetStatus(v int64) *DeleteDataSetOutput { return s } +type DeleteDataSetRefreshPropertiesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. 
+ // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataSetRefreshPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataSetRefreshPropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataSetRefreshPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataSetRefreshPropertiesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteDataSetRefreshPropertiesInput) SetAwsAccountId(v string) *DeleteDataSetRefreshPropertiesInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. 
+func (s *DeleteDataSetRefreshPropertiesInput) SetDataSetId(v string) *DeleteDataSetRefreshPropertiesInput { + s.DataSetId = &v + return s +} + +type DeleteDataSetRefreshPropertiesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataSetRefreshPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataSetRefreshPropertiesOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteDataSetRefreshPropertiesOutput) SetRequestId(v string) *DeleteDataSetRefreshPropertiesOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteDataSetRefreshPropertiesOutput) SetStatus(v int64) *DeleteDataSetRefreshPropertiesOutput { + s.Status = &v + return s +} + type DeleteDataSourceInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -32910,6 +34017,147 @@ func (s *DeleteNamespaceOutput) SetStatus(v int64) *DeleteNamespaceOutput { return s } +type DeleteRefreshScheduleInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The Amazon Web Services account ID. 
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The ID of the refresh schedule. + // + // ScheduleId is a required field + ScheduleId *string `location:"uri" locationName:"ScheduleId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRefreshScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRefreshScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteRefreshScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRefreshScheduleInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.ScheduleId == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduleId")) + } + if s.ScheduleId != nil && len(*s.ScheduleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteRefreshScheduleInput) SetAwsAccountId(v string) *DeleteRefreshScheduleInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *DeleteRefreshScheduleInput) SetDataSetId(v string) *DeleteRefreshScheduleInput { + s.DataSetId = &v + return s +} + +// SetScheduleId sets the ScheduleId field's value. +func (s *DeleteRefreshScheduleInput) SetScheduleId(v string) *DeleteRefreshScheduleInput { + s.ScheduleId = &v + return s +} + +type DeleteRefreshScheduleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the refresh schedule. + Arn *string `type:"string"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The ID of the refresh schedule. + ScheduleId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRefreshScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRefreshScheduleOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DeleteRefreshScheduleOutput) SetArn(v string) *DeleteRefreshScheduleOutput { + s.Arn = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteRefreshScheduleOutput) SetRequestId(v string) *DeleteRefreshScheduleOutput { + s.RequestId = &v + return s +} + +// SetScheduleId sets the ScheduleId field's value. +func (s *DeleteRefreshScheduleOutput) SetScheduleId(v string) *DeleteRefreshScheduleOutput { + s.ScheduleId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteRefreshScheduleOutput) SetStatus(v int64) *DeleteRefreshScheduleOutput { + s.Status = &v + return s +} + type DeleteTemplateAliasInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -35308,6 +36556,121 @@ func (s *DescribeDataSetPermissionsOutput) SetStatus(v int64) *DescribeDataSetPe return s } +type DescribeDataSetRefreshPropertiesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. 
+ // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataSetRefreshPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataSetRefreshPropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataSetRefreshPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSetRefreshPropertiesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeDataSetRefreshPropertiesInput) SetAwsAccountId(v string) *DescribeDataSetRefreshPropertiesInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. 
+func (s *DescribeDataSetRefreshPropertiesInput) SetDataSetId(v string) *DescribeDataSetRefreshPropertiesInput { + s.DataSetId = &v + return s +} + +type DescribeDataSetRefreshPropertiesOutput struct { + _ struct{} `type:"structure"` + + // The dataset refresh properties. + DataSetRefreshProperties *DataSetRefreshProperties `type:"structure"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataSetRefreshPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDataSetRefreshPropertiesOutput) GoString() string { + return s.String() +} + +// SetDataSetRefreshProperties sets the DataSetRefreshProperties field's value. +func (s *DescribeDataSetRefreshPropertiesOutput) SetDataSetRefreshProperties(v *DataSetRefreshProperties) *DescribeDataSetRefreshPropertiesOutput { + s.DataSetRefreshProperties = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DescribeDataSetRefreshPropertiesOutput) SetRequestId(v string) *DescribeDataSetRefreshPropertiesOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *DescribeDataSetRefreshPropertiesOutput) SetStatus(v int64) *DescribeDataSetRefreshPropertiesOutput { + s.Status = &v + return s +} + type DescribeDataSourceInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -36728,6 +38091,147 @@ func (s *DescribeNamespaceOutput) SetStatus(v int64) *DescribeNamespaceOutput { return s } +type DescribeRefreshScheduleInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The ID of the refresh schedule. + // + // ScheduleId is a required field + ScheduleId *string `location:"uri" locationName:"ScheduleId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeRefreshScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeRefreshScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeRefreshScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRefreshScheduleInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.ScheduleId == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduleId")) + } + if s.ScheduleId != nil && len(*s.ScheduleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeRefreshScheduleInput) SetAwsAccountId(v string) *DescribeRefreshScheduleInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *DescribeRefreshScheduleInput) SetDataSetId(v string) *DescribeRefreshScheduleInput { + s.DataSetId = &v + return s +} + +// SetScheduleId sets the ScheduleId field's value. +func (s *DescribeRefreshScheduleInput) SetScheduleId(v string) *DescribeRefreshScheduleInput { + s.ScheduleId = &v + return s +} + +type DescribeRefreshScheduleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the refresh schedule. + Arn *string `type:"string"` + + // The refresh schedule. + RefreshSchedule *RefreshSchedule `type:"structure"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeRefreshScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeRefreshScheduleOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeRefreshScheduleOutput) SetArn(v string) *DescribeRefreshScheduleOutput { + s.Arn = &v + return s +} + +// SetRefreshSchedule sets the RefreshSchedule field's value. +func (s *DescribeRefreshScheduleOutput) SetRefreshSchedule(v *RefreshSchedule) *DescribeRefreshScheduleOutput { + s.RefreshSchedule = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DescribeRefreshScheduleOutput) SetRequestId(v string) *DescribeRefreshScheduleOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeRefreshScheduleOutput) SetStatus(v int64) *DescribeRefreshScheduleOutput { + s.Status = &v + return s +} + type DescribeTemplateAliasInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -47426,6 +48930,58 @@ func (s *IdentityTypeNotSupportedException) RequestID() string { return s.RespMetadata.RequestID } +// The incremental refresh configuration for a dataset. +type IncrementalRefresh struct { + _ struct{} `type:"structure"` + + // The lookback window setup for an incremental refresh configuration. + // + // LookbackWindow is a required field + LookbackWindow *LookbackWindow `type:"structure" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IncrementalRefresh) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IncrementalRefresh) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IncrementalRefresh) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IncrementalRefresh"} + if s.LookbackWindow == nil { + invalidParams.Add(request.NewErrParamRequired("LookbackWindow")) + } + if s.LookbackWindow != nil { + if err := s.LookbackWindow.Validate(); err != nil { + invalidParams.AddNested("LookbackWindow", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLookbackWindow sets the LookbackWindow field's value. +func (s *IncrementalRefresh) SetLookbackWindow(v *LookbackWindow) *IncrementalRefresh { + s.LookbackWindow = v + return s +} + // Information about the SPICE ingestion for a dataset. type Ingestion struct { _ struct{} `type:"structure"` @@ -52575,6 +54131,121 @@ func (s *ListNamespacesOutput) SetStatus(v int64) *ListNamespacesOutput { return s } +type ListRefreshSchedulesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. 
+ // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRefreshSchedulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRefreshSchedulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRefreshSchedulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRefreshSchedulesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListRefreshSchedulesInput) SetAwsAccountId(v string) *ListRefreshSchedulesInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. 
+func (s *ListRefreshSchedulesInput) SetDataSetId(v string) *ListRefreshSchedulesInput { + s.DataSetId = &v + return s +} + +type ListRefreshSchedulesOutput struct { + _ struct{} `type:"structure"` + + // The list of refresh schedules for the dataset. + RefreshSchedules []*RefreshSchedule `type:"list"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRefreshSchedulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRefreshSchedulesOutput) GoString() string { + return s.String() +} + +// SetRefreshSchedules sets the RefreshSchedules field's value. +func (s *ListRefreshSchedulesOutput) SetRefreshSchedules(v []*RefreshSchedule) *ListRefreshSchedulesOutput { + s.RefreshSchedules = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *ListRefreshSchedulesOutput) SetRequestId(v string) *ListRefreshSchedulesOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListRefreshSchedulesOutput) SetStatus(v int64) *ListRefreshSchedulesOutput { + s.Status = &v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -54149,6 +55820,85 @@ func (s *LongFormatText) SetRichText(v string) *LongFormatText { return s } +// The lookback window setup of an incremental refresh configuration. 
+type LookbackWindow struct { + _ struct{} `type:"structure"` + + // The name of the lookback window column. + // + // ColumnName is a required field + ColumnName *string `type:"string" required:"true"` + + // The lookback window column size. + // + // Size is a required field + Size *int64 `min:"1" type:"long" required:"true"` + + // The size unit that is used for the lookback window column. Valid values for + // this structure are HOUR, DAY, and WEEK. + // + // SizeUnit is a required field + SizeUnit *string `type:"string" required:"true" enum:"LookbackWindowSizeUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LookbackWindow) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LookbackWindow) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LookbackWindow) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LookbackWindow"} + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + if s.Size != nil && *s.Size < 1 { + invalidParams.Add(request.NewErrParamMinValue("Size", 1)) + } + if s.SizeUnit == nil { + invalidParams.Add(request.NewErrParamRequired("SizeUnit")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumnName sets the ColumnName field's value. 
+func (s *LookbackWindow) SetColumnName(v string) *LookbackWindow { + s.ColumnName = &v + return s +} + +// SetSize sets the Size field's value. +func (s *LookbackWindow) SetSize(v int64) *LookbackWindow { + s.Size = &v + return s +} + +// SetSizeUnit sets the SizeUnit field's value. +func (s *LookbackWindow) SetSizeUnit(v string) *LookbackWindow { + s.SizeUnit = &v + return s +} + // Amazon S3 manifest file location. type ManifestFileLocation struct { _ struct{} `type:"structure"` @@ -60397,6 +62147,131 @@ func (s *ProjectOperation) SetProjectedColumns(v []*string) *ProjectOperation { return s } +type PutDataSetRefreshPropertiesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The dataset refresh properties. + // + // DataSetRefreshProperties is a required field + DataSetRefreshProperties *DataSetRefreshProperties `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataSetRefreshPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataSetRefreshPropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDataSetRefreshPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDataSetRefreshPropertiesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.DataSetRefreshProperties == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetRefreshProperties")) + } + if s.DataSetRefreshProperties != nil { + if err := s.DataSetRefreshProperties.Validate(); err != nil { + invalidParams.AddNested("DataSetRefreshProperties", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *PutDataSetRefreshPropertiesInput) SetAwsAccountId(v string) *PutDataSetRefreshPropertiesInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *PutDataSetRefreshPropertiesInput) SetDataSetId(v string) *PutDataSetRefreshPropertiesInput { + s.DataSetId = &v + return s +} + +// SetDataSetRefreshProperties sets the DataSetRefreshProperties field's value. +func (s *PutDataSetRefreshPropertiesInput) SetDataSetRefreshProperties(v *DataSetRefreshProperties) *PutDataSetRefreshPropertiesInput { + s.DataSetRefreshProperties = v + return s +} + +type PutDataSetRefreshPropertiesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataSetRefreshPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataSetRefreshPropertiesOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *PutDataSetRefreshPropertiesOutput) SetRequestId(v string) *PutDataSetRefreshPropertiesOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *PutDataSetRefreshPropertiesOutput) SetStatus(v int64) *PutDataSetRefreshPropertiesOutput { + s.Status = &v + return s +} + // Information about a queued dataset SPICE ingestion. type QueueInfo struct { _ struct{} `type:"structure"` @@ -61793,6 +63668,265 @@ func (s *ReferenceLineValueLabelConfiguration) SetRelativePosition(v string) *Re return s } +// The refresh configuration of a dataset. +type RefreshConfiguration struct { + _ struct{} `type:"structure"` + + // The incremental refresh for the dataset. + // + // IncrementalRefresh is a required field + IncrementalRefresh *IncrementalRefresh `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RefreshConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RefreshConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RefreshConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RefreshConfiguration"}
+ if s.IncrementalRefresh == nil {
+ invalidParams.Add(request.NewErrParamRequired("IncrementalRefresh"))
+ }
+ if s.IncrementalRefresh != nil {
+ if err := s.IncrementalRefresh.Validate(); err != nil {
+ invalidParams.AddNested("IncrementalRefresh", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIncrementalRefresh sets the IncrementalRefresh field's value.
+func (s *RefreshConfiguration) SetIncrementalRefresh(v *IncrementalRefresh) *RefreshConfiguration {
+ s.IncrementalRefresh = v
+ return s
+}
+
+// Specifies the interval between each scheduled refresh of a dataset.
+type RefreshFrequency struct {
+ _ struct{} `type:"structure"`
+
+ // The interval between scheduled refreshes. Valid values are as follows:
+ //
+ // * MINUTE15: The dataset refreshes every 15 minutes. This value is only
+ // supported for incremental refreshes. This interval can only be used for
+ // one schedule per dataset.
+ //
+ // * MINUTE30: The dataset refreshes every 30 minutes. This value is only
+ // supported for incremental refreshes. This interval can only be used for
+ // one schedule per dataset.
+ //
+ // * HOURLY: The dataset refreshes every hour. This interval can only be
+ // used for one schedule per dataset.
+ //
+ // * DAILY: The dataset refreshes every day.
+ //
+ // * WEEKLY: The dataset refreshes every week.
+ //
+ // * MONTHLY: The dataset refreshes every month.
+ //
+ // Interval is a required field
+ Interval *string `type:"string" required:"true" enum:"RefreshInterval"`
+
+ // The day of the week that you want to schedule the refresh on. This value
+ // is required for weekly and monthly refresh intervals.
+ RefreshOnDay *ScheduleRefreshOnEntity `type:"structure"`
+
+ // The time of day that you want the dataset to refresh. This value is expressed
+ // in HH:MM format. This field is not required for schedules that refresh hourly.
+ TimeOfTheDay *string `type:"string"`
+
+ // The timezone that you want the refresh schedule to use. The timezone ID must
+ // match a corresponding ID found on java.util.time.getAvailableIDs().
+ Timezone *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RefreshFrequency) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RefreshFrequency) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RefreshFrequency) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RefreshFrequency"}
+ if s.Interval == nil {
+ invalidParams.Add(request.NewErrParamRequired("Interval"))
+ }
+ if s.RefreshOnDay != nil {
+ if err := s.RefreshOnDay.Validate(); err != nil {
+ invalidParams.AddNested("RefreshOnDay", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInterval sets the Interval field's value.
+func (s *RefreshFrequency) SetInterval(v string) *RefreshFrequency { + s.Interval = &v + return s +} + +// SetRefreshOnDay sets the RefreshOnDay field's value. +func (s *RefreshFrequency) SetRefreshOnDay(v *ScheduleRefreshOnEntity) *RefreshFrequency { + s.RefreshOnDay = v + return s +} + +// SetTimeOfTheDay sets the TimeOfTheDay field's value. +func (s *RefreshFrequency) SetTimeOfTheDay(v string) *RefreshFrequency { + s.TimeOfTheDay = &v + return s +} + +// SetTimezone sets the Timezone field's value. +func (s *RefreshFrequency) SetTimezone(v string) *RefreshFrequency { + s.Timezone = &v + return s +} + +// The refresh schedule of a dataset. +type RefreshSchedule struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the refresh schedule. + Arn *string `type:"string"` + + // The type of refresh that a datset undergoes. Valid values are as follows: + // + // * FULL_REFRESH: A complete refresh of a dataset. + // + // * INCREMENTAL_REFRESH: A partial refresh of some rows of a dataset, based + // on the time window specified. + // + // For more information on full and incremental refreshes, see Refreshing SPICE + // data (https://docs.aws.amazon.com/quicksight/latest/user/refreshing-imported-data.html) + // in the Amazon QuickSight User Guide. + // + // RefreshType is a required field + RefreshType *string `type:"string" required:"true" enum:"IngestionType"` + + // The frequency for the refresh schedule. + // + // ScheduleFrequency is a required field + ScheduleFrequency *RefreshFrequency `type:"structure" required:"true"` + + // An identifier for the refresh schedule. + // + // ScheduleId is a required field + ScheduleId *string `type:"string" required:"true"` + + // Time after which the refresh schedule can be started, expressed in YYYY-MM-DDTHH:MM:SS + // format. + StartAfterDateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RefreshSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RefreshSchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RefreshSchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RefreshSchedule"} + if s.RefreshType == nil { + invalidParams.Add(request.NewErrParamRequired("RefreshType")) + } + if s.ScheduleFrequency == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduleFrequency")) + } + if s.ScheduleId == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduleId")) + } + if s.ScheduleFrequency != nil { + if err := s.ScheduleFrequency.Validate(); err != nil { + invalidParams.AddNested("ScheduleFrequency", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *RefreshSchedule) SetArn(v string) *RefreshSchedule { + s.Arn = &v + return s +} + +// SetRefreshType sets the RefreshType field's value. +func (s *RefreshSchedule) SetRefreshType(v string) *RefreshSchedule { + s.RefreshType = &v + return s +} + +// SetScheduleFrequency sets the ScheduleFrequency field's value. +func (s *RefreshSchedule) SetScheduleFrequency(v *RefreshFrequency) *RefreshSchedule { + s.ScheduleFrequency = v + return s +} + +// SetScheduleId sets the ScheduleId field's value. 
+func (s *RefreshSchedule) SetScheduleId(v string) *RefreshSchedule { + s.ScheduleId = &v + return s +} + +// SetStartAfterDateTime sets the StartAfterDateTime field's value. +func (s *RefreshSchedule) SetStartAfterDateTime(v time.Time) *RefreshSchedule { + s.StartAfterDateTime = &v + return s +} + type RegisterUserInput struct { _ struct{} `type:"structure"` @@ -63670,6 +65804,12 @@ type RowLevelPermissionTagConfiguration struct { // If disabled, the status is DISABLED. Status *string `type:"string" enum:"Status"` + // A list of tag configuration rules to apply to a dataset. All tag configurations + // have the OR condition. Tags within each tile will be joined (AND). At least + // one rule in this structure must have all tag values assigned to it to apply + // Row-level security (RLS) to the dataset. + TagRuleConfigurations [][]*string `min:"1" type:"list"` + // A set of rules associated with row-level security, such as the tag names // and columns that they are assigned to. // @@ -63698,6 +65838,9 @@ func (s RowLevelPermissionTagConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RowLevelPermissionTagConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RowLevelPermissionTagConfiguration"} + if s.TagRuleConfigurations != nil && len(s.TagRuleConfigurations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagRuleConfigurations", 1)) + } if s.TagRules == nil { invalidParams.Add(request.NewErrParamRequired("TagRules")) } @@ -63727,6 +65870,12 @@ func (s *RowLevelPermissionTagConfiguration) SetStatus(v string) *RowLevelPermis return s } +// SetTagRuleConfigurations sets the TagRuleConfigurations field's value. +func (s *RowLevelPermissionTagConfiguration) SetTagRuleConfigurations(v [][]*string) *RowLevelPermissionTagConfiguration { + s.TagRuleConfigurations = v + return s +} + // SetTagRules sets the TagRules field's value. 
func (s *RowLevelPermissionTagConfiguration) SetTagRules(v []*RowLevelPermissionTagRule) *RowLevelPermissionTagConfiguration { s.TagRules = v @@ -65006,6 +67155,60 @@ func (s *ScatterPlotVisual) SetVisualId(v string) *ScatterPlotVisual { return s } +// The refresh on entity for weekly or monthly schedules. +type ScheduleRefreshOnEntity struct { + _ struct{} `type:"structure"` + + // The day of the month that you want to schedule refresh on. + DayOfMonth *string `min:"1" type:"string"` + + // The day of the week that you want to schedule a refresh on. + DayOfWeek *string `type:"string" enum:"DayOfWeek"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScheduleRefreshOnEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScheduleRefreshOnEntity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduleRefreshOnEntity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduleRefreshOnEntity"} + if s.DayOfMonth != nil && len(*s.DayOfMonth) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DayOfMonth", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDayOfMonth sets the DayOfMonth field's value. +func (s *ScheduleRefreshOnEntity) SetDayOfMonth(v string) *ScheduleRefreshOnEntity { + s.DayOfMonth = &v + return s +} + +// SetDayOfWeek sets the DayOfWeek field's value. 
+func (s *ScheduleRefreshOnEntity) SetDayOfWeek(v string) *ScheduleRefreshOnEntity { + s.DayOfWeek = &v + return s +} + // The visual display options for a data zoom scroll bar. type ScrollBarOptions struct { _ struct{} `type:"structure"` @@ -78024,6 +80227,149 @@ func (s *UpdatePublicSharingSettingsOutput) SetStatus(v int64) *UpdatePublicShar return s } +type UpdateRefreshScheduleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The refresh schedule. + // + // Schedule is a required field + Schedule *RefreshSchedule `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateRefreshScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateRefreshScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateRefreshScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRefreshScheduleInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateRefreshScheduleInput) SetAwsAccountId(v string) *UpdateRefreshScheduleInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *UpdateRefreshScheduleInput) SetDataSetId(v string) *UpdateRefreshScheduleInput { + s.DataSetId = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *UpdateRefreshScheduleInput) SetSchedule(v *RefreshSchedule) *UpdateRefreshScheduleInput { + s.Schedule = v + return s +} + +type UpdateRefreshScheduleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the refresh schedule. + Arn *string `type:"string"` + + // The Amazon Web Services request ID for this operation. + RequestId *string `type:"string"` + + // The ID of the refresh schedule. + ScheduleId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateRefreshScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateRefreshScheduleOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateRefreshScheduleOutput) SetArn(v string) *UpdateRefreshScheduleOutput { + s.Arn = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *UpdateRefreshScheduleOutput) SetRequestId(v string) *UpdateRefreshScheduleOutput { + s.RequestId = &v + return s +} + +// SetScheduleId sets the ScheduleId field's value. +func (s *UpdateRefreshScheduleOutput) SetScheduleId(v string) *UpdateRefreshScheduleOutput { + s.ScheduleId = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *UpdateRefreshScheduleOutput) SetStatus(v int64) *UpdateRefreshScheduleOutput { + s.Status = &v + return s +} + type UpdateTemplateAliasInput struct { _ struct{} `type:"structure"` @@ -82734,6 +85080,42 @@ func DateAggregationFunction_Values() []string { } } +const ( + // DayOfWeekSunday is a DayOfWeek enum value + DayOfWeekSunday = "SUNDAY" + + // DayOfWeekMonday is a DayOfWeek enum value + DayOfWeekMonday = "MONDAY" + + // DayOfWeekTuesday is a DayOfWeek enum value + DayOfWeekTuesday = "TUESDAY" + + // DayOfWeekWednesday is a DayOfWeek enum value + DayOfWeekWednesday = "WEDNESDAY" + + // DayOfWeekThursday is a DayOfWeek enum value + DayOfWeekThursday = "THURSDAY" + + // DayOfWeekFriday is a DayOfWeek enum value + DayOfWeekFriday = "FRIDAY" + + // DayOfWeekSaturday is a DayOfWeek enum value + DayOfWeekSaturday = "SATURDAY" +) + +// DayOfWeek_Values returns all elements of the DayOfWeek enum +func DayOfWeek_Values() []string { + return []string{ + DayOfWeekSunday, + DayOfWeekMonday, + DayOfWeekTuesday, + DayOfWeekWednesday, + DayOfWeekThursday, + DayOfWeekFriday, + DayOfWeekSaturday, + } +} + const ( // EditionStandard is a Edition enum value EditionStandard = "STANDARD" @@ -83442,6 +85824,9 @@ const ( // IngestionErrorTypeCursorNotEnabled is a IngestionErrorType enum value IngestionErrorTypeCursorNotEnabled = "CURSOR_NOT_ENABLED" + + // IngestionErrorTypeDuplicateColumnNamesFound is a IngestionErrorType enum value + IngestionErrorTypeDuplicateColumnNamesFound = "DUPLICATE_COLUMN_NAMES_FOUND" ) // IngestionErrorType_Values returns all elements of the IngestionErrorType enum @@ -83491,6 +85876,7 @@ func IngestionErrorType_Values() []string { IngestionErrorTypePermissionNotFound, IngestionErrorTypeElasticsearchCursorNotEnabled, IngestionErrorTypeCursorNotEnabled, + IngestionErrorTypeDuplicateColumnNamesFound, } } @@ -83782,6 +86168,26 @@ func LineInterpolation_Values() []string { } } +const ( + // LookbackWindowSizeUnitHour is a LookbackWindowSizeUnit enum 
value + LookbackWindowSizeUnitHour = "HOUR" + + // LookbackWindowSizeUnitDay is a LookbackWindowSizeUnit enum value + LookbackWindowSizeUnitDay = "DAY" + + // LookbackWindowSizeUnitWeek is a LookbackWindowSizeUnit enum value + LookbackWindowSizeUnitWeek = "WEEK" +) + +// LookbackWindowSizeUnit_Values returns all elements of the LookbackWindowSizeUnit enum +func LookbackWindowSizeUnit_Values() []string { + return []string{ + LookbackWindowSizeUnitHour, + LookbackWindowSizeUnitDay, + LookbackWindowSizeUnitWeek, + } +} + const ( // MapZoomModeAuto is a MapZoomMode enum value MapZoomModeAuto = "AUTO" @@ -84278,6 +86684,38 @@ func ReferenceLineValueLabelRelativePosition_Values() []string { } } +const ( + // RefreshIntervalMinute15 is a RefreshInterval enum value + RefreshIntervalMinute15 = "MINUTE15" + + // RefreshIntervalMinute30 is a RefreshInterval enum value + RefreshIntervalMinute30 = "MINUTE30" + + // RefreshIntervalHourly is a RefreshInterval enum value + RefreshIntervalHourly = "HOURLY" + + // RefreshIntervalDaily is a RefreshInterval enum value + RefreshIntervalDaily = "DAILY" + + // RefreshIntervalWeekly is a RefreshInterval enum value + RefreshIntervalWeekly = "WEEKLY" + + // RefreshIntervalMonthly is a RefreshInterval enum value + RefreshIntervalMonthly = "MONTHLY" +) + +// RefreshInterval_Values returns all elements of the RefreshInterval enum +func RefreshInterval_Values() []string { + return []string{ + RefreshIntervalMinute15, + RefreshIntervalMinute30, + RefreshIntervalHourly, + RefreshIntervalDaily, + RefreshIntervalWeekly, + RefreshIntervalMonthly, + } +} + const ( // RelativeDateTypePrevious is a RelativeDateType enum value RelativeDateTypePrevious = "PREVIOUS" diff --git a/service/quicksight/quicksightiface/interface.go b/service/quicksight/quicksightiface/interface.go index 8ca27fb9415..414db5f771f 100644 --- a/service/quicksight/quicksightiface/interface.go +++ b/service/quicksight/quicksightiface/interface.go @@ -116,6 +116,10 @@ type 
QuickSightAPI interface { CreateNamespaceWithContext(aws.Context, *quicksight.CreateNamespaceInput, ...request.Option) (*quicksight.CreateNamespaceOutput, error) CreateNamespaceRequest(*quicksight.CreateNamespaceInput) (*request.Request, *quicksight.CreateNamespaceOutput) + CreateRefreshSchedule(*quicksight.CreateRefreshScheduleInput) (*quicksight.CreateRefreshScheduleOutput, error) + CreateRefreshScheduleWithContext(aws.Context, *quicksight.CreateRefreshScheduleInput, ...request.Option) (*quicksight.CreateRefreshScheduleOutput, error) + CreateRefreshScheduleRequest(*quicksight.CreateRefreshScheduleInput) (*request.Request, *quicksight.CreateRefreshScheduleOutput) + CreateTemplate(*quicksight.CreateTemplateInput) (*quicksight.CreateTemplateOutput, error) CreateTemplateWithContext(aws.Context, *quicksight.CreateTemplateInput, ...request.Option) (*quicksight.CreateTemplateOutput, error) CreateTemplateRequest(*quicksight.CreateTemplateInput) (*request.Request, *quicksight.CreateTemplateOutput) @@ -152,6 +156,10 @@ type QuickSightAPI interface { DeleteDataSetWithContext(aws.Context, *quicksight.DeleteDataSetInput, ...request.Option) (*quicksight.DeleteDataSetOutput, error) DeleteDataSetRequest(*quicksight.DeleteDataSetInput) (*request.Request, *quicksight.DeleteDataSetOutput) + DeleteDataSetRefreshProperties(*quicksight.DeleteDataSetRefreshPropertiesInput) (*quicksight.DeleteDataSetRefreshPropertiesOutput, error) + DeleteDataSetRefreshPropertiesWithContext(aws.Context, *quicksight.DeleteDataSetRefreshPropertiesInput, ...request.Option) (*quicksight.DeleteDataSetRefreshPropertiesOutput, error) + DeleteDataSetRefreshPropertiesRequest(*quicksight.DeleteDataSetRefreshPropertiesInput) (*request.Request, *quicksight.DeleteDataSetRefreshPropertiesOutput) + DeleteDataSource(*quicksight.DeleteDataSourceInput) (*quicksight.DeleteDataSourceOutput, error) DeleteDataSourceWithContext(aws.Context, *quicksight.DeleteDataSourceInput, ...request.Option) 
(*quicksight.DeleteDataSourceOutput, error) DeleteDataSourceRequest(*quicksight.DeleteDataSourceInput) (*request.Request, *quicksight.DeleteDataSourceOutput) @@ -180,6 +188,10 @@ type QuickSightAPI interface { DeleteNamespaceWithContext(aws.Context, *quicksight.DeleteNamespaceInput, ...request.Option) (*quicksight.DeleteNamespaceOutput, error) DeleteNamespaceRequest(*quicksight.DeleteNamespaceInput) (*request.Request, *quicksight.DeleteNamespaceOutput) + DeleteRefreshSchedule(*quicksight.DeleteRefreshScheduleInput) (*quicksight.DeleteRefreshScheduleOutput, error) + DeleteRefreshScheduleWithContext(aws.Context, *quicksight.DeleteRefreshScheduleInput, ...request.Option) (*quicksight.DeleteRefreshScheduleOutput, error) + DeleteRefreshScheduleRequest(*quicksight.DeleteRefreshScheduleInput) (*request.Request, *quicksight.DeleteRefreshScheduleOutput) + DeleteTemplate(*quicksight.DeleteTemplateInput) (*quicksight.DeleteTemplateOutput, error) DeleteTemplateWithContext(aws.Context, *quicksight.DeleteTemplateInput, ...request.Option) (*quicksight.DeleteTemplateOutput, error) DeleteTemplateRequest(*quicksight.DeleteTemplateInput) (*request.Request, *quicksight.DeleteTemplateOutput) @@ -248,6 +260,10 @@ type QuickSightAPI interface { DescribeDataSetPermissionsWithContext(aws.Context, *quicksight.DescribeDataSetPermissionsInput, ...request.Option) (*quicksight.DescribeDataSetPermissionsOutput, error) DescribeDataSetPermissionsRequest(*quicksight.DescribeDataSetPermissionsInput) (*request.Request, *quicksight.DescribeDataSetPermissionsOutput) + DescribeDataSetRefreshProperties(*quicksight.DescribeDataSetRefreshPropertiesInput) (*quicksight.DescribeDataSetRefreshPropertiesOutput, error) + DescribeDataSetRefreshPropertiesWithContext(aws.Context, *quicksight.DescribeDataSetRefreshPropertiesInput, ...request.Option) (*quicksight.DescribeDataSetRefreshPropertiesOutput, error) + DescribeDataSetRefreshPropertiesRequest(*quicksight.DescribeDataSetRefreshPropertiesInput) 
(*request.Request, *quicksight.DescribeDataSetRefreshPropertiesOutput) + DescribeDataSource(*quicksight.DescribeDataSourceInput) (*quicksight.DescribeDataSourceOutput, error) DescribeDataSourceWithContext(aws.Context, *quicksight.DescribeDataSourceInput, ...request.Option) (*quicksight.DescribeDataSourceOutput, error) DescribeDataSourceRequest(*quicksight.DescribeDataSourceInput) (*request.Request, *quicksight.DescribeDataSourceOutput) @@ -292,6 +308,10 @@ type QuickSightAPI interface { DescribeNamespaceWithContext(aws.Context, *quicksight.DescribeNamespaceInput, ...request.Option) (*quicksight.DescribeNamespaceOutput, error) DescribeNamespaceRequest(*quicksight.DescribeNamespaceInput) (*request.Request, *quicksight.DescribeNamespaceOutput) + DescribeRefreshSchedule(*quicksight.DescribeRefreshScheduleInput) (*quicksight.DescribeRefreshScheduleOutput, error) + DescribeRefreshScheduleWithContext(aws.Context, *quicksight.DescribeRefreshScheduleInput, ...request.Option) (*quicksight.DescribeRefreshScheduleOutput, error) + DescribeRefreshScheduleRequest(*quicksight.DescribeRefreshScheduleInput) (*request.Request, *quicksight.DescribeRefreshScheduleOutput) + DescribeTemplate(*quicksight.DescribeTemplateInput) (*quicksight.DescribeTemplateOutput, error) DescribeTemplateWithContext(aws.Context, *quicksight.DescribeTemplateInput, ...request.Option) (*quicksight.DescribeTemplateOutput, error) DescribeTemplateRequest(*quicksight.DescribeTemplateInput) (*request.Request, *quicksight.DescribeTemplateOutput) @@ -413,6 +433,10 @@ type QuickSightAPI interface { ListNamespacesPages(*quicksight.ListNamespacesInput, func(*quicksight.ListNamespacesOutput, bool) bool) error ListNamespacesPagesWithContext(aws.Context, *quicksight.ListNamespacesInput, func(*quicksight.ListNamespacesOutput, bool) bool, ...request.Option) error + ListRefreshSchedules(*quicksight.ListRefreshSchedulesInput) (*quicksight.ListRefreshSchedulesOutput, error) + ListRefreshSchedulesWithContext(aws.Context, 
*quicksight.ListRefreshSchedulesInput, ...request.Option) (*quicksight.ListRefreshSchedulesOutput, error) + ListRefreshSchedulesRequest(*quicksight.ListRefreshSchedulesInput) (*request.Request, *quicksight.ListRefreshSchedulesOutput) + ListTagsForResource(*quicksight.ListTagsForResourceInput) (*quicksight.ListTagsForResourceOutput, error) ListTagsForResourceWithContext(aws.Context, *quicksight.ListTagsForResourceInput, ...request.Option) (*quicksight.ListTagsForResourceOutput, error) ListTagsForResourceRequest(*quicksight.ListTagsForResourceInput) (*request.Request, *quicksight.ListTagsForResourceOutput) @@ -464,6 +488,10 @@ type QuickSightAPI interface { ListUsersWithContext(aws.Context, *quicksight.ListUsersInput, ...request.Option) (*quicksight.ListUsersOutput, error) ListUsersRequest(*quicksight.ListUsersInput) (*request.Request, *quicksight.ListUsersOutput) + PutDataSetRefreshProperties(*quicksight.PutDataSetRefreshPropertiesInput) (*quicksight.PutDataSetRefreshPropertiesOutput, error) + PutDataSetRefreshPropertiesWithContext(aws.Context, *quicksight.PutDataSetRefreshPropertiesInput, ...request.Option) (*quicksight.PutDataSetRefreshPropertiesOutput, error) + PutDataSetRefreshPropertiesRequest(*quicksight.PutDataSetRefreshPropertiesInput) (*request.Request, *quicksight.PutDataSetRefreshPropertiesOutput) + RegisterUser(*quicksight.RegisterUserInput) (*quicksight.RegisterUserOutput, error) RegisterUserWithContext(aws.Context, *quicksight.RegisterUserInput, ...request.Option) (*quicksight.RegisterUserOutput, error) RegisterUserRequest(*quicksight.RegisterUserInput) (*request.Request, *quicksight.RegisterUserOutput) @@ -584,6 +612,10 @@ type QuickSightAPI interface { UpdatePublicSharingSettingsWithContext(aws.Context, *quicksight.UpdatePublicSharingSettingsInput, ...request.Option) (*quicksight.UpdatePublicSharingSettingsOutput, error) UpdatePublicSharingSettingsRequest(*quicksight.UpdatePublicSharingSettingsInput) (*request.Request, 
*quicksight.UpdatePublicSharingSettingsOutput) + UpdateRefreshSchedule(*quicksight.UpdateRefreshScheduleInput) (*quicksight.UpdateRefreshScheduleOutput, error) + UpdateRefreshScheduleWithContext(aws.Context, *quicksight.UpdateRefreshScheduleInput, ...request.Option) (*quicksight.UpdateRefreshScheduleOutput, error) + UpdateRefreshScheduleRequest(*quicksight.UpdateRefreshScheduleInput) (*request.Request, *quicksight.UpdateRefreshScheduleOutput) + UpdateTemplate(*quicksight.UpdateTemplateInput) (*quicksight.UpdateTemplateOutput, error) UpdateTemplateWithContext(aws.Context, *quicksight.UpdateTemplateInput, ...request.Option) (*quicksight.UpdateTemplateOutput, error) UpdateTemplateRequest(*quicksight.UpdateTemplateInput) (*request.Request, *quicksight.UpdateTemplateOutput) diff --git a/service/redshiftdataapiservice/api.go b/service/redshiftdataapiservice/api.go index 3ab7d842054..88d5275708b 100644 --- a/service/redshiftdataapiservice/api.go +++ b/service/redshiftdataapiservice/api.go @@ -59,18 +59,27 @@ func (c *RedshiftDataAPIService) BatchExecuteStatementRequest(input *BatchExecut // (DML) or data definition language (DDL). Depending on the authorization method, // use one of the following combinations of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. 
+// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -354,18 +363,27 @@ func (c *RedshiftDataAPIService) DescribeTableRequest(input *DescribeTableInput) // the column list. 
Depending on the authorization method, use one of the following // combinations of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. +// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. 
For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -510,18 +528,27 @@ func (c *RedshiftDataAPIService) ExecuteStatementRequest(input *ExecuteStatement // Depending on the authorization method, use one of the following combinations // of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. +// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. 
When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -767,18 +794,27 @@ func (c *RedshiftDataAPIService) ListDatabasesRequest(input *ListDatabasesInput) // database list. Depending on the authorization method, use one of the following // combinations of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. 
Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. +// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. 
// // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -928,18 +964,27 @@ func (c *RedshiftDataAPIService) ListSchemasRequest(input *ListSchemasInput) (re // schema list. Depending on the authorization method, use one of the following // combinations of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. +// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. 
For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -1234,18 +1279,27 @@ func (c *RedshiftDataAPIService) ListTablesRequest(input *ListTablesInput) (req // to page through the table list. Depending on the authorization method, use // one of the following combinations of request parameters: // -// - Secrets Manager - when connecting to a cluster, specify the Amazon Resource -// Name (ARN) of the secret, the database name, and the cluster identifier -// that matches the cluster in the secret. When connecting to a serverless -// workgroup, specify the Amazon Resource Name (ARN) of the secret and the -// database name. -// -// - Temporary credentials - when connecting to a cluster, specify the cluster -// identifier, the database name, and the database user name. Also, permission -// to call the redshift:GetClusterCredentials operation is required. When -// connecting to a serverless workgroup, specify the workgroup name and database -// name. Also, permission to call the redshift-serverless:GetCredentials -// operation is required. 
+// - Secrets Manager - when connecting to a cluster, provide the secret-arn +// of a secret stored in Secrets Manager which has username and password. +// The specified secret contains credentials to connect to the database you +// specify. When you are connecting to a cluster, you also supply the database +// name, If you provide a cluster identifier (dbClusterIdentifier), it must +// match the cluster identifier stored in the secret. When you are connecting +// to a serverless workgroup, you also supply the database name. +// +// - Temporary credentials - when connecting to your data warehouse, choose +// one of the following options: When connecting to a serverless workgroup, +// specify the workgroup name and database name. The database user name is +// derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials +// operation is required. When connecting to a cluster as an IAM identity, +// specify the cluster identifier and the database name. The database user +// name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo +// has the database user name IAM:foo. Also, permission to call the redshift:GetClusterCredentialsWithIAM +// operation is required. When connecting to a cluster as a database user, +// specify the cluster identifier, the database name, and the database user +// name. Also, permission to call the redshift:GetClusterCredentials operation +// is required. // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) @@ -1493,7 +1547,7 @@ type BatchExecuteStatementInput struct { Database *string `type:"string" required:"true"` // The database user name. This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. 
+ // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The name or ARN of the secret that enables access to the database. This parameter @@ -2277,7 +2331,7 @@ type DescribeTableInput struct { Database *string `type:"string" required:"true"` // The database user name. This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. + // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The maximum number of tables to return in the response. If more tables exist @@ -2546,7 +2600,7 @@ type ExecuteStatementInput struct { Database *string `type:"string" required:"true"` // The database user name. This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. + // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The parameters for the SQL statement. @@ -3064,7 +3118,7 @@ type ListDatabasesInput struct { Database *string `type:"string" required:"true"` // The database user name. This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. + // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The maximum number of databases to return in the response. If more databases @@ -3228,7 +3282,7 @@ type ListSchemasInput struct { Database *string `type:"string" required:"true"` // The database user name. This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. + // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The maximum number of schemas to return in the response. If more schemas @@ -3551,7 +3605,7 @@ type ListTablesInput struct { Database *string `type:"string" required:"true"` // The database user name. 
This parameter is required when connecting to a cluster - // and authenticating using temporary credentials. + // as a database user and authenticating using temporary credentials. DbUser *string `type:"string"` // The maximum number of tables to return in the response. If more tables exist diff --git a/service/servicecatalog/api.go b/service/servicecatalog/api.go index 2412142205e..213926422f3 100644 --- a/service/servicecatalog/api.go +++ b/service/servicecatalog/api.go @@ -20962,6 +20962,8 @@ type ProvisioningArtifactProperties struct { // // * MARKETPLACE_CAR - Amazon Web Services Marketplace Clusters and Amazon // Web Services Resources + // + // * TERRAFORM_OPEN_SOURCE - Terraform open source configuration file Type *string `type:"string" enum:"ProvisioningArtifactType"` }