From aa9d06913127ee7e85c2a059db3425db49bd2d28 Mon Sep 17 00:00:00 2001
From: "Domene Esteban, Nil {DISB~SANT CUGAT DIA}"
Date: Thu, 19 Oct 2023 12:09:10 +0200
Subject: [PATCH] feat(firehose): Add DeliveryStream controller

Signed-off-by: Domene Esteban, Nil {DISB~SANT CUGAT DIA}
---
 apis/aws.go                                   |    2 +
 apis/firehose/generator-config.yaml           |   17 +
 apis/firehose/v1alpha1/custom_types.go        |   33 +
 apis/firehose/v1alpha1/zz_delivery_stream.go  |  143 +
 apis/firehose/v1alpha1/zz_doc.go              |   24 +
 apis/firehose/v1alpha1/zz_enums.go            |  226 ++
 .../v1alpha1/zz_generated.deepcopy.go         | 3194 +++++++++++++++++
 .../firehose/v1alpha1/zz_generated.managed.go |   97 +
 .../v1alpha1/zz_generated.managedlist.go      |   30 +
 .../v1alpha1/zz_generated.resolvers.go        |   53 +
 .../firehose/v1alpha1/zz_groupversion_info.go |   41 +
 apis/firehose/v1alpha1/zz_types.go            | 1118 ++++++
 examples/firehose/deliverystream.yaml         |   14 +
 ...ose.aws.crossplane.io_deliverystreams.yaml | 1688 +++++++++
 pkg/controller/aws.go                         |    2 +
 .../firehose/deliverystream/setup.go          |  129 +
 .../firehose/deliverystream/zz_controller.go  |  215 ++
 .../firehose/deliverystream/zz_conversions.go | 1470 ++++++++
 pkg/controller/firehose/setup.go              |   33 +
 19 files changed, 8529 insertions(+)
 create mode 100644 apis/firehose/generator-config.yaml
 create mode 100644 apis/firehose/v1alpha1/custom_types.go
 create mode 100644 apis/firehose/v1alpha1/zz_delivery_stream.go
 create mode 100644 apis/firehose/v1alpha1/zz_doc.go
 create mode 100644 apis/firehose/v1alpha1/zz_enums.go
 create mode 100644 apis/firehose/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 apis/firehose/v1alpha1/zz_generated.managed.go
 create mode 100644 apis/firehose/v1alpha1/zz_generated.managedlist.go
 create mode 100644 apis/firehose/v1alpha1/zz_generated.resolvers.go
 create mode 100644 apis/firehose/v1alpha1/zz_groupversion_info.go
 create mode 100644 apis/firehose/v1alpha1/zz_types.go
 create mode 100644 examples/firehose/deliverystream.yaml
 create mode 100644 package/crds/firehose.aws.crossplane.io_deliverystreams.yaml
 create mode 100644 pkg/controller/firehose/deliverystream/setup.go
 create mode 100644 pkg/controller/firehose/deliverystream/zz_controller.go
 create mode 100644 pkg/controller/firehose/deliverystream/zz_conversions.go
 create mode 100644 pkg/controller/firehose/setup.go

diff --git a/apis/aws.go b/apis/aws.go
index 44d4847561..897fafd4a9 100644
--- a/apis/aws.go
+++ b/apis/aws.go
@@ -58,6 +58,7 @@ import (
 	elbv2manualv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/elbv2/manualv1alpha1"
 	elbv2v1alpha1 "github.com/crossplane-contrib/provider-aws/apis/elbv2/v1alpha1"
 	emrcontainersv1alpah1 "github.com/crossplane-contrib/provider-aws/apis/emrcontainers/v1alpha1"
+	firehosev1alpha1 "github.com/crossplane-contrib/provider-aws/apis/firehose/v1alpha1"
 	globalacceleratorv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/globalaccelerator/v1alpha1"
 	gluev1alpha1 "github.com/crossplane-contrib/provider-aws/apis/glue/v1alpha1"
 	iamv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/iam/v1alpha1"
@@ -171,6 +172,7 @@ func init() {
 		emrcontainersv1alpah1.SchemeBuilder.AddToScheme,
 		autoscalingv1beta1.SchemeBuilder.AddToScheme,
 		s3control.SchemeBuilder.AddToScheme,
+		firehosev1alpha1.SchemeBuilder.AddToScheme,
 	)
 }

diff --git a/apis/firehose/generator-config.yaml b/apis/firehose/generator-config.yaml
new file mode 100644
index 0000000000..3e23e1b7de
--- /dev/null
+++ b/apis/firehose/generator-config.yaml
@@ -0,0 +1,17 @@
+resources:
+
DeliveryStream: + fields: + DeliveryStreamStatus: + is_read_only: true + from: + operation: DescribeDeliveryStream + path: DeliveryStreamDescription.DeliveryStreamStatus + DeliveryStreamARN: + is_read_only: true + from: + operation: DescribeDeliveryStream + path: DeliveryStreamDescription.DeliveryStreamARN + exceptions: + errors: + 404: + code: ResourceNotFoundException diff --git a/apis/firehose/v1alpha1/custom_types.go b/apis/firehose/v1alpha1/custom_types.go new file mode 100644 index 0000000000..0bb2b7e729 --- /dev/null +++ b/apis/firehose/v1alpha1/custom_types.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// CustomDeliveryStreamParameters contains the additional fields for DeliveryStreamParameters. +type CustomDeliveryStreamParameters struct { + // +optional + // +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/kms/v1alpha1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane-contrib/provider-aws/apis/kms/v1alpha1.KMSKeyARN() + KMSKeyARN *string `json:"kmsKeyARN,omitempty"` + + KMSKeyARNRef *xpv1.Reference `json:"kmsKeyARNRef,omitempty"` + + KMSKeyARNSelector *xpv1.Selector `json:"kmsKeyARNSelector,omitempty"` +} diff --git a/apis/firehose/v1alpha1/zz_delivery_stream.go b/apis/firehose/v1alpha1/zz_delivery_stream.go new file mode 100644 index 0000000000..20781532d8 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_delivery_stream.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// DeliveryStreamParameters defines the desired state of DeliveryStream +type DeliveryStreamParameters struct { + // Region is which region the DeliveryStream will be created. + // +kubebuilder:validation:Required + Region string `json:"region"` + // The destination in the Serverless offering for Amazon OpenSearch Service. + // You can specify only one destination. + AmazonOpenSearchServerlessDestinationConfiguration *AmazonOpenSearchServerlessDestinationConfiguration `json:"amazonOpenSearchServerlessDestinationConfiguration,omitempty"` + // The destination in Amazon OpenSearch Service. 
You can specify only one destination. + AmazonopensearchserviceDestinationConfiguration *AmazonopensearchserviceDestinationConfiguration `json:"amazonopensearchserviceDestinationConfiguration,omitempty"` + // Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed + // for Server-Side Encryption (SSE). + DeliveryStreamEncryptionConfigurationInput *DeliveryStreamEncryptionConfigurationInput `json:"deliveryStreamEncryptionConfigurationInput,omitempty"` + // The name of the delivery stream. This name must be unique per Amazon Web + // Services account in the same Amazon Web Services Region. If the delivery + // streams are in different accounts or different Regions, you can have multiple + // delivery streams with the same name. + // +kubebuilder:validation:Required + DeliveryStreamName *string `json:"deliveryStreamName"` + // The delivery stream type. This parameter can be one of the following values: + // + // * DirectPut: Provider applications access the delivery stream directly. + // + // * KinesisStreamAsSource: The delivery stream uses a Kinesis data stream + // as a source. + DeliveryStreamType *string `json:"deliveryStreamType,omitempty"` + // The destination in Amazon ES. You can specify only one destination. + ElasticsearchDestinationConfiguration *ElasticsearchDestinationConfiguration `json:"elasticsearchDestinationConfiguration,omitempty"` + // The destination in Amazon S3. You can specify only one destination. + ExtendedS3DestinationConfiguration *ExtendedS3DestinationConfiguration `json:"extendedS3DestinationConfiguration,omitempty"` + // Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint + // destination. You can specify only one destination. + HTTPEndpointDestinationConfiguration *HTTPEndpointDestinationConfiguration `json:"httpEndpointDestinationConfiguration,omitempty"` + // When a Kinesis data stream is used as the source for the delivery stream, + // a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon + // Resource Name (ARN) and the role ARN for the source stream. + KinesisStreamSourceConfiguration *KinesisStreamSourceConfiguration `json:"kinesisStreamSourceConfiguration,omitempty"` + // The destination in Amazon Redshift. You can specify only one destination. + RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `json:"redshiftDestinationConfiguration,omitempty"` + // [Deprecated] The destination in Amazon S3. You can specify only one destination. + S3DestinationConfiguration *S3DestinationConfiguration `json:"s3DestinationConfiguration,omitempty"` + // The destination in Splunk. You can specify only one destination. + SplunkDestinationConfiguration *SplunkDestinationConfiguration `json:"splunkDestinationConfiguration,omitempty"` + // A set of tags to assign to the delivery stream. A tag is a key-value pair + // that you can define and assign to Amazon Web Services resources. Tags are + // metadata. For example, you can add friendly names and descriptions or other + // types of information that can help you distinguish the delivery stream. For + // more information about tags, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + // in the Amazon Web Services Billing and Cost Management User Guide. + // + // You can specify up to 50 tags when creating a delivery stream. 
+ Tags []*Tag `json:"tags,omitempty"` + CustomDeliveryStreamParameters `json:",inline"` +} + +// DeliveryStreamSpec defines the desired state of DeliveryStream +type DeliveryStreamSpec struct { + xpv1.ResourceSpec `json:",inline"` + ForProvider DeliveryStreamParameters `json:"forProvider"` +} + +// DeliveryStreamObservation defines the observed state of DeliveryStream +type DeliveryStreamObservation struct { + // The Amazon Resource Name (ARN) of the delivery stream. For more information, + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + DeliveryStreamARN *string `json:"deliveryStreamARN,omitempty"` + // The status of the delivery stream. If the status of a delivery stream is + // CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream + // again on it. However, you can invoke the DeleteDeliveryStream operation to + // delete it. + DeliveryStreamStatus *string `json:"deliveryStreamStatus,omitempty"` +} + +// DeliveryStreamStatus defines the observed state of DeliveryStream. +type DeliveryStreamStatus struct { + xpv1.ResourceStatus `json:",inline"` + AtProvider DeliveryStreamObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeliveryStream is the Schema for the DeliveryStreams API +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DeliveryStream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeliveryStreamSpec `json:"spec"` + Status DeliveryStreamStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeliveryStreamList contains a list of DeliveryStreams +type DeliveryStreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeliveryStream `json:"items"` +} + +// Repository type metadata. +var ( + DeliveryStreamKind = "DeliveryStream" + DeliveryStreamGroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeliveryStreamKind}.String() + DeliveryStreamKindAPIVersion = DeliveryStreamKind + "." + GroupVersion.String() + DeliveryStreamGroupVersionKind = GroupVersion.WithKind(DeliveryStreamKind) +) + +func init() { + SchemeBuilder.Register(&DeliveryStream{}, &DeliveryStreamList{}) +} diff --git a/apis/firehose/v1alpha1/zz_doc.go b/apis/firehose/v1alpha1/zz_doc.go new file mode 100644 index 0000000000..02212d37f4 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// Package v1alpha1 is the v1alpha1 version of the firehose.aws.crossplane.io API. +// +groupName=firehose.aws.crossplane.io +// +versionName=v1alpha1 + +package v1alpha1 diff --git a/apis/firehose/v1alpha1/zz_enums.go b/apis/firehose/v1alpha1/zz_enums.go new file mode 100644 index 0000000000..9768136313 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_enums.go @@ -0,0 +1,226 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +type AmazonOpenSearchServerlessS3BackupMode string + +const ( + AmazonOpenSearchServerlessS3BackupMode_FailedDocumentsOnly AmazonOpenSearchServerlessS3BackupMode = "FailedDocumentsOnly" + AmazonOpenSearchServerlessS3BackupMode_AllDocuments AmazonOpenSearchServerlessS3BackupMode = "AllDocuments" +) + +type AmazonopensearchserviceIndexRotationPeriod string + +const ( + AmazonopensearchserviceIndexRotationPeriod_NoRotation AmazonopensearchserviceIndexRotationPeriod = "NoRotation" + AmazonopensearchserviceIndexRotationPeriod_OneHour AmazonopensearchserviceIndexRotationPeriod = "OneHour" + AmazonopensearchserviceIndexRotationPeriod_OneDay AmazonopensearchserviceIndexRotationPeriod = "OneDay" + AmazonopensearchserviceIndexRotationPeriod_OneWeek AmazonopensearchserviceIndexRotationPeriod = "OneWeek" + AmazonopensearchserviceIndexRotationPeriod_OneMonth AmazonopensearchserviceIndexRotationPeriod = "OneMonth" +) + +type AmazonopensearchserviceS3BackupMode string + +const ( + AmazonopensearchserviceS3BackupMode_FailedDocumentsOnly AmazonopensearchserviceS3BackupMode = "FailedDocumentsOnly" + AmazonopensearchserviceS3BackupMode_AllDocuments AmazonopensearchserviceS3BackupMode = "AllDocuments" +) + +type CompressionFormat string + +const ( + CompressionFormat_UNCOMPRESSED CompressionFormat = "UNCOMPRESSED" + CompressionFormat_GZIP CompressionFormat = "GZIP" + CompressionFormat_ZIP CompressionFormat = "ZIP" + CompressionFormat_Snappy CompressionFormat = "Snappy" + CompressionFormat_HADOOP_SNAPPY CompressionFormat = "HADOOP_SNAPPY" +) + +type ContentEncoding string + +const ( + ContentEncoding_NONE ContentEncoding = "NONE" + ContentEncoding_GZIP ContentEncoding = "GZIP" +) + +type DeliveryStreamEncryptionStatus string + +const ( + DeliveryStreamEncryptionStatus_ENABLED DeliveryStreamEncryptionStatus = "ENABLED" + DeliveryStreamEncryptionStatus_ENABLING DeliveryStreamEncryptionStatus = "ENABLING" + DeliveryStreamEncryptionStatus_ENABLING_FAILED DeliveryStreamEncryptionStatus = "ENABLING_FAILED" + DeliveryStreamEncryptionStatus_DISABLED DeliveryStreamEncryptionStatus = "DISABLED" + DeliveryStreamEncryptionStatus_DISABLING DeliveryStreamEncryptionStatus = "DISABLING" + DeliveryStreamEncryptionStatus_DISABLING_FAILED DeliveryStreamEncryptionStatus = "DISABLING_FAILED" +) + +type 
DeliveryStreamFailureType string + +const ( + DeliveryStreamFailureType_RETIRE_KMS_GRANT_FAILED DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" + DeliveryStreamFailureType_CREATE_KMS_GRANT_FAILED DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" + DeliveryStreamFailureType_KMS_ACCESS_DENIED DeliveryStreamFailureType = "KMS_ACCESS_DENIED" + DeliveryStreamFailureType_DISABLED_KMS_KEY DeliveryStreamFailureType = "DISABLED_KMS_KEY" + DeliveryStreamFailureType_INVALID_KMS_KEY DeliveryStreamFailureType = "INVALID_KMS_KEY" + DeliveryStreamFailureType_KMS_KEY_NOT_FOUND DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" + DeliveryStreamFailureType_KMS_OPT_IN_REQUIRED DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" + DeliveryStreamFailureType_CREATE_ENI_FAILED DeliveryStreamFailureType = "CREATE_ENI_FAILED" + DeliveryStreamFailureType_DELETE_ENI_FAILED DeliveryStreamFailureType = "DELETE_ENI_FAILED" + DeliveryStreamFailureType_SUBNET_NOT_FOUND DeliveryStreamFailureType = "SUBNET_NOT_FOUND" + DeliveryStreamFailureType_SECURITY_GROUP_NOT_FOUND DeliveryStreamFailureType = "SECURITY_GROUP_NOT_FOUND" + DeliveryStreamFailureType_ENI_ACCESS_DENIED DeliveryStreamFailureType = "ENI_ACCESS_DENIED" + DeliveryStreamFailureType_SUBNET_ACCESS_DENIED DeliveryStreamFailureType = "SUBNET_ACCESS_DENIED" + DeliveryStreamFailureType_SECURITY_GROUP_ACCESS_DENIED DeliveryStreamFailureType = "SECURITY_GROUP_ACCESS_DENIED" + DeliveryStreamFailureType_UNKNOWN_ERROR DeliveryStreamFailureType = "UNKNOWN_ERROR" +) + +type DeliveryStreamStatus_SDK string + +const ( + DeliveryStreamStatus_SDK_CREATING DeliveryStreamStatus_SDK = "CREATING" + DeliveryStreamStatus_SDK_CREATING_FAILED DeliveryStreamStatus_SDK = "CREATING_FAILED" + DeliveryStreamStatus_SDK_DELETING DeliveryStreamStatus_SDK = "DELETING" + DeliveryStreamStatus_SDK_DELETING_FAILED DeliveryStreamStatus_SDK = "DELETING_FAILED" + DeliveryStreamStatus_SDK_ACTIVE DeliveryStreamStatus_SDK = "ACTIVE" +) + +type DeliveryStreamType string + +const ( + DeliveryStreamType_DirectPut DeliveryStreamType = "DirectPut" + DeliveryStreamType_KinesisStreamAsSource DeliveryStreamType = "KinesisStreamAsSource" +) + +type ElasticsearchIndexRotationPeriod string + +const ( + ElasticsearchIndexRotationPeriod_NoRotation ElasticsearchIndexRotationPeriod = "NoRotation" + ElasticsearchIndexRotationPeriod_OneHour ElasticsearchIndexRotationPeriod = "OneHour" + ElasticsearchIndexRotationPeriod_OneDay ElasticsearchIndexRotationPeriod = "OneDay" + ElasticsearchIndexRotationPeriod_OneWeek ElasticsearchIndexRotationPeriod = "OneWeek" + ElasticsearchIndexRotationPeriod_OneMonth ElasticsearchIndexRotationPeriod = "OneMonth" +) + +type ElasticsearchS3BackupMode string + +const ( + ElasticsearchS3BackupMode_FailedDocumentsOnly ElasticsearchS3BackupMode = "FailedDocumentsOnly" + ElasticsearchS3BackupMode_AllDocuments ElasticsearchS3BackupMode = "AllDocuments" +) + +type HECEndpointType string + +const ( + HECEndpointType_Raw HECEndpointType = "Raw" + HECEndpointType_Event HECEndpointType = "Event" +) + +type HTTPEndpointS3BackupMode string + +const ( + HTTPEndpointS3BackupMode_FailedDataOnly HTTPEndpointS3BackupMode = "FailedDataOnly" + HTTPEndpointS3BackupMode_AllData HTTPEndpointS3BackupMode = "AllData" +) + +type KeyType string + +const ( + KeyType_AWS_OWNED_CMK KeyType = "AWS_OWNED_CMK" + KeyType_CUSTOMER_MANAGED_CMK KeyType = "CUSTOMER_MANAGED_CMK" +) + +type NoEncryptionConfig string + +const ( + NoEncryptionConfig_NoEncryption NoEncryptionConfig = "NoEncryption" +) + +type OrcCompression 
string + +const ( + OrcCompression_NONE OrcCompression = "NONE" + OrcCompression_ZLIB OrcCompression = "ZLIB" + OrcCompression_SNAPPY OrcCompression = "SNAPPY" +) + +type OrcFormatVersion string + +const ( + OrcFormatVersion_V0_11 OrcFormatVersion = "V0_11" + OrcFormatVersion_V0_12 OrcFormatVersion = "V0_12" +) + +type ParquetCompression string + +const ( + ParquetCompression_UNCOMPRESSED ParquetCompression = "UNCOMPRESSED" + ParquetCompression_GZIP ParquetCompression = "GZIP" + ParquetCompression_SNAPPY ParquetCompression = "SNAPPY" +) + +type ParquetWriterVersion string + +const ( + ParquetWriterVersion_V1 ParquetWriterVersion = "V1" + ParquetWriterVersion_V2 ParquetWriterVersion = "V2" +) + +type ProcessorParameterName string + +const ( + ProcessorParameterName_LambdaArn ProcessorParameterName = "LambdaArn" + ProcessorParameterName_NumberOfRetries ProcessorParameterName = "NumberOfRetries" + ProcessorParameterName_MetadataExtractionQuery ProcessorParameterName = "MetadataExtractionQuery" + ProcessorParameterName_JsonParsingEngine ProcessorParameterName = "JsonParsingEngine" + ProcessorParameterName_RoleArn ProcessorParameterName = "RoleArn" + ProcessorParameterName_BufferSizeInMBs ProcessorParameterName = "BufferSizeInMBs" + ProcessorParameterName_BufferIntervalInSeconds ProcessorParameterName = "BufferIntervalInSeconds" + ProcessorParameterName_SubRecordType ProcessorParameterName = "SubRecordType" + ProcessorParameterName_Delimiter ProcessorParameterName = "Delimiter" +) + +type ProcessorType string + +const ( + ProcessorType_RecordDeAggregation ProcessorType = "RecordDeAggregation" + ProcessorType_Lambda ProcessorType = "Lambda" + ProcessorType_MetadataExtraction ProcessorType = "MetadataExtraction" + ProcessorType_AppendDelimiterToRecord ProcessorType = "AppendDelimiterToRecord" +) + +type RedshiftS3BackupMode string + +const ( + RedshiftS3BackupMode_Disabled RedshiftS3BackupMode = "Disabled" + RedshiftS3BackupMode_Enabled RedshiftS3BackupMode = "Enabled" +) + +type S3BackupMode string + +const ( + S3BackupMode_Disabled S3BackupMode = "Disabled" + S3BackupMode_Enabled S3BackupMode = "Enabled" +) + +type SplunkS3BackupMode string + +const ( + SplunkS3BackupMode_FailedEventsOnly SplunkS3BackupMode = "FailedEventsOnly" + SplunkS3BackupMode_AllEvents SplunkS3BackupMode = "AllEvents" +) diff --git a/apis/firehose/v1alpha1/zz_generated.deepcopy.go b/apis/firehose/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..78bbf69716 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3194 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonOpenSearchServerlessBufferingHints) DeepCopyInto(out *AmazonOpenSearchServerlessBufferingHints) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(int64) + **out = **in + } + if in.SizeInMBs != nil { + in, out := &in.SizeInMBs, &out.SizeInMBs + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonOpenSearchServerlessBufferingHints. +func (in *AmazonOpenSearchServerlessBufferingHints) DeepCopy() *AmazonOpenSearchServerlessBufferingHints { + if in == nil { + return nil + } + out := new(AmazonOpenSearchServerlessBufferingHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonOpenSearchServerlessDestinationConfiguration) DeepCopyInto(out *AmazonOpenSearchServerlessDestinationConfiguration) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonOpenSearchServerlessBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonOpenSearchServerlessRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonOpenSearchServerlessDestinationConfiguration. +func (in *AmazonOpenSearchServerlessDestinationConfiguration) DeepCopy() *AmazonOpenSearchServerlessDestinationConfiguration { + if in == nil { + return nil + } + out := new(AmazonOpenSearchServerlessDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonOpenSearchServerlessDestinationDescription) DeepCopyInto(out *AmazonOpenSearchServerlessDestinationDescription) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonOpenSearchServerlessBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonOpenSearchServerlessRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.VPCConfigurationDescription != nil { + in, out := &in.VPCConfigurationDescription, &out.VPCConfigurationDescription + *out = new(VPCConfigurationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonOpenSearchServerlessDestinationDescription. +func (in *AmazonOpenSearchServerlessDestinationDescription) DeepCopy() *AmazonOpenSearchServerlessDestinationDescription { + if in == nil { + return nil + } + out := new(AmazonOpenSearchServerlessDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonOpenSearchServerlessDestinationUpdate) DeepCopyInto(out *AmazonOpenSearchServerlessDestinationUpdate) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonOpenSearchServerlessBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonOpenSearchServerlessRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonOpenSearchServerlessDestinationUpdate. 
+func (in *AmazonOpenSearchServerlessDestinationUpdate) DeepCopy() *AmazonOpenSearchServerlessDestinationUpdate { + if in == nil { + return nil + } + out := new(AmazonOpenSearchServerlessDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonOpenSearchServerlessRetryOptions) DeepCopyInto(out *AmazonOpenSearchServerlessRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonOpenSearchServerlessRetryOptions. +func (in *AmazonOpenSearchServerlessRetryOptions) DeepCopy() *AmazonOpenSearchServerlessRetryOptions { + if in == nil { + return nil + } + out := new(AmazonOpenSearchServerlessRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonopensearchserviceBufferingHints) DeepCopyInto(out *AmazonopensearchserviceBufferingHints) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(int64) + **out = **in + } + if in.SizeInMBs != nil { + in, out := &in.SizeInMBs, &out.SizeInMBs + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonopensearchserviceBufferingHints. +func (in *AmazonopensearchserviceBufferingHints) DeepCopy() *AmazonopensearchserviceBufferingHints { + if in == nil { + return nil + } + out := new(AmazonopensearchserviceBufferingHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonopensearchserviceDestinationConfiguration) DeepCopyInto(out *AmazonopensearchserviceDestinationConfiguration) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonopensearchserviceBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonopensearchserviceRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonopensearchserviceDestinationConfiguration. +func (in *AmazonopensearchserviceDestinationConfiguration) DeepCopy() *AmazonopensearchserviceDestinationConfiguration { + if in == nil { + return nil + } + out := new(AmazonopensearchserviceDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonopensearchserviceDestinationDescription) DeepCopyInto(out *AmazonopensearchserviceDestinationDescription) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonopensearchserviceBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonopensearchserviceRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfigurationDescription != nil { + in, out := &in.VPCConfigurationDescription, &out.VPCConfigurationDescription + *out = new(VPCConfigurationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonopensearchserviceDestinationDescription. +func (in *AmazonopensearchserviceDestinationDescription) DeepCopy() *AmazonopensearchserviceDestinationDescription { + if in == nil { + return nil + } + out := new(AmazonopensearchserviceDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonopensearchserviceDestinationUpdate) DeepCopyInto(out *AmazonopensearchserviceDestinationUpdate) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(AmazonopensearchserviceBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(AmazonopensearchserviceRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonopensearchserviceDestinationUpdate. +func (in *AmazonopensearchserviceDestinationUpdate) DeepCopy() *AmazonopensearchserviceDestinationUpdate { + if in == nil { + return nil + } + out := new(AmazonopensearchserviceDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonopensearchserviceRetryOptions) DeepCopyInto(out *AmazonopensearchserviceRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonopensearchserviceRetryOptions. +func (in *AmazonopensearchserviceRetryOptions) DeepCopy() *AmazonopensearchserviceRetryOptions { + if in == nil { + return nil + } + out := new(AmazonopensearchserviceRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BufferingHints) DeepCopyInto(out *BufferingHints) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(int64) + **out = **in + } + if in.SizeInMBs != nil { + in, out := &in.SizeInMBs, &out.SizeInMBs + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BufferingHints. +func (in *BufferingHints) DeepCopy() *BufferingHints { + if in == nil { + return nil + } + out := new(BufferingHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudWatchLoggingOptions) DeepCopyInto(out *CloudWatchLoggingOptions) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchLoggingOptions. +func (in *CloudWatchLoggingOptions) DeepCopy() *CloudWatchLoggingOptions { + if in == nil { + return nil + } + out := new(CloudWatchLoggingOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyCommand) DeepCopyInto(out *CopyCommand) { + *out = *in + if in.CopyOptions != nil { + in, out := &in.CopyOptions, &out.CopyOptions + *out = new(string) + **out = **in + } + if in.DataTableColumns != nil { + in, out := &in.DataTableColumns, &out.DataTableColumns + *out = new(string) + **out = **in + } + if in.DataTableName != nil { + in, out := &in.DataTableName, &out.DataTableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyCommand. +func (in *CopyCommand) DeepCopy() *CopyCommand { + if in == nil { + return nil + } + out := new(CopyCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeliveryStreamParameters) DeepCopyInto(out *CustomDeliveryStreamParameters) { + *out = *in + if in.KMSKeyARN != nil { + in, out := &in.KMSKeyARN, &out.KMSKeyARN + *out = new(string) + **out = **in + } + if in.KMSKeyARNRef != nil { + in, out := &in.KMSKeyARNRef, &out.KMSKeyARNRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyARNSelector != nil { + in, out := &in.KMSKeyARNSelector, &out.KMSKeyARNSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeliveryStreamParameters. +func (in *CustomDeliveryStreamParameters) DeepCopy() *CustomDeliveryStreamParameters { + if in == nil { + return nil + } + out := new(CustomDeliveryStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFormatConversionConfiguration) DeepCopyInto(out *DataFormatConversionConfiguration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InputFormatConfiguration != nil { + in, out := &in.InputFormatConfiguration, &out.InputFormatConfiguration + *out = new(InputFormatConfiguration) + (*in).DeepCopyInto(*out) + } + if in.OutputFormatConfiguration != nil { + in, out := &in.OutputFormatConfiguration, &out.OutputFormatConfiguration + *out = new(OutputFormatConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SchemaConfiguration != nil { + in, out := &in.SchemaConfiguration, &out.SchemaConfiguration + *out = new(SchemaConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFormatConversionConfiguration. 
+func (in *DataFormatConversionConfiguration) DeepCopy() *DataFormatConversionConfiguration { + if in == nil { + return nil + } + out := new(DataFormatConversionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStream) DeepCopyInto(out *DeliveryStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStream. +func (in *DeliveryStream) DeepCopy() *DeliveryStream { + if in == nil { + return nil + } + out := new(DeliveryStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeliveryStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamDescription) DeepCopyInto(out *DeliveryStreamDescription) { + *out = *in + if in.CreateTimestamp != nil { + in, out := &in.CreateTimestamp, &out.CreateTimestamp + *out = (*in).DeepCopy() + } + if in.DeliveryStreamARN != nil { + in, out := &in.DeliveryStreamARN, &out.DeliveryStreamARN + *out = new(string) + **out = **in + } + if in.DeliveryStreamEncryptionConfiguration != nil { + in, out := &in.DeliveryStreamEncryptionConfiguration, &out.DeliveryStreamEncryptionConfiguration + *out = new(DeliveryStreamEncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.DeliveryStreamStatus != nil { + in, out := &in.DeliveryStreamStatus, &out.DeliveryStreamStatus + *out = new(string) + **out = **in + } + if in.DeliveryStreamType != nil { + in, out := &in.DeliveryStreamType, &out.DeliveryStreamType + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*DestinationDescription, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(DestinationDescription) + (*in).DeepCopyInto(*out) + } + } + } + if in.FailureDescription != nil { + in, out := &in.FailureDescription, &out.FailureDescription + *out = new(FailureDescription) + (*in).DeepCopyInto(*out) + } + if in.HasMoreDestinations != nil { + in, out := &in.HasMoreDestinations, &out.HasMoreDestinations + *out = new(bool) + **out = **in + } + if in.LastUpdateTimestamp != nil { + in, out := &in.LastUpdateTimestamp, &out.LastUpdateTimestamp + *out = (*in).DeepCopy() + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceDescription) + (*in).DeepCopyInto(*out) + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamDescription. +func (in *DeliveryStreamDescription) DeepCopy() *DeliveryStreamDescription { + if in == nil { + return nil + } + out := new(DeliveryStreamDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DeliveryStreamEncryptionConfiguration) DeepCopyInto(out *DeliveryStreamEncryptionConfiguration) { + *out = *in + if in.FailureDescription != nil { + in, out := &in.FailureDescription, &out.FailureDescription + *out = new(FailureDescription) + (*in).DeepCopyInto(*out) + } + if in.KeyARN != nil { + in, out := &in.KeyARN, &out.KeyARN + *out = new(string) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamEncryptionConfiguration. +func (in *DeliveryStreamEncryptionConfiguration) DeepCopy() *DeliveryStreamEncryptionConfiguration { + if in == nil { + return nil + } + out := new(DeliveryStreamEncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamEncryptionConfigurationInput) DeepCopyInto(out *DeliveryStreamEncryptionConfigurationInput) { + *out = *in + if in.KeyARN != nil { + in, out := &in.KeyARN, &out.KeyARN + *out = new(string) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamEncryptionConfigurationInput. +func (in *DeliveryStreamEncryptionConfigurationInput) DeepCopy() *DeliveryStreamEncryptionConfigurationInput { + if in == nil { + return nil + } + out := new(DeliveryStreamEncryptionConfigurationInput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamList) DeepCopyInto(out *DeliveryStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeliveryStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamList. +func (in *DeliveryStreamList) DeepCopy() *DeliveryStreamList { + if in == nil { + return nil + } + out := new(DeliveryStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeliveryStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamObservation) DeepCopyInto(out *DeliveryStreamObservation) { + *out = *in + if in.DeliveryStreamARN != nil { + in, out := &in.DeliveryStreamARN, &out.DeliveryStreamARN + *out = new(string) + **out = **in + } + if in.DeliveryStreamStatus != nil { + in, out := &in.DeliveryStreamStatus, &out.DeliveryStreamStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamObservation. 
+func (in *DeliveryStreamObservation) DeepCopy() *DeliveryStreamObservation { + if in == nil { + return nil + } + out := new(DeliveryStreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamParameters) DeepCopyInto(out *DeliveryStreamParameters) { + *out = *in + if in.AmazonOpenSearchServerlessDestinationConfiguration != nil { + in, out := &in.AmazonOpenSearchServerlessDestinationConfiguration, &out.AmazonOpenSearchServerlessDestinationConfiguration + *out = new(AmazonOpenSearchServerlessDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AmazonopensearchserviceDestinationConfiguration != nil { + in, out := &in.AmazonopensearchserviceDestinationConfiguration, &out.AmazonopensearchserviceDestinationConfiguration + *out = new(AmazonopensearchserviceDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamEncryptionConfigurationInput != nil { + in, out := &in.DeliveryStreamEncryptionConfigurationInput, &out.DeliveryStreamEncryptionConfigurationInput + *out = new(DeliveryStreamEncryptionConfigurationInput) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.DeliveryStreamType != nil { + in, out := &in.DeliveryStreamType, &out.DeliveryStreamType + *out = new(string) + **out = **in + } + if in.ElasticsearchDestinationConfiguration != nil { + in, out := &in.ElasticsearchDestinationConfiguration, &out.ElasticsearchDestinationConfiguration + *out = new(ElasticsearchDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ExtendedS3DestinationConfiguration != nil { + in, out := &in.ExtendedS3DestinationConfiguration, &out.ExtendedS3DestinationConfiguration + *out = new(ExtendedS3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.HTTPEndpointDestinationConfiguration != nil { + in, out := &in.HTTPEndpointDestinationConfiguration, &out.HTTPEndpointDestinationConfiguration + *out = new(HTTPEndpointDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamSourceConfiguration != nil { + in, out := &in.KinesisStreamSourceConfiguration, &out.KinesisStreamSourceConfiguration + *out = new(KinesisStreamSourceConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RedshiftDestinationConfiguration != nil { + in, out := &in.RedshiftDestinationConfiguration, &out.RedshiftDestinationConfiguration + *out = new(RedshiftDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.S3DestinationConfiguration != nil { + in, out := &in.S3DestinationConfiguration, &out.S3DestinationConfiguration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SplunkDestinationConfiguration != nil { + in, out := &in.SplunkDestinationConfiguration, &out.SplunkDestinationConfiguration + *out = new(SplunkDestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } + in.CustomDeliveryStreamParameters.DeepCopyInto(&out.CustomDeliveryStreamParameters) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamParameters. 
+func (in *DeliveryStreamParameters) DeepCopy() *DeliveryStreamParameters { + if in == nil { + return nil + } + out := new(DeliveryStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamSpec) DeepCopyInto(out *DeliveryStreamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamSpec. +func (in *DeliveryStreamSpec) DeepCopy() *DeliveryStreamSpec { + if in == nil { + return nil + } + out := new(DeliveryStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStreamStatus) DeepCopyInto(out *DeliveryStreamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamStatus. +func (in *DeliveryStreamStatus) DeepCopy() *DeliveryStreamStatus { + if in == nil { + return nil + } + out := new(DeliveryStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deserializer) DeepCopyInto(out *Deserializer) { + *out = *in + if in.HiveJSONSerDe != nil { + in, out := &in.HiveJSONSerDe, &out.HiveJSONSerDe + *out = new(HiveJSONSerDe) + (*in).DeepCopyInto(*out) + } + if in.OpenXJSONSerDe != nil { + in, out := &in.OpenXJSONSerDe, &out.OpenXJSONSerDe + *out = new(OpenXJSONSerDe) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deserializer. +func (in *Deserializer) DeepCopy() *Deserializer { + if in == nil { + return nil + } + out := new(Deserializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationDescription) DeepCopyInto(out *DestinationDescription) { + *out = *in + if in.AmazonOpenSearchServerlessDestinationDescription != nil { + in, out := &in.AmazonOpenSearchServerlessDestinationDescription, &out.AmazonOpenSearchServerlessDestinationDescription + *out = new(AmazonOpenSearchServerlessDestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.AmazonopensearchserviceDestinationDescription != nil { + in, out := &in.AmazonopensearchserviceDestinationDescription, &out.AmazonopensearchserviceDestinationDescription + *out = new(AmazonopensearchserviceDestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.DestinationID != nil { + in, out := &in.DestinationID, &out.DestinationID + *out = new(string) + **out = **in + } + if in.ElasticsearchDestinationDescription != nil { + in, out := &in.ElasticsearchDestinationDescription, &out.ElasticsearchDestinationDescription + *out = new(ElasticsearchDestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.ExtendedS3DestinationDescription != nil { + in, out := &in.ExtendedS3DestinationDescription, &out.ExtendedS3DestinationDescription + *out = new(ExtendedS3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.HTTPEndpointDestinationDescription != nil { + in, out := &in.HTTPEndpointDestinationDescription, &out.HTTPEndpointDestinationDescription + *out = new(HTTPEndpointDestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.RedshiftDestinationDescription != nil { + in, out := &in.RedshiftDestinationDescription, &out.RedshiftDestinationDescription + *out = new(RedshiftDestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.SplunkDestinationDescription != nil { + in, out := &in.SplunkDestinationDescription, &out.SplunkDestinationDescription + *out = new(SplunkDestinationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationDescription. +func (in *DestinationDescription) DeepCopy() *DestinationDescription { + if in == nil { + return nil + } + out := new(DestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicPartitioningConfiguration) DeepCopyInto(out *DynamicPartitioningConfiguration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(RetryOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicPartitioningConfiguration. +func (in *DynamicPartitioningConfiguration) DeepCopy() *DynamicPartitioningConfiguration { + if in == nil { + return nil + } + out := new(DynamicPartitioningConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchBufferingHints) DeepCopyInto(out *ElasticsearchBufferingHints) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(int64) + **out = **in + } + if in.SizeInMBs != nil { + in, out := &in.SizeInMBs, &out.SizeInMBs + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchBufferingHints. +func (in *ElasticsearchBufferingHints) DeepCopy() *ElasticsearchBufferingHints { + if in == nil { + return nil + } + out := new(ElasticsearchBufferingHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchDestinationConfiguration) DeepCopyInto(out *ElasticsearchDestinationConfiguration) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(ElasticsearchBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(ElasticsearchRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchDestinationConfiguration. +func (in *ElasticsearchDestinationConfiguration) DeepCopy() *ElasticsearchDestinationConfiguration { + if in == nil { + return nil + } + out := new(ElasticsearchDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchDestinationDescription) DeepCopyInto(out *ElasticsearchDestinationDescription) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(ElasticsearchBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(ElasticsearchRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfigurationDescription != nil { + in, out := &in.VPCConfigurationDescription, &out.VPCConfigurationDescription + *out = new(VPCConfigurationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchDestinationDescription. +func (in *ElasticsearchDestinationDescription) DeepCopy() *ElasticsearchDestinationDescription { + if in == nil { + return nil + } + out := new(ElasticsearchDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchDestinationUpdate) DeepCopyInto(out *ElasticsearchDestinationUpdate) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(ElasticsearchBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DomainARN != nil { + in, out := &in.DomainARN, &out.DomainARN + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(ElasticsearchRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchDestinationUpdate. +func (in *ElasticsearchDestinationUpdate) DeepCopy() *ElasticsearchDestinationUpdate { + if in == nil { + return nil + } + out := new(ElasticsearchDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchRetryOptions) DeepCopyInto(out *ElasticsearchRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchRetryOptions. +func (in *ElasticsearchRetryOptions) DeepCopy() *ElasticsearchRetryOptions { + if in == nil { + return nil + } + out := new(ElasticsearchRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + if in.KMSEncryptionConfig != nil { + in, out := &in.KMSEncryptionConfig, &out.KMSEncryptionConfig + *out = new(KMSEncryptionConfig) + (*in).DeepCopyInto(*out) + } + if in.NoEncryptionConfig != nil { + in, out := &in.NoEncryptionConfig, &out.NoEncryptionConfig + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3DestinationConfiguration) DeepCopyInto(out *ExtendedS3DestinationConfiguration) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfiguration) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupConfiguration != nil { + in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3DestinationConfiguration. +func (in *ExtendedS3DestinationConfiguration) DeepCopy() *ExtendedS3DestinationConfiguration { + if in == nil { + return nil + } + out := new(ExtendedS3DestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3DestinationDescription) DeepCopyInto(out *ExtendedS3DestinationDescription) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfiguration) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupDescription != nil { + in, out := &in.S3BackupDescription, &out.S3BackupDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3DestinationDescription. +func (in *ExtendedS3DestinationDescription) DeepCopy() *ExtendedS3DestinationDescription { + if in == nil { + return nil + } + out := new(ExtendedS3DestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3DestinationUpdate) DeepCopyInto(out *ExtendedS3DestinationUpdate) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfiguration) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3DestinationUpdate. +func (in *ExtendedS3DestinationUpdate) DeepCopy() *ExtendedS3DestinationUpdate { + if in == nil { + return nil + } + out := new(ExtendedS3DestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailureDescription) DeepCopyInto(out *FailureDescription) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDescription. +func (in *FailureDescription) DeepCopy() *FailureDescription { + if in == nil { + return nil + } + out := new(FailureDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointBufferingHints) DeepCopyInto(out *HTTPEndpointBufferingHints) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(int64) + **out = **in + } + if in.SizeInMBs != nil { + in, out := &in.SizeInMBs, &out.SizeInMBs + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointBufferingHints. 
+func (in *HTTPEndpointBufferingHints) DeepCopy() *HTTPEndpointBufferingHints { + if in == nil { + return nil + } + out := new(HTTPEndpointBufferingHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointCommonAttribute) DeepCopyInto(out *HTTPEndpointCommonAttribute) { + *out = *in + if in.AttributeName != nil { + in, out := &in.AttributeName, &out.AttributeName + *out = new(string) + **out = **in + } + if in.AttributeValue != nil { + in, out := &in.AttributeValue, &out.AttributeValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointCommonAttribute. +func (in *HTTPEndpointCommonAttribute) DeepCopy() *HTTPEndpointCommonAttribute { + if in == nil { + return nil + } + out := new(HTTPEndpointCommonAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfiguration) DeepCopyInto(out *HTTPEndpointConfiguration) { + *out = *in + if in.AccessKey != nil { + in, out := &in.AccessKey, &out.AccessKey + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfiguration. +func (in *HTTPEndpointConfiguration) DeepCopy() *HTTPEndpointConfiguration { + if in == nil { + return nil + } + out := new(HTTPEndpointConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointDescription) DeepCopyInto(out *HTTPEndpointDescription) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointDescription. +func (in *HTTPEndpointDescription) DeepCopy() *HTTPEndpointDescription { + if in == nil { + return nil + } + out := new(HTTPEndpointDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointDestinationConfiguration) DeepCopyInto(out *HTTPEndpointDestinationConfiguration) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(HTTPEndpointBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(HTTPEndpointConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, &out.RequestConfiguration + *out = new(HTTPEndpointRequestConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(HTTPEndpointRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointDestinationConfiguration. +func (in *HTTPEndpointDestinationConfiguration) DeepCopy() *HTTPEndpointDestinationConfiguration { + if in == nil { + return nil + } + out := new(HTTPEndpointDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointDestinationDescription) DeepCopyInto(out *HTTPEndpointDestinationDescription) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(HTTPEndpointBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(HTTPEndpointDescription) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, &out.RequestConfiguration + *out = new(HTTPEndpointRequestConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(HTTPEndpointRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointDestinationDescription. +func (in *HTTPEndpointDestinationDescription) DeepCopy() *HTTPEndpointDestinationDescription { + if in == nil { + return nil + } + out := new(HTTPEndpointDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointDestinationUpdate) DeepCopyInto(out *HTTPEndpointDestinationUpdate) { + *out = *in + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(HTTPEndpointBufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(HTTPEndpointConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, &out.RequestConfiguration + *out = new(HTTPEndpointRequestConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(HTTPEndpointRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointDestinationUpdate. 
+func (in *HTTPEndpointDestinationUpdate) DeepCopy() *HTTPEndpointDestinationUpdate { + if in == nil { + return nil + } + out := new(HTTPEndpointDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointRequestConfiguration) DeepCopyInto(out *HTTPEndpointRequestConfiguration) { + *out = *in + if in.CommonAttributes != nil { + in, out := &in.CommonAttributes, &out.CommonAttributes + *out = make([]*HTTPEndpointCommonAttribute, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(HTTPEndpointCommonAttribute) + (*in).DeepCopyInto(*out) + } + } + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointRequestConfiguration. +func (in *HTTPEndpointRequestConfiguration) DeepCopy() *HTTPEndpointRequestConfiguration { + if in == nil { + return nil + } + out := new(HTTPEndpointRequestConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointRetryOptions) DeepCopyInto(out *HTTPEndpointRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointRetryOptions. +func (in *HTTPEndpointRetryOptions) DeepCopy() *HTTPEndpointRetryOptions { + if in == nil { + return nil + } + out := new(HTTPEndpointRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveJSONSerDe) DeepCopyInto(out *HiveJSONSerDe) { + *out = *in + if in.TimestampFormats != nil { + in, out := &in.TimestampFormats, &out.TimestampFormats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJSONSerDe. +func (in *HiveJSONSerDe) DeepCopy() *HiveJSONSerDe { + if in == nil { + return nil + } + out := new(HiveJSONSerDe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatConfiguration) DeepCopyInto(out *InputFormatConfiguration) { + *out = *in + if in.Deserializer != nil { + in, out := &in.Deserializer, &out.Deserializer + *out = new(Deserializer) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatConfiguration. +func (in *InputFormatConfiguration) DeepCopy() *InputFormatConfiguration { + if in == nil { + return nil + } + out := new(InputFormatConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KMSEncryptionConfig) DeepCopyInto(out *KMSEncryptionConfig) { + *out = *in + if in.AWSKMSKeyARN != nil { + in, out := &in.AWSKMSKeyARN, &out.AWSKMSKeyARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSEncryptionConfig. +func (in *KMSEncryptionConfig) DeepCopy() *KMSEncryptionConfig { + if in == nil { + return nil + } + out := new(KMSEncryptionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamSourceConfiguration) DeepCopyInto(out *KinesisStreamSourceConfiguration) { + *out = *in + if in.KinesisStreamARN != nil { + in, out := &in.KinesisStreamARN, &out.KinesisStreamARN + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamSourceConfiguration. +func (in *KinesisStreamSourceConfiguration) DeepCopy() *KinesisStreamSourceConfiguration { + if in == nil { + return nil + } + out := new(KinesisStreamSourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamSourceDescription) DeepCopyInto(out *KinesisStreamSourceDescription) { + *out = *in + if in.DeliveryStartTimestamp != nil { + in, out := &in.DeliveryStartTimestamp, &out.DeliveryStartTimestamp + *out = (*in).DeepCopy() + } + if in.KinesisStreamARN != nil { + in, out := &in.KinesisStreamARN, &out.KinesisStreamARN + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamSourceDescription. +func (in *KinesisStreamSourceDescription) DeepCopy() *KinesisStreamSourceDescription { + if in == nil { + return nil + } + out := new(KinesisStreamSourceDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenXJSONSerDe) DeepCopyInto(out *OpenXJSONSerDe) { + *out = *in + if in.CaseInsensitive != nil { + in, out := &in.CaseInsensitive, &out.CaseInsensitive + *out = new(bool) + **out = **in + } + if in.ColumnToJSONKeyMappings != nil { + in, out := &in.ColumnToJSONKeyMappings, &out.ColumnToJSONKeyMappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ConvertDotsInJSONKeysToUnderscores != nil { + in, out := &in.ConvertDotsInJSONKeysToUnderscores, &out.ConvertDotsInJSONKeysToUnderscores + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenXJSONSerDe. +func (in *OpenXJSONSerDe) DeepCopy() *OpenXJSONSerDe { + if in == nil { + return nil + } + out := new(OpenXJSONSerDe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrcSerDe) DeepCopyInto(out *OrcSerDe) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(int64) + **out = **in + } + if in.BloomFilterColumns != nil { + in, out := &in.BloomFilterColumns, &out.BloomFilterColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BloomFilterFalsePositiveProbability != nil { + in, out := &in.BloomFilterFalsePositiveProbability, &out.BloomFilterFalsePositiveProbability + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.DictionaryKeyThreshold != nil { + in, out := &in.DictionaryKeyThreshold, &out.DictionaryKeyThreshold + *out = new(float64) + **out = **in + } + if in.EnablePadding != nil { + in, out := &in.EnablePadding, &out.EnablePadding + *out = new(bool) + **out = **in + } + if in.FormatVersion != nil { + in, out := &in.FormatVersion, &out.FormatVersion + *out = new(string) + **out = **in + } + if in.PaddingTolerance != nil { + in, out := &in.PaddingTolerance, &out.PaddingTolerance + *out = new(float64) + **out = **in + } + if in.RowIndexStride != nil { + in, out := &in.RowIndexStride, &out.RowIndexStride + *out = new(int64) + **out = **in + } + if in.StripeSizeBytes != nil { + in, out := &in.StripeSizeBytes, &out.StripeSizeBytes + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrcSerDe. +func (in *OrcSerDe) DeepCopy() *OrcSerDe { + if in == nil { + return nil + } + out := new(OrcSerDe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputFormatConfiguration) DeepCopyInto(out *OutputFormatConfiguration) { + *out = *in + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = new(Serializer) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFormatConfiguration. +func (in *OutputFormatConfiguration) DeepCopy() *OutputFormatConfiguration { + if in == nil { + return nil + } + out := new(OutputFormatConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParquetSerDe) DeepCopyInto(out *ParquetSerDe) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(int64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EnableDictionaryCompression != nil { + in, out := &in.EnableDictionaryCompression, &out.EnableDictionaryCompression + *out = new(bool) + **out = **in + } + if in.MaxPaddingBytes != nil { + in, out := &in.MaxPaddingBytes, &out.MaxPaddingBytes + *out = new(int64) + **out = **in + } + if in.PageSizeBytes != nil { + in, out := &in.PageSizeBytes, &out.PageSizeBytes + *out = new(int64) + **out = **in + } + if in.WriterVersion != nil { + in, out := &in.WriterVersion, &out.WriterVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParquetSerDe. 
+func (in *ParquetSerDe) DeepCopy() *ParquetSerDe { + if in == nil { + return nil + } + out := new(ParquetSerDe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfiguration) DeepCopyInto(out *ProcessingConfiguration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]*Processor, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Processor) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfiguration. +func (in *ProcessingConfiguration) DeepCopy() *ProcessingConfiguration { + if in == nil { + return nil + } + out := new(ProcessingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Processor) DeepCopyInto(out *Processor) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]*ProcessorParameter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ProcessorParameter) + (*in).DeepCopyInto(*out) + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Processor. +func (in *Processor) DeepCopy() *Processor { + if in == nil { + return nil + } + out := new(Processor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessorParameter) DeepCopyInto(out *ProcessorParameter) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorParameter. +func (in *ProcessorParameter) DeepCopy() *ProcessorParameter { + if in == nil { + return nil + } + out := new(ProcessorParameter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftDestinationConfiguration) DeepCopyInto(out *RedshiftDestinationConfiguration) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterJDBCURL != nil { + in, out := &in.ClusterJDBCURL, &out.ClusterJDBCURL + *out = new(string) + **out = **in + } + if in.CopyCommand != nil { + in, out := &in.CopyCommand, &out.CopyCommand + *out = new(CopyCommand) + (*in).DeepCopyInto(*out) + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(RedshiftRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupConfiguration != nil { + in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftDestinationConfiguration. +func (in *RedshiftDestinationConfiguration) DeepCopy() *RedshiftDestinationConfiguration { + if in == nil { + return nil + } + out := new(RedshiftDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftDestinationDescription) DeepCopyInto(out *RedshiftDestinationDescription) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterJDBCURL != nil { + in, out := &in.ClusterJDBCURL, &out.ClusterJDBCURL + *out = new(string) + **out = **in + } + if in.CopyCommand != nil { + in, out := &in.CopyCommand, &out.CopyCommand + *out = new(CopyCommand) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(RedshiftRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupDescription != nil { + in, out := &in.S3BackupDescription, &out.S3BackupDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftDestinationDescription. +func (in *RedshiftDestinationDescription) DeepCopy() *RedshiftDestinationDescription { + if in == nil { + return nil + } + out := new(RedshiftDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftDestinationUpdate) DeepCopyInto(out *RedshiftDestinationUpdate) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.ClusterJDBCURL != nil { + in, out := &in.ClusterJDBCURL, &out.ClusterJDBCURL + *out = new(string) + **out = **in + } + if in.CopyCommand != nil { + in, out := &in.CopyCommand, &out.CopyCommand + *out = new(CopyCommand) + (*in).DeepCopyInto(*out) + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(RedshiftRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftDestinationUpdate. 
+func (in *RedshiftDestinationUpdate) DeepCopy() *RedshiftDestinationUpdate { + if in == nil { + return nil + } + out := new(RedshiftDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftRetryOptions) DeepCopyInto(out *RedshiftRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftRetryOptions. +func (in *RedshiftRetryOptions) DeepCopy() *RedshiftRetryOptions { + if in == nil { + return nil + } + out := new(RedshiftRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryOptions) DeepCopyInto(out *RetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryOptions. +func (in *RetryOptions) DeepCopy() *RetryOptions { + if in == nil { + return nil + } + out := new(RetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationConfiguration) DeepCopyInto(out *S3DestinationConfiguration) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationConfiguration. +func (in *S3DestinationConfiguration) DeepCopy() *S3DestinationConfiguration { + if in == nil { + return nil + } + out := new(S3DestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3DestinationDescription) DeepCopyInto(out *S3DestinationDescription) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationDescription. +func (in *S3DestinationDescription) DeepCopy() *S3DestinationDescription { + if in == nil { + return nil + } + out := new(S3DestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationUpdate) DeepCopyInto(out *S3DestinationUpdate) { + *out = *in + if in.BucketARN != nil { + in, out := &in.BucketARN, &out.BucketARN + *out = new(string) + **out = **in + } + if in.BufferingHints != nil { + in, out := &in.BufferingHints, &out.BufferingHints + *out = new(BufferingHints) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationUpdate. +func (in *S3DestinationUpdate) DeepCopy() *S3DestinationUpdate { + if in == nil { + return nil + } + out := new(S3DestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaConfiguration) DeepCopyInto(out *SchemaConfiguration) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaConfiguration. +func (in *SchemaConfiguration) DeepCopy() *SchemaConfiguration { + if in == nil { + return nil + } + out := new(SchemaConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Serializer) DeepCopyInto(out *Serializer) { + *out = *in + if in.OrcSerDe != nil { + in, out := &in.OrcSerDe, &out.OrcSerDe + *out = new(OrcSerDe) + (*in).DeepCopyInto(*out) + } + if in.ParquetSerDe != nil { + in, out := &in.ParquetSerDe, &out.ParquetSerDe + *out = new(ParquetSerDe) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Serializer. +func (in *Serializer) DeepCopy() *Serializer { + if in == nil { + return nil + } + out := new(Serializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceDescription) DeepCopyInto(out *SourceDescription) { + *out = *in + if in.KinesisStreamSourceDescription != nil { + in, out := &in.KinesisStreamSourceDescription, &out.KinesisStreamSourceDescription + *out = new(KinesisStreamSourceDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDescription. +func (in *SourceDescription) DeepCopy() *SourceDescription { + if in == nil { + return nil + } + out := new(SourceDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkDestinationConfiguration) DeepCopyInto(out *SplunkDestinationConfiguration) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.HECAcknowledgmentTimeoutInSeconds != nil { + in, out := &in.HECAcknowledgmentTimeoutInSeconds, &out.HECAcknowledgmentTimeoutInSeconds + *out = new(int64) + **out = **in + } + if in.HECEndpoint != nil { + in, out := &in.HECEndpoint, &out.HECEndpoint + *out = new(string) + **out = **in + } + if in.HECEndpointType != nil { + in, out := &in.HECEndpointType, &out.HECEndpointType + *out = new(string) + **out = **in + } + if in.HECToken != nil { + in, out := &in.HECToken, &out.HECToken + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(SplunkRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3DestinationConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkDestinationConfiguration. +func (in *SplunkDestinationConfiguration) DeepCopy() *SplunkDestinationConfiguration { + if in == nil { + return nil + } + out := new(SplunkDestinationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkDestinationDescription) DeepCopyInto(out *SplunkDestinationDescription) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.HECAcknowledgmentTimeoutInSeconds != nil { + in, out := &in.HECAcknowledgmentTimeoutInSeconds, &out.HECAcknowledgmentTimeoutInSeconds + *out = new(int64) + **out = **in + } + if in.HECEndpoint != nil { + in, out := &in.HECEndpoint, &out.HECEndpoint + *out = new(string) + **out = **in + } + if in.HECEndpointType != nil { + in, out := &in.HECEndpointType, &out.HECEndpointType + *out = new(string) + **out = **in + } + if in.HECToken != nil { + in, out := &in.HECToken, &out.HECToken + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(SplunkRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3DestinationDescription != nil { + in, out := &in.S3DestinationDescription, &out.S3DestinationDescription + *out = new(S3DestinationDescription) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkDestinationDescription. 
+func (in *SplunkDestinationDescription) DeepCopy() *SplunkDestinationDescription { + if in == nil { + return nil + } + out := new(SplunkDestinationDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkDestinationUpdate) DeepCopyInto(out *SplunkDestinationUpdate) { + *out = *in + if in.CloudWatchLoggingOptions != nil { + in, out := &in.CloudWatchLoggingOptions, &out.CloudWatchLoggingOptions + *out = new(CloudWatchLoggingOptions) + (*in).DeepCopyInto(*out) + } + if in.HECAcknowledgmentTimeoutInSeconds != nil { + in, out := &in.HECAcknowledgmentTimeoutInSeconds, &out.HECAcknowledgmentTimeoutInSeconds + *out = new(int64) + **out = **in + } + if in.HECEndpoint != nil { + in, out := &in.HECEndpoint, &out.HECEndpoint + *out = new(string) + **out = **in + } + if in.HECEndpointType != nil { + in, out := &in.HECEndpointType, &out.HECEndpointType + *out = new(string) + **out = **in + } + if in.HECToken != nil { + in, out := &in.HECToken, &out.HECToken + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryOptions != nil { + in, out := &in.RetryOptions, &out.RetryOptions + *out = new(SplunkRetryOptions) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkDestinationUpdate. +func (in *SplunkDestinationUpdate) DeepCopy() *SplunkDestinationUpdate { + if in == nil { + return nil + } + out := new(SplunkDestinationUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkRetryOptions) DeepCopyInto(out *SplunkRetryOptions) { + *out = *in + if in.DurationInSeconds != nil { + in, out := &in.DurationInSeconds, &out.DurationInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkRetryOptions. +func (in *SplunkRetryOptions) DeepCopy() *SplunkRetryOptions { + if in == nil { + return nil + } + out := new(SplunkRetryOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tag) DeepCopyInto(out *Tag) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. +func (in *Tag) DeepCopy() *Tag { + if in == nil { + return nil + } + out := new(Tag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
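+// Illustrative note (not part of the generated output): every pointer field in
+// these DeepCopy helpers is re-allocated rather than aliased, so a copy is
+// fully independent of its receiver. A minimal sketch of what that guarantees:
+//
+//    key, value := "team", "data-platform"
+//    orig := &Tag{Key: &key, Value: &value}
+//    cp := orig.DeepCopy()
+//    *cp.Key = "changed" // *orig.Key is still "team"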
+func (in *VPCConfiguration) DeepCopyInto(out *VPCConfiguration) { + *out = *in + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfiguration. +func (in *VPCConfiguration) DeepCopy() *VPCConfiguration { + if in == nil { + return nil + } + out := new(VPCConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationDescription) DeepCopyInto(out *VPCConfigurationDescription) { + *out = *in + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationDescription. +func (in *VPCConfigurationDescription) DeepCopy() *VPCConfigurationDescription { + if in == nil { + return nil + } + out := new(VPCConfigurationDescription) + in.DeepCopyInto(out) + return out +} diff --git a/apis/firehose/v1alpha1/zz_generated.managed.go b/apis/firehose/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000000..b0ebe133e2 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_generated.managed.go @@ -0,0 +1,97 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DeliveryStream. +func (mg *DeliveryStream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeliveryStream. +func (mg *DeliveryStream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeliveryStream. 
+func (mg *DeliveryStream) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeliveryStream. +func (mg *DeliveryStream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this DeliveryStream. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *DeliveryStream) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this DeliveryStream. +func (mg *DeliveryStream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeliveryStream. +func (mg *DeliveryStream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeliveryStream. +func (mg *DeliveryStream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeliveryStream. +func (mg *DeliveryStream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeliveryStream. +func (mg *DeliveryStream) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeliveryStream. +func (mg *DeliveryStream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this DeliveryStream. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *DeliveryStream) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this DeliveryStream. +func (mg *DeliveryStream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeliveryStream. +func (mg *DeliveryStream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/firehose/v1alpha1/zz_generated.managedlist.go b/apis/firehose/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000000..78f136c7af --- /dev/null +++ b/apis/firehose/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DeliveryStreamList. 
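+// Illustrative note (not part of the generated output): the accessors above are
+// exactly the surface the crossplane-runtime managed reconciler consumes, so
+// the usual compile-time assertions would hold, as a sketch:
+//
+//    var _ resource.Managed = &DeliveryStream{}
+//    var _ resource.ManagedList = &DeliveryStreamList{}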
+func (l *DeliveryStreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/firehose/v1alpha1/zz_generated.resolvers.go b/apis/firehose/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000000..336c5b631e --- /dev/null +++ b/apis/firehose/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + v1alpha1 "github.com/crossplane-contrib/provider-aws/apis/kms/v1alpha1" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this DeliveryStream. +func (mg *DeliveryStream) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARN), + Extract: v1alpha1.KMSKeyARN(), + Reference: mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARNRef, + Selector: mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARNSelector, + To: reference.To{ + List: &v1alpha1.KeyList{}, + Managed: &v1alpha1.Key{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARN") + } + mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARN = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomDeliveryStreamParameters.KMSKeyARNRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/firehose/v1alpha1/zz_groupversion_info.go b/apis/firehose/v1alpha1/zz_groupversion_info.go new file mode 100644 index 0000000000..08570accd2 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,41 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
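+// Illustrative note (not part of the generated output): a minimal sketch of
+// how the metadata below is consumed, assuming a plain controller-runtime
+// scheme:
+//
+//    s := runtime.NewScheme() // k8s.io/apimachinery/pkg/runtime
+//    if err := AddToScheme(s); err != nil {
+//        panic(err)
+//    }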
+const ( + CRDGroup = "firehose.aws.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // GroupVersion is the API Group Version used to register the objects + GroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/firehose/v1alpha1/zz_types.go b/apis/firehose/v1alpha1/zz_types.go new file mode 100644 index 0000000000..40754c1af4 --- /dev/null +++ b/apis/firehose/v1alpha1/zz_types.go @@ -0,0 +1,1118 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Hack to avoid import errors during build... +var ( + _ = &metav1.Time{} +) + +// +kubebuilder:skipversion +type AmazonOpenSearchServerlessBufferingHints struct { + IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"` + + SizeInMBs *int64 `json:"sizeInMBs,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonOpenSearchServerlessDestinationConfiguration struct { + // Describes the buffering to perform before delivering data to the Serverless + // offering for Amazon OpenSearch Service destination. + BufferingHints *AmazonOpenSearchServerlessBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + CollectionEndpoint *string `json:"collectionEndpoint,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to the Serverless offering for Amazon OpenSearch Service. + RetryOptions *AmazonOpenSearchServerlessRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"` + // The details of the VPC of the Amazon ES destination. + VPCConfiguration *VPCConfiguration `json:"vpcConfiguration,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonOpenSearchServerlessDestinationDescription struct { + // Describes the buffering to perform before delivering data to the Serverless + // offering for Amazon OpenSearch Service destination. + BufferingHints *AmazonOpenSearchServerlessBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. 
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + CollectionEndpoint *string `json:"collectionEndpoint,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to the Serverless offering for Amazon OpenSearch Service. + RetryOptions *AmazonOpenSearchServerlessRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"` + // The details of the VPC of the Amazon ES destination. + VPCConfigurationDescription *VPCConfigurationDescription `json:"vpcConfigurationDescription,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonOpenSearchServerlessDestinationUpdate struct { + // Describes the buffering to perform before delivering data to the Serverless + // offering for Amazon OpenSearch Service destination. + BufferingHints *AmazonOpenSearchServerlessBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + CollectionEndpoint *string `json:"collectionEndpoint,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to the Serverless offering for Amazon OpenSearch Service. + RetryOptions *AmazonOpenSearchServerlessRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonOpenSearchServerlessRetryOptions struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonopensearchserviceBufferingHints struct { + IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"` + + SizeInMBs *int64 `json:"sizeInMBs,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonopensearchserviceDestinationConfiguration struct { + // Describes the buffering to perform before delivering data to the Amazon OpenSearch + // Service destination. + BufferingHints *AmazonopensearchserviceBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterEndpoint *string `json:"clusterEndpoint,omitempty"` + + DomainARN *string `json:"domainARN,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon OpenSearch Service. 
+ RetryOptions *AmazonopensearchserviceRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"` + + TypeName *string `json:"typeName,omitempty"` + // The details of the VPC of the Amazon ES destination. + VPCConfiguration *VPCConfiguration `json:"vpcConfiguration,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonopensearchserviceDestinationDescription struct { + // Describes the buffering to perform before delivering data to the Amazon OpenSearch + // Service destination. + BufferingHints *AmazonopensearchserviceBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterEndpoint *string `json:"clusterEndpoint,omitempty"` + + DomainARN *string `json:"domainARN,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon OpenSearch Service. + RetryOptions *AmazonopensearchserviceRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"` + + TypeName *string `json:"typeName,omitempty"` + // The details of the VPC of the Amazon ES destination. + VPCConfigurationDescription *VPCConfigurationDescription `json:"vpcConfigurationDescription,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonopensearchserviceDestinationUpdate struct { + // Describes the buffering to perform before delivering data to the Amazon OpenSearch + // Service destination. + BufferingHints *AmazonopensearchserviceBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterEndpoint *string `json:"clusterEndpoint,omitempty"` + + DomainARN *string `json:"domainARN,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon OpenSearch Service. 
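+ // Illustrative note (not part of the generated output): a hypothetical
+ // manifest fragment for an Amazon OpenSearch Service destination, using the
+ // JSON tags declared above; the parent key name, ARNs and enum values are
+ // assumptions:
+ //
+ //    amazonopensearchserviceDestinationConfiguration:
+ //      domainARN: arn:aws:es:eu-west-1:123456789012:domain/example
+ //      indexName: events
+ //      indexRotationPeriod: OneDay
+ //      roleARN: arn:aws:iam::123456789012:role/firehose-delivery
+ //      s3BackupMode: FailedDocumentsOnly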
+ RetryOptions *AmazonopensearchserviceRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + TypeName *string `json:"typeName,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonopensearchserviceRetryOptions struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` +} + +// +kubebuilder:skipversion +type BufferingHints struct { + IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"` + + SizeInMBs *int64 `json:"sizeInMBs,omitempty"` +} + +// +kubebuilder:skipversion +type CloudWatchLoggingOptions struct { + Enabled *bool `json:"enabled,omitempty"` + + LogGroupName *string `json:"logGroupName,omitempty"` + + LogStreamName *string `json:"logStreamName,omitempty"` +} + +// +kubebuilder:skipversion +type CopyCommand struct { + CopyOptions *string `json:"copyOptions,omitempty"` + + DataTableColumns *string `json:"dataTableColumns,omitempty"` + + DataTableName *string `json:"dataTableName,omitempty"` +} + +// +kubebuilder:skipversion +type DataFormatConversionConfiguration struct { + Enabled *bool `json:"enabled,omitempty"` + // Specifies the deserializer you want to use to convert the format of the input + // data. This parameter is required if Enabled is set to true. + InputFormatConfiguration *InputFormatConfiguration `json:"inputFormatConfiguration,omitempty"` + // Specifies the serializer that you want Kinesis Data Firehose to use to convert + // the format of your data before it writes it to Amazon S3. This parameter + // is required if Enabled is set to true. + OutputFormatConfiguration *OutputFormatConfiguration `json:"outputFormatConfiguration,omitempty"` + // Specifies the schema to which you want Kinesis Data Firehose to configure + // your data before it writes it to Amazon S3. This parameter is required if + // Enabled is set to true. + SchemaConfiguration *SchemaConfiguration `json:"schemaConfiguration,omitempty"` +} + +// +kubebuilder:skipversion +type DeliveryStreamDescription struct { + CreateTimestamp *metav1.Time `json:"createTimestamp,omitempty"` + + DeliveryStreamARN *string `json:"deliveryStreamARN,omitempty"` + // Contains information about the server-side encryption (SSE) status for the + // delivery stream, the type customer master key (CMK) in use, if any, and the + // ARN of the CMK. You can get DeliveryStreamEncryptionConfiguration by invoking + // the DescribeDeliveryStream operation. + DeliveryStreamEncryptionConfiguration *DeliveryStreamEncryptionConfiguration `json:"deliveryStreamEncryptionConfiguration,omitempty"` + + DeliveryStreamName *string `json:"deliveryStreamName,omitempty"` + + DeliveryStreamStatus *string `json:"deliveryStreamStatus,omitempty"` + + DeliveryStreamType *string `json:"deliveryStreamType,omitempty"` + + Destinations []*DestinationDescription `json:"destinations,omitempty"` + // Provides details in case one of the following operations fails due to an + // error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, + // StopDeliveryStreamEncryption. + FailureDescription *FailureDescription `json:"failureDescription,omitempty"` + + HasMoreDestinations *bool `json:"hasMoreDestinations,omitempty"` + + LastUpdateTimestamp *metav1.Time `json:"lastUpdateTimestamp,omitempty"` + // Details about a Kinesis data stream used as the source for a Kinesis Data + // Firehose delivery stream. 
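+ // Illustrative note (not part of the generated output): a hypothetical spec
+ // fragment for the DataFormatConversionConfiguration declared above,
+ // converting JSON input to Parquet via an AWS Glue table; all names are
+ // placeholders:
+ //
+ //    dataFormatConversionConfiguration:
+ //      enabled: true
+ //      inputFormatConfiguration:
+ //        deserializer:
+ //          openXJSONSerDe: {}
+ //      outputFormatConfiguration:
+ //        serializer:
+ //          parquetSerDe: {}
+ //      schemaConfiguration:
+ //        databaseName: analytics
+ //        tableName: events
+ //        roleARN: arn:aws:iam::123456789012:role/firehose-glue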
+ Source *SourceDescription `json:"source,omitempty"` + + VersionID *string `json:"versionID,omitempty"` +} + +// +kubebuilder:skipversion +type DeliveryStreamEncryptionConfiguration struct { + // Provides details in case one of the following operations fails due to an + // error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, + // StopDeliveryStreamEncryption. + FailureDescription *FailureDescription `json:"failureDescription,omitempty"` + + KeyARN *string `json:"keyARN,omitempty"` + + KeyType *string `json:"keyType,omitempty"` + + Status *string `json:"status,omitempty"` +} + +// +kubebuilder:skipversion +type DeliveryStreamEncryptionConfigurationInput struct { + KeyARN *string `json:"keyARN,omitempty"` + + KeyType *string `json:"keyType,omitempty"` +} + +// +kubebuilder:skipversion +type Deserializer struct { + // The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing + // data, which means converting it from the JSON format in preparation for serializing + // it to the Parquet or ORC format. This is one of two deserializers you can + // choose, depending on which one offers the functionality you need. The other + // option is the OpenX SerDe. + HiveJSONSerDe *HiveJSONSerDe `json:"hiveJSONSerDe,omitempty"` + // The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which + // means converting it from the JSON format in preparation for serializing it + // to the Parquet or ORC format. This is one of two deserializers you can choose, + // depending on which one offers the functionality you need. The other option + // is the native Hive / HCatalog JsonSerDe. + OpenXJSONSerDe *OpenXJSONSerDe `json:"openXJSONSerDe,omitempty"` +} + +// +kubebuilder:skipversion +type DestinationDescription struct { + // The destination description in the Serverless offering for Amazon OpenSearch + // Service. + AmazonOpenSearchServerlessDestinationDescription *AmazonOpenSearchServerlessDestinationDescription `json:"amazonOpenSearchServerlessDestinationDescription,omitempty"` + // The destination description in Amazon OpenSearch Service. + AmazonopensearchserviceDestinationDescription *AmazonopensearchserviceDestinationDescription `json:"amazonopensearchserviceDestinationDescription,omitempty"` + + DestinationID *string `json:"destinationID,omitempty"` + // The destination description in Amazon ES. + ElasticsearchDestinationDescription *ElasticsearchDestinationDescription `json:"elasticsearchDestinationDescription,omitempty"` + // Describes a destination in Amazon S3. + ExtendedS3DestinationDescription *ExtendedS3DestinationDescription `json:"extendedS3DestinationDescription,omitempty"` + // Describes the HTTP endpoint destination. + HTTPEndpointDestinationDescription *HTTPEndpointDestinationDescription `json:"httpEndpointDestinationDescription,omitempty"` + // Describes a destination in Amazon Redshift. + RedshiftDestinationDescription *RedshiftDestinationDescription `json:"redshiftDestinationDescription,omitempty"` + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"` + // Describes a destination in Splunk. 
+ SplunkDestinationDescription *SplunkDestinationDescription `json:"splunkDestinationDescription,omitempty"` +} + +// +kubebuilder:skipversion +type DynamicPartitioningConfiguration struct { + Enabled *bool `json:"enabled,omitempty"` + // The retry behavior in case Kinesis Data Firehose is unable to deliver data + // to an Amazon S3 prefix. + RetryOptions *RetryOptions `json:"retryOptions,omitempty"` +} + +// +kubebuilder:skipversion +type ElasticsearchBufferingHints struct { + IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"` + + SizeInMBs *int64 `json:"sizeInMBs,omitempty"` +} + +// +kubebuilder:skipversion +type ElasticsearchDestinationConfiguration struct { + // Describes the buffering to perform before delivering data to the Amazon ES + // destination. + BufferingHints *ElasticsearchBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterEndpoint *string `json:"clusterEndpoint,omitempty"` + + DomainARN *string `json:"domainARN,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon ES. + RetryOptions *ElasticsearchRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"` + + TypeName *string `json:"typeName,omitempty"` + // The details of the VPC of the Amazon ES destination. + VPCConfiguration *VPCConfiguration `json:"vpcConfiguration,omitempty"` +} + +// +kubebuilder:skipversion +type ElasticsearchDestinationDescription struct { + // Describes the buffering to perform before delivering data to the Amazon ES + // destination. + BufferingHints *ElasticsearchBufferingHints `json:"bufferingHints,omitempty"` + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterEndpoint *string `json:"clusterEndpoint,omitempty"` + + DomainARN *string `json:"domainARN,omitempty"` + + IndexName *string `json:"indexName,omitempty"` + + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon ES. + RetryOptions *ElasticsearchRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"` + + TypeName *string `json:"typeName,omitempty"` + // The details of the VPC of the Amazon ES destination. 
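+ // Illustrative note (not part of the generated output): a hypothetical
+ // fragment pairing the DynamicPartitioningConfiguration declared above with
+ // a partitioned S3 prefix; the MetadataExtraction processor settings follow
+ // the AWS dynamic-partitioning documentation and are assumptions here:
+ //
+ //    dynamicPartitioningConfiguration:
+ //      enabled: true
+ //      retryOptions:
+ //        durationInSeconds: 300
+ //    prefix: "events/customer=!{partitionKeyFromQuery:customer_id}/"
+ //    processingConfiguration:
+ //      enabled: true
+ //      processors:
+ //        - type_: MetadataExtraction
+ //          parameters:
+ //            - parameterName: MetadataExtractionQuery
+ //              parameterValue: "{customer_id: .customer_id}"
+ //            - parameterName: JsonParsingEngine
+ //              parameterValue: JQ-1.6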
+ VPCConfigurationDescription *VPCConfigurationDescription `json:"vpcConfigurationDescription,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type ElasticsearchDestinationUpdate struct {
+ // Describes the buffering to perform before delivering data to the Amazon ES
+ // destination.
+ BufferingHints *ElasticsearchBufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ ClusterEndpoint *string `json:"clusterEndpoint,omitempty"`
+
+ DomainARN *string `json:"domainARN,omitempty"`
+
+ IndexName *string `json:"indexName,omitempty"`
+
+ IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // Configures retry behavior in case Kinesis Data Firehose is unable to deliver
+ // documents to Amazon ES.
+ RetryOptions *ElasticsearchRetryOptions `json:"retryOptions,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ TypeName *string `json:"typeName,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type ElasticsearchRetryOptions struct {
+ DurationInSeconds *int64 `json:"durationInSeconds,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type EncryptionConfiguration struct {
+ // Describes an encryption key for a destination in Amazon S3.
+ KMSEncryptionConfig *KMSEncryptionConfig `json:"kmsEncryptionConfig,omitempty"`
+
+ NoEncryptionConfig *string `json:"noEncryptionConfig,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type ExtendedS3DestinationConfiguration struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Specifies that you want Kinesis Data Firehose to convert data from the JSON
+ // format to the Parquet or ORC format before writing it to Amazon S3. Kinesis
+ // Data Firehose uses the serializer and deserializer that you specify, in addition
+ // to the column information from the Amazon Web Services Glue table, to deserialize
+ // your input data from JSON and then serialize it to the Parquet or ORC format.
+ // For more information, see Kinesis Data Firehose Record Format Conversion
+ // (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html).
+ DataFormatConversionConfiguration *DataFormatConversionConfiguration `json:"dataFormatConversionConfiguration,omitempty"`
+ // The configuration of the dynamic partitioning mechanism that creates smaller
+ // data sets from the streaming data by partitioning it based on partition keys.
+ // Currently, dynamic partitioning is only supported for Amazon S3 destinations.
+ DynamicPartitioningConfiguration *DynamicPartitioningConfiguration `json:"dynamicPartitioningConfiguration,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+ // Describes the configuration of a destination in Amazon S3.
+ S3BackupConfiguration *S3DestinationConfiguration `json:"s3BackupConfiguration,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type ExtendedS3DestinationDescription struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Specifies that you want Kinesis Data Firehose to convert data from the JSON
+ // format to the Parquet or ORC format before writing it to Amazon S3. Kinesis
+ // Data Firehose uses the serializer and deserializer that you specify, in addition
+ // to the column information from the Amazon Web Services Glue table, to deserialize
+ // your input data from JSON and then serialize it to the Parquet or ORC format.
+ // For more information, see Kinesis Data Firehose Record Format Conversion
+ // (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html).
+ DataFormatConversionConfiguration *DataFormatConversionConfiguration `json:"dataFormatConversionConfiguration,omitempty"`
+ // The configuration of the dynamic partitioning mechanism that creates smaller
+ // data sets from the streaming data by partitioning it based on partition keys.
+ // Currently, dynamic partitioning is only supported for Amazon S3 destinations.
+ DynamicPartitioningConfiguration *DynamicPartitioningConfiguration `json:"dynamicPartitioningConfiguration,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+ // Describes a destination in Amazon S3.
+ S3BackupDescription *S3DestinationDescription `json:"s3BackupDescription,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type ExtendedS3DestinationUpdate struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Specifies that you want Kinesis Data Firehose to convert data from the JSON
+ // format to the Parquet or ORC format before writing it to Amazon S3. Kinesis
+ // Data Firehose uses the serializer and deserializer that you specify, in addition
+ // to the column information from the Amazon Web Services Glue table, to deserialize
+ // your input data from JSON and then serialize it to the Parquet or ORC format.
+ // For more information, see Kinesis Data Firehose Record Format Conversion
+ // (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html).
+ DataFormatConversionConfiguration *DataFormatConversionConfiguration `json:"dataFormatConversionConfiguration,omitempty"`
+ // The configuration of the dynamic partitioning mechanism that creates smaller
+ // data sets from the streaming data by partitioning it based on partition keys.
+ // Currently, dynamic partitioning is only supported for Amazon S3 destinations.
+ DynamicPartitioningConfiguration *DynamicPartitioningConfiguration `json:"dynamicPartitioningConfiguration,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type FailureDescription struct {
+ Details *string `json:"details,omitempty"`
+
+ Type *string `json:"type_,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointBufferingHints struct {
+ IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"`
+
+ SizeInMBs *int64 `json:"sizeInMBs,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointCommonAttribute struct {
+ AttributeName *string `json:"attributeName,omitempty"`
+
+ AttributeValue *string `json:"attributeValue,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointConfiguration struct {
+ AccessKey *string `json:"accessKey,omitempty"`
+
+ Name *string `json:"name,omitempty"`
+
+ URL *string `json:"url,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointDescription struct {
+ Name *string `json:"name,omitempty"`
+
+ URL *string `json:"url,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointDestinationConfiguration struct {
+ // Describes the buffering options that can be applied before data is delivered
+ // to the HTTP endpoint destination. Kinesis Data Firehose treats these options
+ // as hints, and it might choose to use more optimal values. The SizeInMBs and
+ // IntervalInSeconds parameters are optional. However, if you specify a value for
+ // one of them, you must also provide a value for the other.
+ BufferingHints *HTTPEndpointBufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+ // Describes the configuration of the HTTP endpoint to which Kinesis Firehose
+ // delivers data.
+ EndpointConfiguration *HTTPEndpointConfiguration `json:"endpointConfiguration,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // The configuration of the HTTP endpoint request.
+ RequestConfiguration *HTTPEndpointRequestConfiguration `json:"requestConfiguration,omitempty"`
+ // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver
+ // data to the specified HTTP endpoint destination, or if it doesn't receive
+ // a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+ RetryOptions *HTTPEndpointRetryOptions `json:"retryOptions,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+ // Describes the configuration of a destination in Amazon S3.
+ S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointDestinationDescription struct {
+ // Describes the buffering options that can be applied before data is delivered
+ // to the HTTP endpoint destination. Kinesis Data Firehose treats these options
+ // as hints, and it might choose to use more optimal values. The SizeInMBs and
+ // IntervalInSeconds parameters are optional. However, if you specify a value for
+ // one of them, you must also provide a value for the other.
+ BufferingHints *HTTPEndpointBufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
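+ // Illustrative note (not part of the generated output): a hypothetical
+ // fragment for an HTTP endpoint destination using the JSON tags above; the
+ // parent key name and all values are assumptions:
+ //
+ //    httpEndpointDestinationConfiguration:
+ //      endpointConfiguration:
+ //        name: observability-sink
+ //        url: https://ingest.example.com/v1/events
+ //      requestConfiguration:
+ //        contentEncoding: GZIP
+ //        commonAttributes:
+ //          - attributeName: environment
+ //            attributeValue: production
+ //      s3BackupMode: FailedDataOnly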
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+ // Describes the HTTP endpoint selected as the destination.
+ EndpointConfiguration *HTTPEndpointDescription `json:"endpointConfiguration,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // The configuration of the HTTP endpoint request.
+ RequestConfiguration *HTTPEndpointRequestConfiguration `json:"requestConfiguration,omitempty"`
+ // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver
+ // data to the specified HTTP endpoint destination, or if it doesn't receive
+ // a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+ RetryOptions *HTTPEndpointRetryOptions `json:"retryOptions,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+ // Describes a destination in Amazon S3.
+ S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointDestinationUpdate struct {
+ // Describes the buffering options that can be applied before data is delivered
+ // to the HTTP endpoint destination. Kinesis Data Firehose treats these options
+ // as hints, and it might choose to use more optimal values. The SizeInMBs and
+ // IntervalInSeconds parameters are optional. However, if you specify a value for
+ // one of them, you must also provide a value for the other.
+ BufferingHints *HTTPEndpointBufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+ // Describes the configuration of the HTTP endpoint to which Kinesis Firehose
+ // delivers data.
+ EndpointConfiguration *HTTPEndpointConfiguration `json:"endpointConfiguration,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // The configuration of the HTTP endpoint request.
+ RequestConfiguration *HTTPEndpointRequestConfiguration `json:"requestConfiguration,omitempty"`
+ // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver
+ // data to the specified HTTP endpoint destination, or if it doesn't receive
+ // a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+ RetryOptions *HTTPEndpointRetryOptions `json:"retryOptions,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointRequestConfiguration struct {
+ CommonAttributes []*HTTPEndpointCommonAttribute `json:"commonAttributes,omitempty"`
+
+ ContentEncoding *string `json:"contentEncoding,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HTTPEndpointRetryOptions struct {
+ DurationInSeconds *int64 `json:"durationInSeconds,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type HiveJSONSerDe struct {
+ TimestampFormats []*string `json:"timestampFormats,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type InputFormatConfiguration struct {
+ // The deserializer you want Kinesis Data Firehose to use for converting the
+ // input data from JSON. Kinesis Data Firehose then serializes the data to its
+ // final format using the Serializer. Kinesis Data Firehose supports two types
+ // of deserializers: the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON)
+ // and the OpenX JSON SerDe (https://github.com/rcongiu/Hive-JSON-Serde).
+ Deserializer *Deserializer `json:"deserializer,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type KMSEncryptionConfig struct {
+ AWSKMSKeyARN *string `json:"awsKMSKeyARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type KinesisStreamSourceConfiguration struct {
+ KinesisStreamARN *string `json:"kinesisStreamARN,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type KinesisStreamSourceDescription struct {
+ DeliveryStartTimestamp *metav1.Time `json:"deliveryStartTimestamp,omitempty"`
+
+ KinesisStreamARN *string `json:"kinesisStreamARN,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type OpenXJSONSerDe struct {
+ CaseInsensitive *bool `json:"caseInsensitive,omitempty"`
+
+ ColumnToJSONKeyMappings map[string]*string `json:"columnToJSONKeyMappings,omitempty"`
+
+ ConvertDotsInJSONKeysToUnderscores *bool `json:"convertDotsInJSONKeysToUnderscores,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type OrcSerDe struct {
+ BlockSizeBytes *int64 `json:"blockSizeBytes,omitempty"`
+
+ BloomFilterColumns []*string `json:"bloomFilterColumns,omitempty"`
+
+ BloomFilterFalsePositiveProbability *float64 `json:"bloomFilterFalsePositiveProbability,omitempty"`
+
+ Compression *string `json:"compression,omitempty"`
+
+ DictionaryKeyThreshold *float64 `json:"dictionaryKeyThreshold,omitempty"`
+
+ EnablePadding *bool `json:"enablePadding,omitempty"`
+
+ FormatVersion *string `json:"formatVersion,omitempty"`
+
+ PaddingTolerance *float64 `json:"paddingTolerance,omitempty"`
+
+ RowIndexStride *int64 `json:"rowIndexStride,omitempty"`
+
+ StripeSizeBytes *int64 `json:"stripeSizeBytes,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type OutputFormatConfiguration struct {
+ // The serializer that you want Kinesis Data Firehose to use to convert data
+ // to the target format before writing it to Amazon S3. Kinesis Data Firehose
+ // supports two types of serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html)
+ // and the Parquet SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html).
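+ // Illustrative note (not part of the generated output): a hypothetical
+ // deserializer fragment for the OpenXJSONSerDe declared above, mapping a
+ // JSON key onto a differently named column; values are placeholders:
+ //
+ //    openXJSONSerDe:
+ //      caseInsensitive: true
+ //      convertDotsInJSONKeysToUnderscores: true
+ //      columnToJSONKeyMappings:
+ //        ts: timestamp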
+ Serializer *Serializer `json:"serializer,omitempty"` +} + +// +kubebuilder:skipversion +type ParquetSerDe struct { + BlockSizeBytes *int64 `json:"blockSizeBytes,omitempty"` + + Compression *string `json:"compression,omitempty"` + + EnableDictionaryCompression *bool `json:"enableDictionaryCompression,omitempty"` + + MaxPaddingBytes *int64 `json:"maxPaddingBytes,omitempty"` + + PageSizeBytes *int64 `json:"pageSizeBytes,omitempty"` + + WriterVersion *string `json:"writerVersion,omitempty"` +} + +// +kubebuilder:skipversion +type ProcessingConfiguration struct { + Enabled *bool `json:"enabled,omitempty"` + + Processors []*Processor `json:"processors,omitempty"` +} + +// +kubebuilder:skipversion +type Processor struct { + Parameters []*ProcessorParameter `json:"parameters,omitempty"` + + Type *string `json:"type_,omitempty"` +} + +// +kubebuilder:skipversion +type ProcessorParameter struct { + ParameterName *string `json:"parameterName,omitempty"` + + ParameterValue *string `json:"parameterValue,omitempty"` +} + +// +kubebuilder:skipversion +type RedshiftDestinationConfiguration struct { + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterJDBCURL *string `json:"clusterJDBCURL,omitempty"` + // Describes a COPY command for Amazon Redshift. + CopyCommand *CopyCommand `json:"copyCommand,omitempty"` + + Password *string `json:"password,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon Redshift. + RetryOptions *RedshiftRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3BackupConfiguration *S3DestinationConfiguration `json:"s3BackupConfiguration,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"` + + Username *string `json:"username,omitempty"` +} + +// +kubebuilder:skipversion +type RedshiftDestinationDescription struct { + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + ClusterJDBCURL *string `json:"clusterJDBCURL,omitempty"` + // Describes a COPY command for Amazon Redshift. + CopyCommand *CopyCommand `json:"copyCommand,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Amazon Redshift. + RetryOptions *RedshiftRetryOptions `json:"retryOptions,omitempty"` + + RoleARN *string `json:"roleARN,omitempty"` + // Describes a destination in Amazon S3. + S3BackupDescription *S3DestinationDescription `json:"s3BackupDescription,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes a destination in Amazon S3. 
+ S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"`
+
+ Username *string `json:"username,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type RedshiftDestinationUpdate struct {
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ ClusterJDBCURL *string `json:"clusterJDBCURL,omitempty"`
+ // Describes a COPY command for Amazon Redshift.
+ CopyCommand *CopyCommand `json:"copyCommand,omitempty"`
+
+ Password *string `json:"password,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // Configures retry behavior in case Kinesis Data Firehose is unable to deliver
+ // documents to Amazon Redshift.
+ RetryOptions *RedshiftRetryOptions `json:"retryOptions,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ S3BackupMode *string `json:"s3BackupMode,omitempty"`
+
+ Username *string `json:"username,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type RedshiftRetryOptions struct {
+ DurationInSeconds *int64 `json:"durationInSeconds,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type RetryOptions struct {
+ DurationInSeconds *int64 `json:"durationInSeconds,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type S3DestinationConfiguration struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type S3DestinationDescription struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type S3DestinationUpdate struct {
+ BucketARN *string `json:"bucketARN,omitempty"`
+ // Describes hints for the buffering to perform before delivering data to the
+ // destination. These options are treated as hints, and therefore Kinesis Data
+ // Firehose might choose to use different values when it is optimal. The SizeInMBs
+ // and IntervalInSeconds parameters are optional. However, if you specify a value
+ // for one of them, you must also provide a value for the other.
+ BufferingHints *BufferingHints `json:"bufferingHints,omitempty"`
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ CompressionFormat *string `json:"compressionFormat,omitempty"`
+ // Describes the encryption for a destination in Amazon S3.
+ EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
+
+ ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty"`
+
+ Prefix *string `json:"prefix,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type SchemaConfiguration struct {
+ CatalogID *string `json:"catalogID,omitempty"`
+
+ DatabaseName *string `json:"databaseName,omitempty"`
+
+ Region *string `json:"region,omitempty"`
+
+ RoleARN *string `json:"roleARN,omitempty"`
+
+ TableName *string `json:"tableName,omitempty"`
+
+ VersionID *string `json:"versionID,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type Serializer struct {
+ // A serializer to use for converting data to the ORC format before storing
+ // it in Amazon S3. For more information, see Apache ORC (https://orc.apache.org/docs/).
+ OrcSerDe *OrcSerDe `json:"orcSerDe,omitempty"`
+ // A serializer to use for converting data to the Parquet format before storing
+ // it in Amazon S3. For more information, see Apache Parquet (https://parquet.apache.org/documentation/latest/).
+ ParquetSerDe *ParquetSerDe `json:"parquetSerDe,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type SourceDescription struct {
+ // Details about a Kinesis data stream used as the source for a Kinesis Data
+ // Firehose delivery stream.
+ KinesisStreamSourceDescription *KinesisStreamSourceDescription `json:"kinesisStreamSourceDescription,omitempty"`
+}
+
+// +kubebuilder:skipversion
+type SplunkDestinationConfiguration struct {
+ // Describes the Amazon CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"`
+
+ HECAcknowledgmentTimeoutInSeconds *int64 `json:"hECAcknowledgmentTimeoutInSeconds,omitempty"`
+
+ HECEndpoint *string `json:"hECEndpoint,omitempty"`
+
+ HECEndpointType *string `json:"hECEndpointType,omitempty"`
+
+ HECToken *string `json:"hECToken,omitempty"`
+ // Describes a data processing configuration.
+ ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"`
+ // Configures retry behavior in case Kinesis Data Firehose is unable to deliver
+ // documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
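+ // For example, a five-minute retry window (a hypothetical value):
+ //   retryOptions:
+ //     durationInSeconds: 300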
+ RetryOptions *SplunkRetryOptions `json:"retryOptions,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `json:"s3Configuration,omitempty"` +} + +// +kubebuilder:skipversion +type SplunkDestinationDescription struct { + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + HECAcknowledgmentTimeoutInSeconds *int64 `json:"hECAcknowledgmentTimeoutInSeconds,omitempty"` + + HECEndpoint *string `json:"hECEndpoint,omitempty"` + + HECEndpointType *string `json:"hECEndpointType,omitempty"` + + HECToken *string `json:"hECToken,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Splunk, or if it doesn't receive an acknowledgment from Splunk. + RetryOptions *SplunkRetryOptions `json:"retryOptions,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `json:"s3DestinationDescription,omitempty"` +} + +// +kubebuilder:skipversion +type SplunkDestinationUpdate struct { + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `json:"cloudWatchLoggingOptions,omitempty"` + + HECAcknowledgmentTimeoutInSeconds *int64 `json:"hECAcknowledgmentTimeoutInSeconds,omitempty"` + + HECEndpoint *string `json:"hECEndpoint,omitempty"` + + HECEndpointType *string `json:"hECEndpointType,omitempty"` + + HECToken *string `json:"hECToken,omitempty"` + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `json:"processingConfiguration,omitempty"` + // Configures retry behavior in case Kinesis Data Firehose is unable to deliver + // documents to Splunk, or if it doesn't receive an acknowledgment from Splunk. 
+ RetryOptions *SplunkRetryOptions `json:"retryOptions,omitempty"` + + S3BackupMode *string `json:"s3BackupMode,omitempty"` +} + +// +kubebuilder:skipversion +type SplunkRetryOptions struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` +} + +// +kubebuilder:skipversion +type Tag struct { + Key *string `json:"key,omitempty"` + + Value *string `json:"value,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConfiguration struct { + RoleARN *string `json:"roleARN,omitempty"` + + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + + SubnetIDs []*string `json:"subnetIDs,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConfigurationDescription struct { + RoleARN *string `json:"roleARN,omitempty"` + + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + + SubnetIDs []*string `json:"subnetIDs,omitempty"` + + VPCID *string `json:"vpcID,omitempty"` +} diff --git a/examples/firehose/deliverystream.yaml b/examples/firehose/deliverystream.yaml new file mode 100644 index 0000000000..6a70ee3f22 --- /dev/null +++ b/examples/firehose/deliverystream.yaml @@ -0,0 +1,14 @@ +apiVersion: firehose.aws.crossplane.io/v1alpha1 +kind: DeliveryStream +metadata: + name: firehose-deliverystream +spec: + forProvider: + region: us-east-1 + deliveryStreamName: firehose-deliverystream + deliveryStreamType: DirectPut + extendedS3DestinationConfiguration: + roleARN: arn:aws:iam::000000000000:role/Firehose-Example-Role + bucketARN: arn:aws:s3:::bucket-example + providerConfigRef: + name: example diff --git a/package/crds/firehose.aws.crossplane.io_deliverystreams.yaml b/package/crds/firehose.aws.crossplane.io_deliverystreams.yaml new file mode 100644 index 0000000000..f195f11c4b --- /dev/null +++ b/package/crds/firehose.aws.crossplane.io_deliverystreams.yaml @@ -0,0 +1,1688 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: deliverystreams.firehose.aws.crossplane.io +spec: + group: firehose.aws.crossplane.io + names: + categories: + - crossplane + - managed + - aws + kind: DeliveryStream + listKind: DeliveryStreamList + plural: deliverystreams + singular: deliverystream + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DeliveryStream is the Schema for the DeliveryStreams API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DeliveryStreamSpec defines the desired state of DeliveryStream + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + description: DeliveryStreamParameters defines the desired state of + DeliveryStream + properties: + amazonOpenSearchServerlessDestinationConfiguration: + description: The destination in the Serverless offering for Amazon + OpenSearch Service. You can specify only one destination. + properties: + bufferingHints: + description: Describes the buffering to perform before delivering + data to the Serverless offering for Amazon OpenSearch Service + destination. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + collectionEndpoint: + type: string + indexName: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + retryOptions: + description: Configures retry behavior in case Kinesis Data + Firehose is unable to deliver documents to the Serverless + offering for Amazon OpenSearch Service. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + roleARN: + type: string + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. 
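+          # A hypothetical example of these options in a manifest (names are
+          # illustrative only):
+          #   cloudWatchLoggingOptions:
+          #     enabled: true
+          #     logGroupName: /aws/kinesisfirehose/example
+          #     logStreamName: DestinationDelivery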
+ properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + vpcConfiguration: + description: The details of the VPC of the Amazon ES destination. + properties: + roleARN: + type: string + securityGroupIDs: + items: + type: string + type: array + subnetIDs: + items: + type: string + type: array + type: object + type: object + amazonopensearchserviceDestinationConfiguration: + description: The destination in Amazon OpenSearch Service. You + can specify only one destination. + properties: + bufferingHints: + description: Describes the buffering to perform before delivering + data to the Amazon OpenSearch Service destination. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + clusterEndpoint: + type: string + domainARN: + type: string + indexName: + type: string + indexRotationPeriod: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + retryOptions: + description: Configures retry behavior in case Kinesis Data + Firehose is unable to deliver documents to Amazon OpenSearch + Service. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + roleARN: + type: string + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. 
+ properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + typeName: + type: string + vpcConfiguration: + description: The details of the VPC of the Amazon ES destination. + properties: + roleARN: + type: string + securityGroupIDs: + items: + type: string + type: array + subnetIDs: + items: + type: string + type: array + type: object + type: object + deliveryStreamEncryptionConfigurationInput: + description: Used to specify the type and Amazon Resource Name + (ARN) of the KMS key needed for Server-Side Encryption (SSE). + properties: + keyARN: + type: string + keyType: + type: string + type: object + deliveryStreamName: + description: The name of the delivery stream. This name must be + unique per Amazon Web Services account in the same Amazon Web + Services Region. If the delivery streams are in different accounts + or different Regions, you can have multiple delivery streams + with the same name. + type: string + deliveryStreamType: + description: "The delivery stream type. This parameter can be + one of the following values: \n * DirectPut: Provider applications + access the delivery stream directly. \n * KinesisStreamAsSource: + The delivery stream uses a Kinesis data stream as a source." + type: string + elasticsearchDestinationConfiguration: + description: The destination in Amazon ES. You can specify only + one destination. + properties: + bufferingHints: + description: Describes the buffering to perform before delivering + data to the Amazon ES destination. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + clusterEndpoint: + type: string + domainARN: + type: string + indexName: + type: string + indexRotationPeriod: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + retryOptions: + description: Configures retry behavior in case Kinesis Data + Firehose is unable to deliver documents to Amazon ES. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + roleARN: + type: string + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. 
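+          # For illustration only (hypothetical values), a manifest could set:
+          #   bufferingHints:
+          #     sizeInMBs: 5
+          #     intervalInSeconds: 300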
+ properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + typeName: + type: string + vpcConfiguration: + description: The details of the VPC of the Amazon ES destination. + properties: + roleARN: + type: string + securityGroupIDs: + items: + type: string + type: array + subnetIDs: + items: + type: string + type: array + type: object + type: object + extendedS3DestinationConfiguration: + description: The destination in Amazon S3. You can specify only + one destination. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must also + provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + dataFormatConversionConfiguration: + description: Specifies that you want Kinesis Data Firehose + to convert data from the JSON format to the Parquet or ORC + format before writing it to Amazon S3. Kinesis Data Firehose + uses the serializer and deserializer that you specify, in + addition to the column information from the Amazon Web Services + Glue table, to deserialize your input data from JSON and + then serialize it to the Parquet or ORC format. For more + information, see Kinesis Data Firehose Record Format Conversion + (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html). + properties: + enabled: + type: boolean + inputFormatConfiguration: + description: Specifies the deserializer you want to use + to convert the format of the input data. This parameter + is required if Enabled is set to true. + properties: + deserializer: + description: 'The deserializer you want Kinesis Data + Firehose to use for converting the input data from + JSON. Kinesis Data Firehose then serializes the + data to its final format using the Serializer. Kinesis + Data Firehose supports two types of deserializers: + the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON) + and the OpenX JSON SerDe (https://github.com/rcongiu/Hive-JSON-Serde).' + properties: + hiveJSONSerDe: + description: The native Hive / HCatalog JsonSerDe. 
+ Used by Kinesis Data Firehose for deserializing + data, which means converting it from the JSON + format in preparation for serializing it to + the Parquet or ORC format. This is one of two + deserializers you can choose, depending on which + one offers the functionality you need. The other + option is the OpenX SerDe. + properties: + timestampFormats: + items: + type: string + type: array + type: object + openXJSONSerDe: + description: The OpenX SerDe. Used by Kinesis + Data Firehose for deserializing data, which + means converting it from the JSON format in + preparation for serializing it to the Parquet + or ORC format. This is one of two deserializers + you can choose, depending on which one offers + the functionality you need. The other option + is the native Hive / HCatalog JsonSerDe. + properties: + caseInsensitive: + type: boolean + columnToJSONKeyMappings: + additionalProperties: + type: string + type: object + convertDotsInJSONKeysToUnderscores: + type: boolean + type: object + type: object + type: object + outputFormatConfiguration: + description: Specifies the serializer that you want Kinesis + Data Firehose to use to convert the format of your data + before it writes it to Amazon S3. This parameter is + required if Enabled is set to true. + properties: + serializer: + description: 'The serializer that you want Kinesis + Data Firehose to use to convert data to the target + format before writing it to Amazon S3. Kinesis Data + Firehose supports two types of serializers: the + ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) + and the Parquet SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html).' + properties: + orcSerDe: + description: A serializer to use for converting + data to the ORC format before storing it in + Amazon S3. For more information, see Apache + ORC (https://orc.apache.org/docs/). + properties: + blockSizeBytes: + format: int64 + type: integer + bloomFilterColumns: + items: + type: string + type: array + bloomFilterFalsePositiveProbability: + type: number + compression: + type: string + dictionaryKeyThreshold: + type: number + enablePadding: + type: boolean + formatVersion: + type: string + paddingTolerance: + type: number + rowIndexStride: + format: int64 + type: integer + stripeSizeBytes: + format: int64 + type: integer + type: object + parquetSerDe: + description: A serializer to use for converting + data to the Parquet format before storing it + in Amazon S3. For more information, see Apache + Parquet (https://parquet.apache.org/documentation/latest/). + properties: + blockSizeBytes: + format: int64 + type: integer + compression: + type: string + enableDictionaryCompression: + type: boolean + maxPaddingBytes: + format: int64 + type: integer + pageSizeBytes: + format: int64 + type: integer + writerVersion: + type: string + type: object + type: object + type: object + schemaConfiguration: + description: Specifies the schema to which you want Kinesis + Data Firehose to configure your data before it writes + it to Amazon S3. This parameter is required if Enabled + is set to true. 
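+          # A hypothetical Glue schema reference (database, table and role
+          # names are illustrative only):
+          #   schemaConfiguration:
+          #     databaseName: example_db
+          #     tableName: example_table
+          #     region: us-east-1
+          #     roleARN: arn:aws:iam::000000000000:role/Firehose-Example-Role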
+ properties: + catalogID: + type: string + databaseName: + type: string + region: + type: string + roleARN: + type: string + tableName: + type: string + versionID: + type: string + type: object + type: object + dynamicPartitioningConfiguration: + description: The configuration of the dynamic partitioning + mechanism that creates smaller data sets from the streaming + data by partitioning it based on partition keys. Currently, + dynamic partitioning is only supported for Amazon S3 destinations. + properties: + enabled: + type: boolean + retryOptions: + description: The retry behavior in case Kinesis Data Firehose + is unable to deliver data to an Amazon S3 prefix. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + type: object + encryptionConfiguration: + description: Describes the encryption for a destination in + Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + roleARN: + type: string + s3BackupConfiguration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + s3BackupMode: + type: string + type: object + httpEndpointDestinationConfiguration: + description: Enables configuring Kinesis Firehose to deliver data + to any HTTP endpoint destination. You can specify only one destination. + properties: + bufferingHints: + description: Describes the buffering options that can be applied + before data is delivered to the HTTP endpoint destination. + Kinesis Data Firehose treats these options as hints, and + it might choose to use more optimal values. The SizeInMBs + and IntervalInSeconds parameters are optional. 
However, + if specify a value for one of them, you must also provide + a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + endpointConfiguration: + description: Describes the configuration of the HTTP endpoint + to which Kinesis Firehose delivers data. + properties: + accessKey: + type: string + name: + type: string + url: + type: string + type: object + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + requestConfiguration: + description: The configuration of the HTTP endpoint request. + properties: + commonAttributes: + items: + properties: + attributeName: + type: string + attributeValue: + type: string + type: object + type: array + contentEncoding: + type: string + type: object + retryOptions: + description: Describes the retry behavior in case Kinesis + Data Firehose is unable to deliver data to the specified + HTTP endpoint destination, or if it doesn't receive a valid + acknowledgment of receipt from the specified HTTP endpoint + destination. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + roleARN: + type: string + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + type: object + kinesisStreamSourceConfiguration: + description: When a Kinesis data stream is used as the source + for the delivery stream, a KinesisStreamSourceConfiguration + containing the Kinesis data stream Amazon Resource Name (ARN) + and the role ARN for the source stream. 
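+          # A hypothetical source configuration for a KinesisStreamAsSource
+          # delivery stream (ARNs are illustrative only):
+          #   kinesisStreamSourceConfiguration:
+          #     kinesisStreamARN: arn:aws:kinesis:us-east-1:000000000000:stream/example
+          #     roleARN: arn:aws:iam::000000000000:role/Firehose-Example-Role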
+ properties: + kinesisStreamARN: + type: string + roleARN: + type: string + type: object + kmsKeyARN: + type: string + kmsKeyARNRef: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyARNSelector: + description: A Selector selects an object. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with the + same controller reference as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + redshiftDestinationConfiguration: + description: The destination in Amazon Redshift. You can specify + only one destination. + properties: + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + clusterJDBCURL: + type: string + copyCommand: + description: Describes a COPY command for Amazon Redshift. + properties: + copyOptions: + type: string + dataTableColumns: + type: string + dataTableName: + type: string + type: object + password: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + retryOptions: + description: Configures retry behavior in case Kinesis Data + Firehose is unable to deliver documents to Amazon Redshift. 
+ properties: + durationInSeconds: + format: int64 + type: integer + type: object + roleARN: + type: string + s3BackupConfiguration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + username: + type: string + type: object + region: + description: Region is which region the DeliveryStream will be + created. + type: string + s3DestinationConfiguration: + description: '[Deprecated] The destination in Amazon S3. You can + specify only one destination.' + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. 
+ However, if specify a value for one of them, you must also + provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination in + Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. + properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + splunkDestinationConfiguration: + description: The destination in Splunk. You can specify only one + destination. + properties: + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + hECAcknowledgmentTimeoutInSeconds: + format: int64 + type: integer + hECEndpoint: + type: string + hECEndpointType: + type: string + hECToken: + type: string + processingConfiguration: + description: Describes a data processing configuration. + properties: + enabled: + type: boolean + processors: + items: + properties: + parameters: + items: + properties: + parameterName: + type: string + parameterValue: + type: string + type: object + type: array + type_: + type: string + type: object + type: array + type: object + retryOptions: + description: Configures retry behavior in case Kinesis Data + Firehose is unable to deliver documents to Splunk, or if + it doesn't receive an acknowledgment from Splunk. + properties: + durationInSeconds: + format: int64 + type: integer + type: object + s3BackupMode: + type: string + s3Configuration: + description: Describes the configuration of a destination + in Amazon S3. + properties: + bucketARN: + type: string + bufferingHints: + description: Describes hints for the buffering to perform + before delivering data to the destination. These options + are treated as hints, and therefore Kinesis Data Firehose + might choose to use different values when it is optimal. + The SizeInMBs and IntervalInSeconds parameters are optional. + However, if specify a value for one of them, you must + also provide a value for the other. + properties: + intervalInSeconds: + format: int64 + type: integer + sizeInMBs: + format: int64 + type: integer + type: object + cloudWatchLoggingOptions: + description: Describes the Amazon CloudWatch logging options + for your delivery stream. + properties: + enabled: + type: boolean + logGroupName: + type: string + logStreamName: + type: string + type: object + compressionFormat: + type: string + encryptionConfiguration: + description: Describes the encryption for a destination + in Amazon S3. + properties: + kmsEncryptionConfig: + description: Describes an encryption key for a destination + in Amazon S3. 
+ properties: + awsKMSKeyARN: + type: string + type: object + noEncryptionConfig: + type: string + type: object + errorOutputPrefix: + type: string + prefix: + type: string + roleARN: + type: string + type: object + type: object + tags: + description: "A set of tags to assign to the delivery stream. + A tag is a key-value pair that you can define and assign to + Amazon Web Services resources. Tags are metadata. For example, + you can add friendly names and descriptions or other types of + information that can help you distinguish the delivery stream. + For more information about tags, see Using Cost Allocation Tags + (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + in the Amazon Web Services Billing and Cost Management User + Guide. \n You can specify up to 50 tags when creating a delivery + stream." + items: + properties: + key: + type: string + value: + type: string + type: object + type: array + required: + - deliveryStreamName + - region + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. 
`providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. 
Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DeliveryStreamStatus defines the observed state of DeliveryStream. + properties: + atProvider: + description: DeliveryStreamObservation defines the observed state + of DeliveryStream + properties: + deliveryStreamARN: + description: The Amazon Resource Name (ARN) of the delivery stream. + For more information, see Amazon Resource Names (ARNs) and Amazon + Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + type: string + deliveryStreamStatus: + description: The status of the delivery stream. If the status + of a delivery stream is CREATING_FAILED, this status doesn't + change, and you can't invoke CreateDeliveryStream again on it. + However, you can invoke the DeleteDeliveryStream operation to + delete it. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/controller/aws.go b/pkg/controller/aws.go index 42e9740c8f..89f608f015 100644 --- a/pkg/controller/aws.go +++ b/pkg/controller/aws.go @@ -47,6 +47,7 @@ import ( "github.com/crossplane-contrib/provider-aws/pkg/controller/elasticloadbalancing" "github.com/crossplane-contrib/provider-aws/pkg/controller/elbv2" "github.com/crossplane-contrib/provider-aws/pkg/controller/emrcontainers" + "github.com/crossplane-contrib/provider-aws/pkg/controller/firehose" "github.com/crossplane-contrib/provider-aws/pkg/controller/globalaccelerator" "github.com/crossplane-contrib/provider-aws/pkg/controller/glue" "github.com/crossplane-contrib/provider-aws/pkg/controller/iam" @@ -109,6 +110,7 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { elasticloadbalancing.Setup, elbv2.Setup, emrcontainers.Setup, + firehose.Setup, glue.Setup, globalaccelerator.Setup, iam.Setup, diff --git a/pkg/controller/firehose/deliverystream/setup.go b/pkg/controller/firehose/deliverystream/setup.go new file mode 100644 index 0000000000..7778a33907 --- /dev/null +++ b/pkg/controller/firehose/deliverystream/setup.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deliverystream + +import ( + "context" + + svcsdk "github.com/aws/aws-sdk-go/service/firehose" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/meta" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + "github.com/crossplane/crossplane-runtime/pkg/resource" + ctrl "sigs.k8s.io/controller-runtime" + + svcapitypes "github.com/crossplane-contrib/provider-aws/apis/firehose/v1alpha1" + "github.com/crossplane-contrib/provider-aws/apis/v1alpha1" + "github.com/crossplane-contrib/provider-aws/pkg/features" + "github.com/crossplane-contrib/provider-aws/pkg/utils/pointer" +) + +// SetupDeliveryStream adds a controller that reconciles DeliveryStream. 
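+// The generated external client is customized through the pre/post hooks
+// wired up below: each AWS call uses the managed resource's external name
+// as the DeliveryStreamName, postObserve maps the delivery stream status to
+// the standard Crossplane conditions, and the stream ARN and name are
+// published as connection details.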
+func SetupDeliveryStream(mgr ctrl.Manager, o controller.Options) error { + name := managed.ControllerName(svcapitypes.DeliveryStreamGroupKind) + opts := []option{ + func(e *external) { + + e.preObserve = preObserve + e.postObserve = postObserve + e.preDelete = preDelete + e.postCreate = postCreate + e.preCreate = preCreate + + }, + } + + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.Features.Enabled(features.EnableAlphaExternalSecretStores) { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), v1alpha1.StoreConfigGroupVersionKind)) + } + + reconcilerOpts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(&connector{kube: mgr.GetClient(), opts: opts}), + managed.WithReferenceResolver(managed.NewAPISimpleReferenceResolver(mgr.GetClient())), + managed.WithInitializers( + managed.NewDefaultProviderConfig(mgr.GetClient()), + managed.NewNameAsExternalName(mgr.GetClient())), + managed.WithPollInterval(o.PollInterval), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithConnectionPublishers(cps...), + } + + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + reconcilerOpts = append(reconcilerOpts, managed.WithManagementPolicies()) + } + + r := managed.NewReconciler(mgr, + resource.ManagedKind(svcapitypes.DeliveryStreamGroupVersionKind), + reconcilerOpts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(resource.DesiredStateChanged()). + For(&svcapitypes.DeliveryStream{}). + Complete(r) +} + +func preObserve(_ context.Context, cr *svcapitypes.DeliveryStream, obj *svcsdk.DescribeDeliveryStreamInput) error { + obj.DeliveryStreamName = pointer.String(meta.GetExternalName(cr)) + return nil +} + +func preCreate(_ context.Context, cr *svcapitypes.DeliveryStream, obj *svcsdk.CreateDeliveryStreamInput) error { + obj.DeliveryStreamName = pointer.String(meta.GetExternalName(cr)) + return nil +} + +func preDelete(_ context.Context, cr *svcapitypes.DeliveryStream, obj *svcsdk.DeleteDeliveryStreamInput) (bool, error) { + obj.DeliveryStreamName = pointer.String(meta.GetExternalName(cr)) + return false, nil +} + +func postObserve(_ context.Context, cr *svcapitypes.DeliveryStream, obj *svcsdk.DescribeDeliveryStreamOutput, obs managed.ExternalObservation, err error) (managed.ExternalObservation, error) { + if err != nil { + return managed.ExternalObservation{}, err + } + + switch pointer.StringValue(obj.DeliveryStreamDescription.DeliveryStreamStatus) { + case string(svcapitypes.DeliveryStreamStatus_SDK_ACTIVE): + cr.SetConditions(xpv1.Available()) + case string(svcapitypes.DeliveryStreamStatus_SDK_CREATING): + cr.SetConditions(xpv1.Creating()) + case string(svcapitypes.DeliveryStreamStatus_SDK_DELETING): + cr.SetConditions(xpv1.Deleting()) + } + + obs.ConnectionDetails = managed.ConnectionDetails{ + "arn": []byte(pointer.StringValue(obj.DeliveryStreamDescription.DeliveryStreamARN)), + "name": []byte(meta.GetExternalName(cr)), + } + + return obs, nil +} + +func postCreate(_ context.Context, cr *svcapitypes.DeliveryStream, obj *svcsdk.CreateDeliveryStreamOutput, _ managed.ExternalCreation, err error) (managed.ExternalCreation, error) { + if err != nil { + return managed.ExternalCreation{}, err + } + + return managed.ExternalCreation{ExternalNameAssigned: true}, nil +} diff --git a/pkg/controller/firehose/deliverystream/zz_controller.go 
b/pkg/controller/firehose/deliverystream/zz_controller.go new file mode 100644 index 0000000000..f29ea2e43a --- /dev/null +++ b/pkg/controller/firehose/deliverystream/zz_controller.go @@ -0,0 +1,215 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package deliverystream + +import ( + "context" + + svcapi "github.com/aws/aws-sdk-go/service/firehose" + svcsdk "github.com/aws/aws-sdk-go/service/firehose" + svcsdkapi "github.com/aws/aws-sdk-go/service/firehose/firehoseiface" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/meta" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + cpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + + svcapitypes "github.com/crossplane-contrib/provider-aws/apis/firehose/v1alpha1" + connectaws "github.com/crossplane-contrib/provider-aws/pkg/utils/connect/aws" + errorutils "github.com/crossplane-contrib/provider-aws/pkg/utils/errors" +) + +const ( + errUnexpectedObject = "managed resource is not an DeliveryStream resource" + + errCreateSession = "cannot create a new session" + errCreate = "cannot create DeliveryStream in AWS" + errUpdate = "cannot update DeliveryStream in AWS" + errDescribe = "failed to describe DeliveryStream" + errDelete = "failed to delete DeliveryStream" +) + +type connector struct { + kube client.Client + opts []option +} + +func (c *connector) Connect(ctx context.Context, mg cpresource.Managed) (managed.ExternalClient, error) { + cr, ok := mg.(*svcapitypes.DeliveryStream) + if !ok { + return nil, errors.New(errUnexpectedObject) + } + sess, err := connectaws.GetConfigV1(ctx, c.kube, mg, cr.Spec.ForProvider.Region) + if err != nil { + return nil, errors.Wrap(err, errCreateSession) + } + return newExternal(c.kube, svcapi.New(sess), c.opts), nil +} + +func (e *external) Observe(ctx context.Context, mg cpresource.Managed) (managed.ExternalObservation, error) { + cr, ok := mg.(*svcapitypes.DeliveryStream) + if !ok { + return managed.ExternalObservation{}, errors.New(errUnexpectedObject) + } + if meta.GetExternalName(cr) == "" { + return managed.ExternalObservation{ + ResourceExists: false, + }, nil + } + input := GenerateDescribeDeliveryStreamInput(cr) + if err := e.preObserve(ctx, cr, input); err != nil { + return managed.ExternalObservation{}, errors.Wrap(err, "pre-observe failed") + } + resp, err := e.client.DescribeDeliveryStreamWithContext(ctx, input) + if err != nil { + return managed.ExternalObservation{ResourceExists: false}, errorutils.Wrap(cpresource.Ignore(IsNotFound, err), errDescribe) + } + currentSpec := cr.Spec.ForProvider.DeepCopy() + if err := e.lateInitialize(&cr.Spec.ForProvider, resp); err != nil { + return managed.ExternalObservation{}, 
errors.Wrap(err, "late-init failed") + } + GenerateDeliveryStream(resp).Status.AtProvider.DeepCopyInto(&cr.Status.AtProvider) + + upToDate, diff, err := e.isUpToDate(ctx, cr, resp) + if err != nil { + return managed.ExternalObservation{}, errors.Wrap(err, "isUpToDate check failed") + } + return e.postObserve(ctx, cr, resp, managed.ExternalObservation{ + ResourceExists: true, + ResourceUpToDate: upToDate, + Diff: diff, + ResourceLateInitialized: !cmp.Equal(&cr.Spec.ForProvider, currentSpec), + }, nil) +} + +func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.ExternalCreation, error) { + cr, ok := mg.(*svcapitypes.DeliveryStream) + if !ok { + return managed.ExternalCreation{}, errors.New(errUnexpectedObject) + } + cr.Status.SetConditions(xpv1.Creating()) + input := GenerateCreateDeliveryStreamInput(cr) + if err := e.preCreate(ctx, cr, input); err != nil { + return managed.ExternalCreation{}, errors.Wrap(err, "pre-create failed") + } + resp, err := e.client.CreateDeliveryStreamWithContext(ctx, input) + if err != nil { + return managed.ExternalCreation{}, errorutils.Wrap(err, errCreate) + } + + if resp.DeliveryStreamARN != nil { + cr.Status.AtProvider.DeliveryStreamARN = resp.DeliveryStreamARN + } else { + cr.Status.AtProvider.DeliveryStreamARN = nil + } + + return e.postCreate(ctx, cr, resp, managed.ExternalCreation{}, err) +} + +func (e *external) Update(ctx context.Context, mg cpresource.Managed) (managed.ExternalUpdate, error) { + return e.update(ctx, mg) + +} + +func (e *external) Delete(ctx context.Context, mg cpresource.Managed) error { + cr, ok := mg.(*svcapitypes.DeliveryStream) + if !ok { + return errors.New(errUnexpectedObject) + } + cr.Status.SetConditions(xpv1.Deleting()) + input := GenerateDeleteDeliveryStreamInput(cr) + ignore, err := e.preDelete(ctx, cr, input) + if err != nil { + return errors.Wrap(err, "pre-delete failed") + } + if ignore { + return nil + } + resp, err := e.client.DeleteDeliveryStreamWithContext(ctx, input) + return e.postDelete(ctx, cr, resp, errorutils.Wrap(cpresource.Ignore(IsNotFound, err), errDelete)) +} + +type option func(*external) + +func newExternal(kube client.Client, client svcsdkapi.FirehoseAPI, opts []option) *external { + e := &external{ + kube: kube, + client: client, + preObserve: nopPreObserve, + postObserve: nopPostObserve, + lateInitialize: nopLateInitialize, + isUpToDate: alwaysUpToDate, + preCreate: nopPreCreate, + postCreate: nopPostCreate, + preDelete: nopPreDelete, + postDelete: nopPostDelete, + update: nopUpdate, + } + for _, f := range opts { + f(e) + } + return e +} + +type external struct { + kube client.Client + client svcsdkapi.FirehoseAPI + preObserve func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DescribeDeliveryStreamInput) error + postObserve func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DescribeDeliveryStreamOutput, managed.ExternalObservation, error) (managed.ExternalObservation, error) + lateInitialize func(*svcapitypes.DeliveryStreamParameters, *svcsdk.DescribeDeliveryStreamOutput) error + isUpToDate func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DescribeDeliveryStreamOutput) (bool, string, error) + preCreate func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.CreateDeliveryStreamInput) error + postCreate func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.CreateDeliveryStreamOutput, managed.ExternalCreation, error) (managed.ExternalCreation, error) + preDelete func(context.Context, *svcapitypes.DeliveryStream, 
*svcsdk.DeleteDeliveryStreamInput) (bool, error) + postDelete func(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DeleteDeliveryStreamOutput, error) error + update func(context.Context, cpresource.Managed) (managed.ExternalUpdate, error) +} + +func nopPreObserve(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DescribeDeliveryStreamInput) error { + return nil +} + +func nopPostObserve(_ context.Context, _ *svcapitypes.DeliveryStream, _ *svcsdk.DescribeDeliveryStreamOutput, obs managed.ExternalObservation, err error) (managed.ExternalObservation, error) { + return obs, err +} +func nopLateInitialize(*svcapitypes.DeliveryStreamParameters, *svcsdk.DescribeDeliveryStreamOutput) error { + return nil +} +func alwaysUpToDate(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DescribeDeliveryStreamOutput) (bool, string, error) { + return true, "", nil +} + +func nopPreCreate(context.Context, *svcapitypes.DeliveryStream, *svcsdk.CreateDeliveryStreamInput) error { + return nil +} +func nopPostCreate(_ context.Context, _ *svcapitypes.DeliveryStream, _ *svcsdk.CreateDeliveryStreamOutput, cre managed.ExternalCreation, err error) (managed.ExternalCreation, error) { + return cre, err +} +func nopPreDelete(context.Context, *svcapitypes.DeliveryStream, *svcsdk.DeleteDeliveryStreamInput) (bool, error) { + return false, nil +} +func nopPostDelete(_ context.Context, _ *svcapitypes.DeliveryStream, _ *svcsdk.DeleteDeliveryStreamOutput, err error) error { + return err +} +func nopUpdate(context.Context, cpresource.Managed) (managed.ExternalUpdate, error) { + return managed.ExternalUpdate{}, nil +} diff --git a/pkg/controller/firehose/deliverystream/zz_conversions.go b/pkg/controller/firehose/deliverystream/zz_conversions.go new file mode 100644 index 0000000000..c21480223a --- /dev/null +++ b/pkg/controller/firehose/deliverystream/zz_conversions.go @@ -0,0 +1,1470 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by ack-generate. DO NOT EDIT. + +package deliverystream + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + svcsdk "github.com/aws/aws-sdk-go/service/firehose" + + svcapitypes "github.com/crossplane-contrib/provider-aws/apis/firehose/v1alpha1" +) + +// NOTE(muvaf): We return pointers in case the function needs to start with an +// empty object, hence need to return a new pointer. + +// GenerateDescribeDeliveryStreamInput returns input for read +// operation. +func GenerateDescribeDeliveryStreamInput(cr *svcapitypes.DeliveryStream) *svcsdk.DescribeDeliveryStreamInput { + res := &svcsdk.DescribeDeliveryStreamInput{} + + if cr.Spec.ForProvider.DeliveryStreamName != nil { + res.SetDeliveryStreamName(*cr.Spec.ForProvider.DeliveryStreamName) + } + + return res +} + +// GenerateDeliveryStream returns the current state in the form of *svcapitypes.DeliveryStream. 
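+//
+// Only the fields surfaced by DescribeDeliveryStream are mapped back: ARN and
+// status land in Status.AtProvider, while name and type land in
+// Spec.ForProvider. A short usage sketch, assuming resp is a
+// *svcsdk.DescribeDeliveryStreamOutput already fetched from the API:
+//
+//	cr := GenerateDeliveryStream(resp)
+//	arn := cr.Status.AtProvider.DeliveryStreamARN // *string; nil when AWS omits it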
+func GenerateDeliveryStream(resp *svcsdk.DescribeDeliveryStreamOutput) *svcapitypes.DeliveryStream { + cr := &svcapitypes.DeliveryStream{} + + if resp.DeliveryStreamDescription.DeliveryStreamARN != nil { + cr.Status.AtProvider.DeliveryStreamARN = resp.DeliveryStreamDescription.DeliveryStreamARN + } else { + cr.Status.AtProvider.DeliveryStreamARN = nil + } + if resp.DeliveryStreamDescription.DeliveryStreamName != nil { + cr.Spec.ForProvider.DeliveryStreamName = resp.DeliveryStreamDescription.DeliveryStreamName + } else { + cr.Spec.ForProvider.DeliveryStreamName = nil + } + if resp.DeliveryStreamDescription.DeliveryStreamStatus != nil { + cr.Status.AtProvider.DeliveryStreamStatus = resp.DeliveryStreamDescription.DeliveryStreamStatus + } else { + cr.Status.AtProvider.DeliveryStreamStatus = nil + } + if resp.DeliveryStreamDescription.DeliveryStreamType != nil { + cr.Spec.ForProvider.DeliveryStreamType = resp.DeliveryStreamDescription.DeliveryStreamType + } else { + cr.Spec.ForProvider.DeliveryStreamType = nil + } + + return cr +} + +// GenerateCreateDeliveryStreamInput returns a create input. +func GenerateCreateDeliveryStreamInput(cr *svcapitypes.DeliveryStream) *svcsdk.CreateDeliveryStreamInput { + res := &svcsdk.CreateDeliveryStreamInput{} + + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration != nil { + f0 := &svcsdk.AmazonOpenSearchServerlessDestinationConfiguration{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.BufferingHints != nil { + f0f0 := &svcsdk.AmazonOpenSearchServerlessBufferingHints{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f0f0.SetIntervalInSeconds(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.BufferingHints.SizeInMBs != nil { + f0f0.SetSizeInMBs(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.BufferingHints.SizeInMBs) + } + f0.SetBufferingHints(f0f0) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions != nil { + f0f1 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f0f1.SetEnabled(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f0f1.SetLogGroupName(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f0f1.SetLogStreamName(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f0.SetCloudWatchLoggingOptions(f0f1) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CollectionEndpoint != nil { + f0.SetCollectionEndpoint(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.CollectionEndpoint) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.IndexName != nil { + f0.SetIndexName(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.IndexName) + } + if 
cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.ProcessingConfiguration != nil { + f0f4 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f0f4.SetEnabled(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f0f4f1 := []*svcsdk.Processor{} + for _, f0f4f1iter := range cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.ProcessingConfiguration.Processors { + f0f4f1elem := &svcsdk.Processor{} + if f0f4f1iter.Parameters != nil { + f0f4f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f0f4f1elemf0iter := range f0f4f1iter.Parameters { + f0f4f1elemf0elem := &svcsdk.ProcessorParameter{} + if f0f4f1elemf0iter.ParameterName != nil { + f0f4f1elemf0elem.SetParameterName(*f0f4f1elemf0iter.ParameterName) + } + if f0f4f1elemf0iter.ParameterValue != nil { + f0f4f1elemf0elem.SetParameterValue(*f0f4f1elemf0iter.ParameterValue) + } + f0f4f1elemf0 = append(f0f4f1elemf0, f0f4f1elemf0elem) + } + f0f4f1elem.SetParameters(f0f4f1elemf0) + } + if f0f4f1iter.Type != nil { + f0f4f1elem.SetType(*f0f4f1iter.Type) + } + f0f4f1 = append(f0f4f1, f0f4f1elem) + } + f0f4.SetProcessors(f0f4f1) + } + f0.SetProcessingConfiguration(f0f4) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.RetryOptions != nil { + f0f5 := &svcsdk.AmazonOpenSearchServerlessRetryOptions{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f0f5.SetDurationInSeconds(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f0.SetRetryOptions(f0f5) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.RoleARN != nil { + f0.SetRoleARN(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.RoleARN) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3BackupMode != nil { + f0.SetS3BackupMode(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration != nil { + f0f8 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BucketARN != nil { + f0f8.SetBucketARN(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BufferingHints != nil { + f0f8f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f0f8f1.SetIntervalInSeconds(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f0f8f1.SetSizeInMBs(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f0f8.SetBufferingHints(f0f8f1) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f0f8f2 
:= &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + f0f8f2.SetEnabled(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f0f8f2.SetLogGroupName(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f0f8f2.SetLogStreamName(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f0f8.SetCloudWatchLoggingOptions(f0f8f2) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f0f8.SetCompressionFormat(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f0f8f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f0f8f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f0f8f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f0f8f4.SetKMSEncryptionConfig(f0f8f4f0) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f0f8f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f0f8.SetEncryptionConfiguration(f0f8f4) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + f0f8.SetErrorOutputPrefix(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.Prefix != nil { + f0f8.SetPrefix(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.RoleARN != nil { + f0f8.SetRoleARN(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.S3Configuration.RoleARN) + } + f0.SetS3Configuration(f0f8) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration != nil { + f0f9 := &svcsdk.VpcConfiguration{} + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.RoleARN != nil { + f0f9.SetRoleARN(*cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.RoleARN) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.SecurityGroupIDs != nil { + 
f0f9f1 := []*string{} + for _, f0f9f1iter := range cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.SecurityGroupIDs { + var f0f9f1elem string + f0f9f1elem = *f0f9f1iter + f0f9f1 = append(f0f9f1, &f0f9f1elem) + } + f0f9.SetSecurityGroupIds(f0f9f1) + } + if cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.SubnetIDs != nil { + f0f9f2 := []*string{} + for _, f0f9f2iter := range cr.Spec.ForProvider.AmazonOpenSearchServerlessDestinationConfiguration.VPCConfiguration.SubnetIDs { + var f0f9f2elem string + f0f9f2elem = *f0f9f2iter + f0f9f2 = append(f0f9f2, &f0f9f2elem) + } + f0f9.SetSubnetIds(f0f9f2) + } + f0.SetVpcConfiguration(f0f9) + } + res.SetAmazonOpenSearchServerlessDestinationConfiguration(f0) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration != nil { + f1 := &svcsdk.AmazonopensearchserviceDestinationConfiguration{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.BufferingHints != nil { + f1f0 := &svcsdk.AmazonopensearchserviceBufferingHints{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f1f0.SetIntervalInSeconds(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.BufferingHints.SizeInMBs != nil { + f1f0.SetSizeInMBs(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.BufferingHints.SizeInMBs) + } + f1.SetBufferingHints(f1f0) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions != nil { + f1f1 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f1f1.SetEnabled(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f1f1.SetLogGroupName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f1f1.SetLogStreamName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f1.SetCloudWatchLoggingOptions(f1f1) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ClusterEndpoint != nil { + f1.SetClusterEndpoint(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ClusterEndpoint) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.DomainARN != nil { + f1.SetDomainARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.DomainARN) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.IndexName != nil { + f1.SetIndexName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.IndexName) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.IndexRotationPeriod != nil { + f1.SetIndexRotationPeriod(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.IndexRotationPeriod) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ProcessingConfiguration != nil { + f1f6 := &svcsdk.ProcessingConfiguration{} + if 
cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f1f6.SetEnabled(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f1f6f1 := []*svcsdk.Processor{} + for _, f1f6f1iter := range cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.ProcessingConfiguration.Processors { + f1f6f1elem := &svcsdk.Processor{} + if f1f6f1iter.Parameters != nil { + f1f6f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f1f6f1elemf0iter := range f1f6f1iter.Parameters { + f1f6f1elemf0elem := &svcsdk.ProcessorParameter{} + if f1f6f1elemf0iter.ParameterName != nil { + f1f6f1elemf0elem.SetParameterName(*f1f6f1elemf0iter.ParameterName) + } + if f1f6f1elemf0iter.ParameterValue != nil { + f1f6f1elemf0elem.SetParameterValue(*f1f6f1elemf0iter.ParameterValue) + } + f1f6f1elemf0 = append(f1f6f1elemf0, f1f6f1elemf0elem) + } + f1f6f1elem.SetParameters(f1f6f1elemf0) + } + if f1f6f1iter.Type != nil { + f1f6f1elem.SetType(*f1f6f1iter.Type) + } + f1f6f1 = append(f1f6f1, f1f6f1elem) + } + f1f6.SetProcessors(f1f6f1) + } + f1.SetProcessingConfiguration(f1f6) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.RetryOptions != nil { + f1f7 := &svcsdk.AmazonopensearchserviceRetryOptions{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f1f7.SetDurationInSeconds(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f1.SetRetryOptions(f1f7) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.RoleARN != nil { + f1.SetRoleARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.RoleARN) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3BackupMode != nil { + f1.SetS3BackupMode(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration != nil { + f1f10 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BucketARN != nil { + f1f10.SetBucketARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BufferingHints != nil { + f1f10f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f1f10f1.SetIntervalInSeconds(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f1f10f1.SetSizeInMBs(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f1f10.SetBufferingHints(f1f10f1) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f1f10f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + 
f1f10f2.SetEnabled(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f1f10f2.SetLogGroupName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f1f10f2.SetLogStreamName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f1f10.SetCloudWatchLoggingOptions(f1f10f2) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f1f10.SetCompressionFormat(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f1f10f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f1f10f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f1f10f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f1f10f4.SetKMSEncryptionConfig(f1f10f4f0) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f1f10f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f1f10.SetEncryptionConfiguration(f1f10f4) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + f1f10.SetErrorOutputPrefix(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.Prefix != nil { + f1f10.SetPrefix(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.RoleARN != nil { + f1f10.SetRoleARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.S3Configuration.RoleARN) + } + f1.SetS3Configuration(f1f10) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.TypeName != nil { + f1.SetTypeName(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.TypeName) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration != nil { + f1f12 := &svcsdk.VpcConfiguration{} + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.RoleARN != nil { + f1f12.SetRoleARN(*cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.RoleARN) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.SecurityGroupIDs != nil { + f1f12f1 := []*string{} + for _, 
f1f12f1iter := range cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.SecurityGroupIDs { + var f1f12f1elem string + f1f12f1elem = *f1f12f1iter + f1f12f1 = append(f1f12f1, &f1f12f1elem) + } + f1f12.SetSecurityGroupIds(f1f12f1) + } + if cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.SubnetIDs != nil { + f1f12f2 := []*string{} + for _, f1f12f2iter := range cr.Spec.ForProvider.AmazonopensearchserviceDestinationConfiguration.VPCConfiguration.SubnetIDs { + var f1f12f2elem string + f1f12f2elem = *f1f12f2iter + f1f12f2 = append(f1f12f2, &f1f12f2elem) + } + f1f12.SetSubnetIds(f1f12f2) + } + f1.SetVpcConfiguration(f1f12) + } + res.SetAmazonopensearchserviceDestinationConfiguration(f1) + } + if cr.Spec.ForProvider.DeliveryStreamEncryptionConfigurationInput != nil { + f2 := &svcsdk.DeliveryStreamEncryptionConfigurationInput{} + if cr.Spec.ForProvider.DeliveryStreamEncryptionConfigurationInput.KeyARN != nil { + f2.SetKeyARN(*cr.Spec.ForProvider.DeliveryStreamEncryptionConfigurationInput.KeyARN) + } + if cr.Spec.ForProvider.DeliveryStreamEncryptionConfigurationInput.KeyType != nil { + f2.SetKeyType(*cr.Spec.ForProvider.DeliveryStreamEncryptionConfigurationInput.KeyType) + } + res.SetDeliveryStreamEncryptionConfigurationInput(f2) + } + if cr.Spec.ForProvider.DeliveryStreamName != nil { + res.SetDeliveryStreamName(*cr.Spec.ForProvider.DeliveryStreamName) + } + if cr.Spec.ForProvider.DeliveryStreamType != nil { + res.SetDeliveryStreamType(*cr.Spec.ForProvider.DeliveryStreamType) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration != nil { + f5 := &svcsdk.ElasticsearchDestinationConfiguration{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.BufferingHints != nil { + f5f0 := &svcsdk.ElasticsearchBufferingHints{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f5f0.SetIntervalInSeconds(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.BufferingHints.SizeInMBs != nil { + f5f0.SetSizeInMBs(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.BufferingHints.SizeInMBs) + } + f5.SetBufferingHints(f5f0) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions != nil { + f5f1 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f5f1.SetEnabled(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f5f1.SetLogGroupName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f5f1.SetLogStreamName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f5.SetCloudWatchLoggingOptions(f5f1) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ClusterEndpoint != nil { + f5.SetClusterEndpoint(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ClusterEndpoint) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.DomainARN != nil { + f5.SetDomainARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.DomainARN) + } + if 
cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.IndexName != nil { + f5.SetIndexName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.IndexName) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.IndexRotationPeriod != nil { + f5.SetIndexRotationPeriod(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.IndexRotationPeriod) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ProcessingConfiguration != nil { + f5f6 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f5f6.SetEnabled(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f5f6f1 := []*svcsdk.Processor{} + for _, f5f6f1iter := range cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.ProcessingConfiguration.Processors { + f5f6f1elem := &svcsdk.Processor{} + if f5f6f1iter.Parameters != nil { + f5f6f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f5f6f1elemf0iter := range f5f6f1iter.Parameters { + f5f6f1elemf0elem := &svcsdk.ProcessorParameter{} + if f5f6f1elemf0iter.ParameterName != nil { + f5f6f1elemf0elem.SetParameterName(*f5f6f1elemf0iter.ParameterName) + } + if f5f6f1elemf0iter.ParameterValue != nil { + f5f6f1elemf0elem.SetParameterValue(*f5f6f1elemf0iter.ParameterValue) + } + f5f6f1elemf0 = append(f5f6f1elemf0, f5f6f1elemf0elem) + } + f5f6f1elem.SetParameters(f5f6f1elemf0) + } + if f5f6f1iter.Type != nil { + f5f6f1elem.SetType(*f5f6f1iter.Type) + } + f5f6f1 = append(f5f6f1, f5f6f1elem) + } + f5f6.SetProcessors(f5f6f1) + } + f5.SetProcessingConfiguration(f5f6) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.RetryOptions != nil { + f5f7 := &svcsdk.ElasticsearchRetryOptions{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f5f7.SetDurationInSeconds(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f5.SetRetryOptions(f5f7) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.RoleARN != nil { + f5.SetRoleARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.RoleARN) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3BackupMode != nil { + f5.SetS3BackupMode(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration != nil { + f5f10 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BucketARN != nil { + f5f10.SetBucketARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BufferingHints != nil { + f5f10f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f5f10f1.SetIntervalInSeconds(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f5f10f1.SetSizeInMBs(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f5f10.SetBufferingHints(f5f10f1) + } + if 
cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f5f10f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + f5f10f2.SetEnabled(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f5f10f2.SetLogGroupName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f5f10f2.SetLogStreamName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f5f10.SetCloudWatchLoggingOptions(f5f10f2) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f5f10.SetCompressionFormat(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f5f10f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f5f10f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f5f10f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f5f10f4.SetKMSEncryptionConfig(f5f10f4f0) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f5f10f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f5f10.SetEncryptionConfiguration(f5f10f4) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + f5f10.SetErrorOutputPrefix(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.Prefix != nil { + f5f10.SetPrefix(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.RoleARN != nil { + f5f10.SetRoleARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.S3Configuration.RoleARN) + } + f5.SetS3Configuration(f5f10) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.TypeName != nil { + f5.SetTypeName(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.TypeName) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration != nil { + f5f12 := &svcsdk.VpcConfiguration{} + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.RoleARN != nil { + f5f12.SetRoleARN(*cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.RoleARN) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.SecurityGroupIDs != nil { + f5f12f1 
:= []*string{} + for _, f5f12f1iter := range cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.SecurityGroupIDs { + var f5f12f1elem string + f5f12f1elem = *f5f12f1iter + f5f12f1 = append(f5f12f1, &f5f12f1elem) + } + f5f12.SetSecurityGroupIds(f5f12f1) + } + if cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.SubnetIDs != nil { + f5f12f2 := []*string{} + for _, f5f12f2iter := range cr.Spec.ForProvider.ElasticsearchDestinationConfiguration.VPCConfiguration.SubnetIDs { + var f5f12f2elem string + f5f12f2elem = *f5f12f2iter + f5f12f2 = append(f5f12f2, &f5f12f2elem) + } + f5f12.SetSubnetIds(f5f12f2) + } + f5.SetVpcConfiguration(f5f12) + } + res.SetElasticsearchDestinationConfiguration(f5) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration != nil { + f6 := &svcsdk.ExtendedS3DestinationConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BucketARN != nil { + f6.SetBucketARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BucketARN) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BufferingHints != nil { + f6f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f6f1.SetIntervalInSeconds(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BufferingHints.SizeInMBs != nil { + f6f1.SetSizeInMBs(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.BufferingHints.SizeInMBs) + } + f6.SetBufferingHints(f6f1) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions != nil { + f6f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f6f2.SetEnabled(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f6f2.SetLogGroupName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f6f2.SetLogStreamName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f6.SetCloudWatchLoggingOptions(f6f2) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CompressionFormat != nil { + f6.SetCompressionFormat(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.CompressionFormat) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration != nil { + f6f4 := &svcsdk.DataFormatConversionConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.Enabled != nil { + f6f4.SetEnabled(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.Enabled) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration != nil { + f6f4f1 := &svcsdk.InputFormatConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer != nil { + f6f4f1f0 := &svcsdk.Deserializer{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.HiveJSONSerDe != 
nil { + f6f4f1f0f0 := &svcsdk.HiveJsonSerDe{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.HiveJSONSerDe.TimestampFormats != nil { + f6f4f1f0f0f0 := []*string{} + for _, f6f4f1f0f0f0iter := range cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.HiveJSONSerDe.TimestampFormats { + var f6f4f1f0f0f0elem string + f6f4f1f0f0f0elem = *f6f4f1f0f0f0iter + f6f4f1f0f0f0 = append(f6f4f1f0f0f0, &f6f4f1f0f0f0elem) + } + f6f4f1f0f0.SetTimestampFormats(f6f4f1f0f0f0) + } + f6f4f1f0.SetHiveJsonSerDe(f6f4f1f0f0) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe != nil { + f6f4f1f0f1 := &svcsdk.OpenXJsonSerDe{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.CaseInsensitive != nil { + f6f4f1f0f1.SetCaseInsensitive(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.CaseInsensitive) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.ColumnToJSONKeyMappings != nil { + f6f4f1f0f1f1 := map[string]*string{} + for f6f4f1f0f1f1key, f6f4f1f0f1f1valiter := range cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.ColumnToJSONKeyMappings { + var f6f4f1f0f1f1val string + f6f4f1f0f1f1val = *f6f4f1f0f1f1valiter + f6f4f1f0f1f1[f6f4f1f0f1f1key] = &f6f4f1f0f1f1val + } + f6f4f1f0f1.SetColumnToJsonKeyMappings(f6f4f1f0f1f1) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.ConvertDotsInJSONKeysToUnderscores != nil { + f6f4f1f0f1.SetConvertDotsInJsonKeysToUnderscores(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.InputFormatConfiguration.Deserializer.OpenXJSONSerDe.ConvertDotsInJSONKeysToUnderscores) + } + f6f4f1f0.SetOpenXJsonSerDe(f6f4f1f0f1) + } + f6f4f1.SetDeserializer(f6f4f1f0) + } + f6f4.SetInputFormatConfiguration(f6f4f1) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration != nil { + f6f4f2 := &svcsdk.OutputFormatConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer != nil { + f6f4f2f0 := &svcsdk.Serializer{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe != nil { + f6f4f2f0f0 := &svcsdk.OrcSerDe{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BlockSizeBytes != nil { + f6f4f2f0f0.SetBlockSizeBytes(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BlockSizeBytes) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BloomFilterColumns != nil { + f6f4f2f0f0f1 := []*string{} + for _, f6f4f2f0f0f1iter := range 
cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BloomFilterColumns { + var f6f4f2f0f0f1elem string + f6f4f2f0f0f1elem = *f6f4f2f0f0f1iter + f6f4f2f0f0f1 = append(f6f4f2f0f0f1, &f6f4f2f0f0f1elem) + } + f6f4f2f0f0.SetBloomFilterColumns(f6f4f2f0f0f1) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BloomFilterFalsePositiveProbability != nil { + f6f4f2f0f0.SetBloomFilterFalsePositiveProbability(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.BloomFilterFalsePositiveProbability) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.Compression != nil { + f6f4f2f0f0.SetCompression(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.Compression) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.DictionaryKeyThreshold != nil { + f6f4f2f0f0.SetDictionaryKeyThreshold(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.DictionaryKeyThreshold) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.EnablePadding != nil { + f6f4f2f0f0.SetEnablePadding(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.EnablePadding) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.FormatVersion != nil { + f6f4f2f0f0.SetFormatVersion(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.FormatVersion) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.PaddingTolerance != nil { + f6f4f2f0f0.SetPaddingTolerance(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.PaddingTolerance) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.RowIndexStride != nil { + f6f4f2f0f0.SetRowIndexStride(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.RowIndexStride) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.StripeSizeBytes != nil { + f6f4f2f0f0.SetStripeSizeBytes(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.OrcSerDe.StripeSizeBytes) + } + f6f4f2f0.SetOrcSerDe(f6f4f2f0f0) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe != nil { + f6f4f2f0f1 := &svcsdk.ParquetSerDe{} + if 
cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.BlockSizeBytes != nil { + f6f4f2f0f1.SetBlockSizeBytes(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.BlockSizeBytes) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.Compression != nil { + f6f4f2f0f1.SetCompression(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.Compression) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.EnableDictionaryCompression != nil { + f6f4f2f0f1.SetEnableDictionaryCompression(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.EnableDictionaryCompression) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.MaxPaddingBytes != nil { + f6f4f2f0f1.SetMaxPaddingBytes(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.MaxPaddingBytes) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.PageSizeBytes != nil { + f6f4f2f0f1.SetPageSizeBytes(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.PageSizeBytes) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.WriterVersion != nil { + f6f4f2f0f1.SetWriterVersion(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.OutputFormatConfiguration.Serializer.ParquetSerDe.WriterVersion) + } + f6f4f2f0.SetParquetSerDe(f6f4f2f0f1) + } + f6f4f2.SetSerializer(f6f4f2f0) + } + f6f4.SetOutputFormatConfiguration(f6f4f2) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration != nil { + f6f4f3 := &svcsdk.SchemaConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.CatalogID != nil { + f6f4f3.SetCatalogId(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.CatalogID) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.DatabaseName != nil { + f6f4f3.SetDatabaseName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.DatabaseName) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.Region != nil { + f6f4f3.SetRegion(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.Region) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.RoleARN != nil { + 
f6f4f3.SetRoleARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.RoleARN) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.TableName != nil { + f6f4f3.SetTableName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.TableName) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.VersionID != nil { + f6f4f3.SetVersionId(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DataFormatConversionConfiguration.SchemaConfiguration.VersionID) + } + f6f4.SetSchemaConfiguration(f6f4f3) + } + f6.SetDataFormatConversionConfiguration(f6f4) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration != nil { + f6f5 := &svcsdk.DynamicPartitioningConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration.Enabled != nil { + f6f5.SetEnabled(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration.Enabled) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration.RetryOptions != nil { + f6f5f1 := &svcsdk.RetryOptions{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration.RetryOptions.DurationInSeconds != nil { + f6f5f1.SetDurationInSeconds(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.DynamicPartitioningConfiguration.RetryOptions.DurationInSeconds) + } + f6f5.SetRetryOptions(f6f5f1) + } + f6.SetDynamicPartitioningConfiguration(f6f5) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration != nil { + f6f6 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f6f6f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f6f6f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f6f6.SetKMSEncryptionConfig(f6f6f0) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration.NoEncryptionConfig != nil { + f6f6.SetNoEncryptionConfig(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.EncryptionConfiguration.NoEncryptionConfig) + } + f6.SetEncryptionConfiguration(f6f6) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ErrorOutputPrefix != nil { + f6.SetErrorOutputPrefix(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.Prefix != nil { + f6.SetPrefix(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.Prefix) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ProcessingConfiguration != nil { + f6f9 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f6f9.SetEnabled(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ProcessingConfiguration.Processors != nil { + f6f9f1 := []*svcsdk.Processor{} + for _, f6f9f1iter := range cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.ProcessingConfiguration.Processors { + 
f6f9f1elem := &svcsdk.Processor{} + if f6f9f1iter.Parameters != nil { + f6f9f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f6f9f1elemf0iter := range f6f9f1iter.Parameters { + f6f9f1elemf0elem := &svcsdk.ProcessorParameter{} + if f6f9f1elemf0iter.ParameterName != nil { + f6f9f1elemf0elem.SetParameterName(*f6f9f1elemf0iter.ParameterName) + } + if f6f9f1elemf0iter.ParameterValue != nil { + f6f9f1elemf0elem.SetParameterValue(*f6f9f1elemf0iter.ParameterValue) + } + f6f9f1elemf0 = append(f6f9f1elemf0, f6f9f1elemf0elem) + } + f6f9f1elem.SetParameters(f6f9f1elemf0) + } + if f6f9f1iter.Type != nil { + f6f9f1elem.SetType(*f6f9f1iter.Type) + } + f6f9f1 = append(f6f9f1, f6f9f1elem) + } + f6f9.SetProcessors(f6f9f1) + } + f6.SetProcessingConfiguration(f6f9) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.RoleARN != nil { + f6.SetRoleARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.RoleARN) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration != nil { + f6f11 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BucketARN != nil { + f6f11.SetBucketARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BucketARN) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BufferingHints != nil { + f6f11f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BufferingHints.IntervalInSeconds != nil { + f6f11f1.SetIntervalInSeconds(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BufferingHints.SizeInMBs != nil { + f6f11f1.SetSizeInMBs(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.BufferingHints.SizeInMBs) + } + f6f11.SetBufferingHints(f6f11f1) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions != nil { + f6f11f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f6f11f2.SetEnabled(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f6f11f2.SetLogGroupName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f6f11f2.SetLogStreamName(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f6f11.SetCloudWatchLoggingOptions(f6f11f2) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CompressionFormat != nil { + f6f11.SetCompressionFormat(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.CompressionFormat) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration != nil { + f6f11f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig != nil { + 
f6f11f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f6f11f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f6f11f4.SetKMSEncryptionConfig(f6f11f4f0) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.NoEncryptionConfig != nil { + f6f11f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.NoEncryptionConfig) + } + f6f11.SetEncryptionConfiguration(f6f11f4) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.ErrorOutputPrefix != nil { + f6f11.SetErrorOutputPrefix(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.Prefix != nil { + f6f11.SetPrefix(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.Prefix) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.RoleARN != nil { + f6f11.SetRoleARN(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupConfiguration.RoleARN) + } + f6.SetS3BackupConfiguration(f6f11) + } + if cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupMode != nil { + f6.SetS3BackupMode(*cr.Spec.ForProvider.ExtendedS3DestinationConfiguration.S3BackupMode) + } + res.SetExtendedS3DestinationConfiguration(f6) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration != nil { + f7 := &svcsdk.HttpEndpointDestinationConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.BufferingHints != nil { + f7f0 := &svcsdk.HttpEndpointBufferingHints{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f7f0.SetIntervalInSeconds(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.BufferingHints.SizeInMBs != nil { + f7f0.SetSizeInMBs(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.BufferingHints.SizeInMBs) + } + f7.SetBufferingHints(f7f0) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions != nil { + f7f1 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f7f1.SetEnabled(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f7f1.SetLogGroupName(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f7f1.SetLogStreamName(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f7.SetCloudWatchLoggingOptions(f7f1) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration != nil { + f7f2 := &svcsdk.HttpEndpointConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.AccessKey != nil { + 
f7f2.SetAccessKey(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.AccessKey) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.Name != nil { + f7f2.SetName(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.Name) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.URL != nil { + f7f2.SetUrl(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.EndpointConfiguration.URL) + } + f7.SetEndpointConfiguration(f7f2) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.ProcessingConfiguration != nil { + f7f3 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f7f3.SetEnabled(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f7f3f1 := []*svcsdk.Processor{} + for _, f7f3f1iter := range cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.ProcessingConfiguration.Processors { + f7f3f1elem := &svcsdk.Processor{} + if f7f3f1iter.Parameters != nil { + f7f3f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f7f3f1elemf0iter := range f7f3f1iter.Parameters { + f7f3f1elemf0elem := &svcsdk.ProcessorParameter{} + if f7f3f1elemf0iter.ParameterName != nil { + f7f3f1elemf0elem.SetParameterName(*f7f3f1elemf0iter.ParameterName) + } + if f7f3f1elemf0iter.ParameterValue != nil { + f7f3f1elemf0elem.SetParameterValue(*f7f3f1elemf0iter.ParameterValue) + } + f7f3f1elemf0 = append(f7f3f1elemf0, f7f3f1elemf0elem) + } + f7f3f1elem.SetParameters(f7f3f1elemf0) + } + if f7f3f1iter.Type != nil { + f7f3f1elem.SetType(*f7f3f1iter.Type) + } + f7f3f1 = append(f7f3f1, f7f3f1elem) + } + f7f3.SetProcessors(f7f3f1) + } + f7.SetProcessingConfiguration(f7f3) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RequestConfiguration != nil { + f7f4 := &svcsdk.HttpEndpointRequestConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RequestConfiguration.CommonAttributes != nil { + f7f4f0 := []*svcsdk.HttpEndpointCommonAttribute{} + for _, f7f4f0iter := range cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RequestConfiguration.CommonAttributes { + f7f4f0elem := &svcsdk.HttpEndpointCommonAttribute{} + if f7f4f0iter.AttributeName != nil { + f7f4f0elem.SetAttributeName(*f7f4f0iter.AttributeName) + } + if f7f4f0iter.AttributeValue != nil { + f7f4f0elem.SetAttributeValue(*f7f4f0iter.AttributeValue) + } + f7f4f0 = append(f7f4f0, f7f4f0elem) + } + f7f4.SetCommonAttributes(f7f4f0) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RequestConfiguration.ContentEncoding != nil { + f7f4.SetContentEncoding(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RequestConfiguration.ContentEncoding) + } + f7.SetRequestConfiguration(f7f4) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RetryOptions != nil { + f7f5 := &svcsdk.HttpEndpointRetryOptions{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f7f5.SetDurationInSeconds(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f7.SetRetryOptions(f7f5) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RoleARN != nil { + f7.SetRoleARN(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.RoleARN) + } + if 
cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3BackupMode != nil { + f7.SetS3BackupMode(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration != nil { + f7f8 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BucketARN != nil { + f7f8.SetBucketARN(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BufferingHints != nil { + f7f8f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f7f8f1.SetIntervalInSeconds(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f7f8f1.SetSizeInMBs(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f7f8.SetBufferingHints(f7f8f1) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f7f8f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + f7f8f2.SetEnabled(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f7f8f2.SetLogGroupName(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f7f8f2.SetLogStreamName(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f7f8.SetCloudWatchLoggingOptions(f7f8f2) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f7f8.SetCompressionFormat(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f7f8f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f7f8f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f7f8f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f7f8f4.SetKMSEncryptionConfig(f7f8f4f0) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f7f8f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f7f8.SetEncryptionConfiguration(f7f8f4) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + 
f7f8.SetErrorOutputPrefix(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.Prefix != nil { + f7f8.SetPrefix(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.RoleARN != nil { + f7f8.SetRoleARN(*cr.Spec.ForProvider.HTTPEndpointDestinationConfiguration.S3Configuration.RoleARN) + } + f7.SetS3Configuration(f7f8) + } + res.SetHttpEndpointDestinationConfiguration(f7) + } + if cr.Spec.ForProvider.KinesisStreamSourceConfiguration != nil { + f8 := &svcsdk.KinesisStreamSourceConfiguration{} + if cr.Spec.ForProvider.KinesisStreamSourceConfiguration.KinesisStreamARN != nil { + f8.SetKinesisStreamARN(*cr.Spec.ForProvider.KinesisStreamSourceConfiguration.KinesisStreamARN) + } + if cr.Spec.ForProvider.KinesisStreamSourceConfiguration.RoleARN != nil { + f8.SetRoleARN(*cr.Spec.ForProvider.KinesisStreamSourceConfiguration.RoleARN) + } + res.SetKinesisStreamSourceConfiguration(f8) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration != nil { + f9 := &svcsdk.RedshiftDestinationConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions != nil { + f9f0 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f9f0.SetEnabled(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f9f0.SetLogGroupName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f9f0.SetLogStreamName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f9.SetCloudWatchLoggingOptions(f9f0) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.ClusterJDBCURL != nil { + f9.SetClusterJDBCURL(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.ClusterJDBCURL) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand != nil { + f9f2 := &svcsdk.CopyCommand{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.CopyOptions != nil { + f9f2.SetCopyOptions(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.CopyOptions) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.DataTableColumns != nil { + f9f2.SetDataTableColumns(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.DataTableColumns) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.DataTableName != nil { + f9f2.SetDataTableName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.CopyCommand.DataTableName) + } + f9.SetCopyCommand(f9f2) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.Password != nil { + f9.SetPassword(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.Password) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.ProcessingConfiguration != nil { + f9f4 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f9f4.SetEnabled(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if 
cr.Spec.ForProvider.RedshiftDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f9f4f1 := []*svcsdk.Processor{} + for _, f9f4f1iter := range cr.Spec.ForProvider.RedshiftDestinationConfiguration.ProcessingConfiguration.Processors { + f9f4f1elem := &svcsdk.Processor{} + if f9f4f1iter.Parameters != nil { + f9f4f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f9f4f1elemf0iter := range f9f4f1iter.Parameters { + f9f4f1elemf0elem := &svcsdk.ProcessorParameter{} + if f9f4f1elemf0iter.ParameterName != nil { + f9f4f1elemf0elem.SetParameterName(*f9f4f1elemf0iter.ParameterName) + } + if f9f4f1elemf0iter.ParameterValue != nil { + f9f4f1elemf0elem.SetParameterValue(*f9f4f1elemf0iter.ParameterValue) + } + f9f4f1elemf0 = append(f9f4f1elemf0, f9f4f1elemf0elem) + } + f9f4f1elem.SetParameters(f9f4f1elemf0) + } + if f9f4f1iter.Type != nil { + f9f4f1elem.SetType(*f9f4f1iter.Type) + } + f9f4f1 = append(f9f4f1, f9f4f1elem) + } + f9f4.SetProcessors(f9f4f1) + } + f9.SetProcessingConfiguration(f9f4) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.RetryOptions != nil { + f9f5 := &svcsdk.RedshiftRetryOptions{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f9f5.SetDurationInSeconds(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f9.SetRetryOptions(f9f5) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.RoleARN != nil { + f9.SetRoleARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.RoleARN) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration != nil { + f9f7 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BucketARN != nil { + f9f7.SetBucketARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BucketARN) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BufferingHints != nil { + f9f7f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BufferingHints.IntervalInSeconds != nil { + f9f7f1.SetIntervalInSeconds(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BufferingHints.SizeInMBs != nil { + f9f7f1.SetSizeInMBs(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.BufferingHints.SizeInMBs) + } + f9f7.SetBufferingHints(f9f7f1) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions != nil { + f9f7f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f9f7f2.SetEnabled(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f9f7f2.SetLogGroupName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f9f7f2.SetLogStreamName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + 
f9f7.SetCloudWatchLoggingOptions(f9f7f2) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CompressionFormat != nil { + f9f7.SetCompressionFormat(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.CompressionFormat) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration != nil { + f9f7f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f9f7f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f9f7f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f9f7f4.SetKMSEncryptionConfig(f9f7f4f0) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.NoEncryptionConfig != nil { + f9f7f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.EncryptionConfiguration.NoEncryptionConfig) + } + f9f7.SetEncryptionConfiguration(f9f7f4) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.ErrorOutputPrefix != nil { + f9f7.SetErrorOutputPrefix(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.Prefix != nil { + f9f7.SetPrefix(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.Prefix) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.RoleARN != nil { + f9f7.SetRoleARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupConfiguration.RoleARN) + } + f9.SetS3BackupConfiguration(f9f7) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupMode != nil { + f9.SetS3BackupMode(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration != nil { + f9f9 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BucketARN != nil { + f9f9.SetBucketARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BufferingHints != nil { + f9f9f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f9f9f1.SetIntervalInSeconds(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f9f9f1.SetSizeInMBs(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f9f9.SetBufferingHints(f9f9f1) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f9f9f2 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + f9f9f2.SetEnabled(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if 
cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f9f9f2.SetLogGroupName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f9f9f2.SetLogStreamName(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f9f9.SetCloudWatchLoggingOptions(f9f9f2) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f9f9.SetCompressionFormat(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f9f9f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f9f9f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f9f9f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f9f9f4.SetKMSEncryptionConfig(f9f9f4f0) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f9f9f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f9f9.SetEncryptionConfiguration(f9f9f4) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + f9f9.SetErrorOutputPrefix(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.Prefix != nil { + f9f9.SetPrefix(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.RoleARN != nil { + f9f9.SetRoleARN(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.S3Configuration.RoleARN) + } + f9.SetS3Configuration(f9f9) + } + if cr.Spec.ForProvider.RedshiftDestinationConfiguration.Username != nil { + f9.SetUsername(*cr.Spec.ForProvider.RedshiftDestinationConfiguration.Username) + } + res.SetRedshiftDestinationConfiguration(f9) + } + if cr.Spec.ForProvider.S3DestinationConfiguration != nil { + f10 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.S3DestinationConfiguration.BucketARN != nil { + f10.SetBucketARN(*cr.Spec.ForProvider.S3DestinationConfiguration.BucketARN) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.BufferingHints != nil { + f10f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.S3DestinationConfiguration.BufferingHints.IntervalInSeconds != nil { + f10f1.SetIntervalInSeconds(*cr.Spec.ForProvider.S3DestinationConfiguration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.BufferingHints.SizeInMBs != nil { + f10f1.SetSizeInMBs(*cr.Spec.ForProvider.S3DestinationConfiguration.BufferingHints.SizeInMBs) + } + f10.SetBufferingHints(f10f1) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions != nil { + f10f2 := 
&svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f10f2.SetEnabled(*cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f10f2.SetLogGroupName(*cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f10f2.SetLogStreamName(*cr.Spec.ForProvider.S3DestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f10.SetCloudWatchLoggingOptions(f10f2) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.CompressionFormat != nil { + f10.SetCompressionFormat(*cr.Spec.ForProvider.S3DestinationConfiguration.CompressionFormat) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration != nil { + f10f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f10f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f10f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f10f4.SetKMSEncryptionConfig(f10f4f0) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration.NoEncryptionConfig != nil { + f10f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.S3DestinationConfiguration.EncryptionConfiguration.NoEncryptionConfig) + } + f10.SetEncryptionConfiguration(f10f4) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.ErrorOutputPrefix != nil { + f10.SetErrorOutputPrefix(*cr.Spec.ForProvider.S3DestinationConfiguration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.Prefix != nil { + f10.SetPrefix(*cr.Spec.ForProvider.S3DestinationConfiguration.Prefix) + } + if cr.Spec.ForProvider.S3DestinationConfiguration.RoleARN != nil { + f10.SetRoleARN(*cr.Spec.ForProvider.S3DestinationConfiguration.RoleARN) + } + res.SetS3DestinationConfiguration(f10) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration != nil { + f11 := &svcsdk.SplunkDestinationConfiguration{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions != nil { + f11f0 := &svcsdk.CloudWatchLoggingOptions{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.Enabled != nil { + f11f0.SetEnabled(*cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName != nil { + f11f0.SetLogGroupName(*cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName != nil { + f11f0.SetLogStreamName(*cr.Spec.ForProvider.SplunkDestinationConfiguration.CloudWatchLoggingOptions.LogStreamName) + } + f11.SetCloudWatchLoggingOptions(f11f0) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.HECAcknowledgmentTimeoutInSeconds != nil { + f11.SetHECAcknowledgmentTimeoutInSeconds(*cr.Spec.ForProvider.SplunkDestinationConfiguration.HECAcknowledgmentTimeoutInSeconds) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.HECEndpoint != nil { + 
f11.SetHECEndpoint(*cr.Spec.ForProvider.SplunkDestinationConfiguration.HECEndpoint) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.HECEndpointType != nil { + f11.SetHECEndpointType(*cr.Spec.ForProvider.SplunkDestinationConfiguration.HECEndpointType) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.HECToken != nil { + f11.SetHECToken(*cr.Spec.ForProvider.SplunkDestinationConfiguration.HECToken) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.ProcessingConfiguration != nil { + f11f5 := &svcsdk.ProcessingConfiguration{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.ProcessingConfiguration.Enabled != nil { + f11f5.SetEnabled(*cr.Spec.ForProvider.SplunkDestinationConfiguration.ProcessingConfiguration.Enabled) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.ProcessingConfiguration.Processors != nil { + f11f5f1 := []*svcsdk.Processor{} + for _, f11f5f1iter := range cr.Spec.ForProvider.SplunkDestinationConfiguration.ProcessingConfiguration.Processors { + f11f5f1elem := &svcsdk.Processor{} + if f11f5f1iter.Parameters != nil { + f11f5f1elemf0 := []*svcsdk.ProcessorParameter{} + for _, f11f5f1elemf0iter := range f11f5f1iter.Parameters { + f11f5f1elemf0elem := &svcsdk.ProcessorParameter{} + if f11f5f1elemf0iter.ParameterName != nil { + f11f5f1elemf0elem.SetParameterName(*f11f5f1elemf0iter.ParameterName) + } + if f11f5f1elemf0iter.ParameterValue != nil { + f11f5f1elemf0elem.SetParameterValue(*f11f5f1elemf0iter.ParameterValue) + } + f11f5f1elemf0 = append(f11f5f1elemf0, f11f5f1elemf0elem) + } + f11f5f1elem.SetParameters(f11f5f1elemf0) + } + if f11f5f1iter.Type != nil { + f11f5f1elem.SetType(*f11f5f1iter.Type) + } + f11f5f1 = append(f11f5f1, f11f5f1elem) + } + f11f5.SetProcessors(f11f5f1) + } + f11.SetProcessingConfiguration(f11f5) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.RetryOptions != nil { + f11f6 := &svcsdk.SplunkRetryOptions{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.RetryOptions.DurationInSeconds != nil { + f11f6.SetDurationInSeconds(*cr.Spec.ForProvider.SplunkDestinationConfiguration.RetryOptions.DurationInSeconds) + } + f11.SetRetryOptions(f11f6) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3BackupMode != nil { + f11.SetS3BackupMode(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3BackupMode) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration != nil { + f11f8 := &svcsdk.S3DestinationConfiguration{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BucketARN != nil { + f11f8.SetBucketARN(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BucketARN) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BufferingHints != nil { + f11f8f1 := &svcsdk.BufferingHints{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds != nil { + f11f8f1.SetIntervalInSeconds(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BufferingHints.IntervalInSeconds) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs != nil { + f11f8f1.SetSizeInMBs(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.BufferingHints.SizeInMBs) + } + f11f8.SetBufferingHints(f11f8f1) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions != nil { + f11f8f2 := &svcsdk.CloudWatchLoggingOptions{} + if 
cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled != nil { + f11f8f2.SetEnabled(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.Enabled) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName != nil { + f11f8f2.SetLogGroupName(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogGroupName) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName != nil { + f11f8f2.SetLogStreamName(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CloudWatchLoggingOptions.LogStreamName) + } + f11f8.SetCloudWatchLoggingOptions(f11f8f2) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CompressionFormat != nil { + f11f8.SetCompressionFormat(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.CompressionFormat) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration != nil { + f11f8f4 := &svcsdk.EncryptionConfiguration{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig != nil { + f11f8f4f0 := &svcsdk.KMSEncryptionConfig{} + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN != nil { + f11f8f4f0.SetAWSKMSKeyARN(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + } + f11f8f4.SetKMSEncryptionConfig(f11f8f4f0) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig != nil { + f11f8f4.SetNoEncryptionConfig(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.EncryptionConfiguration.NoEncryptionConfig) + } + f11f8.SetEncryptionConfiguration(f11f8f4) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.ErrorOutputPrefix != nil { + f11f8.SetErrorOutputPrefix(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.ErrorOutputPrefix) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.Prefix != nil { + f11f8.SetPrefix(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.Prefix) + } + if cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.RoleARN != nil { + f11f8.SetRoleARN(*cr.Spec.ForProvider.SplunkDestinationConfiguration.S3Configuration.RoleARN) + } + f11.SetS3Configuration(f11f8) + } + res.SetSplunkDestinationConfiguration(f11) + } + if cr.Spec.ForProvider.Tags != nil { + f12 := []*svcsdk.Tag{} + for _, f12iter := range cr.Spec.ForProvider.Tags { + f12elem := &svcsdk.Tag{} + if f12iter.Key != nil { + f12elem.SetKey(*f12iter.Key) + } + if f12iter.Value != nil { + f12elem.SetValue(*f12iter.Value) + } + f12 = append(f12, f12elem) + } + res.SetTags(f12) + } + + return res +} + +// GenerateDeleteDeliveryStreamInput returns a deletion input. +func GenerateDeleteDeliveryStreamInput(cr *svcapitypes.DeliveryStream) *svcsdk.DeleteDeliveryStreamInput { + res := &svcsdk.DeleteDeliveryStreamInput{} + + if cr.Spec.ForProvider.DeliveryStreamName != nil { + res.SetDeliveryStreamName(*cr.Spec.ForProvider.DeliveryStreamName) + } + + return res +} + +// IsNotFound returns whether the given error is of type NotFound or not. 
+func IsNotFound(err error) bool { + awsErr, ok := err.(awserr.Error) + return ok && awsErr.Code() == "ResourceNotFoundException" +} diff --git a/pkg/controller/firehose/setup.go b/pkg/controller/firehose/setup.go new file mode 100644 index 0000000000..fa8fe001cb --- /dev/null +++ b/pkg/controller/firehose/setup.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package firehose + +import ( + "github.com/crossplane/crossplane-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane-contrib/provider-aws/pkg/controller/firehose/deliverystream" + "github.com/crossplane-contrib/provider-aws/pkg/utils/setup" +) + +// Setup firehose controllers. +func Setup(mgr ctrl.Manager, o controller.Options) error { + return setup.SetupControllers( + mgr, o, + deliverystream.SetupDeliveryStream, + ) +}
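
Note on the IsNotFound helper (illustrative, not part of this patch): the generated predicate above is what lets the delivery-stream controller treat a ResourceNotFoundException from DescribeDeliveryStream as "the external resource does not exist yet" rather than as a reconcile failure. The sketch below shows that typical consuming pattern under stated assumptions: GenerateDescribeDeliveryStreamInput is assumed to be emitted by ack-generate alongside the create/delete input generators shown above, and observeSketch is a hypothetical name; the actual Observe logic lives in the generated zz_controller.go.

package deliverystream

import (
	"context"

	svcsdkapi "github.com/aws/aws-sdk-go/service/firehose/firehoseiface"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"

	svcapitypes "github.com/crossplane-contrib/provider-aws/apis/firehose/v1alpha1"
)

// observeSketch (hypothetical) shows how IsNotFound is typically consumed:
// absence of the stream is a normal observation; any other error aborts.
func observeSketch(ctx context.Context, client svcsdkapi.FirehoseAPI, cr *svcapitypes.DeliveryStream) (managed.ExternalObservation, error) {
	// Assumed to be generated alongside the create/delete input generators.
	input := GenerateDescribeDeliveryStreamInput(cr)
	if _, err := client.DescribeDeliveryStreamWithContext(ctx, input); err != nil {
		if IsNotFound(err) {
			// Not-yet-created is expected; report absence instead of failing,
			// so the managed reconciler proceeds to Create.
			return managed.ExternalObservation{ResourceExists: false}, nil
		}
		return managed.ExternalObservation{}, err
	}
	return managed.ExternalObservation{ResourceExists: true, ResourceUpToDate: true}, nil
}

With this pattern, the managed reconciler calls Create only when ResourceExists is false, which is why translating the service's NotFound error cleanly is load-bearing for the whole lifecycle.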