diff --git a/.devcontainer.json b/.devcontainer.json new file mode 100644 index 0000000000000..c40157671ce76 --- /dev/null +++ b/.devcontainer.json @@ -0,0 +1,8 @@ +{ + "name": "Dev Container Definition - AWS CDK", + "image": "jsii/superchain", + "postCreateCommand": "yarn build --skip-test --no-bail --skip-prereqs --skip-compat", + "extensions": [ + "dbaeumer.vscode-eslint@2.1.5" + ] +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-codepipeline-actions/lib/codecommit/source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/lib/codecommit/source-action.ts index 2fa7a67b29b93..8f33d16ac8c41 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/lib/codecommit/source-action.ts +++ b/packages/@aws-cdk/aws-codepipeline-actions/lib/codecommit/source-action.ts @@ -2,7 +2,7 @@ import * as codecommit from '@aws-cdk/aws-codecommit'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as targets from '@aws-cdk/aws-events-targets'; import * as iam from '@aws-cdk/aws-iam'; -import { Construct } from '@aws-cdk/core'; +import { Construct, Token } from '@aws-cdk/core'; import { Action } from '../action'; import { sourceArtifactBounds } from '../common'; @@ -122,8 +122,8 @@ export class CodeCommitSourceAction extends Action { const createEvent = this.props.trigger === undefined || this.props.trigger === CodeCommitTrigger.EVENTS; if (createEvent) { - const branchIdDisambiguator = this.branch === 'master' ? 
'' : `-${this.branch}-`; - this.props.repository.onCommit(`${stage.pipeline.node.uniqueId}${branchIdDisambiguator}EventRule`, { + const eventId = this.generateEventId(stage); + this.props.repository.onCommit(eventId, { target: new targets.CodePipeline(stage.pipeline), branches: [this.branch], }); @@ -153,4 +153,24 @@ export class CodeCommitSourceAction extends Action { }, }; } + + private generateEventId(stage: codepipeline.IStage): string { + const baseId = stage.pipeline.node.uniqueId; + if (Token.isUnresolved(this.branch)) { + let candidate = ''; + let counter = 0; + do { + candidate = this.eventIdFromPrefix(`${baseId}${counter}`); + counter += 1; + } while (this.props.repository.node.tryFindChild(candidate) !== undefined); + return candidate; + } else { + const branchIdDisambiguator = this.branch === 'master' ? '' : `-${this.branch}-`; + return this.eventIdFromPrefix(`${baseId}${branchIdDisambiguator}`); + } + } + + private eventIdFromPrefix(eventIdPrefix: string) { + return `${eventIdPrefix}EventRule`; + } } diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts index e62e301168fa1..fda7c79dc1800 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts @@ -2,7 +2,7 @@ import { countResources, expect, haveResourceLike, not } from '@aws-cdk/assert'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codecommit from '@aws-cdk/aws-codecommit'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; -import { Stack } from '@aws-cdk/core'; +import { Stack, Lazy } from '@aws-cdk/core'; import { Test } from 'nodeunit'; import * as cpactions from '../../lib'; @@ -224,6 +224,49 @@ export = { test.done(); }, + + 'allows using a Token for the branch name'(test: Test) { + 
const stack = new Stack(); + + const sourceOutput = new codepipeline.Artifact(); + new codepipeline.Pipeline(stack, 'P', { + stages: [ + { + stageName: 'Source', + actions: [ + new cpactions.CodeCommitSourceAction({ + actionName: 'CodeCommit', + repository: new codecommit.Repository(stack, 'R', { + repositoryName: 'repository', + }), + branch: Lazy.stringValue({ produce: () => 'my-branch' }), + output: sourceOutput, + }), + ], + }, + { + stageName: 'Build', + actions: [ + new cpactions.CodeBuildAction({ + actionName: 'Build', + project: new codebuild.PipelineProject(stack, 'CodeBuild'), + input: sourceOutput, + }), + ], + }, + ], + }); + + expect(stack).to(haveResourceLike('AWS::Events::Rule', { + EventPattern: { + detail: { + referenceName: ['my-branch'], + }, + }, + })); + + test.done(); + }, }, }; diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md index bf9200ce865ad..28f03e0d9644a 100644 --- a/packages/@aws-cdk/aws-eks/README.md +++ b/packages/@aws-cdk/aws-eks/README.md @@ -47,6 +47,8 @@ cluster.addManifest('mypod', { }); ``` +> **NOTE: You can only create 1 cluster per stack.** If you have a use-case for multiple clusters per stack, > or would like to understand more about this limitation, see https://github.com/aws/aws-cdk/issues/10073. + In order to interact with your cluster through `kubectl`, you can use the `aws eks update-kubeconfig` [AWS CLI command](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html) to configure your local kubeconfig. @@ -98,7 +100,8 @@ const cluster = new eks.Cluster(this, 'hello-eks', { }); ``` -The default value is `eks.EndpointAccess.PUBLIC_AND_PRIVATE`. Which means the cluster endpoint is accessible from outside of your VPC, and worker node traffic to the endpoint will stay within your VPC. +The default value is `eks.EndpointAccess.PUBLIC_AND_PRIVATE`. 
Which means the cluster endpoint is accessible from outside of your VPC, but worker node traffic as well as `kubectl` commands +to the endpoint will stay within your VPC. ### Capacity @@ -139,16 +142,12 @@ new eks.Cluster(this, 'cluster-with-no-capacity', { }); ``` -The `cluster.defaultCapacity` property will reference the `AutoScalingGroup` -resource for the default capacity. It will be `undefined` if `defaultCapacity` -is set to `0` or `defaultCapacityType` is either `NODEGROUP` or undefined. +When creating a cluster with default capacity (i.e `defaultCapacity !== 0` or is undefined), you can access the allocated capacity using: -And the `cluster.defaultNodegroup` property will reference the `Nodegroup` -resource for the default capacity. It will be `undefined` if `defaultCapacity` -is set to `0` or `defaultCapacityType` is `EC2`. +- `cluster.defaultCapacity` will reference the `AutoScalingGroup` resource in case `defaultCapacityType` is set to `EC2` or is undefined. +- `cluster.defaultNodegroup` will reference the `Nodegroup` resource in case `defaultCapacityType` is set to `NODEGROUP`. -You can add `AutoScalingGroup` resource as customized capacity through `cluster.addCapacity()` or -`cluster.addAutoScalingGroup()`: +You can add customized capacity in the form of an `AutoScalingGroup` resource through `cluster.addCapacity()` or `cluster.addAutoScalingGroup()`: ```ts cluster.addCapacity('frontend-nodes', { @@ -167,7 +166,7 @@ for Amazon EKS Kubernetes clusters. By default, `eks.Nodegroup` create a nodegro new eks.Nodegroup(stack, 'nodegroup', { cluster }); ``` -You can add customized node group through `cluster.addNodegroup()`: +You can add customized node groups through `cluster.addNodegroup()`: ```ts cluster.addNodegroup('nodegroup', { @@ -206,14 +205,13 @@ this.cluster.addNodegroup('extra-ng', { ### ARM64 Support -Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. 
Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest +Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest Amazon Linux 2 AMI for ARM64 will be automatically selected. ```ts -// create a cluster with a default managed nodegroup +// create a cluster with a default managed nodegroup cluster = new eks.Cluster(this, 'Cluster', { vpc, - mastersRole, version: eks.KubernetesVersion.V1_17, }); @@ -298,12 +296,9 @@ can cause your EC2 instance to become unavailable, such as [EC2 maintenance even and [EC2 Spot interruptions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) and helps gracefully stop all pods running on spot nodes that are about to be terminated. -Current version: - -| name | version | -|------------|---------| -| Helm Chart | 0.9.5 | -| App | 1.7.0 | +> Handler Version: [1.7.0](https://github.com/aws/aws-node-termination-handler/releases/tag/v1.7.0) +> +> Chart Version: [0.9.5](https://github.com/aws/eks-charts/blob/v0.0.28/stable/aws-node-termination-handler/Chart.yaml) ### Bootstrapping @@ -327,7 +322,7 @@ cluster.addCapacity('spot', { To disable bootstrapping altogether (i.e. to fully customize user-data), set `bootstrapEnabled` to `false` when you add the capacity. -### Kubernetes Resources +### Kubernetes Manifests The `KubernetesManifest` construct or `cluster.addManifest` method can be used to apply Kubernetes resource manifests to this cluster. @@ -387,7 +382,7 @@ cluster.addManifest('hello-kub', service, deployment); #### Kubectl Layer and Environment -The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. 
For example, this can useful in order to configure an http proxy: +The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy: ```typescript const cluster = new eks.Cluster(this, 'hello-eks', { @@ -450,10 +445,14 @@ const manifest = yaml.safeLoadAll(request('GET', manifestUrl).getBody()); cluster.addManifest('my-resource', ...manifest); ``` -Since Kubernetes resources are implemented as CloudFormation resources in the -CDK. This means that if the resource is deleted from your code (or the stack is +Since Kubernetes manifests are implemented as CloudFormation resources in the +CDK. This means that if the manifest is deleted from your code (or the stack is deleted), the next `cdk deploy` will issue a `kubectl delete` command and the -Kubernetes resources will be deleted. +Kubernetes resources in that manifest will be deleted. + +#### Caveat + +If you have multiple resources in a single `KubernetesManifest`, and one of those **resources** is removed from the manifest, it will not be deleted and will remain orphan. See [Support Object pruning](https://github.com/aws/aws-cdk/issues/10495) for more details. #### Dependencies @@ -482,9 +481,9 @@ const service = cluster.addManifest('my-service', { service.node.addDependency(namespace); // will apply `my-namespace` before `my-service`. ``` -NOTE: when a `KubernetesManifest` includes multiple resources (either directly +**NOTE:** when a `KubernetesManifest` includes multiple resources (either directly or through `cluster.addManifest()`) (e.g. `cluster.addManifest('foo', r1, r2, -r3,...))`), these resources will be applied as a single manifest via `kubectl` +r3,...)`), these resources will be applied as a single manifest via `kubectl` and will be applied sequentially (the standard behavior in `kubectl`). 
### Patching Kubernetes Resources @@ -582,7 +581,7 @@ If the cluster is configured with private-only or private and restricted public Kubernetes [endpoint access](#endpoint-access), you must also specify: - `kubectlSecurityGroupId` - the ID of an EC2 security group that is allowed - connections to the cluster's control security group. + connections to the cluster's control security group. For example, the EKS managed [cluster security group](#cluster-security-group). - `kubectlPrivateSubnetIds` - a list of private VPC subnets IDs that will be used to access the Kubernetes endpoint. @@ -598,7 +597,7 @@ users, roles and accounts. Furthermore, when auto-scaling capacity is added to the cluster (through `cluster.addCapacity` or `cluster.addAutoScalingGroup`), the IAM instance role of the auto-scaling group will be automatically mapped to RBAC so nodes can -connect to the cluster. No manual mapping is required any longer. +connect to the cluster. No manual mapping is required. For example, let's say you want to grant an IAM user administrative privileges on your cluster: @@ -657,11 +656,10 @@ const clusterEncryptionConfigKeyArn = cluster.clusterEncryptionConfigKeyArn; ### Node ssh Access If you want to be able to SSH into your worker nodes, you must already -have an SSH key in the region you're connecting to and pass it, and you must -be able to connect to the hosts (meaning they must have a public IP and you +have an SSH key in the region you're connecting to and pass it when you add capacity to the cluster. You must also be able to connect to the hosts (meaning they must have a public IP and you should be allowed to connect to them on port 22): -[ssh into nodes example](test/example.ssh-into-nodes.lit.ts) +See [SSH into nodes](test/example.ssh-into-nodes.lit.ts) for a code example. If you want to SSH into nodes in a private subnet, you should set up a bastion host in a public subnet. 
That setup is recommended, but is @@ -699,7 +697,7 @@ cluster.addChart('NginxIngress', { Helm charts will be installed and updated using `helm upgrade --install`, where a few parameters are being passed down (such as `repo`, `values`, `version`, `namespace`, `wait`, `timeout`, etc). This means that if the chart is added to CDK with the same release name, it will try to update -the chart in the cluster. The chart will exists as CloudFormation resource. +the chart in the cluster. Helm charts are implemented as CloudFormation resources in CDK. This means that if the chart is deleted from your code (or the stack is @@ -775,9 +773,11 @@ const mypod = cluster.addManifest('mypod', { } }); -// create the resource after the service account +// create the resource after the service account. +// note that using `sa.serviceAccountName` above **does not** translate into a dependency. +// this is why an explicit dependency is needed. See https://github.com/aws/aws-cdk/issues/9910 for more details. 
mypod.node.addDependency(sa); // print the IAM role arn for this service account new cdk.CfnOutput(this, 'ServiceAccountIamRole', { value: sa.role.roleArn }) -``` +``` \ No newline at end of file diff --git a/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts b/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts index f68123c4d36be..5160904878e11 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts +++ b/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts @@ -212,7 +212,7 @@ export class Bundling { }); return lambda.Code.fromAsset(projectRoot, { - assetHashType: cdk.AssetHashType.BUNDLE, + assetHashType: cdk.AssetHashType.OUTPUT, bundling: { local: localBundler, ...dockerBundler.bundlingOptions, diff --git a/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts b/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts index 40fdb21ac60a8..654e3b49f9029 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts +++ b/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts @@ -40,7 +40,7 @@ test('Parcel bundling', () => { // Correctly bundles with parcel expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ local: { props: expect.objectContaining({ @@ -93,7 +93,7 @@ test('Parcel bundling with handler named index.ts', () => { // Correctly bundles with parcel expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ command: [ 'bash', '-c', @@ -112,7 +112,7 @@ test('Parcel bundling with tsx handler', () => { // Correctly bundles with parcel expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ command: [ 'bash', '-c', @@ -152,7 +152,7 @@ test('Parcel bundling with externals and dependencies', 
() => { // Correctly bundles with parcel expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ command: [ 'bash', '-c', @@ -199,7 +199,7 @@ test('Detects yarn.lock', () => { // Correctly bundles with parcel expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ command: expect.arrayContaining([ expect.stringMatching(/yarn\.lock.+yarn install/), @@ -316,7 +316,7 @@ test('Custom bundling docker image', () => { }); expect(Code.fromAsset).toHaveBeenCalledWith('/project', { - assetHashType: AssetHashType.BUNDLE, + assetHashType: AssetHashType.OUTPUT, bundling: expect.objectContaining({ image: { image: 'my-custom-image' }, }), diff --git a/packages/@aws-cdk/aws-lambda/lib/event-source-mapping.ts b/packages/@aws-cdk/aws-lambda/lib/event-source-mapping.ts index 760aca4f71e4d..a9fbc97bdea0e 100644 --- a/packages/@aws-cdk/aws-lambda/lib/event-source-mapping.ts +++ b/packages/@aws-cdk/aws-lambda/lib/event-source-mapping.ts @@ -17,7 +17,7 @@ export interface EventSourceMappingOptions { * * Valid Range: Minimum value of 1. Maximum value of 10000. * - * @default - Amazon Kinesis and Amazon DynamoDB is 100 records. + * @default - Amazon Kinesis, Amazon DynamoDB, and Amazon MSK is 100 records. * Both the default and maximum for Amazon SQS are 10 messages. */ readonly batchSize?: number; @@ -44,12 +44,12 @@ export interface EventSourceMappingOptions { readonly enabled?: boolean; /** - * The position in the DynamoDB or Kinesis stream where AWS Lambda should + * The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should * start reading. 
* * @see https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType * - * @default - Required for Amazon Kinesis and Amazon DynamoDB Streams sources. + * @default - Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. */ readonly startingPosition?: StartingPosition; @@ -91,6 +91,13 @@ export interface EventSourceMappingOptions { * @default 1 */ readonly parallelizationFactor?: number; + + /** + * The name of the Kafka topic. + * + * @default - no topic + */ + readonly kafkaTopic?: string; } /** @@ -185,13 +192,14 @@ export class EventSourceMapping extends cdk.Resource implements IEventSourceMapp maximumRecordAgeInSeconds: props.maxRecordAge?.toSeconds(), maximumRetryAttempts: props.retryAttempts, parallelizationFactor: props.parallelizationFactor, + topics: props.kafkaTopic !== undefined ? [props.kafkaTopic] : undefined, }); this.eventSourceMappingId = cfnEventSourceMapping.ref; } } /** - * The position in the DynamoDB or Kinesis stream where AWS Lambda should start + * The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start * reading. 
*/ export enum StartingPosition { diff --git a/packages/@aws-cdk/aws-lambda/test/test.event-source-mapping.ts b/packages/@aws-cdk/aws-lambda/test/test.event-source-mapping.ts index 4de8108b609d2..bc9492f1d2f7e 100644 --- a/packages/@aws-cdk/aws-lambda/test/test.event-source-mapping.ts +++ b/packages/@aws-cdk/aws-lambda/test/test.event-source-mapping.ts @@ -1,3 +1,4 @@ +import { expect, haveResourceLike } from '@aws-cdk/assert'; import * as cdk from '@aws-cdk/core'; import { Test } from 'nodeunit'; import { Code, EventSourceMapping, Function, Runtime } from '../lib'; @@ -185,4 +186,31 @@ export = { test.equals(imported.stack.stackName, 'test-stack'); test.done(); }, + + 'accepts if kafkaTopic is a parameter'(test: Test) { + const stack = new cdk.Stack(); + const topicNameParam = new cdk.CfnParameter(stack, 'TopicNameParam', { + type: 'String', + }); + + const fn = new Function(stack, 'fn', { + handler: 'index.handler', + code: Code.fromInline('exports.handler = ${handler.toString()}'), + runtime: Runtime.NODEJS_10_X, + }); + + new EventSourceMapping(stack, 'test', { + target: fn, + eventSourceArn: '', + kafkaTopic: topicNameParam.valueAsString, + }); + + expect(stack).to(haveResourceLike('AWS::Lambda::EventSourceMapping', { + Topics: [{ + Ref: 'TopicNameParam', + }], + })); + + test.done(); + }, }; diff --git a/packages/@aws-cdk/aws-rds/README.md b/packages/@aws-cdk/aws-rds/README.md index cc8be1361bca8..88a2d001107de 100644 --- a/packages/@aws-cdk/aws-rds/README.md +++ b/packages/@aws-cdk/aws-rds/README.md @@ -27,9 +27,7 @@ your instances will be launched privately or publicly: ```ts const cluster = new rds.DatabaseCluster(this, 'Database', { engine: rds.DatabaseClusterEngine.auroraMysql({ version: rds.AuroraMysqlEngineVersion.VER_2_08_1 }), - masterUser: { - username: 'clusteradmin' - }, + credentials: rds.Credentials.fromUsername('clusteradmin'), // Optional - will default to admin instanceProps: { // optional, defaults to t3.medium instanceType: 
ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), @@ -76,7 +74,7 @@ const instance = new rds.DatabaseInstance(this, 'Instance', { engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), // optional, defaults to m5.large instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL), - masterUsername: 'syscdk', + credentials: rds.Credentials.fromUsername('syscdk'), // Optional - will default to admin vpc, vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE @@ -103,7 +101,6 @@ const instance = new rds.DatabaseInstance(this, 'Instance', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), // optional, defaults to m5.large instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'syscdk', vpc, maxAllocatedStorage: 200, }); @@ -141,6 +138,35 @@ method: const rule = instance.onEvent('InstanceEvent', { target: new targets.LambdaFunction(fn) }); ``` +### Login credentials + +By default, database instances and clusters will have an `admin` user with an auto-generated password. +An alternative username (and password) may be specified for the admin user instead of the default. + +The following examples use a `DatabaseInstance`, but the same usage is applicable to `DatabaseCluster`. 
+ +```ts +const engine = rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }); +new rds.DatabaseInstance(this, 'InstanceWithUsername', { + engine, + vpc, + credentials: rds.Credentials.fromUsername('postgres'), // Creates an admin user of postgres with a generated password +}); + +new rds.DatabaseInstance(this, 'InstanceWithUsernameAndPassword', { + engine, + vpc, + credentials: rds.Credentials.fromUsername('postgres', { password: SecretValue.ssmSecure('/dbPassword', '1') }), // Use password from SSM +}); + +const mySecret = secretsmanager.Secret.fromSecretName(this, 'DBSecret', 'myDBLoginInfo'); +new rds.DatabaseInstance(this, 'InstanceWithSecretLogin', { + engine, + vpc, + credentials: rds.Credentials.fromSecret(mySecret), // Get both username and password from existing secret +}); +``` + ### Connecting To control who can access the cluster or instance, use the `.connections` attribute. RDS databases have @@ -211,7 +237,6 @@ The following example shows enabling IAM authentication for a database instance ```ts const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, iamAuthentication: true, // Optional - will be automatically set if you call grantConnect(). }); @@ -240,7 +265,6 @@ const role = new iam.Role(stack, 'RDSDirectoryServicesRole', { }); const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, domain: 'd-????????', // The ID of the domain for the instance to join. domainRole: role, // Optional - will be create automatically if not provided. 
diff --git a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts index 76a542078958a..9fb5bf08ccf3a 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts @@ -154,6 +154,7 @@ interface MysqlClusterEngineBaseProps { } abstract class MySqlClusterEngineBase extends ClusterEngineBase { + public readonly engineFamily = 'MYSQL'; public readonly supportedLogTypes: string[] = ['error', 'general', 'slowquery', 'audit']; constructor(props: MysqlClusterEngineBaseProps) { @@ -322,6 +323,10 @@ export class AuroraMysqlEngineVersion { public static readonly VER_2_08_0 = AuroraMysqlEngineVersion.builtIn_5_7('2.08.0'); /** Version "5.7.mysql_aurora.2.08.1". */ public static readonly VER_2_08_1 = AuroraMysqlEngineVersion.builtIn_5_7('2.08.1'); + /** Version "5.7.mysql_aurora.2.08.2". */ + public static readonly VER_2_08_2 = AuroraMysqlEngineVersion.builtIn_5_7('2.08.2'); + /** Version "5.7.mysql_aurora.2.09.0". */ + public static readonly VER_2_09_0 = AuroraMysqlEngineVersion.builtIn_5_7('2.09.0'); /** * Create a new AuroraMysqlEngineVersion with an arbitrary version. 
@@ -489,6 +494,7 @@ class AuroraPostgresClusterEngine extends ClusterEngineBase { */ private static readonly S3_EXPORT_FEATURE_NAME = 's3Export'; + public readonly engineFamily = 'POSTGRESQL'; public readonly supportedLogTypes: string[] = ['postgresql']; constructor(version?: AuroraPostgresEngineVersion) { diff --git a/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts b/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts index a464e2a0fd5c0..de1f89bd3dafa 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts @@ -1,6 +1,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import { IResource } from '@aws-cdk/core'; +import { IClusterEngine } from './cluster-engine'; import { Endpoint } from './endpoint'; import { DatabaseProxy, DatabaseProxyOptions } from './proxy'; @@ -35,6 +36,12 @@ export interface IDatabaseCluster extends IResource, ec2.IConnectable, secretsma */ readonly instanceEndpoints: Endpoint[]; + /** + * The engine of this Cluster. + * May be not known for imported Clusters if it wasn't provided explicitly. + */ + readonly engine?: IClusterEngine; + /** * Add a new db proxy to this cluster. */ @@ -92,4 +99,11 @@ export interface DatabaseClusterAttributes { * @default - no instance endpoints */ readonly instanceEndpointAddresses?: string[]; + + /** + * The engine of the existing Cluster. 
+ * + * @default - the imported Cluster's engine is unknown + */ + readonly engine?: IClusterEngine; } diff --git a/packages/@aws-cdk/aws-rds/lib/cluster.ts b/packages/@aws-cdk/aws-rds/lib/cluster.ts index 9bd29be6e64e0..f3f9300788e1c 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster.ts @@ -11,7 +11,7 @@ import { DatabaseSecret } from './database-secret'; import { Endpoint } from './endpoint'; import { IParameterGroup } from './parameter-group'; import { applyRemovalPolicy, defaultDeletionProtection, setupS3ImportExport } from './private/util'; -import { BackupProps, InstanceProps, Login, PerformanceInsightRetention, RotationMultiUserOptions } from './props'; +import { BackupProps, Credentials, InstanceProps, PerformanceInsightRetention, RotationMultiUserOptions } from './props'; import { DatabaseProxy, DatabaseProxyOptions, ProxyTarget } from './proxy'; import { CfnDBCluster, CfnDBClusterProps, CfnDBInstance } from './rds.generated'; import { ISubnetGroup, SubnetGroup } from './subnet-group'; @@ -227,6 +227,9 @@ interface DatabaseClusterBaseProps { * A new or imported clustered database. */ export abstract class DatabaseClusterBase extends Resource implements IDatabaseCluster { + // only required because of JSII bug: https://github.com/aws/jsii/issues/2040 + public abstract readonly engine?: IClusterEngine; + /** * Identifier of the cluster */ @@ -281,7 +284,11 @@ export abstract class DatabaseClusterBase extends Resource implements IDatabaseC * Abstract base for ``DatabaseCluster`` and ``DatabaseClusterFromSnapshot`` */ abstract class DatabaseClusterNew extends DatabaseClusterBase { - + /** + * The engine for this Cluster. + * Never undefined. 
+ */ + public readonly engine?: IClusterEngine; public readonly instanceIdentifiers: string[] = []; public readonly instanceEndpoints: Endpoint[] = []; @@ -331,6 +338,7 @@ abstract class DatabaseClusterNew extends DatabaseClusterBase { const clusterParameterGroup = props.parameterGroup ?? clusterEngineBindConfig.parameterGroup; const clusterParameterGroupConfig = clusterParameterGroup?.bindToCluster({}); + this.engine = props.engine; this.newCfnProps = { // Basic @@ -359,6 +367,7 @@ abstract class DatabaseClusterNew extends DatabaseClusterBase { class ImportedDatabaseCluster extends DatabaseClusterBase implements IDatabaseCluster { public readonly clusterIdentifier: string; public readonly connections: ec2.Connections; + public readonly engine?: IClusterEngine; private readonly _clusterEndpoint?: Endpoint; private readonly _clusterReadEndpoint?: Endpoint; @@ -375,6 +384,7 @@ class ImportedDatabaseCluster extends DatabaseClusterBase implements IDatabaseCl securityGroups: attrs.securityGroups, defaultPort, }); + this.engine = attrs.engine; this._clusterEndpoint = (attrs.clusterEndpointAddress && attrs.port) ? new Endpoint(attrs.clusterEndpointAddress, attrs.port) : undefined; this._clusterReadEndpoint = (attrs.readerEndpointAddress && attrs.port) ? new Endpoint(attrs.readerEndpointAddress, attrs.port) : undefined; @@ -418,9 +428,11 @@ class ImportedDatabaseCluster extends DatabaseClusterBase implements IDatabaseCl */ export interface DatabaseClusterProps extends DatabaseClusterBaseProps { /** - * Username and password for the administrative user + * Credentials for the administrative user + * + * @default - A username of 'admin' and SecretsManager-generated password */ - readonly masterUser: Login; + readonly credentials?: Credentials; /** * Whether to enable storage encryption. 
@@ -476,23 +488,20 @@ export class DatabaseCluster extends DatabaseClusterNew { this.singleUserRotationApplication = props.engine.singleUserRotationApplication; this.multiUserRotationApplication = props.engine.multiUserRotationApplication; - let secret: DatabaseSecret | undefined; - if (!props.masterUser.password) { - secret = new DatabaseSecret(this, 'Secret', { - username: props.masterUser.username, - encryptionKey: props.masterUser.encryptionKey, - }); + let credentials = props.credentials ?? Credentials.fromUsername('admin'); + if (!credentials.secret && !credentials.password) { + credentials = Credentials.fromSecret(new DatabaseSecret(this, 'Secret', { + username: credentials.username, + encryptionKey: credentials.encryptionKey, + })); } + const secret = credentials.secret; const cluster = new CfnDBCluster(this, 'Resource', { ...this.newCfnProps, // Admin - masterUsername: secret ? secret.secretValueFromJson('username').toString() : props.masterUser.username, - masterUserPassword: secret - ? secret.secretValueFromJson('password').toString() - : (props.masterUser.password - ? props.masterUser.password.toString() - : undefined), + masterUsername: credentials.username, + masterUserPassword: credentials.password?.toString(), // Encryption kmsKeyId: props.storageEncryptionKey?.keyArn, storageEncrypted: props.storageEncryptionKey ? true : props.storageEncrypted, diff --git a/packages/@aws-cdk/aws-rds/lib/engine.ts b/packages/@aws-cdk/aws-rds/lib/engine.ts index b1ccef4084c87..2feced9927ca2 100644 --- a/packages/@aws-cdk/aws-rds/lib/engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/engine.ts @@ -28,4 +28,15 @@ export interface IEngine { * (which means the major version of the engine is also not known) */ readonly parameterGroupFamily?: string; + + /** + * The family this engine belongs to, + * like "MYSQL", or "POSTGRESQL". + * This property is used when creating a Database Proxy. 
+ * Most engines don't belong to any family + * (and because of that, you can't create Database Proxies for their Clusters or Instances). + * + * @default - the engine doesn't belong to any family + */ + readonly engineFamily?: string; } diff --git a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts index 3b2310212dbf8..25370706664c7 100644 --- a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts @@ -109,6 +109,7 @@ interface InstanceEngineBaseProps { readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication; readonly version?: EngineVersion; readonly parameterGroupFamily?: string; + readonly engineFamily?: string; readonly features?: InstanceEngineFeatures; } @@ -118,6 +119,7 @@ abstract class InstanceEngineBase implements IInstanceEngine { public readonly parameterGroupFamily?: string; public readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication; public readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication; + public readonly engineFamily?: string; private readonly features?: InstanceEngineFeatures; @@ -129,6 +131,7 @@ abstract class InstanceEngineBase implements IInstanceEngine { this.engineVersion = props.version; this.parameterGroupFamily = props.parameterGroupFamily ?? (this.engineVersion ? `${this.engineType}${this.engineVersion.majorVersion}` : undefined); + this.engineFamily = props.engineFamily; } public bindToInstance(_scope: core.Construct, options: InstanceEngineBindOptions): InstanceEngineConfig { @@ -391,6 +394,7 @@ class MySqlInstanceEngine extends InstanceEngineBase { majorVersion: version.mysqlMajorVersion, } : undefined, + engineFamily: 'MYSQL', }); } } @@ -586,6 +590,7 @@ class PostgresInstanceEngine extends InstanceEngineBase { } : undefined, features: version ? 
version?._features : { s3Import: 's3Import' }, + engineFamily: 'POSTGRESQL', }); } } diff --git a/packages/@aws-cdk/aws-rds/lib/instance.ts b/packages/@aws-cdk/aws-rds/lib/instance.ts index 1339306e9553f..1479fa2fafe40 100644 --- a/packages/@aws-cdk/aws-rds/lib/instance.ts +++ b/packages/@aws-cdk/aws-rds/lib/instance.ts @@ -5,14 +5,14 @@ import * as kms from '@aws-cdk/aws-kms'; import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; -import { Construct, Duration, IResource, Lazy, RemovalPolicy, Resource, SecretValue, Stack, Token } from '@aws-cdk/core'; +import { Construct, Duration, IResource, Lazy, RemovalPolicy, Resource, Stack, Token } from '@aws-cdk/core'; import { DatabaseSecret } from './database-secret'; import { Endpoint } from './endpoint'; import { IInstanceEngine } from './instance-engine'; import { IOptionGroup } from './option-group'; import { IParameterGroup } from './parameter-group'; import { applyRemovalPolicy, defaultDeletionProtection, engineDescription, setupS3ImportExport } from './private/util'; -import { PerformanceInsightRetention, RotationMultiUserOptions } from './props'; +import { Credentials, PerformanceInsightRetention, RotationMultiUserOptions, SnapshotCredentials } from './props'; import { DatabaseProxy, DatabaseProxyOptions, ProxyTarget } from './proxy'; import { CfnDBInstance, CfnDBInstanceProps } from './rds.generated'; import { ISubnetGroup, SubnetGroup } from './subnet-group'; @@ -50,6 +50,13 @@ export interface IDatabaseInstance extends IResource, ec2.IConnectable, secretsm */ readonly instanceEndpoint: Endpoint; + /** + * The engine of this database Instance. + * May be not known for imported Instances if it wasn't provided explicitly, + * or for read replicas. + */ + readonly engine?: IInstanceEngine; + /** * Add a new db proxy to this instance. 
*/ @@ -90,6 +97,13 @@ export interface DatabaseInstanceAttributes { * The security groups of the instance. */ readonly securityGroups: ec2.ISecurityGroup[]; + + /** + * The engine of the existing database Instance. + * + * @default - the imported Instance's engine is unknown + */ + readonly engine?: IInstanceEngine; } /** @@ -110,6 +124,7 @@ export abstract class DatabaseInstanceBase extends Resource implements IDatabase public readonly dbInstanceEndpointAddress = attrs.instanceEndpointAddress; public readonly dbInstanceEndpointPort = attrs.port.toString(); public readonly instanceEndpoint = new Endpoint(attrs.instanceEndpointAddress, attrs.port); + public readonly engine = attrs.engine; protected enableIamAuthentication = true; } @@ -120,6 +135,8 @@ export abstract class DatabaseInstanceBase extends Resource implements IDatabase public abstract readonly dbInstanceEndpointAddress: string; public abstract readonly dbInstanceEndpointPort: string; public abstract readonly instanceEndpoint: Endpoint; + // only required because of JSII bug: https://github.com/aws/jsii/issues/2040 + public abstract readonly engine?: IInstanceEngine; protected abstract enableIamAuthentication?: boolean; /** @@ -750,20 +767,6 @@ export interface DatabaseInstanceSourceProps extends DatabaseInstanceNewProps { */ readonly allocatedStorage?: number; - /** - * The master user password. - * - * @default - a Secrets Manager generated password - */ - readonly masterUserPassword?: SecretValue; - - /** - * The KMS key used to encrypt the secret for the master user password. - * - * @default - default master key - */ - readonly masterUserPasswordEncryptionKey?: kms.IKey; - /** * The name of the database. 
* @@ -783,6 +786,7 @@ export interface DatabaseInstanceSourceProps extends DatabaseInstanceNewProps { * A new source database instance (not a read replica) */ abstract class DatabaseInstanceSource extends DatabaseInstanceNew implements IDatabaseInstance { + public readonly engine?: IInstanceEngine; /** * The AWS Secrets Manager secret attached to the instance. */ @@ -799,6 +803,7 @@ abstract class DatabaseInstanceSource extends DatabaseInstanceNew implements IDa this.singleUserRotationApplication = props.engine.singleUserRotationApplication; this.multiUserRotationApplication = props.engine.multiUserRotationApplication; + this.engine = props.engine; let { s3ImportRole, s3ExportRole } = setupS3ImportExport(this, props, true); const engineConfig = props.engine.bindToInstance(this, { @@ -894,9 +899,11 @@ abstract class DatabaseInstanceSource extends DatabaseInstanceNew implements IDa */ export interface DatabaseInstanceProps extends DatabaseInstanceSourceProps { /** - * The master user name. + * Credentials for the administrative user + * + * @default - A username of 'admin' and SecretsManager-generated password */ - readonly masterUsername: string; + readonly credentials?: Credentials; /** * For supported engines, specifies the character set to associate with the @@ -936,22 +943,21 @@ export class DatabaseInstance extends DatabaseInstanceSource implements IDatabas constructor(scope: Construct, id: string, props: DatabaseInstanceProps) { super(scope, id, props); - let secret: DatabaseSecret | undefined; - if (!props.masterUserPassword) { - secret = new DatabaseSecret(this, 'Secret', { - username: props.masterUsername, - encryptionKey: props.masterUserPasswordEncryptionKey, - }); + let credentials = props.credentials ?? 
Credentials.fromUsername('admin'); + if (!credentials.secret && !credentials.password) { + credentials = Credentials.fromSecret(new DatabaseSecret(this, 'Secret', { + username: credentials.username, + encryptionKey: credentials.encryptionKey, + })); } + const secret = credentials.secret; const instance = new CfnDBInstance(this, 'Resource', { ...this.sourceCfnProps, characterSetName: props.characterSetName, kmsKeyId: props.storageEncryptionKey && props.storageEncryptionKey.keyArn, - masterUsername: secret ? secret.secretValueFromJson('username').toString() : props.masterUsername, - masterUserPassword: secret - ? secret.secretValueFromJson('password').toString() - : props.masterUserPassword && props.masterUserPassword.toString(), + masterUsername: credentials.username, + masterUserPassword: credentials.password?.toString(), storageEncrypted: props.storageEncryptionKey ? true : props.storageEncrypted, }); @@ -985,26 +991,14 @@ export interface DatabaseInstanceFromSnapshotProps extends DatabaseInstanceSourc readonly snapshotIdentifier: string; /** - * The master user name. - * - * Specify this prop with the **current** master user name of the snapshot - * only when generating a new master user password with `generateMasterUserPassword`. - * The value will be set in the generated secret attached to the instance. + * Master user credentials. * - * It is not possible to change the master user name of a RDS instance. + * Note - It is not possible to change the master username for a snapshot; + * however, it is possible to provide (or generate) a new password. * - * @default - inherited from the snapshot + * @default - The existing username and password from the snapshot will be used. */ - readonly masterUsername?: string; - - /** - * Whether to generate a new master user password and store it in - * Secrets Manager. `masterUsername` must be specified with the **current** - * master user name of the snapshot when this property is set to true. 
- * - * @default false - */ - readonly generateMasterUserPassword?: boolean; + readonly credentials?: SnapshotCredentials; } /** @@ -1022,25 +1016,17 @@ export class DatabaseInstanceFromSnapshot extends DatabaseInstanceSource impleme constructor(scope: Construct, id: string, props: DatabaseInstanceFromSnapshotProps) { super(scope, id, props); - let secret: DatabaseSecret | undefined; - - if (props.generateMasterUserPassword) { - if (!props.masterUsername) { // We need the master username to include it in the generated secret - throw new Error('`masterUsername` must be specified when `generateMasterUserPassword` is set to true.'); - } - - if (props.masterUserPassword) { - throw new Error('Cannot specify `masterUserPassword` when `generateMasterUserPassword` is set to true.'); + let credentials = props.credentials; + let secret = credentials?.secret; + if (!secret && credentials?.generatePassword) { + if (!credentials.username) { + throw new Error('`credentials` `username` must be specified when `generatePassword` is set to true'); } secret = new DatabaseSecret(this, 'Secret', { - username: props.masterUsername, - encryptionKey: props.masterUserPasswordEncryptionKey, + username: credentials.username, + encryptionKey: credentials.encryptionKey, }); - } else { - if (props.masterUsername) { // It's not possible to change the master username of a RDS instance - throw new Error('Cannot specify `masterUsername` when `generateMasterUserPassword` is set to false.'); - } } const instance = new CfnDBInstance(this, 'Resource', { @@ -1048,7 +1034,7 @@ export class DatabaseInstanceFromSnapshot extends DatabaseInstanceSource impleme dbSnapshotIdentifier: props.snapshotIdentifier, masterUserPassword: secret ? 
secret.secretValueFromJson('password').toString() - : props.masterUserPassword && props.masterUserPassword.toString(), + : credentials?.password?.toString(), }); this.instanceIdentifier = instance.ref; @@ -1112,6 +1098,7 @@ export class DatabaseInstanceReadReplica extends DatabaseInstanceNew implements public readonly dbInstanceEndpointAddress: string; public readonly dbInstanceEndpointPort: string; public readonly instanceEndpoint: Endpoint; + public readonly engine?: IInstanceEngine = undefined; protected readonly instanceType: ec2.InstanceType; constructor(scope: Construct, id: string, props: DatabaseInstanceReadReplicaProps) { diff --git a/packages/@aws-cdk/aws-rds/lib/private/util.ts b/packages/@aws-cdk/aws-rds/lib/private/util.ts index 0caf78551b88d..a8439b652abc9 100644 --- a/packages/@aws-cdk/aws-rds/lib/private/util.ts +++ b/packages/@aws-cdk/aws-rds/lib/private/util.ts @@ -1,7 +1,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as s3 from '@aws-cdk/aws-s3'; import { Construct, CfnDeletionPolicy, CfnResource, RemovalPolicy } from '@aws-cdk/core'; -import { IInstanceEngine } from '../instance-engine'; +import { IEngine } from '../engine'; /** Common base of `DatabaseInstanceProps` and `DatabaseClusterBaseProps` that has only the S3 props */ export interface DatabaseS3ImportExportProps { @@ -56,7 +56,7 @@ export function setupS3ImportExport( return { s3ImportRole, s3ExportRole }; } -export function engineDescription(engine: IInstanceEngine) { +export function engineDescription(engine: IEngine) { return engine.engineType + (engine.engineVersion?.fullVersion ? 
`-${engine.engineVersion.fullVersion}` : ''); } diff --git a/packages/@aws-cdk/aws-rds/lib/props.ts b/packages/@aws-cdk/aws-rds/lib/props.ts index 8e037b77c7174..81a27a1e05d70 100644 --- a/packages/@aws-cdk/aws-rds/lib/props.ts +++ b/packages/@aws-cdk/aws-rds/lib/props.ts @@ -94,30 +94,166 @@ export interface BackupProps { readonly preferredWindow?: string; } +/** + * Options for creating a Login from a username. + */ +export interface CredentialsFromUsernameOptions { + /** + * Password + * + * Do not put passwords in your CDK code directly. + * + * @default - a Secrets Manager generated password + */ + readonly password?: SecretValue; + + /** + * KMS encryption key to encrypt the generated secret. + * + * @default - default master key + */ + readonly encryptionKey?: kms.IKey; +} + /** * Username and password combination */ -export interface Login { +export abstract class Credentials { + + /** + * Creates Credentials for the given username, and optional password and key. + * If no password is provided, one will be generated and stored in SecretsManager. + */ + public static fromUsername(username: string, options: CredentialsFromUsernameOptions = {}): Credentials { + return { username, password: options.password, encryptionKey: options.encryptionKey }; + } + + /** + * Creates Credentials from an existing SecretsManager ``Secret`` (or ``DatabaseSecret``) + * + * The Secret must be a JSON string with a ``username`` and ``password`` field: + * ``` + * { + * ... + * "username": , + * "password": , + * } + * ``` + */ + public static fromSecret(secret: secretsmanager.Secret): Credentials { + return { + username: secret.secretValueFromJson('username').toString(), + password: secret.secretValueFromJson('password'), + encryptionKey: secret.encryptionKey, + secret, + }; + } + /** * Username */ - readonly username: string; + public abstract readonly username: string; /** * Password * * Do not put passwords in your CDK code directly. 
* - * @default a Secrets Manager generated password + * @default - a Secrets Manager generated password */ - readonly password?: SecretValue; + public abstract readonly password?: SecretValue; /** * KMS encryption key to encrypt the generated secret. * - * @default default master key + * @default - default master key */ - readonly encryptionKey?: kms.IKey; + public abstract readonly encryptionKey?: kms.IKey; + + /** + * Secret used to instantiate this Login. + * + * @default - none + */ + public abstract readonly secret?: secretsmanager.Secret; +} + +/** + * Credentials to update the password for a ``DatabaseInstanceFromSnapshot``. + */ +export abstract class SnapshotCredentials { + /** + * Generate a new password for the snapshot, using the existing username and an optional encryption key. + * + * Note - The username must match the existing master username of the snapshot. + */ + public static fromGeneratedPassword(username: string, encryptionKey?: kms.IKey): SnapshotCredentials { + return { generatePassword: true, username, encryptionKey }; + } + + /** + * Update the snapshot login with an existing password. + */ + public static fromPassword(password: SecretValue): SnapshotCredentials { + return { generatePassword: false, password }; + } + + /** + * Update the snapshot login with an existing password from a Secret. + * + * The Secret must be a JSON string with a ``password`` field: + * ``` + * { + * ... + * "password": , + * } + * ``` + */ + public static fromSecret(secret: secretsmanager.Secret): SnapshotCredentials { + return { + generatePassword: false, + password: secret.secretValueFromJson('password'), + secret, + }; + } + + /** + * The master user name. + * + * Must be the **current** master user name of the snapshot. + * It is not possible to change the master user name of a RDS instance. + * + * @default - the existing username from the snapshot + */ + public abstract readonly username?: string; + + /** + * Whether a new password should be generated. 
+ */ + public abstract readonly generatePassword: boolean; + + /** + * The master user password. + * + * Do not put passwords in your CDK code directly. + * + * @default - the existing password from the snapshot + */ + public abstract readonly password?: SecretValue; + + /** + * KMS encryption key to encrypt the generated secret. + * + * @default - default master key + */ + public abstract readonly encryptionKey?: kms.IKey; + + /** + * Secret used to instantiate this Login. + * + * @default - none + */ + public abstract readonly secret?: secretsmanager.Secret; } /** diff --git a/packages/@aws-cdk/aws-rds/lib/proxy.ts b/packages/@aws-cdk/aws-rds/lib/proxy.ts index 7109202019c2f..dec7df1443723 100644 --- a/packages/@aws-cdk/aws-rds/lib/proxy.ts +++ b/packages/@aws-cdk/aws-rds/lib/proxy.ts @@ -3,8 +3,10 @@ import * as iam from '@aws-cdk/aws-iam'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import * as cdk from '@aws-cdk/core'; import { IDatabaseCluster } from './cluster-ref'; +import { IEngine } from './engine'; import { IDatabaseInstance } from './instance'; -import { CfnDBCluster, CfnDBInstance, CfnDBProxy, CfnDBProxyTargetGroup } from './rds.generated'; +import { engineDescription } from './private/util'; +import { CfnDBProxy, CfnDBProxyTargetGroup } from './rds.generated'; /** * SessionPinningFilter @@ -47,7 +49,7 @@ export class ProxyTarget { * @param instance RDS database instance */ public static fromInstance(instance: IDatabaseInstance): ProxyTarget { - return new ProxyTarget(instance); + return new ProxyTarget(instance, undefined); } /** @@ -59,34 +61,26 @@ export class ProxyTarget { return new ProxyTarget(undefined, cluster); } - private constructor(private readonly dbInstance?: IDatabaseInstance, private readonly dbCluster?: IDatabaseCluster) {} + private constructor( + private readonly dbInstance: IDatabaseInstance | undefined, + private readonly dbCluster: IDatabaseCluster | undefined) { + } /** * Bind this target to the specified 
database proxy. */ public bind(_: DatabaseProxy): ProxyTargetConfig { - let engine: string | undefined; - if (this.dbCluster && this.dbInstance) { - throw new Error('Proxy cannot target both database cluster and database instance.'); - } else if (this.dbCluster) { - engine = (this.dbCluster.node.defaultChild as CfnDBCluster).engine; - } else if (this.dbInstance) { - engine = (this.dbInstance.node.defaultChild as CfnDBInstance).engine; + const engine: IEngine | undefined = this.dbInstance?.engine ?? this.dbCluster?.engine; + + if (!engine) { + const errorResource = this.dbCluster ?? this.dbInstance; + throw new Error(`Could not determine engine for proxy target '${errorResource?.node.path}'. ` + + 'Please provide it explicitly when importing the resource'); } - let engineFamily; - switch (engine) { - case 'aurora': - case 'aurora-mysql': - case 'mysql': - engineFamily = 'MYSQL'; - break; - case 'aurora-postgresql': - case 'postgres': - engineFamily = 'POSTGRESQL'; - break; - default: - throw new Error(`Unsupported engine type - ${engine}`); + const engineFamily = engine.engineFamily; + if (!engineFamily) { + throw new Error(`Engine '${engineDescription(engine)}' does not support proxies`); } return { @@ -105,12 +99,14 @@ export interface ProxyTargetConfig { * The engine family of the database instance or cluster this proxy connects with. */ readonly engineFamily: string; + /** * The database instances to which this proxy connects. * Either this or `dbClusters` will be set and the other `undefined`. * @default - `undefined` if `dbClusters` is set. */ readonly dbInstances?: IDatabaseInstance[]; + /** * The database clusters to which this proxy connects. * Either this or `dbInstances` will be set and the other `undefined`. 
diff --git a/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.ts b/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.ts index 900364b2aa7b8..0a429ae3d5742 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.ts @@ -10,9 +10,6 @@ const vpc = new ec2.Vpc(stack, 'VPC'); /// !show const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.AURORA, - masterUser: { - username: 'admin', - }, instanceProps: { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL), vpc, diff --git a/packages/@aws-cdk/aws-rds/test/integ.cluster-s3.ts b/packages/@aws-cdk/aws-rds/test/integ.cluster-s3.ts index 2153d8ea95410..5b4f2d3742f02 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.cluster-s3.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.cluster-s3.ts @@ -2,7 +2,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as kms from '@aws-cdk/aws-kms'; import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; -import { DatabaseCluster, DatabaseClusterEngine } from '../lib'; +import { Credentials, DatabaseCluster, DatabaseClusterEngine } from '../lib'; const app = new cdk.App(); const stack = new cdk.Stack(app, 'aws-cdk-rds-s3-integ'); @@ -16,10 +16,7 @@ const exportBucket = new s3.Bucket(stack, 'ExportBucket'); const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { - username: 'admin', - password: cdk.SecretValue.plainText('7959866cacc02c2d243ecfe177464fe6'), - }, + credentials: Credentials.fromUsername('admin', { password: cdk.SecretValue.plainText('7959866cacc02c2d243ecfe177464fe6') }), instanceProps: { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL), vpcSubnets: { subnetType: ec2.SubnetType.PUBLIC }, diff --git a/packages/@aws-cdk/aws-rds/test/integ.cluster.ts b/packages/@aws-cdk/aws-rds/test/integ.cluster.ts 
index a5bdaba4883c9..245ab98ab8e78 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.cluster.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.cluster.ts @@ -1,7 +1,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as kms from '@aws-cdk/aws-kms'; import * as cdk from '@aws-cdk/core'; -import { DatabaseCluster, DatabaseClusterEngine, ParameterGroup } from '../lib'; +import { Credentials, DatabaseCluster, DatabaseClusterEngine, ParameterGroup } from '../lib'; const app = new cdk.App(); const stack = new cdk.Stack(app, 'aws-cdk-rds-integ'); @@ -20,10 +20,7 @@ const kmsKey = new kms.Key(stack, 'DbSecurity'); const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { - username: 'admin', - password: cdk.SecretValue.plainText('7959866cacc02c2d243ecfe177464fe6'), - }, + credentials: Credentials.fromUsername('admin', { password: cdk.SecretValue.plainText('7959866cacc02c2d243ecfe177464fe6') }), instanceProps: { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL), vpcSubnets: { subnetType: ec2.SubnetType.PUBLIC }, diff --git a/packages/@aws-cdk/aws-rds/test/integ.instance-s3.ts b/packages/@aws-cdk/aws-rds/test/integ.instance-s3.ts index d3503d5889404..876ac2251e8fc 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.instance-s3.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.instance-s3.ts @@ -13,7 +13,6 @@ const exportBucket = new s3.Bucket(stack, 'ExportBucket', { removalPolicy: cdk.R new DatabaseInstance(stack, 'Database', { engine: DatabaseInstanceEngine.sqlServerSe({ version: SqlServerEngineVersion.VER_14_00_3192_2_V1 }), - masterUsername: 'admin', vpc, licenseModel: LicenseModel.LICENSE_INCLUDED, s3ImportBuckets: [importBucket], diff --git a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts index 7f36806c35230..eafe7a2953735 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts +++ 
b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts @@ -49,7 +49,7 @@ class DatabaseInstanceStack extends cdk.Stack { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM), multiAz: true, storageType: rds.StorageType.IO1, - masterUsername: 'syscdk', + credentials: rds.Credentials.fromUsername('syscdk'), vpc, databaseName: 'ORCL', storageEncrypted: true, diff --git a/packages/@aws-cdk/aws-rds/test/integ.proxy.ts b/packages/@aws-cdk/aws-rds/test/integ.proxy.ts index 8c2ef1879787b..e45da0950ed2a 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.proxy.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.proxy.ts @@ -10,7 +10,7 @@ const vpc = new ec2.Vpc(stack, 'vpc', { maxAzs: 2 }); const dbInstance = new rds.DatabaseInstance(stack, 'dbInstance', { engine: rds.DatabaseInstanceEngine.POSTGRES, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM), - masterUsername: 'master', + credentials: rds.Credentials.fromUsername('master'), vpc, }); diff --git a/packages/@aws-cdk/aws-rds/test/test.cluster.ts b/packages/@aws-cdk/aws-rds/test/test.cluster.ts index 8915973a5938d..82dce160e7128 100644 --- a/packages/@aws-cdk/aws-rds/test/test.cluster.ts +++ b/packages/@aws-cdk/aws-rds/test/test.cluster.ts @@ -20,7 +20,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -61,7 +61,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -95,7 +95,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -133,7 +133,7 @@ export = { }); new 
DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -157,7 +157,7 @@ export = { const vpc = new ec2.Vpc(stack, 'Vpc'); new DatabaseCluster(stack, 'Cluster', { - masterUser: { username: 'admin' }, + credentials: { username: 'admin' }, engine: DatabaseClusterEngine.AURORA, instanceProps: { instanceType: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.LARGE), @@ -182,7 +182,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -239,7 +239,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -276,7 +276,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -305,7 +305,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -333,7 +333,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -358,7 +358,7 @@ export = { test.throws(() => { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -383,7 +383,7 @@ export = { engine: DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_2_04_4, }), - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -411,7 +411,7 @@ export = { engine: DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.VER_10_7, }), - 
masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -437,7 +437,7 @@ export = { // WHEN const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -538,7 +538,7 @@ export = { const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_5_7_12 }), - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -569,7 +569,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -635,7 +635,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -665,7 +665,7 @@ export = { // WHEN const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -687,7 +687,7 @@ export = { const vpc = new ec2.Vpc(stack, 'VPC'); const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, - masterUser: { username: 'admin' }, + credentials: { username: 'admin' }, instanceProps: { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), vpc, @@ -716,7 +716,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -764,7 +764,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -852,7 +852,7 @@ export = { version: 
AuroraPostgresEngineVersion.VER_10_12, }), instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -892,7 +892,7 @@ export = { version: AuroraPostgresEngineVersion.VER_10_4, }), instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -909,7 +909,7 @@ export = { version: AuroraPostgresEngineVersion.VER_10_4, }), instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -936,7 +936,7 @@ export = { version: AuroraPostgresEngineVersion.VER_10_12, }), instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -975,7 +975,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1023,7 +1023,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1113,7 +1113,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1184,7 +1184,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1251,7 +1251,7 @@ export = { version: AuroraPostgresEngineVersion.VER_11_6, }), instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1291,7 +1291,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_POSTGRESQL, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1339,7 +1339,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, instances: 1, - masterUser: { + credentials: { username: 'admin', }, 
instanceProps: { @@ -1372,7 +1372,7 @@ export = { test.throws(() => new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1400,7 +1400,7 @@ export = { test.throws(() => new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1422,7 +1422,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -1449,7 +1449,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -1494,7 +1494,7 @@ export = { test.throws(() => { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -1517,7 +1517,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, - masterUser: { + credentials: { username: 'admin', password: cdk.SecretValue.plainText('tooshort'), }, @@ -1545,7 +1545,7 @@ export = { new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA, instances: 1, - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1604,7 +1604,7 @@ export = { // WHEN new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2 }), - masterUser: { + credentials: { username: 'admin', }, instanceProps: { @@ -1630,7 +1630,7 @@ export = { // WHEN const cluster = new DatabaseCluster(stack, 'Database', { engine: DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2 }), - masterUser: { + 
credentials: { username: 'admin', }, instanceProps: { diff --git a/packages/@aws-cdk/aws-rds/test/test.instance.ts b/packages/@aws-cdk/aws-rds/test/test.instance.ts index 2cd024ead7aab..6dad43e8ec7f3 100644 --- a/packages/@aws-cdk/aws-rds/test/test.instance.ts +++ b/packages/@aws-cdk/aws-rds/test/test.instance.ts @@ -28,7 +28,7 @@ export = { instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MEDIUM), multiAz: true, storageType: rds.StorageType.IO1, - masterUsername: 'syscdk', + credentials: rds.Credentials.fromUsername('syscdk'), vpc, databaseName: 'ORCL', storageEncrypted: true, @@ -220,8 +220,6 @@ export = { new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.SQL_SERVER_EE, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'syscdk', - masterUserPassword: cdk.SecretValue.plainText('tooshort'), vpc, optionGroup, parameterGroup, @@ -244,7 +242,7 @@ export = { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19, }), - masterUsername: 'syscdk', + credentials: rds.Credentials.fromUsername('syscdk'), vpc, vpcPlacement: { subnetType: ec2.SubnetType.PRIVATE, @@ -272,67 +270,106 @@ export = { test.done(); }, - 'create an instance from snapshot'(test: Test) { - // WHEN - new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { - snapshotIdentifier: 'my-snapshot', - engine: rds.DatabaseInstanceEngine.POSTGRES, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE), - vpc, - }); + 'DatabaseInstanceFromSnapshot': { + 'create an instance from snapshot'(test: Test) { + new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + snapshotIdentifier: 'my-snapshot', + engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), + instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE), + vpc, + }); - 
expect(stack).to(haveResource('AWS::RDS::DBInstance', { - DBSnapshotIdentifier: 'my-snapshot', - })); + expect(stack).to(haveResource('AWS::RDS::DBInstance', { + DBSnapshotIdentifier: 'my-snapshot', + })); - test.done(); - }, + test.done(); + }, - 'throws when trying to generate a new password from snapshot without username'(test: Test) { - // THEN - test.throws(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { - snapshotIdentifier: 'my-snapshot', - engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE), - vpc, - generateMasterUserPassword: true, - }), '`masterUsername` must be specified when `generateMasterUserPassword` is set to true.'); + 'can generate a new snapshot password'(test: Test) { + new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + snapshotIdentifier: 'my-snapshot', + engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), + vpc, + credentials: rds.SnapshotCredentials.fromGeneratedPassword('admin'), + }); - test.done(); - }, + expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + MasterUsername: ABSENT, + MasterUserPassword: { + 'Fn::Join': ['', ['{{resolve:secretsmanager:', { Ref: 'InstanceSecret478E0A47' }, ':SecretString:password::}}']], + }, + })); + expect(stack).to(haveResource('AWS::SecretsManager::Secret', { + Description: { + 'Fn::Join': ['', ['Generated by the CDK for stack: ', { Ref: 'AWS::StackName' }]], + }, + GenerateSecretString: { + ExcludeCharacters: '\"@/\\', + GenerateStringKey: 'password', + PasswordLength: 30, + SecretStringTemplate: '{"username":"admin"}', + }, + })); - 'throws when specifying user name without asking to generate a new password'(test: Test) { - // THEN - test.throws(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { - snapshotIdentifier: 'my-snapshot', - engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, 
ec2.InstanceSize.LARGE), - vpc, - masterUsername: 'superadmin', - }), 'Cannot specify `masterUsername` when `generateMasterUserPassword` is set to false.'); + test.done(); + }, - test.done(); - }, + 'throws if generating a new password without a username'(test: Test) { + test.throws(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + snapshotIdentifier: 'my-snapshot', + engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), + vpc, + credentials: { generatePassword: true }, + }), /`credentials` `username` must be specified when `generatePassword` is set to true/); - 'throws when password and generate password ar both specified'(test: Test) { - // THEN - test.throws(() => new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { - snapshotIdentifier: 'my-snapshot', - engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE), - vpc, - masterUserPassword: cdk.SecretValue.plainText('supersecret'), - generateMasterUserPassword: true, - }), 'Cannot specify `masterUserPassword` when `generateMasterUserPassword` is set to true.'); + test.done(); + }, - test.done(); + 'can set a new snapshot password from an existing SecretValue'(test: Test) { + new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + snapshotIdentifier: 'my-snapshot', + engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), + vpc, + credentials: rds.SnapshotCredentials.fromPassword(cdk.SecretValue.plainText('mysecretpassword')), + }); + + // TODO - Expect this to be broken + expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + MasterUsername: ABSENT, + MasterUserPassword: 'mysecretpassword', + })); + + test.done(); + }, + + 'can set a new snapshot password from an existing Secret'(test: Test) { + const secret = new rds.DatabaseSecret(stack, 'DBSecret', { + username: 'admin', + encryptionKey: new kms.Key(stack, 'PasswordKey'), + }); + new 
rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { + snapshotIdentifier: 'my-snapshot', + engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), + vpc, + credentials: rds.SnapshotCredentials.fromSecret(secret), + }); + + expect(stack).to(haveResourceLike('AWS::RDS::DBInstance', { + MasterUsername: ABSENT, + MasterUserPassword: { + 'Fn::Join': ['', ['{{resolve:secretsmanager:', { Ref: 'DBSecretD58955BC' }, ':SecretString:password::}}']], + }, + })); + + test.done(); + }, }, 'create a read replica in the same region - with the subnet group name'(test: Test) { const sourceInstance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, }); @@ -368,8 +405,6 @@ export = { 'on event'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, }); const fn = new lambda.Function(stack, 'Function', { @@ -432,8 +467,6 @@ export = { 'on event without target'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, }); @@ -481,8 +514,6 @@ export = { // WHEN const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, }); @@ -502,8 +533,6 @@ export = { // WHEN const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - 
masterUsername: 'admin', vpc, }); @@ -529,8 +558,6 @@ export = { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, backupRetention: cdk.Duration.seconds(0), }); @@ -575,8 +602,6 @@ export = { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, monitoringInterval: cdk.Duration.minutes(1), monitoringRole, @@ -601,8 +626,6 @@ export = { // WHEN const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, securityGroups: [securityGroup], }); @@ -635,9 +658,7 @@ export = { 'throws when trying to add rotation to an instance without secret'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.SQL_SERVER_EE, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'syscdk', - masterUserPassword: cdk.SecretValue.plainText('tooshort'), + credentials: rds.Credentials.fromUsername('syscdk', { password: cdk.SecretValue.plainText('tooshort') }), vpc, }); @@ -651,7 +672,7 @@ export = { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.SQL_SERVER_EE, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'syscdk', + credentials: rds.Credentials.fromUsername('syscdk'), vpc, }); @@ -674,8 +695,6 @@ export = { tzSupportedEngines.forEach((engine) => { test.ok(new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.C5, 
ec2.InstanceSize.SMALL), - masterUsername: 'master', timezone: 'Europe/Zurich', vpc, })); @@ -684,8 +703,6 @@ export = { tzUnsupportedEngines.forEach((engine) => { test.throws(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.C5, ec2.InstanceSize.SMALL), - masterUsername: 'master', timezone: 'Europe/Zurich', vpc, }), /timezone property can not be configured for/); @@ -716,8 +733,6 @@ export = { // WHEN new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, backupRetention: cdk.Duration.seconds(0), maxAllocatedStorage: 250, @@ -735,7 +750,6 @@ export = { 'iam authentication - off by default'(test: Test) { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, }); @@ -749,7 +763,6 @@ export = { 'createGrant - creates IAM policy and enables IAM auth'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, }); const role = new Role(stack, 'DBRole', { @@ -779,7 +792,6 @@ export = { 'createGrant - throws if IAM auth disabled'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, iamAuthentication: false, }); @@ -799,7 +811,6 @@ export = { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.sqlServerWeb({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), vpc, - masterUsername: 'admin', domain: domain, }); @@ -819,7 +830,6 @@ export = { new rds.DatabaseInstance(stack, 'Instance', { engine: 
rds.DatabaseInstanceEngine.sqlServerWeb({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), vpc, - masterUsername: 'admin', domain: domain, domainRole: role, }); @@ -840,7 +850,6 @@ export = { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.sqlServerWeb({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), vpc, - masterUsername: 'admin', domain: domain, }); @@ -892,8 +901,6 @@ export = { domainSupportedEngines.forEach((engine) => { test.ok(new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.C5, ec2.InstanceSize.SMALL), - masterUsername: 'master', domain: 'd-90670a8d36', vpc, })); @@ -904,8 +911,6 @@ export = { test.throws(() => new rds.DatabaseInstance(stack, `${engine.engineType}-db`, { engine, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.C5, ec2.InstanceSize.SMALL), - masterUsername: 'master', domain: 'd-90670a8d36', vpc, }), expectedError); @@ -918,7 +923,6 @@ export = { 'instance with all performance insights properties'(test: Test) { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, enablePerformanceInsights: true, performanceInsightRetention: rds.PerformanceInsightRetention.LONG_TERM, @@ -937,7 +941,6 @@ export = { 'setting performance insights fields enables performance insights'(test: Test) { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, performanceInsightRetention: rds.PerformanceInsightRetention.LONG_TERM, }); @@ -954,7 +957,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, enablePerformanceInsights: false, performanceInsightRetention: 
rds.PerformanceInsightRetention.DEFAULT, @@ -968,7 +970,6 @@ export = { 'reuse an existing subnet group'(test: Test) { new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), - masterUsername: 'admin', vpc, subnetGroup: rds.SubnetGroup.fromSubnetGroupName(stack, 'SubnetGroup', 'my-subnet-group'), }); @@ -984,7 +985,6 @@ export = { 'defaultChild returns the DB Instance'(test: Test) { const instance = new rds.DatabaseInstance(stack, 'Database', { engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), - masterUsername: 'admin', vpc, }); @@ -997,7 +997,6 @@ export = { 'instance with s3 import and export buckets'(test: Test) { new rds.DatabaseInstance(stack, 'DB', { engine: rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), - masterUsername: 'admin', vpc, s3ImportBuckets: [new s3.Bucket(stack, 'S3Import')], s3ExportBuckets: [new s3.Bucket(stack, 'S3Export')], @@ -1058,7 +1057,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'DBWithImportBucket', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, s3ImportBuckets: [new s3.Bucket(stack, 'S3Import')], }); @@ -1066,7 +1064,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'DBWithImportRole', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, s3ImportRole, }); @@ -1083,7 +1080,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'DBWithExportBucket', { engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, s3ExportBuckets: [new s3.Bucket(stack, 'S3Export')], }); @@ -1091,7 +1087,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'DBWithExportRole', { engine: 
rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }), - masterUsername: 'admin', vpc, s3ExportRole: s3ExportRole, }); @@ -1111,7 +1106,6 @@ export = { test.throws(() => { new rds.DatabaseInstance(stack, 'DBWithExportBucket', { engine: rds.DatabaseInstanceEngine.sqlServerEe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }), - masterUsername: 'admin', vpc, s3ImportRole, s3ExportRole, diff --git a/packages/@aws-cdk/aws-rds/test/test.proxy.ts b/packages/@aws-cdk/aws-rds/test/test.proxy.ts index 6e4de63c7aeff..db57aaf4d8b93 100644 --- a/packages/@aws-cdk/aws-rds/test/test.proxy.ts +++ b/packages/@aws-cdk/aws-rds/test/test.proxy.ts @@ -1,18 +1,25 @@ -import { ABSENT, expect, haveResource, ResourcePart } from '@aws-cdk/assert'; +import { ABSENT, expect, haveResourceLike } from '@aws-cdk/assert'; import * as ec2 from '@aws-cdk/aws-ec2'; +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import * as cdk from '@aws-cdk/core'; import { Test } from 'nodeunit'; import * as rds from '../lib'; +let stack: cdk.Stack; +let vpc: ec2.IVpc; + export = { + 'setUp'(cb: () => void) { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + + cb(); + }, + 'create a DB proxy from an instance'(test: Test) { // GIVEN - const stack = new cdk.Stack(); - const vpc = new ec2.Vpc(stack, 'VPC'); const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - masterUsername: 'admin', vpc, }); @@ -24,71 +31,59 @@ export = { }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBProxy', { - Properties: { - Auth: [ - { - AuthScheme: 'SECRETS', - IAMAuth: 'DISABLED', - SecretArn: { - Ref: 'InstanceSecretAttachment83BEE581', - }, + expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + Auth: [ + { + AuthScheme: 'SECRETS', + IAMAuth: 'DISABLED', + SecretArn: { + Ref: 'InstanceSecretAttachment83BEE581', }, - 
], - DBProxyName: 'Proxy', - EngineFamily: 'MYSQL', - RequireTLS: true, - RoleArn: { - 'Fn::GetAtt': [ - 'ProxyIAMRole2FE8AB0F', - 'Arn', - ], }, - VpcSubnetIds: [ - { - Ref: 'VPCPrivateSubnet1Subnet8BCA10E0', - }, - { - Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', - }, + ], + DBProxyName: 'Proxy', + EngineFamily: 'MYSQL', + RequireTLS: true, + RoleArn: { + 'Fn::GetAtt': [ + 'ProxyIAMRole2FE8AB0F', + 'Arn', ], }, - }, ResourcePart.CompleteDefinition)); + VpcSubnetIds: [ + { + Ref: 'VPCPrivateSubnet1Subnet8BCA10E0', + }, + { + Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', + }, + ], + })); // THEN - expect(stack).to(haveResource('AWS::RDS::DBProxyTargetGroup', { - Properties: { - DBProxyName: { - Ref: 'ProxyCB0DFB71', - }, - ConnectionPoolConfigurationInfo: {}, - DBInstanceIdentifiers: [ - { - Ref: 'InstanceC1063A87', - }, - ], - TargetGroupName: 'default', + expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + DBProxyName: { + Ref: 'ProxyCB0DFB71', }, - }, ResourcePart.CompleteDefinition)); + ConnectionPoolConfigurationInfo: {}, + DBInstanceIdentifiers: [ + { + Ref: 'InstanceC1063A87', + }, + ], + TargetGroupName: 'default', + })); test.done(); }, 'create a DB proxy from a cluster'(test: Test) { // GIVEN - const stack = new cdk.Stack(); - const vpc = new ec2.Vpc(stack, 'VPC'); const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.auroraPostgres({ version: rds.AuroraPostgresEngineVersion.VER_10_7, }), - masterUser: { - username: 'admin', - }, - instanceProps: { - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - vpc, - }, + instanceProps: { vpc }, }); // WHEN @@ -99,110 +94,132 @@ export = { }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBProxy', { - Properties: { - Auth: [ - { - AuthScheme: 'SECRETS', - IAMAuth: 'DISABLED', - SecretArn: { - Ref: 'DatabaseSecretAttachmentE5D1B020', - }, + expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + Auth: [ + { + AuthScheme: 
'SECRETS', + IAMAuth: 'DISABLED', + SecretArn: { + Ref: 'DatabaseSecretAttachmentE5D1B020', }, - ], - DBProxyName: 'Proxy', - EngineFamily: 'POSTGRESQL', - RequireTLS: true, - RoleArn: { - 'Fn::GetAtt': [ - 'ProxyIAMRole2FE8AB0F', - 'Arn', - ], }, - VpcSubnetIds: [ - { - Ref: 'VPCPrivateSubnet1Subnet8BCA10E0', - }, - { - Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', - }, + ], + DBProxyName: 'Proxy', + EngineFamily: 'POSTGRESQL', + RequireTLS: true, + RoleArn: { + 'Fn::GetAtt': [ + 'ProxyIAMRole2FE8AB0F', + 'Arn', ], }, - }, ResourcePart.CompleteDefinition)); + VpcSubnetIds: [ + { + Ref: 'VPCPrivateSubnet1Subnet8BCA10E0', + }, + { + Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', + }, + ], + })); // THEN - expect(stack).to(haveResource('AWS::RDS::DBProxyTargetGroup', { - Properties: { - DBProxyName: { - Ref: 'ProxyCB0DFB71', - }, - ConnectionPoolConfigurationInfo: {}, - DBClusterIdentifiers: [ - { - Ref: 'DatabaseB269D8BB', - }, - ], - TargetGroupName: 'default', + expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + DBProxyName: { + Ref: 'ProxyCB0DFB71', }, - }, ResourcePart.CompleteDefinition)); + ConnectionPoolConfigurationInfo: {}, + DBClusterIdentifiers: [ + { + Ref: 'DatabaseB269D8BB', + }, + ], + DBInstanceIdentifiers: ABSENT, + TargetGroupName: 'default', + })); test.done(); }, - 'Cannot specify both dbInstanceIdentifiers and dbClusterIdentifiers'(test: Test) { + 'One or more secrets are required.'(test: Test) { // GIVEN - const stack = new cdk.Stack(); - const vpc = new ec2.Vpc(stack, 'VPC'); const cluster = new rds.DatabaseCluster(stack, 'Database', { - engine: rds.DatabaseClusterEngine.auroraPostgres({ - version: rds.AuroraPostgresEngineVersion.VER_10_7, - }), - masterUser: { - username: 'admin', - }, - instanceProps: { - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), - vpc, - }, + engine: rds.DatabaseClusterEngine.auroraPostgres({ version: rds.AuroraPostgresEngineVersion.VER_10_7 }), + instanceProps: { vpc }, 
}); // WHEN - test.doesNotThrow(() => { + test.throws(() => { new rds.DatabaseProxy(stack, 'Proxy', { proxyTarget: rds.ProxyTarget.fromCluster(cluster), - secrets: [cluster.secret!], + secrets: [], // No secret vpc, }); - }, /Cannot specify both dbInstanceIdentifiers and dbClusterIdentifiers/); - - expect(stack).to(haveResource('AWS::RDS::DBProxyTargetGroup', { - DBInstanceIdentifiers: ABSENT, - }, ResourcePart.Properties)); + }, 'One or more secrets are required.'); test.done(); }, - 'One or more secrets are required.'(test: Test) { - // GIVEN - const stack = new cdk.Stack(); - const vpc = new ec2.Vpc(stack, 'VPC'); - const cluster = new rds.DatabaseCluster(stack, 'Database', { - engine: rds.DatabaseClusterEngine.auroraPostgres({ version: rds.AuroraPostgresEngineVersion.VER_10_7 }), - masterUser: { username: 'admin' }, - instanceProps: { - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), + 'fails when trying to create a proxy for a target without an engine'(test: Test) { + const importedCluster = rds.DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Cluster', { + clusterIdentifier: 'my-cluster', + }); + + test.throws(() => { + new rds.DatabaseProxy(stack, 'Proxy', { + proxyTarget: rds.ProxyTarget.fromCluster(importedCluster), vpc, - }, + secrets: [new secretsmanager.Secret(stack, 'Secret')], + }); + }, /Could not determine engine for proxy target 'Default\/Cluster'\. 
Please provide it explicitly when importing the resource/); + + test.done(); + }, + + "fails when trying to create a proxy for a target with an engine that doesn't have engineFamily"(test: Test) { + const importedInstance = rds.DatabaseInstance.fromDatabaseInstanceAttributes(stack, 'Cluster', { + instanceIdentifier: 'my-instance', + instanceEndpointAddress: 'instance-address', + port: 5432, + securityGroups: [], + engine: rds.DatabaseInstanceEngine.mariaDb({ + version: rds.MariaDbEngineVersion.VER_10_0_24, + }), }); - // WHEN test.throws(() => { new rds.DatabaseProxy(stack, 'Proxy', { - proxyTarget: rds.ProxyTarget.fromCluster(cluster), - secrets: [], // No secret + proxyTarget: rds.ProxyTarget.fromInstance(importedInstance), vpc, + secrets: [new secretsmanager.Secret(stack, 'Secret')], }); - }, 'One or more secrets are required.'); + }, /Engine 'mariadb-10\.0\.24' does not support proxies/); + + test.done(); + }, + + 'correctly creates a proxy for an imported Cluster if its engine is known'(test: Test) { + const importedCluster = rds.DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Cluster', { + clusterIdentifier: 'my-cluster', + engine: rds.DatabaseClusterEngine.auroraPostgres({ + version: rds.AuroraPostgresEngineVersion.VER_9_6_11, + }), + }); + + new rds.DatabaseProxy(stack, 'Proxy', { + proxyTarget: rds.ProxyTarget.fromCluster(importedCluster), + vpc, + secrets: [new secretsmanager.Secret(stack, 'Secret')], + }); + + expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + EngineFamily: 'POSTGRESQL', + })); + expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + DBClusterIdentifiers: [ + 'my-cluster', + ], + })); test.done(); }, diff --git a/packages/@aws-cdk/aws-s3/lib/bucket-policy.ts b/packages/@aws-cdk/aws-s3/lib/bucket-policy.ts index 10f35b5c40e3d..395ff706b5fdc 100644 --- a/packages/@aws-cdk/aws-s3/lib/bucket-policy.ts +++ b/packages/@aws-cdk/aws-s3/lib/bucket-policy.ts @@ -1,5 +1,6 @@ import { PolicyDocument } from 
'@aws-cdk/aws-iam'; -import { Construct, RemovalPolicy, Resource } from '@aws-cdk/core'; +import { RemovalPolicy, Resource } from '@aws-cdk/core'; +import { Construct } from 'constructs'; import { IBucket } from './bucket'; import { CfnBucketPolicy } from './s3.generated'; diff --git a/packages/@aws-cdk/aws-s3/lib/bucket.ts b/packages/@aws-cdk/aws-s3/lib/bucket.ts index d44f989ba267b..99ecca8402211 100644 --- a/packages/@aws-cdk/aws-s3/lib/bucket.ts +++ b/packages/@aws-cdk/aws-s3/lib/bucket.ts @@ -2,7 +2,8 @@ import { EOL } from 'os'; import * as events from '@aws-cdk/aws-events'; import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; -import { Construct, Fn, IResource, Lazy, RemovalPolicy, Resource, Stack, Token } from '@aws-cdk/core'; +import { Fn, IResource, Lazy, RemovalPolicy, Resource, Stack, Token } from '@aws-cdk/core'; +import { Construct } from 'constructs'; import { BucketPolicy } from './bucket-policy'; import { IBucketNotificationDestination } from './destination'; import { BucketNotifications } from './notifications-resource'; diff --git a/packages/@aws-cdk/aws-s3/lib/util.ts b/packages/@aws-cdk/aws-s3/lib/util.ts index 643fdc7472e1a..1c45ed899be4b 100644 --- a/packages/@aws-cdk/aws-s3/lib/util.ts +++ b/packages/@aws-cdk/aws-s3/lib/util.ts @@ -1,7 +1,8 @@ import * as cdk from '@aws-cdk/core'; +import { IConstruct } from 'constructs'; import { BucketAttributes } from './bucket'; -export function parseBucketArn(construct: cdk.IConstruct, props: BucketAttributes): string { +export function parseBucketArn(construct: IConstruct, props: BucketAttributes): string { // if we have an explicit bucket ARN, use it. if (props.bucketArn) { @@ -22,7 +23,7 @@ export function parseBucketArn(construct: cdk.IConstruct, props: BucketAttribute throw new Error('Cannot determine bucket ARN. 
At least `bucketArn` or `bucketName` is needed'); } -export function parseBucketName(construct: cdk.IConstruct, props: BucketAttributes): string | undefined { +export function parseBucketName(construct: IConstruct, props: BucketAttributes): string | undefined { // if we have an explicit bucket name, use it. if (props.bucketName) { diff --git a/packages/@aws-cdk/aws-s3/package.json b/packages/@aws-cdk/aws-s3/package.json index e9673a87c4893..83150e561020d 100644 --- a/packages/@aws-cdk/aws-s3/package.json +++ b/packages/@aws-cdk/aws-s3/package.json @@ -48,7 +48,10 @@ "compat": "cdk-compat" }, "cdk-build": { - "cloudformation": "AWS::S3" + "cloudformation": "AWS::S3", + "env": { + "AWSLINT_BASE_CONSTRUCT": "true" + } }, "keywords": [ "aws", diff --git a/packages/@aws-cdk/aws-s3/test/test.aspect.ts b/packages/@aws-cdk/aws-s3/test/test.aspect.ts index a1a94a44b0f1d..df020a5ca4698 100644 --- a/packages/@aws-cdk/aws-s3/test/test.aspect.ts +++ b/packages/@aws-cdk/aws-s3/test/test.aspect.ts @@ -1,6 +1,7 @@ // import { expect, haveResource, haveResourceLike, SynthUtils } from '@aws-cdk/assert'; import { SynthUtils } from '@aws-cdk/assert'; import * as cdk from '@aws-cdk/core'; +import { IConstruct } from 'constructs'; import { Test } from 'nodeunit'; import * as s3 from '../lib'; @@ -40,7 +41,7 @@ export = { }; class BucketVersioningChecker implements cdk.IAspect { - public visit(node: cdk.IConstruct): void { + public visit(node: IConstruct): void { if (node instanceof s3.CfnBucket) { if (!node.versioningConfiguration || (!cdk.Tokenization.isResolvable(node.versioningConfiguration) && node.versioningConfiguration.status !== 'Enabled')) { diff --git a/packages/@aws-cdk/core/lib/asset-staging.ts b/packages/@aws-cdk/core/lib/asset-staging.ts index dc49750a46f2f..e5878c2a31365 100644 --- a/packages/@aws-cdk/core/lib/asset-staging.ts +++ b/packages/@aws-cdk/core/lib/asset-staging.ts @@ -113,7 +113,7 @@ export class AssetStaging extends Construct { this.relativePath = 
renderAssetFilename(this.assetHash); this.stagedPath = this.relativePath; } else { // Bundling is skipped - this.assetHash = props.assetHashType === AssetHashType.BUNDLE + this.assetHash = props.assetHashType === AssetHashType.BUNDLE || props.assetHashType === AssetHashType.OUTPUT ? this.calculateHash(AssetHashType.CUSTOM, this.node.path) // Use node path as dummy hash because we're not bundling : this.calculateHash(hashType, props.assetHash); this.stagedPath = this.sourcePath; @@ -295,8 +295,9 @@ export class AssetStaging extends Construct { case AssetHashType.SOURCE: return FileSystem.fingerprint(this.sourcePath, this.fingerprintOptions); case AssetHashType.BUNDLE: + case AssetHashType.OUTPUT: if (!this.bundleDir) { - throw new Error('Cannot use `AssetHashType.BUNDLE` when `bundling` is not specified.'); + throw new Error(`Cannot use \`${hashType}\` hash type when \`bundling\` is not specified.`); } return FileSystem.fingerprint(this.bundleDir, this.fingerprintOptions); default: diff --git a/packages/@aws-cdk/core/lib/assets.ts b/packages/@aws-cdk/core/lib/assets.ts index 012b7da4d489c..17d3b9d93e53f 100644 --- a/packages/@aws-cdk/core/lib/assets.ts +++ b/packages/@aws-cdk/core/lib/assets.ts @@ -60,18 +60,35 @@ export interface AssetOptions { /** * The type of asset hash + * + * NOTE: the hash is used in order to identify a specific revision of the asset, and + * used for optimizing and caching deployment activities related to this asset such as + * packaging, uploading to Amazon S3, etc. */ export enum AssetHashType { /** * Based on the content of the source path + * + * When bundling, use `SOURCE` when the content of the bundling output is not + * stable across repeated bundling operations. 
*/ SOURCE = 'source', /** * Based on the content of the bundled path + * + * @deprecated use `OUTPUT` instead */ BUNDLE = 'bundle', + /** + * Based on the content of the bundling output + * + * Use `OUTPUT` when the source of the asset is a top level folder containing + * code and/or dependencies that are not directly linked to the asset. + */ + OUTPUT = 'output', + /** * Use a custom hash */ diff --git a/packages/@aws-cdk/core/lib/cfn-fn.ts b/packages/@aws-cdk/core/lib/cfn-fn.ts index d2967779b08b3..b05d41968a909 100644 --- a/packages/@aws-cdk/core/lib/cfn-fn.ts +++ b/packages/@aws-cdk/core/lib/cfn-fn.ts @@ -134,6 +134,15 @@ export class Fn { return Token.asList(new FnCidr(ipBlock, count, sizeMask)); } + /** + * Given an url, parse the domain name + * @param url the url to parse + */ + public static parseDomainName(url: string): string { + const noHttps = Fn.select(1, Fn.split('//', url)); + return Fn.select(0, Fn.split('/', noHttps)); + } + /** * The intrinsic function ``Fn::GetAZs`` returns an array that lists * Availability Zones for a specified region. 
Because customers have access to diff --git a/packages/@aws-cdk/core/lib/construct-compat.ts b/packages/@aws-cdk/core/lib/construct-compat.ts index 934b726d92199..d781e74bb0396 100644 --- a/packages/@aws-cdk/core/lib/construct-compat.ts +++ b/packages/@aws-cdk/core/lib/construct-compat.ts @@ -64,7 +64,7 @@ export class Construct extends constructs.Construct implements IConstruct { */ public readonly node: ConstructNode; - constructor(scope: Construct, id: string) { + constructor(scope: constructs.Construct, id: string) { super(scope, id, { nodeFactory: { createNode: (h: constructs.Construct, s: constructs.IConstruct, i: string) => diff --git a/packages/@aws-cdk/core/lib/private/physical-name-generator.ts b/packages/@aws-cdk/core/lib/private/physical-name-generator.ts index dbd0a2c8af772..7c9fae2ab15da 100644 --- a/packages/@aws-cdk/core/lib/private/physical-name-generator.ts +++ b/packages/@aws-cdk/core/lib/private/physical-name-generator.ts @@ -1,4 +1,5 @@ import * as crypto from 'crypto'; +import { Node } from 'constructs'; import { IResolvable, IResolveContext } from '../resolvable'; import { IResource } from '../resource'; import { Stack } from '../stack'; @@ -8,16 +9,16 @@ import { TokenMap } from './token-map'; export function generatePhysicalName(resource: IResource): string { const stack = Stack.of(resource); const stackPart = new PrefixNamePart(stack.stackName, 25); - const idPart = new SuffixNamePart(resource.node.uniqueId, 24); + const idPart = new SuffixNamePart(Node.of(resource).uniqueId, 24); const region: string = stack.region; if (Token.isUnresolved(region) || !region) { - throw new Error(`Cannot generate a physical name for ${resource.node.path}, because the region is un-resolved or missing`); + throw new Error(`Cannot generate a physical name for ${Node.of(resource).path}, because the region is un-resolved or missing`); } const account: string = stack.account; if (Token.isUnresolved(account) || !account) { - throw new Error(`Cannot generate a 
physical name for ${resource.node.path}, because the account is un-resolved or missing`); + throw new Error(`Cannot generate a physical name for ${Node.of(resource).path}, because the account is un-resolved or missing`); } const parts = [stackPart, idPart] diff --git a/packages/@aws-cdk/core/lib/resource.ts b/packages/@aws-cdk/core/lib/resource.ts index 1eb1da50f7335..c437ef98d5d42 100644 --- a/packages/@aws-cdk/core/lib/resource.ts +++ b/packages/@aws-cdk/core/lib/resource.ts @@ -1,5 +1,6 @@ +import { Construct } from 'constructs'; import { ArnComponents } from './arn'; -import { Construct, IConstruct } from './construct-compat'; +import { IConstruct, Construct as CoreConstruct } from './construct-compat'; import { Lazy } from './lazy'; import { generatePhysicalName, isGeneratedWhenNeededMarker } from './private/physical-name-generator'; import { IResolveContext } from './resolvable'; @@ -86,7 +87,7 @@ export interface ResourceProps { /** * A construct which represents an AWS resource. 
*/ -export abstract class Resource extends Construct implements IResource { +export abstract class Resource extends CoreConstruct implements IResource { public readonly stack: Stack; public readonly env: ResourceEnvironment; diff --git a/packages/@aws-cdk/core/lib/stack.ts b/packages/@aws-cdk/core/lib/stack.ts index 2aad06f44c721..68b82dadfbb7d 100644 --- a/packages/@aws-cdk/core/lib/stack.ts +++ b/packages/@aws-cdk/core/lib/stack.ts @@ -2,6 +2,7 @@ import * as fs from 'fs'; import * as path from 'path'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import * as cxapi from '@aws-cdk/cx-api'; +import { IConstruct, Node } from 'constructs'; import { Annotations } from './annotations'; import { App } from './app'; import { Arn, ArnComponents } from './arn'; @@ -10,7 +11,7 @@ import { CfnElement } from './cfn-element'; import { Fn } from './cfn-fn'; import { Aws, ScopedAws } from './cfn-pseudo'; import { CfnResource, TagType } from './cfn-resource'; -import { Construct, IConstruct, ISynthesisSession } from './construct-compat'; +import { Construct, ISynthesisSession } from './construct-compat'; import { ContextProvider } from './context-provider'; import { Environment } from './environment'; import { FeatureFlags } from './feature-flags'; @@ -169,11 +170,12 @@ export class Stack extends Construct implements ITaggable { return c; } - if (Stage.isStage(c) || !c.node.scope) { - throw new Error(`${construct.constructor?.name ?? 'Construct'} at '${construct.node.path}' should be created in the scope of a Stack, but no Stack found`); + const _scope = Node.of(c).scope; + if (Stage.isStage(c) || !_scope) { + throw new Error(`${construct.constructor?.name ?? 
'Construct'} at '${Node.of(construct).path}' should be created in the scope of a Stack, but no Stack found`); } - return _lookup(c.node.scope); + return _lookup(_scope); } } @@ -934,7 +936,7 @@ export class Stack extends Construct implements ITaggable { */ private generateStackId(container: IConstruct | undefined) { const rootPath = rootPathTo(this, container); - const ids = rootPath.map(c => c.node.id); + const ids = rootPath.map(c => Node.of(c).id); // In unit tests our Stack (which is the only component) may not have an // id, so in that case just pretend it's "Stack". @@ -1051,7 +1053,7 @@ function cfnElements(node: IConstruct, into: CfnElement[] = []): CfnElement[] { into.push(node); } - for (const child of node.node.children) { + for (const child of Node.of(node).children) { // Don't recurse into a substack if (Stack.isStack(child)) { continue; } @@ -1067,7 +1069,7 @@ function cfnElements(node: IConstruct, into: CfnElement[] = []): CfnElement[] { * If no ancestor is given or the ancestor is not found, return the entire root path. 
*/ export function rootPathTo(construct: IConstruct, ancestor?: IConstruct): IConstruct[] { - const scopes = construct.node.scopes; + const scopes = Node.of(construct).scopes; for (let i = scopes.length - 2; i >= 0; i--) { if (scopes[i] === ancestor) { return scopes.slice(i + 1); diff --git a/packages/@aws-cdk/core/lib/stage.ts b/packages/@aws-cdk/core/lib/stage.ts index b6f6a1f985d19..94296a6d167ee 100644 --- a/packages/@aws-cdk/core/lib/stage.ts +++ b/packages/@aws-cdk/core/lib/stage.ts @@ -1,5 +1,6 @@ import * as cxapi from '@aws-cdk/cx-api'; -import { Construct, IConstruct } from './construct-compat'; +import { IConstruct, Node } from 'constructs'; +import { Construct } from './construct-compat'; import { Environment } from './environment'; import { collectRuntimeInformation } from './private/runtime-info'; import { synthesize } from './private/synthesis'; @@ -73,7 +74,7 @@ export class Stage extends Construct { * @experimental */ public static of(construct: IConstruct): Stage | undefined { - return construct.node.scopes.reverse().slice(1).find(Stage.isStage); + return Node.of(construct).scopes.reverse().slice(1).find(Stage.isStage); } /** diff --git a/packages/@aws-cdk/core/package.json b/packages/@aws-cdk/core/package.json index 3d64a2d8f03fd..ac384aac42fb1 100644 --- a/packages/@aws-cdk/core/package.json +++ b/packages/@aws-cdk/core/package.json @@ -37,6 +37,7 @@ "exclude": [ "props-physical-name:@aws-cdk/aws-cloudformation.CustomResourceProps", "construct-ctor:@aws-cdk/core.App.", + "construct-ctor:@aws-cdk/core.Construct..params[0]", "props-no-cfn-types:@aws-cdk/core.CfnOutputProps.condition", "duration-prop-type:@aws-cdk/core.ResourceSignal.timeout", "props-no-any:@aws-cdk/core.CfnParameterProps.default", diff --git a/packages/@aws-cdk/core/test/test.fn.ts b/packages/@aws-cdk/core/test/test.fn.ts index 8b5c7d7d68348..ec3f537ba0945 100644 --- a/packages/@aws-cdk/core/test/test.fn.ts +++ b/packages/@aws-cdk/core/test/test.fn.ts @@ -35,6 +35,24 @@ export = 
nodeunit.testCase({ test.done(); }, }, + 'FnParseDomainName': { + 'parse domain name from resolved url'(test: nodeunit.Test) { + test.deepEqual(Fn.parseDomainName('https://test.com/'), 'test.com'); + test.done(); + }, + 'parse domain name on token'(test: nodeunit.Test) { + const stack = new Stack(); + const url = Fn.join('//', [ + 'https:', + Fn.join('/', [ + 'test.com', + 'graphql', + ]), + ]); + test.deepEqual(Fn.parseDomainName(stack.resolve(url)), 'test.com'); + test.done(); + }, + }, 'FnJoin': { 'rejects empty list of arguments to join'(test: nodeunit.Test) { test.throws(() => Fn.join('.', [])); diff --git a/packages/@aws-cdk/core/test/test.staging.ts b/packages/@aws-cdk/core/test/test.staging.ts index ff7279172bb00..e652c884935f4 100644 --- a/packages/@aws-cdk/core/test/test.staging.ts +++ b/packages/@aws-cdk/core/test/test.staging.ts @@ -425,6 +425,28 @@ export = { test.done(); }, + 'bundling with OUTPUT asset hash type'(test: Test) { + // GIVEN + const app = new App(); + const stack = new Stack(app, 'stack'); + const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); + + // WHEN + const asset = new AssetStaging(stack, 'Asset', { + sourcePath: directory, + bundling: { + image: BundlingDockerImage.fromRegistry('alpine'), + command: [DockerStubCommand.SUCCESS], + }, + assetHashType: AssetHashType.OUTPUT, + }); + + // THEN + test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + + test.done(); + }, + 'custom hash'(test: Test) { // GIVEN const app = new App(); @@ -474,7 +496,23 @@ export = { test.throws(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, assetHashType: AssetHashType.BUNDLE, - }), /Cannot use `AssetHashType.BUNDLE` when `bundling` is not specified/); + }), /Cannot use `bundle` hash type when `bundling` is not specified/); + test.equal(fs.existsSync(STUB_INPUT_FILE), false); + + test.done(); + }, + + 'throws with OUTPUT hash type and no bundling'(test: Test) { + // GIVEN + const app 
= new App(); + const stack = new Stack(app, 'stack'); + const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); + + // THEN + test.throws(() => new AssetStaging(stack, 'Asset', { + sourcePath: directory, + assetHashType: AssetHashType.OUTPUT, + }), /Cannot use `output` hash type when `bundling` is not specified/); test.equal(fs.existsSync(STUB_INPUT_FILE), false); test.done(); diff --git a/packages/@aws-cdk/pipelines/README.md b/packages/@aws-cdk/pipelines/README.md index 759554c66fd47..350c99386d429 100644 --- a/packages/@aws-cdk/pipelines/README.md +++ b/packages/@aws-cdk/pipelines/README.md @@ -135,6 +135,9 @@ class MyPipelineStack extends Stack { sourceArtifact, cloudAssemblyArtifact, + // Optionally specify a VPC in which the action runs + vpc: new ec2.Vpc(this, 'NpmSynthVpc'), + // Use this if you need a build step (if you're not using ts-node // or if you have TypeScript Lambdas that need to be compiled). buildCommand: 'npm run build', diff --git a/packages/@aws-cdk/pipelines/lib/actions/publish-assets-action.ts b/packages/@aws-cdk/pipelines/lib/actions/publish-assets-action.ts index a16862c20d014..ca2bb7221ce87 100644 --- a/packages/@aws-cdk/pipelines/lib/actions/publish-assets-action.ts +++ b/packages/@aws-cdk/pipelines/lib/actions/publish-assets-action.ts @@ -1,6 +1,7 @@ import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions'; +import * as ec2 from '@aws-cdk/aws-ec2'; import * as events from '@aws-cdk/aws-events'; import * as iam from '@aws-cdk/aws-iam'; import { Construct, Lazy } from '@aws-cdk/core'; @@ -59,6 +60,22 @@ export interface PublishAssetsActionProps { * @default - Automatically generated */ readonly role?: iam.IRole; + + /** + * The VPC where to execute the PublishAssetsAction. + * + * @default - No VPC + */ + readonly vpc?: ec2.IVpc; + + /** + * Which subnets to use. 
+ * + * Only used if 'vpc' is supplied. + * + * @default - All private subnets. + */ + readonly subnetSelection?: ec2.SubnetSelection; } /** @@ -85,6 +102,8 @@ export class PublishAssetsAction extends Construct implements codepipeline.IActi buildImage: codebuild.LinuxBuildImage.STANDARD_4_0, privileged: (props.assetType === AssetType.DOCKER_IMAGE) ? true : undefined, }, + vpc: props.vpc, + subnetSelection: props.subnetSelection, buildSpec: codebuild.BuildSpec.fromObject({ version: '0.2', phases: { diff --git a/packages/@aws-cdk/pipelines/lib/pipeline.ts b/packages/@aws-cdk/pipelines/lib/pipeline.ts index c7ff6b7985888..9643b413eef08 100644 --- a/packages/@aws-cdk/pipelines/lib/pipeline.ts +++ b/packages/@aws-cdk/pipelines/lib/pipeline.ts @@ -1,5 +1,6 @@ import * as path from 'path'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; +import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import { Annotations, App, CfnOutput, Construct, PhysicalName, Stack, Stage, Aspects } from '@aws-cdk/core'; import { AssetType, DeployCdkStackAction, PublishAssetsAction, UpdatePipelineAction } from './actions'; @@ -63,6 +64,22 @@ export interface CdkPipelineProps { * @default - Latest version */ readonly cdkCliVersion?: string; + + /** + * The VPC where to execute the CdkPipeline actions. + * + * @default - No VPC + */ + readonly vpc?: ec2.IVpc; + + /** + * Which subnets to use. + * + * Only used if 'vpc' is supplied. + * + * @default - All private subnets. 
+ */ + readonly subnetSelection?: ec2.SubnetSelection; } /** @@ -147,6 +164,8 @@ export class CdkPipeline extends Construct { cdkCliVersion: props.cdkCliVersion, pipeline: this._pipeline, projectName: maybeSuffix(props.pipelineName, '-publish'), + vpc: props.vpc, + subnetSelection: props.subnetSelection, }); Aspects.of(this).add({ visit: () => this._assets.removeAssetsStageIfEmpty() }); @@ -294,6 +313,8 @@ interface AssetPublishingProps { readonly pipeline: codepipeline.Pipeline; readonly cdkCliVersion?: string; readonly projectName?: string; + readonly vpc?: ec2.IVpc; + readonly subnetSelection?: ec2.SubnetSelection; } /** @@ -361,6 +382,8 @@ class AssetPublishing extends Construct { cdkCliVersion: this.props.cdkCliVersion, assetType: command.assetType, role: this.assetRoles[command.assetType], + vpc: this.props.vpc, + subnetSelection: this.props.subnetSelection, }); this.stage.addAction(action); } diff --git a/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts b/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts index 88d36ae81d3eb..785ef9cc46bc7 100644 --- a/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts +++ b/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts @@ -3,6 +3,7 @@ import * as path from 'path'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions'; +import * as ec2 from '@aws-cdk/aws-ec2'; import * as events from '@aws-cdk/aws-events'; import * as iam from '@aws-cdk/aws-iam'; import { Construct, Stack } from '@aws-cdk/core'; @@ -88,6 +89,22 @@ export interface SimpleSynthOptions { * @default - No policy statements added to CodeBuild Project Role */ readonly rolePolicyStatements?: iam.PolicyStatement[]; + + /** + * The VPC where to execute the SimpleSynth. + * + * @default - No VPC + */ + readonly vpc?: ec2.IVpc; + + /** + * Which subnets to use. 
+ * + * Only used if 'vpc' is supplied. + * + * @default - All private subnets. + */ + readonly subnetSelection?: ec2.SubnetSelection; } /** @@ -186,6 +203,8 @@ export class SimpleSynthAction implements codepipeline.IAction, iam.IGrantable { ...options, installCommand: options.installCommand ?? 'npm ci', synthCommand: options.synthCommand ?? 'npx cdk synth', + vpc: options.vpc, + subnetSelection: options.subnetSelection, }); } @@ -201,6 +220,8 @@ export class SimpleSynthAction implements codepipeline.IAction, iam.IGrantable { ...options, installCommand: options.installCommand ?? 'yarn install --frozen-lockfile', synthCommand: options.synthCommand ?? 'npx cdk synth', + vpc: options.vpc, + subnetSelection: options.subnetSelection, }); } @@ -314,6 +335,8 @@ export class SimpleSynthAction implements codepipeline.IAction, iam.IGrantable { const project = new codebuild.PipelineProject(scope, 'CdkBuildProject', { projectName: this.props.projectName, environment, + vpc: this.props.vpc, + subnetSelection: this.props.subnetSelection, buildSpec, environmentVariables, }); diff --git a/packages/@aws-cdk/pipelines/test/builds.test.ts b/packages/@aws-cdk/pipelines/test/builds.test.ts index 41c1187105700..fdb9171aedc73 100644 --- a/packages/@aws-cdk/pipelines/test/builds.test.ts +++ b/packages/@aws-cdk/pipelines/test/builds.test.ts @@ -2,6 +2,7 @@ import { arrayWith, deepObjectLike, encodedJson, objectLike, Capture } from '@aw import '@aws-cdk/assert/jest'; import * as cbuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; +import * as ec2 from '@aws-cdk/aws-ec2'; import * as s3 from '@aws-cdk/aws-s3'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../lib'; @@ -218,6 +219,88 @@ test('Standard (NPM) synth can output additional artifacts', () => { }); }); +test('Standard (NPM) synth can run in a VPC', () => { + // WHEN + new TestGitHubNpmPipeline(pipelineStack, 'Cdk', { + sourceArtifact, + cloudAssemblyArtifact, + synthAction: 
cdkp.SimpleSynthAction.standardNpmSynth({ + vpc: new ec2.Vpc(pipelineStack, 'NpmSynthTestVpc'), + sourceArtifact, + cloudAssemblyArtifact, + }), + }); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + VpcConfig: { + SecurityGroupIds: [ + { + 'Fn::GetAtt': [ + 'CdkPipelineBuildSynthCdkBuildProjectSecurityGroupEA44D7C2', + 'GroupId', + ], + }, + ], + Subnets: [ + { + Ref: 'NpmSynthTestVpcPrivateSubnet1Subnet81E3AA56', + }, + { + Ref: 'NpmSynthTestVpcPrivateSubnet2SubnetC1CA3EF0', + }, + { + Ref: 'NpmSynthTestVpcPrivateSubnet3SubnetA04163EE', + }, + ], + VpcId: { + Ref: 'NpmSynthTestVpc5E703F25', + }, + }, + }); +}); + +test('Standard (Yarn) synth can run in a VPC', () => { + // WHEN + new TestGitHubNpmPipeline(pipelineStack, 'Cdk', { + sourceArtifact, + cloudAssemblyArtifact, + synthAction: cdkp.SimpleSynthAction.standardYarnSynth({ + vpc: new ec2.Vpc(pipelineStack, 'YarnSynthTestVpc'), + sourceArtifact, + cloudAssemblyArtifact, + }), + }); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + VpcConfig: { + SecurityGroupIds: [ + { + 'Fn::GetAtt': [ + 'CdkPipelineBuildSynthCdkBuildProjectSecurityGroupEA44D7C2', + 'GroupId', + ], + }, + ], + Subnets: [ + { + Ref: 'YarnSynthTestVpcPrivateSubnet1Subnet2805334B', + }, + { + Ref: 'YarnSynthTestVpcPrivateSubnet2SubnetDCFBF596', + }, + { + Ref: 'YarnSynthTestVpcPrivateSubnet3SubnetE11E0C86', + }, + ], + VpcId: { + Ref: 'YarnSynthTestVpc5F654735', + }, + }, + }); +}); + test('Pipeline action contains a hash that changes as the buildspec changes', () => { const hash1 = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ sourceArtifact: sa, diff --git a/packages/@aws-cdk/pipelines/test/pipeline-assets.test.ts b/packages/@aws-cdk/pipelines/test/pipeline-assets.test.ts index 499c0834e16b7..13ae9fcceda87 100644 --- a/packages/@aws-cdk/pipelines/test/pipeline-assets.test.ts +++ b/packages/@aws-cdk/pipelines/test/pipeline-assets.test.ts @@ -1,9 
+1,9 @@ +import * as path from 'path'; import { arrayWith, deepObjectLike, encodedJson, notMatching, objectLike, stringLike } from '@aws-cdk/assert'; import '@aws-cdk/assert/jest'; import * as ecr_assets from '@aws-cdk/aws-ecr-assets'; import * as s3_assets from '@aws-cdk/aws-s3-assets'; import { Construct, Stack, Stage, StageProps } from '@aws-cdk/core'; -import * as path from 'path'; import * as cdkp from '../lib'; import { BucketStack, PIPELINE_ENV, TestApp, TestGitHubNpmPipeline } from './testutil'; @@ -156,6 +156,39 @@ test('docker image asset publishers use privilegedmode, have right AssumeRole', }); }); +test('docker image asset can use a VPC', () => { + // WHEN + pipeline.addApplicationStage(new DockerAssetApp(app, 'DockerAssetApp')); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + VpcConfig: objectLike({ + SecurityGroupIds: [ + { + 'Fn::GetAtt': [ + 'CdkAssetsDockerAsset1SecurityGroup078F5C66', + 'GroupId', + ], + }, + ], + Subnets: [ + { + Ref: 'TestVpcPrivateSubnet1SubnetCC65D771', + }, + { + Ref: 'TestVpcPrivateSubnet2SubnetDE0C64A2', + }, + { + Ref: 'TestVpcPrivateSubnet3Subnet2311D32F', + }, + ], + VpcId: { + Ref: 'TestVpcE77CE678', + }, + }), + }); +}); + test('can control fix/CLI version used in pipeline selfupdate', () => { // WHEN const stack2 = new Stack(app, 'Stack2', { env: PIPELINE_ENV }); diff --git a/packages/@aws-cdk/pipelines/test/testutil.ts b/packages/@aws-cdk/pipelines/test/testutil.ts index beb6e0180fa87..821b795105365 100644 --- a/packages/@aws-cdk/pipelines/test/testutil.ts +++ b/packages/@aws-cdk/pipelines/test/testutil.ts @@ -2,6 +2,7 @@ import * as fs from 'fs'; import * as path from 'path'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions'; +import * as ec2 from '@aws-cdk/aws-ec2'; import * as s3 from '@aws-cdk/aws-s3'; import { App, AppProps, Construct, Environment, SecretValue, Stack, StackProps, Stage } from 
'@aws-cdk/core'; import * as cdkp from '../lib'; @@ -45,6 +46,7 @@ export class TestGitHubNpmPipeline extends cdkp.CdkPipeline { sourceArtifact, cloudAssemblyArtifact, }), + vpc: new ec2.Vpc(scope, 'TestVpc'), cloudAssemblyArtifact, ...props, }); diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts index 6b7522e4e09f2..d88d69406ca65 100644 --- a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts +++ b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts @@ -148,7 +148,7 @@ export class SdkProvider { params: { RoleArn: roleArn, ...externalId ? { ExternalId: externalId } : {}, - RoleSessionName: `aws-cdk-${os.userInfo().username}`, + RoleSessionName: `aws-cdk-${safeUsername()}`, }, stsConfig: { region, @@ -362,4 +362,13 @@ function readIfPossible(filename: string): string | undefined { debug(e); return undefined; } +} + +/** + * Return the username with characters invalid for a RoleSessionName removed + * + * @see https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html#API_AssumeRole_RequestParameters + */ +function safeUsername() { + return os.userInfo().username.replace(/[^\w+=,.@-]/g, '@'); } \ No newline at end of file diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk.ts b/packages/aws-cdk/lib/api/aws-auth/sdk.ts index d735231836e27..28e24f19e988c 100644 --- a/packages/aws-cdk/lib/api/aws-auth/sdk.ts +++ b/packages/aws-cdk/lib/api/aws-auth/sdk.ts @@ -106,6 +106,16 @@ export class SDK implements ISDK { return { accountId, partition }; })); } + + /** + * Return the current credentials + * + * Don't use -- only used to write tests around assuming roles. 
+ */ + public async currentCredentials(): Promise<AWS.Credentials> { + await this.credentials.getPromise(); + return this.credentials; + } } /** diff --git a/packages/aws-cdk/test/api/sdk-provider.test.ts b/packages/aws-cdk/test/api/sdk-provider.test.ts index 83eb525d54771..70319a113f829 100644 --- a/packages/aws-cdk/test/api/sdk-provider.test.ts +++ b/packages/aws-cdk/test/api/sdk-provider.test.ts @@ -1,3 +1,4 @@ +import * as os from 'os'; import * as cxapi from '@aws-cdk/cx-api'; import * as AWS from 'aws-sdk'; import * as SDKMock from 'aws-sdk-mock'; @@ -271,6 +272,44 @@ describe('with default config files', () => { await expect(sdk.s3().listBuckets().promise()).rejects.toThrow('did you bootstrap'); await expect(sdk.s3().listBuckets().promise()).rejects.toThrow('Nope!'); }); + + test('assuming a role sanitizes the username into the session name', async () => { + // GIVEN + SDKMock.restore(); + + await withMocked(os, 'userInfo', async (userInfo) => { + userInfo.mockReturnValue({ username: 'skål', uid: 1, gid: 1, homedir: '/here', shell: '/bin/sh' }); + + await withMocked((new AWS.STS()).constructor.prototype, 'assumeRole', async (assumeRole) => { + let assumeRoleRequest; + + assumeRole.mockImplementation(function ( + this: any, + request: AWS.STS.Types.AssumeRoleRequest, + cb?: (err: Error | null, x: AWS.STS.Types.AssumeRoleResponse) => void) { + + // Part of the request is stored on "this" + assumeRoleRequest = { ...this.config.params, ...request }; + + const response = { + Credentials: { AccessKeyId: `${uid}aid`, Expiration: new Date(), SecretAccessKey: 's', SessionToken: '' }, + }; + if (cb) { cb(null, response); } + return { promise: () => Promise.resolve(response) }; + }); + + // WHEN + const provider = new SdkProvider(new AWS.CredentialProviderChain([() => new AWS.Credentials({ accessKeyId: 'a', secretAccessKey: 's' })]), 'eu-somewhere'); + const sdk = await provider.withAssumedRole('bla.role.arn', undefined, undefined); + + await sdk.currentCredentials(); + 
expect(assumeRoleRequest).toEqual(expect.objectContaining({ + RoleSessionName: 'aws-cdk-sk@l', + })); + }); + }); + }); }); describe('Plugins', () => { diff --git a/packages/awslint/lib/rules/construct.ts b/packages/awslint/lib/rules/construct.ts index ae922413828a4..0cdb50eebded6 100644 --- a/packages/awslint/lib/rules/construct.ts +++ b/packages/awslint/lib/rules/construct.ts @@ -24,6 +24,9 @@ export class ConstructReflection { return typeRef.fqn; } + /** + * @deprecated - use `CoreTypes.constructClass()` or `CoreTypes.baseConstructClass()` as appropriate + */ public readonly ROOT_CLASS: reflect.ClassType; // cdk.Construct public readonly fqn: string; @@ -79,7 +82,7 @@ export class ConstructReflection { constructLinter.add({ code: 'construct-ctor', - message: 'signature of all construct constructors should be "scope, id, props"', + message: 'signature of all construct constructors should be "scope, id, props". ' + baseConstructAddendum(), eval: e => { // only applies to non abstract classes if (e.ctx.classType.abstract) { @@ -93,9 +96,15 @@ constructLinter.add({ const expectedParams = new Array(); + let baseType; + if (process.env.AWSLINT_BASE_CONSTRUCT && !initializer.parentType.name.startsWith('Cfn')) { + baseType = e.ctx.core.baseConstructClass; + } else { + baseType = e.ctx.core.constructClass; + } expectedParams.push({ name: 'scope', - type: e.ctx.core.constructClass.fqn, + type: baseType.fqn, }); expectedParams.push({ @@ -276,3 +285,10 @@ constructLinter.add({ } }, }); + +function baseConstructAddendum(): string { + if (!process.env.AWSLINT_BASE_CONSTRUCT) { + return 'If the construct is using the "constructs" module, set the environment variable "AWSLINT_BASE_CONSTRUCT" and re-run'; + } + return ''; +} \ No newline at end of file diff --git a/packages/awslint/lib/rules/core-types.ts b/packages/awslint/lib/rules/core-types.ts index 09be91656d446..9e14eb5bb9d51 100644 --- a/packages/awslint/lib/rules/core-types.ts +++ 
b/packages/awslint/lib/rules/core-types.ts @@ -4,12 +4,18 @@ import { getDocTag } from './util'; const CORE_MODULE = '@aws-cdk/core'; enum CoreTypesFqn { CfnResource = '@aws-cdk/core.CfnResource', - Construct = '@aws-cdk/core.Construct', - ConstructInterface = '@aws-cdk/core.IConstruct', Resource = '@aws-cdk/core.Resource', ResourceInterface = '@aws-cdk/core.IResource', ResolvableInterface = '@aws-cdk/core.IResolvable', - PhysicalName = '@aws-cdk/core.PhysicalName' + PhysicalName = '@aws-cdk/core.PhysicalName', + + BaseConstruct = 'constructs.Construct', + BaseConstructInterface = 'constructs.IConstruct', + + /** @deprecated - use BaseConstruct */ + Construct = '@aws-cdk/core.Construct', + /** @deprecated - use BaseConstructInterface */ + ConstructInterface = '@aws-cdk/core.IConstruct', } export class CoreTypes { @@ -86,18 +92,34 @@ export class CoreTypes { /** * @returns `classType` for the core type Construct + * @deprecated - use `baseConstructClass()` */ public get constructClass() { return this.sys.findClass(CoreTypesFqn.Construct); } + /** + * @returns `classType` for the core type Construct + */ + public get baseConstructClass() { + return this.sys.findClass(CoreTypesFqn.BaseConstruct); + } + /** * @returns `interfacetype` for the core type Construct + * @deprecated - use `baseConstructInterface()` */ public get constructInterface() { return this.sys.findInterface(CoreTypesFqn.ConstructInterface); } + /** + * @returns `interfacetype` for the core type Construct + */ + public get baseConstructInterface() { + return this.sys.findInterface(CoreTypesFqn.BaseConstructInterface); + } + /** * @returns `classType` for the core type Construct */ diff --git a/packages/awslint/lib/rules/imports.ts b/packages/awslint/lib/rules/imports.ts index d69c5f2745d98..4940021e8fdce 100644 --- a/packages/awslint/lib/rules/imports.ts +++ b/packages/awslint/lib/rules/imports.ts @@ -63,16 +63,18 @@ importsLinter.add({ importsLinter.add({ code: 'from-signature', - message: 'invalid 
method signature for fromXxx method', + message: 'invalid method signature for fromXxx method. ' + baseConstructAddendum(), eval: e => { for (const method of e.ctx.fromMethods) { // "fromRoleArn" => "roleArn" const argName = e.ctx.resource.basename[0].toLocaleLowerCase() + method.name.slice('from'.length + 1); + const baseType = process.env.AWSLINT_BASE_CONSTRUCT ? e.ctx.resource.core.baseConstructClass : + e.ctx.resource.core.constructClass; e.assertSignature(method, { parameters: [ - { name: 'scope', type: e.ctx.resource.construct.ROOT_CLASS }, + { name: 'scope', type: baseType }, { name: 'id', type: 'string' }, { name: argName, type: 'string' }, ], @@ -84,15 +86,17 @@ importsLinter.add({ importsLinter.add({ code: 'from-attributes', - message: 'static fromXxxAttributes is a factory of IXxx from its primitive attributes', + message: 'static fromXxxAttributes is a factory of IXxx from its primitive attributes. ' + baseConstructAddendum(), eval: e => { if (!e.ctx.fromAttributesMethod) { return; } + const baseType = process.env.AWSLINT_BASE_CONSTRUCT ? 
e.ctx.resource.core.baseConstructClass + : e.ctx.resource.core.constructClass; e.assertSignature(e.ctx.fromAttributesMethod, { parameters: [ - { name: 'scope', type: e.ctx.resource.construct.ROOT_CLASS }, + { name: 'scope', type: baseType }, { name: 'id', type: 'string' }, { name: 'attrs', type: e.ctx.attributesStruct }, ], @@ -111,3 +115,10 @@ importsLinter.add({ e.assert(e.ctx.attributesStruct, e.ctx.attributesStructName); }, }); + +function baseConstructAddendum(): string { + if (!process.env.AWSLINT_BASE_CONSTRUCT) { + return 'If the construct is using the "constructs" module, set the environment variable "AWSLINT_BASE_CONSTRUCT" and re-run'; + } + return ''; +} diff --git a/packages/cdk-assets/bin/publish.ts b/packages/cdk-assets/bin/publish.ts index 12a0e318d0f78..20b7a609bfdd0 100644 --- a/packages/cdk-assets/bin/publish.ts +++ b/packages/cdk-assets/bin/publish.ts @@ -140,7 +140,7 @@ class DefaultAwsClient implements IAws { params: { RoleArn: roleArn, ExternalId: externalId, - RoleSessionName: `cdk-assets-${os.userInfo().username}`, + RoleSessionName: `cdk-assets-${safeUsername()}`, }, stsConfig: { region, @@ -149,3 +149,12 @@ class DefaultAwsClient implements IAws { }); } } + +/** + * Return the username with characters invalid for a RoleSessionName removed + * + * @see https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html#API_AssumeRole_RequestParameters + */ +function safeUsername() { + return os.userInfo().username.replace(/[^\w+=,.@-]/g, '@'); +} \ No newline at end of file diff --git a/tools/cdk-build-tools/bin/cdk-build.ts b/tools/cdk-build-tools/bin/cdk-build.ts index 87f269364adb8..185811c6b0658 100644 --- a/tools/cdk-build-tools/bin/cdk-build.ts +++ b/tools/cdk-build-tools/bin/cdk-build.ts @@ -27,9 +27,10 @@ async function main() { .argv; const options = cdkBuildOptions(); + const env = options.env; if (options.pre) { - await shell(options.pre, { timers }); + await shell(options.pre, { timers, env }); } // See if we need to call 
cfn2ts @@ -38,15 +39,15 @@ async function main() { // There can be multiple scopes, ensuring it's always an array. options.cloudformation = [options.cloudformation]; } - await shell(['cfn2ts', ...options.cloudformation.map(scope => `--scope=${scope}`)], { timers }); + await shell(['cfn2ts', ...options.cloudformation.map(scope => `--scope=${scope}`)], { timers, env }); } const overrides: CompilerOverrides = { eslint: args.eslint, jsii: args.jsii, tsc: args.tsc }; - await compileCurrentPackage(timers, overrides); + await compileCurrentPackage(options, timers, overrides); await lintCurrentPackage(options, overrides); if (options.post) { - await shell(options.post, { timers }); + await shell(options.post, { timers, env }); } } diff --git a/tools/cdk-build-tools/lib/compile.ts b/tools/cdk-build-tools/lib/compile.ts index 1101333113bf9..fe63d0c8f346e 100644 --- a/tools/cdk-build-tools/lib/compile.ts +++ b/tools/cdk-build-tools/lib/compile.ts @@ -1,12 +1,13 @@ import { makeExecutable, shell } from './os'; -import { CompilerOverrides, currentPackageJson, packageCompiler } from './package-info'; +import { CDKBuildOptions, CompilerOverrides, currentPackageJson, packageCompiler } from './package-info'; import { Timers } from './timer'; /** * Run the compiler on the current package */ -export async function compileCurrentPackage(timers: Timers, compilers: CompilerOverrides = {}): Promise { - await shell(packageCompiler(compilers), { timers }); +export async function compileCurrentPackage(options: CDKBuildOptions, timers: Timers, compilers: CompilerOverrides = {}): Promise { + const env = options.env; + await shell(packageCompiler(compilers), { timers, env }); // Find files in bin/ that look like they should be executable, and make them so. 
const scripts = currentPackageJson().bin || {}; diff --git a/tools/cdk-build-tools/lib/lint.ts b/tools/cdk-build-tools/lib/lint.ts index 77943bad48e8c..ca9128a4e5f82 100644 --- a/tools/cdk-build-tools/lib/lint.ts +++ b/tools/cdk-build-tools/lib/lint.ts @@ -3,6 +3,7 @@ import { shell } from './os'; import { CDKBuildOptions, CompilerOverrides } from './package-info'; export async function lintCurrentPackage(options: CDKBuildOptions, compilers: CompilerOverrides & { fix?: boolean } = {}): Promise { + const env = options.env; if (!options.eslint?.disable) { await shell([ compilers.eslint || require.resolve('eslint/bin/eslint'), @@ -10,12 +11,12 @@ export async function lintCurrentPackage(options: CDKBuildOptions, compilers: Co '--ext=.ts', `--resolve-plugins-relative-to=${__dirname}`, ...compilers.fix ? ['--fix'] : [], - ]); + ], { env }); } if (!options.pkglint?.disable) { - await shell(['pkglint']); + await shell(['pkglint'], { env }); } - await shell([path.join(__dirname, '..', 'bin', 'cdk-awslint')]); + await shell([path.join(__dirname, '..', 'bin', 'cdk-awslint')], { env }); } diff --git a/tools/cdk-build-tools/lib/package-info.ts b/tools/cdk-build-tools/lib/package-info.ts index afc4041776b9f..cf5b657d9a470 100644 --- a/tools/cdk-build-tools/lib/package-info.ts +++ b/tools/cdk-build-tools/lib/package-info.ts @@ -135,6 +135,11 @@ export interface CDKBuildOptions { * but we want to eventually move all of them to Jest. */ jest?: boolean; + + /** + * Environment variables to be passed to 'cdk-build' and all of its child processes. + */ + env?: NodeJS.ProcessEnv; } /**