diff --git a/.kitchen.yml b/.kitchen.yml index f37acd5806..fc2eab8c7c 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -162,6 +162,19 @@ suites: systems: - name: workload_metadata_config backend: local + - name: "beta_cluster" + driver: + root_module_directory: test/fixtures/beta_cluster + verifier: + systems: + - name: gcloud + backend: local + controls: + - gcloud + - name: gcp + backend: gcp + controls: + - gcp - name: "deploy_service" driver: root_module_directory: test/fixtures/deploy_service diff --git a/CHANGELOG.md b/CHANGELOG.md index aca3cd5f8a..18be4c015e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,30 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +## [v6.1.1] - 2019-12-04 + +### Fixed + +- Fix endpoint output for private clusters where `private_nodes=false`. [#365](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/365) + +## [v6.1.0] - 2019-12-03 + +### Added +- Support for using a pre-existing Service Account with the ACM submodule. [#346](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/346) + +### Fixed +- Compute region output for zonal clusters. [#362](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/362) + +## [v6.0.1] - 2019-12-02 + +### Fixed + +- The required Google provider constraint has been relaxed to `~> 2.18` (>= 2.18, <3.0). [#359] + +## [v6.0.0] - 2019-11-28 + +v6.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v6.0.md). + ### Added * Support for Shielded Nodes beta feature via `enabled_shielded_nodes` variable. [#300] @@ -23,17 +47,20 @@ Extending the adopted spec, each change should have a link to its corresponding * `private_zonal_with_networking` example. [#308] * `regional_private_node_pool_oauth_scopes` example. [#321] * The `cluster_autoscaling` variable for beta submodules. 
[#93] +* The `master_authorized_networks` variable. [#354] ### Changed * The `node_pool_labels`, `node_pool_tags`, and `node_pool_taints` variables have defaults and can be overridden within the `node_pools` object. [#3] * `upstream_nameservers` variable is typed as a list of strings. [#350] +* The `network_policy` variable defaults to `true`. [#138] ### Removed * **Breaking**: Removed support for enabling the Kubernetes dashboard, as this is deprecated on GKE. [#337] -* **Beaking**: Removed support for versions of the Google provider and the Google Beta provider older than 2.18. [#261] +* **Breaking**: Removed support for versions of the Google provider and the Google Beta provider older than 2.18. [#261] +* **Breaking**: Removed the `master_authorized_networks_config` variable. [#354] ### Fixed @@ -236,7 +263,11 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o * Initial release of module. -[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.2.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v6.1.1...HEAD +[v6.1.1]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v6.1.0...v6.1.1 +[v6.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v6.0.1...v6.1.0 +[v6.0.1]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v6.0.0...v6.0.1 +[v6.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.2.0...v6.0.0 [v5.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.1...v5.2.0 [v5.1.1]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.0...v5.1.1 [v5.1.0]: 
https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...v5.1.0 @@ -254,6 +285,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#359]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/359 +[#354]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/354 [#350]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/350 [#340]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/340 [#339]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/339 @@ -307,6 +340,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [#151]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/151 [#149]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/149 [#148]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/148 +[#138]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/138 +[#138]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/138 [#136]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/136 [#132]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/132 [#124]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/124 diff --git a/README.md b/README.md index 1cebf678c5..9e0bdc76d8 
100644 --- a/README.md +++ b/README.md @@ -108,22 +108,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - ## Inputs @@ -153,7 +137,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. 
VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | @@ -251,9 +235,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. - -[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/autogen/README.md b/autogen/README.md index c8e956a76e..d7dc65c1b9 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -12,9 +12,20 @@ The resources/services/activations/deletions that this module will create/trigge Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. {% if private_cluster %} -**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. 
+## Private Cluster Endpoints +When creating a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), nodes are provisioned with private IPs. +The Kubernetes master endpoint is also [locked down](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints), which affects these module features: +- `configure_ip_masq` +- `stub_domains` + +If you are *not* using these features, then the module will function normally for private clusters and no special configuration is needed. +If you are using these features with a private cluster, you will need to either: +1. Run Terraform from a VM on the same VPC as your cluster (allowing it to connect to the private endpoint) and set `deploy_using_private_endpoint` to `true`. +2. Enable (beta) [route export functionality](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#master-on-prem-routing) to connect from an on-premise network over a VPN or Interconnect. +3. Include the external IP of your Terraform deployer in the `master_authorized_networks` configuration. Note that only IP addresses reserved in Google Cloud (such as in other VPCs) can be whitelisted. +4. Deploy a [bastion host](https://github.com/terraform-google-modules/terraform-google-bastion-host) or [proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies) in the same VPC as your GKE cluster. - {% endif %} +{% endif %} ## Compatibility @@ -125,22 +136,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. 
- -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - @@ -199,17 +194,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. 
- -{% if private_cluster %} -[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md -{% else %} -[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md -{% endif %} -{% if private_cluster or beta_cluster %} -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md -{% else %} -[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md -{% endif %} {% if beta_cluster %} [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta {% else %} diff --git a/autogen/cluster.tf.tmpl b/autogen/cluster.tf.tmpl index dc19ed2d90..18db2b5859 100644 --- a/autogen/cluster.tf.tmpl +++ b/autogen/cluster.tf.tmpl @@ -191,10 +191,18 @@ resource "google_container_cluster" "primary" { } {% if private_cluster %} - private_cluster_config { - enable_private_endpoint = var.enable_private_endpoint - enable_private_nodes = var.enable_private_nodes - master_ipv4_cidr_block = var.master_ipv4_cidr_block + dynamic "private_cluster_config" { + for_each = var.enable_private_nodes ? [{ + enable_private_nodes = var.enable_private_nodes, + enable_private_endpoint = var.enable_private_endpoint + master_ipv4_cidr_block = var.master_ipv4_cidr_block + }] : [] + + content { + enable_private_endpoint = private_cluster_config.value.enable_private_endpoint + enable_private_nodes = private_cluster_config.value.enable_private_nodes + master_ipv4_cidr_block = private_cluster_config.value.master_ipv4_cidr_block + } } {% endif %} diff --git a/autogen/main.tf.tmpl b/autogen/main.tf.tmpl index 3ac28cc16a..5c79b2bea5 100644 --- a/autogen/main.tf.tmpl +++ b/autogen/main.tf.tmpl @@ -96,16 +96,14 @@ locals { {% endif %} cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? 
slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones {% if private_cluster %} - cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint + cluster_endpoint = var.enable_private_nodes ? (var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint {% else %} - cluster_output_endpoint = google_container_cluster.primary.endpoint + cluster_endpoint = google_container_cluster.primary.endpoint {% endif %} cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) @@ -137,12 +135,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? 
google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/autogen/variables.tf.tmpl b/autogen/variables.tf.tmpl index 3295199e48..9a94ee8c66 100644 --- a/autogen/variables.tf.tmpl +++ b/autogen/variables.tf.tmpl @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/autogen/versions.tf.tmpl b/autogen/versions.tf.tmpl index 387a2e37c8..3024374262 100644 --- a/autogen/versions.tf.tmpl +++ b/autogen/versions.tf.tmpl @@ -19,9 +19,9 @@ terraform { required_providers { {% if beta_cluster %} - google-beta = "~> 2.18.0" + google-beta = "~> 2.18" {% else %} - google = "~> 2.18.0" + google = "~> 2.18" {% endif %} } } diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index 45e23b6193..8a8ecb5477 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -264,6 +264,26 @@ steps: - verify workload-metadata-config-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] +- id: create beta-cluster-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', 
'-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create beta-cluster-local'] +- id: converge beta-cluster-local + waitFor: + - create beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge beta-cluster-local'] +- id: verify beta-cluster-local + waitFor: + - converge beta-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify beta-cluster-local'] +#- id: destroy beta-cluster-local +# waitFor: +# - verify beta-cluster-local +# name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' +# args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy beta-cluster-local'] - id: create deploy-service-local waitFor: - prepare diff --git a/examples/simple_regional_beta/README.md b/examples/simple_regional_beta/README.md index 72bb221d9f..32bfc8fbfd 100644 --- a/examples/simple_regional_beta/README.md +++ b/examples/simple_regional_beta/README.md @@ -10,17 +10,22 @@ This example illustrates how to create a simple cluster with beta features. | cloudrun | Boolean to enable / disable CloudRun | string | `"true"` | no | | cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | | compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. 
| object | `` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | | ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | | ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | | istio | Boolean to enable / disable Istio | string | `"true"` | no | | network | The VPC network to host the cluster in | string | n/a | yes | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). 
| bool | `"false"` | no | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | ## Outputs diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index 0863cc51de..55acfaed79 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -24,23 +24,27 @@ provider "google-beta" { } module "gke" { - source = "../../modules/beta-public-cluster/" - project_id = var.project_id - name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" - regional = true - region = var.region - network = var.network - subnetwork = var.subnetwork - ip_range_pods = var.ip_range_pods - ip_range_services = var.ip_range_services - create_service_account = false - service_account = var.compute_engine_service_account - istio = var.istio - cloudrun = var.cloudrun - node_metadata = var.node_metadata - sandbox_enabled = var.sandbox_enabled - remove_default_node_pool = var.remove_default_node_pool - node_pools = var.node_pools + source = "../../modules/beta-public-cluster/" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = var.regional + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = var.compute_engine_service_account == "create" + service_account = var.compute_engine_service_account + istio = var.istio + cloudrun = var.cloudrun + node_metadata = var.node_metadata + sandbox_enabled = var.sandbox_enabled + remove_default_node_pool = var.remove_default_node_pool + node_pools = var.node_pools + database_encryption = var.database_encryption + enable_binary_authorization = var.enable_binary_authorization + pod_security_policy_config = var.pod_security_policy_config } data 
"google_client_config" "default" { diff --git a/examples/simple_regional_beta/variables.tf b/examples/simple_regional_beta/variables.tf index ed16642774..58e1ae7433 100644 --- a/examples/simple_regional_beta/variables.tf +++ b/examples/simple_regional_beta/variables.tf @@ -85,3 +85,36 @@ variable "node_pools" { }, ] } + +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." + type = list(object({ state = string, key_name = string })) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + +variable "enable_binary_authorization" { + description = "Enable BinAuthZ Admission controller" + default = false +} + +variable "pod_security_policy_config" { + description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ + "enabled" = false + }] +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. 
WARNING: changing this after cluster creation is destructive!)" + default = true +} diff --git a/examples/stub_domains/main.tf b/examples/stub_domains/main.tf index b81dc0cf8c..90ffd97687 100644 --- a/examples/stub_domains/main.tf +++ b/examples/stub_domains/main.tf @@ -32,7 +32,6 @@ module "gke" { subnetwork = var.subnetwork ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services - network_policy = true service_account = var.compute_engine_service_account create_service_account = false diff --git a/examples/stub_domains_private/main.tf b/examples/stub_domains_private/main.tf index b263922b2a..31b3d7aec5 100644 --- a/examples/stub_domains_private/main.tf +++ b/examples/stub_domains_private/main.tf @@ -49,7 +49,6 @@ module "gke" { master_ipv4_cidr_block = "172.16.0.0/28" - network_policy = true create_service_account = false service_account = var.compute_engine_service_account diff --git a/examples/stub_domains_upstream_nameservers/main.tf b/examples/stub_domains_upstream_nameservers/main.tf index 4b7448b7e8..009de87950 100644 --- a/examples/stub_domains_upstream_nameservers/main.tf +++ b/examples/stub_domains_upstream_nameservers/main.tf @@ -32,7 +32,6 @@ module "gke" { subnetwork = var.subnetwork ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services - network_policy = true create_service_account = false service_account = var.compute_engine_service_account diff --git a/examples/upstream_nameservers/main.tf b/examples/upstream_nameservers/main.tf index 784e8a0cd3..26895f32eb 100644 --- a/examples/upstream_nameservers/main.tf +++ b/examples/upstream_nameservers/main.tf @@ -32,7 +32,6 @@ module "gke" { subnetwork = var.subnetwork ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services - network_policy = true create_service_account = false service_account = var.compute_engine_service_account diff --git a/main.tf b/main.tf index 409cbfe55b..45a8cd6f2c 100644 --- a/main.tf +++ b/main.tf @@ -64,13 +64,11 @@ 
locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = google_container_cluster.primary.endpoint + cluster_endpoint = google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -92,12 +90,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? 
google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/acm/README.md b/modules/acm/README.md index bef7eccd53..9960667669 100644 --- a/modules/acm/README.md +++ b/modules/acm/README.md @@ -53,6 +53,7 @@ By default, this module will attempt to download the ACM operator from Google di | operator\_path | Path to the operator yaml config. If unset, will download from GCS releases. | string | `"null"` | no | | policy\_dir | Subfolder containing configs in ACM Git repo | string | n/a | yes | | project\_id | The project in which the resource belongs. | string | n/a | yes | +| ssh\_auth\_key | Key for Git authentication. Overrides 'create_ssh_key' variable. Can be set using 'file(path/to/file)'-function. | string | `"null"` | no | | sync\_branch | ACM repo Git branch | string | `"master"` | no | | sync\_repo | ACM Git repo address | string | n/a | yes | diff --git a/modules/acm/main.tf b/modules/acm/main.tf index 3a78172ad6..69b5755337 100644 --- a/modules/acm/main.tf +++ b/modules/acm/main.tf @@ -18,7 +18,7 @@ locals { cluster_endpoint = "https://${var.cluster_endpoint}" token = data.google_client_config.default.access_token cluster_ca_certificate = data.google_container_cluster.primary.master_auth.0.cluster_ca_certificate - private_key = var.create_ssh_key ? tls_private_key.git_creds[0].private_key_pem : "" + private_key = var.create_ssh_key && var.ssh_auth_key == null ? 
tls_private_key.git_creds[0].private_key_pem : var.ssh_auth_key download_operator = var.operator_path == null ? true : false operator_path = local.download_operator ? "${path.module}/config-management-operator.yaml" : var.operator_path } diff --git a/modules/acm/variables.tf b/modules/acm/variables.tf index 513556364d..d56f20b073 100644 --- a/modules/acm/variables.tf +++ b/modules/acm/variables.tf @@ -62,6 +62,12 @@ variable "create_ssh_key" { default = true } +variable "ssh_auth_key" { + description = "Key for Git authentication. Overrides 'create_ssh_key' variable. Can be set using 'file(path/to/file)'-function." + type = string + default = null +} + variable "enable_policy_controller" { description = "Whether to enable the ACM Policy Controller on the cluster" type = bool diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 6df69df5bd..8bc9626de1 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -10,7 +10,18 @@ The resources/services/activations/deletions that this module will create/trigge Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. -**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. +## Private Cluster Endpoints +When creating a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), nodes are provisioned with private IPs. 
+The Kubernetes master endpoint is also [locked down](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints), which affects these module features: +- `configure_ip_masq` +- `stub_domains` + +If you are *not* using these features, then the module will function normally for private clusters and no special configuration is needed. +If you are using these features with a private cluster, you will need to either: +1. Run Terraform from a VM on the same VPC as your cluster (allowing it to connect to the private endpoint) and set `deploy_using_private_endpoint` to `true`. +2. Enable (beta) [route export functionality](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#master-on-prem-routing) to connect from an on-premise network over a VPN or Interconnect. +3. Include the external IP of your Terraform deployer in the `master_authorized_networks` configuration. Note that only IP addresses reserved in Google Cloud (such as in other VPCs) can be whitelisted. +4. Deploy a [bastion host](https://github.com/terraform-google-modules/terraform-google-bastion-host) or [proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies) in the same VPC as your GKE cluster. ## Compatibility @@ -116,22 +127,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. 
This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - ## Inputs @@ -176,7 +171,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | @@ -287,9 +282,6 @@ The project has the following folders and files: - /README.MD: This file. 
- /modules: Private and beta sub modules. - -[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 10fcf764a3..2daaa9ee57 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -178,10 +178,18 @@ resource "google_container_cluster" "primary" { } } - private_cluster_config { - enable_private_endpoint = var.enable_private_endpoint - enable_private_nodes = var.enable_private_nodes - master_ipv4_cidr_block = var.master_ipv4_cidr_block + dynamic "private_cluster_config" { + for_each = var.enable_private_nodes ? 
[{ + enable_private_nodes = var.enable_private_nodes, + enable_private_endpoint = var.enable_private_endpoint + master_ipv4_cidr_block = var.master_ipv4_cidr_block + }] : [] + + content { + enable_private_endpoint = private_cluster_config.value.enable_private_endpoint + enable_private_nodes = private_cluster_config.value.enable_private_nodes + master_ipv4_cidr_block = private_cluster_config.value.master_ipv4_cidr_block + } } remove_default_node_pool = var.remove_default_node_pool diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 9afc2502b8..37e06e58f1 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -88,13 +88,11 @@ locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint + cluster_endpoint = var.enable_private_nodes ? (var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -123,12 +121,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 62c47d002d..da7c358427 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/modules/beta-private-cluster-update-variant/versions.tf b/modules/beta-private-cluster-update-variant/versions.tf index 8e29303fa9..cdf71b53b4 100644 --- 
a/modules/beta-private-cluster-update-variant/versions.tf +++ b/modules/beta-private-cluster-update-variant/versions.tf @@ -18,6 +18,6 @@ terraform { required_version = ">= 0.12" required_providers { - google-beta = "~> 2.18.0" + google-beta = "~> 2.18" } } diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index b03a4ea921..96ff7b416b 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -10,7 +10,18 @@ The resources/services/activations/deletions that this module will create/trigge Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. -**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. +## Private Cluster Endpoints +When creating a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), nodes are provisioned with private IPs. +The Kubernetes master endpoint is also [locked down](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints), which affects these module features: +- `configure_ip_masq` +- `stub_domains` + +If you are *not* using these features, then the module will function normally for private clusters and no special configuration is needed. +If you are using these features with a private cluster, you will need to either: +1. Run Terraform from a VM on the same VPC as your cluster (allowing it to connect to the private endpoint) and set `deploy_using_private_endpoint` to `true`. +2. Enable (beta) [route export functionality](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#master-on-prem-routing) to connect from an on-premise network over a VPN or Interconnect. +3. 
Include the external IP of your Terraform deployer in the `master_authorized_networks` configuration. Note that only IP addresses reserved in Google Cloud (such as in other VPCs) can be whitelisted. +4. Deploy a [bastion host](https://github.com/terraform-google-modules/terraform-google-bastion-host) or [proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies) in the same VPC as your GKE cluster. ## Compatibility @@ -116,22 +127,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. 
- ## Inputs @@ -176,7 +171,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | @@ -287,9 +282,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. 
- -[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index f2789bfd03..a0121e77b2 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -178,10 +178,18 @@ resource "google_container_cluster" "primary" { } } - private_cluster_config { - enable_private_endpoint = var.enable_private_endpoint - enable_private_nodes = var.enable_private_nodes - master_ipv4_cidr_block = var.master_ipv4_cidr_block + dynamic "private_cluster_config" { + for_each = var.enable_private_nodes ? [{ + enable_private_nodes = var.enable_private_nodes, + enable_private_endpoint = var.enable_private_endpoint + master_ipv4_cidr_block = var.master_ipv4_cidr_block + }] : [] + + content { + enable_private_endpoint = private_cluster_config.value.enable_private_endpoint + enable_private_nodes = private_cluster_config.value.enable_private_nodes + master_ipv4_cidr_block = private_cluster_config.value.master_ipv4_cidr_block + } } remove_default_node_pool = var.remove_default_node_pool diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 9afc2502b8..37e06e58f1 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -88,13 +88,11 @@ locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = 
local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint + cluster_endpoint = var.enable_private_nodes ? (var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -123,12 +121,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? 
google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 62c47d002d..da7c358427 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/modules/beta-private-cluster/versions.tf b/modules/beta-private-cluster/versions.tf index 8e29303fa9..cdf71b53b4 100644 --- a/modules/beta-private-cluster/versions.tf +++ b/modules/beta-private-cluster/versions.tf @@ -18,6 +18,6 @@ terraform { required_version = ">= 0.12" required_providers { - google-beta = "~> 2.18.0" + google-beta = "~> 2.18" } } diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index b6ab1f39d3..7fbc72a36f 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -111,22 +111,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. 
Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - ## Inputs @@ -167,7 +151,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | @@ -278,9 +262,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. - -[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 5cff8bdd4e..2742594d87 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -88,13 +88,11 @@ locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? 
slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = google_container_cluster.primary.endpoint + cluster_endpoint = google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -123,12 +121,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 1a1b9c54b4..3b20891262 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/modules/beta-public-cluster/versions.tf b/modules/beta-public-cluster/versions.tf index 8e29303fa9..cdf71b53b4 100644 --- a/modules/beta-public-cluster/versions.tf +++ 
b/modules/beta-public-cluster/versions.tf @@ -18,6 +18,6 @@ terraform { required_version = ">= 0.12" required_providers { - google-beta = "~> 2.18.0" + google-beta = "~> 2.18" } } diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 8d26153072..8e5c574893 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -10,7 +10,18 @@ The resources/services/activations/deletions that this module will create/trigge Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. -**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. +## Private Cluster Endpoints +When creating a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), nodes are provisioned with private IPs. +The Kubernetes master endpoint is also [locked down](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints), which affects these module features: +- `configure_ip_masq` +- `stub_domains` + +If you are *not* using these features, then the module will function normally for private clusters and no special configuration is needed. +If you are using these features with a private cluster, you will need to either: +1. Run Terraform from a VM on the same VPC as your cluster (allowing it to connect to the private endpoint) and set `deploy_using_private_endpoint` to `true`. +2. Enable (beta) [route export functionality](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#master-on-prem-routing) to connect from an on-premise network over a VPN or Interconnect. +3. 
Include the external IP of your Terraform deployer in the `master_authorized_networks` configuration. Note that only IP addresses reserved in Google Cloud (such as in other VPCs) can be whitelisted. +4. Deploy a [bastion host](https://github.com/terraform-google-modules/terraform-google-bastion-host) or [proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies) in the same VPC as your GKE cluster. ## Compatibility @@ -113,22 +124,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. 
- ## Inputs @@ -162,7 +157,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | @@ -260,9 +255,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. 
- -[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index 7027d766ce..15e64c823b 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -114,10 +114,18 @@ resource "google_container_cluster" "primary" { } } - private_cluster_config { - enable_private_endpoint = var.enable_private_endpoint - enable_private_nodes = var.enable_private_nodes - master_ipv4_cidr_block = var.master_ipv4_cidr_block + dynamic "private_cluster_config" { + for_each = var.enable_private_nodes ? [{ + enable_private_nodes = var.enable_private_nodes, + enable_private_endpoint = var.enable_private_endpoint + master_ipv4_cidr_block = var.master_ipv4_cidr_block + }] : [] + + content { + enable_private_endpoint = private_cluster_config.value.enable_private_endpoint + enable_private_nodes = private_cluster_config.value.enable_private_nodes + master_ipv4_cidr_block = private_cluster_config.value.master_ipv4_cidr_block + } } remove_default_node_pool = var.remove_default_node_pool diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index 7dea99f25c..3955a61e13 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -64,13 +64,11 @@ locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = 
google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint + cluster_endpoint = var.enable_private_nodes ? (var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -92,12 +90,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? 
google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 9c75edb5ec..ff98b283b0 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/modules/private-cluster-update-variant/versions.tf b/modules/private-cluster-update-variant/versions.tf index e4544656fa..38e1d1bf9c 100644 --- a/modules/private-cluster-update-variant/versions.tf +++ b/modules/private-cluster-update-variant/versions.tf @@ -18,6 +18,6 @@ terraform { required_version = ">= 0.12" required_providers { - google = "~> 2.18.0" + google = "~> 2.18" } } diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 60fbe8de76..3efa415a4a 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -10,7 +10,18 @@ The resources/services/activations/deletions that this module will create/trigge Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. 
See the modules directory for the various sub modules. -**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. +## Private Cluster Endpoints +When creating a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), nodes are provisioned with private IPs. +The Kubernetes master endpoint is also [locked down](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints), which affects these module features: +- `configure_ip_masq` +- `stub_domains` + +If you are *not* using these features, then the module will function normally for private clusters and no special configuration is needed. +If you are using these features with a private cluster, you will need to either: +1. Run Terraform from a VM on the same VPC as your cluster (allowing it to connect to the private endpoint) and set `deploy_using_private_endpoint` to `true`. +2. Enable (beta) [route export functionality](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#master-on-prem-routing) to connect from an on-premise network over a VPN or Interconnect. +3. Include the external IP of your Terraform deployer in the `master_authorized_networks` configuration. Note that only IP addresses reserved in Google Cloud (such as in other VPCs) can be whitelisted. +4. Deploy a [bastion host](https://github.com/terraform-google-modules/terraform-google-bastion-host) or [proxy](https://cloud.google.com/solutions/creating-kubernetes-engine-private-clusters-with-net-proxies) in the same VPC as your GKE cluster. ## Compatibility @@ -113,22 +124,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. 
- -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - ## Inputs @@ -162,7 +157,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy | Enable network policy addon | bool | `"true"` | no | | network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | @@ -260,9 +255,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. - -[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google [3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 [terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index acb6f29a68..e5ce88da5f 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -114,10 +114,18 @@ resource "google_container_cluster" "primary" { } } - private_cluster_config { - enable_private_endpoint = var.enable_private_endpoint - enable_private_nodes = var.enable_private_nodes - master_ipv4_cidr_block = var.master_ipv4_cidr_block + dynamic "private_cluster_config" { + for_each = var.enable_private_nodes ? 
[{ + enable_private_nodes = var.enable_private_nodes, + enable_private_endpoint = var.enable_private_endpoint + master_ipv4_cidr_block = var.master_ipv4_cidr_block + }] : [] + + content { + enable_private_endpoint = private_cluster_config.value.enable_private_endpoint + enable_private_nodes = private_cluster_config.value.enable_private_nodes + master_ipv4_cidr_block = private_cluster_config.value.master_ipv4_cidr_block + } } remove_default_node_pool = var.remove_default_node_pool diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 7dea99f25c..3955a61e13 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -64,13 +64,11 @@ locals { cluster_output_name = google_container_cluster.primary.name - cluster_output_location = google_container_cluster.primary.location - cluster_output_region = google_container_cluster.primary.region cluster_output_regional_zones = google_container_cluster.primary.node_locations cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_output_zones = local.cluster_output_regional_zones - cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint + cluster_endpoint = var.enable_private_nodes ? (var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) cluster_output_master_version = google_container_cluster.primary.master_version @@ -92,12 +90,12 @@ locals { cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] - # cluster locals + + cluster_location = google_container_cluster.primary.location + cluster_region = var.regional ? google_container_cluster.primary.region : join("-", slice(split("-", local.cluster_location), 0, 2)) + cluster_zones = sort(local.cluster_output_zones) + cluster_name = local.cluster_output_name - cluster_location = local.cluster_output_location - cluster_region = local.cluster_output_region - cluster_zones = sort(local.cluster_output_zones) - cluster_endpoint = local.cluster_output_endpoint cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] cluster_master_version = local.cluster_output_master_version cluster_min_master_version = local.cluster_output_min_master_version diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 9c75edb5ec..ff98b283b0 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/modules/private-cluster/versions.tf b/modules/private-cluster/versions.tf index e4544656fa..38e1d1bf9c 100644 --- a/modules/private-cluster/versions.tf +++ b/modules/private-cluster/versions.tf @@ -18,6 +18,6 @@ terraform { required_version 
= ">= 0.12" required_providers { - google = "~> 2.18.0" + google = "~> 2.18" } } diff --git a/test/ci/beta-cluster.yml b/test/ci/beta-cluster.yml new file mode 100644 index 0000000000..dd4ce29302 --- /dev/null +++ b/test/ci/beta-cluster.yml @@ -0,0 +1,18 @@ +--- + +platform: linux + +inputs: +- name: pull-request + path: terraform-google-kubernetes-engine + +run: + path: make + args: ['test_integration'] + dir: terraform-google-kubernetes-engine + +params: + SUITE: "beta-cluster-local" + COMPUTE_ENGINE_SERVICE_ACCOUNT: "" + REGION: "us-east4" + ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' diff --git a/test/fixtures/beta_cluster/main.tf b/test/fixtures/beta_cluster/main.tf new file mode 100644 index 0000000000..58941b7f69 --- /dev/null +++ b/test/fixtures/beta_cluster/main.tf @@ -0,0 +1,67 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + name = "beta-cluster-${random_string.suffix.result}" + project_id = var.project_ids[0] +} + +resource "google_kms_key_ring" "db" { + location = var.region + name = "${local.name}-db" + project = local.project_id +} + +resource "google_kms_crypto_key" "db" { + name = local.name + key_ring = google_kms_key_ring.db.self_link +} + +module "this" { + source = "../../../examples/simple_regional_beta" + + cluster_name_suffix = "-${random_string.suffix.result}" + project_id = local.project_id + regional = false + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = "create" + + // Beta features + istio = true + + database_encryption = [{ + state = "ENCRYPTED" + key_name = google_kms_crypto_key.db.self_link + }] + + cloudrun = true + + enable_binary_authorization = true + + pod_security_policy_config = [{ + enabled = true + }] + + node_metadata = "EXPOSE" +} + +data "google_client_config" "default" { +} diff --git a/test/fixtures/beta_cluster/network.tf b/test/fixtures/beta_cluster/network.tf new file mode 100644 index 0000000000..c173435cbe --- /dev/null +++ b/test/fixtures/beta_cluster/network.tf @@ -0,0 +1,45 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false + project = local.project_id +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + project = local.project_id + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} diff --git a/test/fixtures/beta_cluster/outputs.tf b/test/fixtures/beta_cluster/outputs.tf new file mode 100644 index 0000000000..f2d5730ec1 --- /dev/null +++ b/test/fixtures/beta_cluster/outputs.tf @@ -0,0 +1,84 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +output "project_id" { + value = local.project_id +} + +output "region" { + value = module.this.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.this.cluster_name +} + +output "network" { + value = google_compute_network.main.name +} + +output "subnetwork" { + value = google_compute_subnetwork.main.name +} + +output "location" { + value = module.this.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = google_compute_subnetwork.main.secondary_ip_range[0].range_name +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = google_compute_subnetwork.main.secondary_ip_range[1].range_name +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.this.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.this.master_kubernetes_version +} + +output "kubernetes_endpoint" { + sensitive = true + value = module.this.kubernetes_endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + description = "The cluster CA certificate" + value = module.this.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ value = module.this.service_account +} + +output "database_encryption_key_name" { + value = google_kms_crypto_key.db.self_link +} diff --git a/test/fixtures/beta_cluster/variables.tf b/test/fixtures/beta_cluster/variables.tf new file mode 120000 index 0000000000..c28fc18c01 --- /dev/null +++ b/test/fixtures/beta_cluster/variables.tf @@ -0,0 +1 @@ +../deploy_service/variables.tf \ No newline at end of file diff --git a/test/integration/beta_cluster/controls/gcloud.rb b/test/integration/beta_cluster/controls/gcloud.rb new file mode 100644 index 0000000000..032be9601a --- /dev/null +++ b/test/integration/beta_cluster/controls/gcloud.rb @@ -0,0 +1,202 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') +service_account = attribute('service_account') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is zonal" do + expect(data['location']).to match(/^(.*)[1-9]-[a-z]$/) + end + + it "is single zoned" do + expect(data['locations'].size).to eq 1 + end + + it "uses public nodes and master endpoint" do + expect(data['privateClusterConfig']).to eq nil + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => {}, + "istioConfig" => {}, + "cloudRunConfig" => {}, + }) + end + + it "has the expected binaryAuthorization config" do + expect(data['binaryAuthorization']).to eq({ + "enabled" => true, + }) + end + + it "has the expected nodeMetadata conceal config" do + expect(data['nodeConfig']['workloadMetadataConfig']).to eq({ + "nodeMetadata" => 'EXPOSE', + }) + end + + it "has the expected podSecurityPolicyConfig config" do + expect(data['podSecurityPolicyConfig']).to eq({ + "enabled" => true, + }) + end + + it "has the expected databaseEncryption config" do + expect(data['databaseEncryption']).to eq({ + "state" => 'ENCRYPTED', + "keyName" => attribute('database_encryption_key_name'), + }) + end + end + + describe "default node pool" do + let(:default_node_pool) { data['nodePools'].select { |p| p['name'] == "default-pool" }.first } + + it "has no initial node 
count" do + expect(default_node_pool['initialNodeCount']).to eq nil + end + + it "does not have autoscaling enabled" do + expect(default_node_pool['autoscaling']).to eq nil + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "uses an automatically created service account" do + expect(node_pools).to include( + including( + "config" => including( + "serviceAccount" => service_account, + ), + ), + ) + end + + it "has autoscaling enabled" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "enabled" => true, + ), + ) + ) + end + + it "has the expected minimum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "minNodeCount" => 1, + ), + ) + ) + end + + it "has the expected maximum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "maxNodeCount" => 100, + ), + ) + ) + end + + it "is the expected machine type" do + expect(node_pools).to include( + including( + "config" => including( + "machineType" => "n1-standard-2", + ), + ) + ) + end + + it "has the expected disk size" do + expect(node_pools).to include( + including( + "config" => including( + "diskSizeGb" => 100, + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + ), + ), + ) + ) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + it "has autorepair enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoRepair" => true, + ), + ) + ) + end + end + end +end diff --git a/test/integration/beta_cluster/controls/gcp.rb 
b/test/integration/beta_cluster/controls/gcp.rb new file mode 100644 index 0000000000..6e9ade64ff --- /dev/null +++ b/test/integration/beta_cluster/controls/gcp.rb @@ -0,0 +1,31 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +control "gcp" do + title "Native InSpec Resources" + + service_account = attribute("service_account") + project_id = attribute("project_id") + + if service_account.start_with? "projects/" + service_account_name = service_account + else + service_account_name = "projects/#{project_id}/serviceAccounts/#{service_account}" + end + + describe google_service_account name: service_account_name do + its("display_name") { should eq "Terraform-managed service account for cluster #{attribute("cluster_name")}" } + its("project_id") { should eq project_id } + end +end diff --git a/test/integration/beta_cluster/inspec.yml b/test/integration/beta_cluster/inspec.yml new file mode 100644 index 0000000000..66062ea35d --- /dev/null +++ b/test/integration/beta_cluster/inspec.yml @@ -0,0 +1,33 @@ +name: beta_cluster +depends: + - name: inspec-gcp + git: https://github.com/inspec/inspec-gcp.git + tag: v0.10.0 +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: master_kubernetes_version + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: 
client_token + required: true + type: string + - name: service_account + required: true + type: string + - name: service_account + required: true + type: string + - name: database_encryption_key_name + required: true + type: string diff --git a/test/integration/private_zonal_with_networking/controls/gcloud.rb b/test/integration/private_zonal_with_networking/controls/gcloud.rb index adaf6fd646..6e24b1142c 100644 --- a/test/integration/private_zonal_with_networking/controls/gcloud.rb +++ b/test/integration/private_zonal_with_networking/controls/gcloud.rb @@ -58,9 +58,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/sandbox_enabled/controls/gcloud.rb b/test/integration/sandbox_enabled/controls/gcloud.rb index eb0ffdaf46..a5b785e725 100644 --- a/test/integration/sandbox_enabled/controls/gcloud.rb +++ b/test/integration/sandbox_enabled/controls/gcloud.rb @@ -50,9 +50,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/simple_regional/controls/gcloud.rb b/test/integration/simple_regional/controls/gcloud.rb index e6bbcfc047..0f47490d40 100644 --- a/test/integration/simple_regional/controls/gcloud.rb +++ b/test/integration/simple_regional/controls/gcloud.rb @@ -50,9 +50,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/simple_regional_private/controls/gcloud.rb b/test/integration/simple_regional_private/controls/gcloud.rb index b15dafcd02..b86834a2a2 100644 --- a/test/integration/simple_regional_private/controls/gcloud.rb +++ b/test/integration/simple_regional_private/controls/gcloud.rb @@ -54,9 +54,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { 
- "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/simple_regional_with_networking/controls/gcloud.rb b/test/integration/simple_regional_with_networking/controls/gcloud.rb index e6bbcfc047..0f47490d40 100644 --- a/test/integration/simple_regional_with_networking/controls/gcloud.rb +++ b/test/integration/simple_regional_with_networking/controls/gcloud.rb @@ -50,9 +50,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/simple_zonal/controls/gcloud.rb b/test/integration/simple_zonal/controls/gcloud.rb index c2e72936b0..058ed9ba53 100644 --- a/test/integration/simple_zonal/controls/gcloud.rb +++ b/test/integration/simple_zonal/controls/gcloud.rb @@ -55,9 +55,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/integration/simple_zonal_private/controls/gcloud.rb b/test/integration/simple_zonal_private/controls/gcloud.rb index 9968affcb6..653c11bb0c 100644 --- a/test/integration/simple_zonal_private/controls/gcloud.rb +++ b/test/integration/simple_zonal_private/controls/gcloud.rb @@ -58,9 +58,7 @@ "kubernetesDashboard" => { "disabled" => true, }, - "networkPolicyConfig" => { - "disabled" => true, - }, + "networkPolicyConfig" => {}, }) end end diff --git a/test/setup/iam.tf b/test/setup/iam.tf index b26c46568e..8685b9af5c 100644 --- a/test/setup/iam.tf +++ b/test/setup/iam.tf @@ -16,6 +16,7 @@ locals { int_required_roles = [ + "roles/cloudkms.admin", "roles/cloudkms.cryptoKeyEncrypterDecrypter", "roles/compute.networkAdmin", "roles/container.admin", @@ -72,3 +73,12 @@ resource "google_project_iam_member" "int_test_2" { resource "google_service_account_key" "int_test" { service_account_id = google_service_account.int_test.id } + +resource "google_project_iam_binding" 
"kubernetes_engine_kms_access" { + project = module.gke-project-1.project_id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + members = [ + "serviceAccount:service-${module.gke-project-1.project_number}@container-engine-robot.iam.gserviceaccount.com", + ] +} diff --git a/test/setup/main.tf b/test/setup/main.tf index 961d1b47ac..a9624a92cd 100644 --- a/test/setup/main.tf +++ b/test/setup/main.tf @@ -52,8 +52,6 @@ module "gke-project-2" { folder_id = var.folder_id billing_account = var.billing_account - auto_create_network = true - activate_apis = [ "bigquery-json.googleapis.com", "cloudkms.googleapis.com", diff --git a/variables.tf b/variables.tf index 904cd6ddab..9c420fb423 100644 --- a/variables.tf +++ b/variables.tf @@ -99,7 +99,7 @@ variable "http_load_balancing" { variable "network_policy" { type = bool description = "Enable network policy addon" - default = false + default = true } variable "network_policy_provider" { diff --git a/versions.tf b/versions.tf index e4544656fa..38e1d1bf9c 100644 --- a/versions.tf +++ b/versions.tf @@ -18,6 +18,6 @@ terraform { required_version = ">= 0.12" required_providers { - google = "~> 2.18.0" + google = "~> 2.18" } }