From 97223bf560700650e2ad0e1b7b09868d6b8bb6f4 Mon Sep 17 00:00:00 2001 From: Derek Menteer Date: Tue, 29 Nov 2022 16:39:28 +0000 Subject: [PATCH] backport of commit 41b45acdf20090567c1708e3ab431a1e412286f3 --- .changelog/14132.txt | 3 - .changelog/14465.txt | 3 - .changelog/14832.txt | 3 - .changelog/14833.txt | 3 - .changelog/14956.txt | 3 - .changelog/15001.txt | 4 - .changelog/15297.txt | 7 - .changelog/15503.txt | 3 + .changelog/15525.txt | 3 + .github/workflows/backport-assistant.yml | 8 +- .github/workflows/bot-auto-approve.yaml | 13 + .github/workflows/nightly-test-1.14.x.yaml | 230 -------- .golangci.yml | 1 - CHANGELOG.md | 6 +- acl/MockAuthorizer.go | 223 -------- acl/authorizer_test.go | 223 +++++++- acl/errors_test.go | 2 +- agent/acl_test.go | 16 +- agent/agent.go | 23 +- agent/agent_endpoint_test.go | 5 +- agent/agent_test.go | 111 ++-- agent/auto-config/auto_config.go | 4 +- agent/auto-config/auto_config_test.go | 5 +- agent/auto-config/persist.go | 5 +- agent/cache/cache.go | 304 +++++------ agent/cache/cache_test.go | 33 +- agent/cache/entry.go | 6 +- agent/cache/watch.go | 4 +- agent/checks/check.go | 3 +- agent/checks/docker.go | 3 +- agent/checks/grpc_test.go | 6 +- agent/config/builder.go | 3 +- agent/config/builder_test.go | 9 +- agent/config/golden_test.go | 5 +- agent/config/runtime_test.go | 3 +- .../TestRuntimeConfig_Sanitize.golden | 2 - agent/configentry/merge_service_config.go | 3 + agent/connect/ca/provider_vault.go | 62 ++- agent/connect/ca/provider_vault_test.go | 109 +++- agent/connect/ca/testing.go | 8 +- agent/connect/testing_ca_test.go | 15 +- agent/connect_ca_endpoint_test.go | 4 +- agent/consul/acl_endpoint.go | 3 +- agent/consul/acl_endpoint_test.go | 5 +- agent/consul/authmethod/kubeauth/testing.go | 6 +- agent/consul/auto_config_endpoint_test.go | 12 +- agent/consul/auto_encrypt_endpoint.go | 8 - agent/consul/auto_encrypt_endpoint_test.go | 55 -- agent/consul/catalog_endpoint.go | 8 +- agent/consul/health_endpoint.go | 2 +- 
agent/consul/leader_connect_test.go | 3 +- agent/consul/leader_peering.go | 2 +- agent/consul/leader_peering_test.go | 4 +- agent/consul/operator_backend.go | 34 -- agent/consul/operator_backend_test.go | 193 ------- agent/consul/operator_raft_endpoint.go | 1 - agent/consul/rpc_test.go | 9 +- agent/consul/server.go | 55 +- agent/consul/snapshot_endpoint.go | 3 +- agent/dns.go | 22 +- agent/dns_test.go | 8 +- .../dataplane/get_envoy_bootstrap_params.go | 6 + .../services/peerstream/stream_test.go | 46 +- .../services/peerstream/stream_tracker.go | 64 +-- .../peerstream/stream_tracker_test.go | 44 +- agent/grpc-internal/client.go | 4 +- agent/hcp/bootstrap/bootstrap.go | 9 +- agent/hcp/manager_test.go | 8 +- agent/http_register.go | 1 - agent/http_test.go | 9 +- agent/keyring.go | 3 +- agent/keyring_test.go | 6 +- agent/metrics_test.go | 8 +- agent/nodeid.go | 5 +- agent/nodeid_test.go | 6 +- agent/operator_endpoint.go | 39 -- agent/pool/peek_test.go | 6 +- agent/pool/pool.go | 4 +- .../proxycfg-sources/catalog/config_source.go | 11 +- agent/proxycfg/ingress_gateway.go | 3 - agent/proxycfg/testing.go | 4 +- agent/proxycfg/testing_ingress_gateway.go | 41 -- agent/remote_exec.go | 3 +- agent/routine-leak-checker/leak_test.go | 8 +- agent/rpc/operator/service.go | 103 ---- agent/rpc/operator/service_test.go | 104 ---- agent/rpc/peering/service.go | 15 +- agent/rpc/peering/service_test.go | 6 +- agent/service_manager_test.go | 3 +- agent/setup.go | 13 +- agent/testagent.go | 5 +- agent/token/persistence.go | 3 +- agent/token/persistence_test.go | 14 +- agent/ui_endpoint_test.go | 4 +- agent/uiserver/uiserver_test.go | 5 +- agent/watch_handler_test.go | 8 +- agent/xds/clusters_test.go | 6 - agent/xds/endpoints_test.go | 6 - agent/xds/golden_test.go | 9 +- agent/xds/listeners_test.go | 6 - agent/xds/routes_test.go | 6 - ...ess-gateway-nil-config-entry.latest.golden | 5 - ...ess-gateway-nil-config-entry.latest.golden | 5 - ...ess-gateway-nil-config-entry.latest.golden | 5 - 
.../ingress-config-entry-nil.latest.golden | 5 - api/acl.go | 5 +- api/agent_test.go | 3 +- api/api_test.go | 9 +- api/config_entry.go | 2 +- api/go.mod | 32 +- api/go.sum | 6 + api/mock_api_test.go | 3 +- api/operator_license.go | 4 +- api/operator_raft.go | 25 - api/operator_raft_test.go | 18 - api/peering.go | 6 +- .../create/authmethod_create_test.go | 6 +- .../update/authmethod_update_test.go | 8 +- .../acl/policy/create/policy_create_test.go | 6 +- .../acl/policy/update/policy_update_test.go | 6 +- command/acl/role/formatter_test.go | 6 +- command/acl/rules/translate_test.go | 4 +- command/acl/token/formatter_test.go | 6 +- command/agent/agent.go | 6 +- command/config/write/config_write_test.go | 19 - command/connect/ca/set/connect_ca_set.go | 4 +- command/connect/envoy/envoy_test.go | 7 +- command/connect/envoy/exec_test.go | 3 +- command/flags/http.go | 4 +- command/helpers/helpers.go | 3 +- command/kv/imp/kv_import.go | 3 +- command/lock/lock_test.go | 18 +- command/login/login.go | 4 +- command/login/login_test.go | 17 +- command/logout/logout_test.go | 3 + .../state/operator_autopilot_state_test.go | 8 +- .../raft/transferleader/transfer_leader.go | 90 ---- .../transferleader/transfer_leader_test.go | 43 -- command/registry.go | 2 - command/snapshot/inspect/snapshot_inspect.go | 3 +- .../snapshot/inspect/snapshot_inspect_test.go | 6 +- .../snapshot/restore/snapshot_restore_test.go | 5 +- command/snapshot/save/snapshot_save_test.go | 4 +- command/tls/ca/create/tls_ca_create_test.go | 5 +- command/tls/cert/create/tls_cert_create.go | 6 +- .../tls/cert/create/tls_cert_create_test.go | 5 +- command/validate/validate_test.go | 16 +- command/version/formatter_test.go | 6 +- command/watch/watch_test.go | 3 +- connect/certgen/certgen.go | 3 +- connect/service_test.go | 3 +- connect/tls.go | 4 +- go.sum | 3 + .../go-sso/oidcauth/oidcauthtest/testing.go | 4 +- internal/testing/golden/golden.go | 5 +- .../tools/proto-gen-rpc-glue/main_test.go | 4 +- 
lib/file/atomic_test.go | 5 +- logging/logfile_test.go | 7 +- main.go | 4 +- proto-public/go.mod | 5 + proto-public/go.sum | 6 + proto/pboperator/operator.gen.go | 18 - proto/pboperator/operator.pb.binary.go | 28 - proto/pboperator/operator.pb.go | 242 --------- proto/pboperator/operator.proto | 24 - proto/pboperator/operator_grpc.pb.go | 105 ---- proto/pbpeering/peering.go | 12 +- sdk/freeport/ephemeral_linux.go | 4 +- sdk/go.mod | 14 +- sdk/testutil/io.go | 5 +- sdk/testutil/server.go | 5 +- sdk/testutil/server_methods.go | 5 +- snapshot/archive.go | 3 +- snapshot/archive_test.go | 5 +- snapshot/snapshot.go | 9 +- .../envoy/case-centralconf/config_entries.hcl | 19 + .../connect/envoy/case-centralconf/setup.sh | 24 +- .../alpha/config_entries.hcl | 26 + .../alpha/setup.sh | 25 +- .../primary/config_entries.hcl | 31 ++ .../primary/setup.sh | 29 +- .../primary/config_entries.hcl | 23 + .../primary/setup.sh | 21 +- .../primary/config_entries.hcl | 23 + .../primary/setup.sh | 21 +- .../config_entries.hcl | 26 + .../case-cfg-resolver-defaultsubset/setup.sh | 24 +- .../config_entries.hcl | 27 + .../envoy/case-cfg-resolver-features/setup.sh | 25 +- .../config_entries.hcl | 23 + .../setup.sh | 23 +- .../config_entries.hcl | 35 ++ .../setup.sh | 33 +- .../config_entries.hcl | 37 ++ .../case-cfg-resolver-svc-failover/setup.sh | 35 +- .../config_entries.hcl | 19 + .../setup.sh | 18 +- .../config_entries.hcl | 19 + .../setup.sh | 18 +- .../config_entries.hcl | 327 ++++++++++++ .../envoy/case-cfg-router-features/setup.sh | 325 +----------- .../alpha/config_entries.hcl | 26 + .../alpha/setup.sh | 26 +- .../primary/config_entries.hcl | 53 ++ .../primary/setup.sh | 52 +- .../config_entries.hcl | 65 +++ .../envoy/case-cfg-splitter-features/setup.sh | 63 +-- .../alpha/config_entries.hcl | 34 ++ .../alpha/setup.sh | 32 +- .../primary/config_entries.hcl | 88 +++ .../primary/setup.sh | 90 +--- .../alpha/config_entries.hcl | 32 ++ .../alpha/setup.sh | 34 +- 
.../primary/config_entries.hcl | 18 + .../primary/setup.sh | 17 +- .../alpha/config_entries.hcl | 39 ++ .../alpha/setup.sh | 41 +- .../primary/config_entries.hcl | 12 + .../primary/setup.sh | 10 +- .../alpha/config_entries.hcl | 26 + .../case-cross-peers-http/alpha/setup.sh | 26 +- .../primary/config_entries.hcl | 13 + .../case-cross-peers-http/primary/setup.sh | 11 +- .../alpha/config_entries.hcl | 33 ++ .../alpha/setup.sh | 35 +- .../primary/config_entries.hcl | 12 + .../primary/setup.sh | 10 +- .../case-cross-peers/alpha/config_entries.hcl | 26 + .../envoy/case-cross-peers/alpha/setup.sh | 26 +- .../primary/config_entries.hcl | 12 + .../envoy/case-cross-peers/primary/setup.sh | 10 +- .../config_entries.hcl | 24 + .../envoy/case-ingress-gateway-grpc/setup.sh | 24 +- .../config_entries.hcl | 81 +++ .../envoy/case-ingress-gateway-http/setup.sh | 80 +-- .../config_entries.hcl | 45 ++ .../setup.sh | 44 +- .../alpha/config_entries.hcl | 26 + .../alpha/setup.sh | 26 +- .../primary/config_entries.hcl | 47 ++ .../primary/setup.sh | 48 +- .../config_entries.hcl | 60 +++ .../envoy/case-ingress-gateway-sds/setup.sh | 59 +- .../config_entries.hcl | 24 + .../case-ingress-gateway-simple/setup.sh | 24 +- .../config_entries.hcl | 41 ++ .../envoy/case-ingress-gateway-tls/setup.sh | 38 +- .../primary/config_entries.hcl | 61 +++ .../primary/setup.sh | 62 +-- .../connect/envoy/case-l7-intentions/acl.hcl | 3 - .../case-l7-intentions/config_entries.hcl | 97 ++++ .../connect/envoy/case-l7-intentions/setup.sh | 90 +--- .../case-mesh-to-lambda/config_entries.hcl | 12 + .../envoy/case-mesh-to-lambda/setup.sh | 11 +- .../config_entries.hcl | 17 + .../setup.sh | 17 +- .../config_entries.hcl | 12 + .../case-terminating-gateway-simple/setup.sh | 11 +- .../config_entries.hcl | 37 ++ .../case-terminating-gateway-subsets/setup.sh | 35 +- test/integration/connect/envoy/helpers.bash | 7 - test/integration/connect/envoy/main_test.go | 3 +- .../connect/envoy/test-sds-server/sds.go | 5 +- 
test/integration/consul-container/go.sum | 2 + .../consul-container/libs/agent/container.go | 7 +- .../test/metrics/leader_test.go | 4 +- tlsutil/config.go | 3 +- tlsutil/config_test.go | 17 +- .../consul/peer/bento-box/index.hbs | 11 +- .../app/templates/dc/peers/show/exported.hbs | 6 +- .../consul/auth-method/search-bar/index.hbs | 288 +++++----- .../consul/auth-method/type/index.hbs | 11 +- .../consul/external-source/index.hbs | 6 +- .../consul/external-source/index.scss | 23 + .../consul/intention/list/table/index.hbs | 344 ++++++------ .../app/components/consul/logo/index.hbs | 4 + .../service-instance/search-bar/index.hbs | 270 +++++----- .../consul/service/search-bar/index.hbs | 34 +- .../consul/service/search-bar/index.js | 1 - .../consul/sources-select/index.hbs | 25 - .../app/components/custom-element/README.mdx | 87 +++ .../app/components/custom-element/index.hbs | 11 + .../app/components/custom-element/index.js | 189 +++++++ .../app/components/disclosure-card/README.mdx | 125 +++++ .../app/components/disclosure-card/index.hbs | 90 ++++ .../components/distribution-meter/README.mdx | 83 +++ .../distribution-meter/index.css.js | 32 ++ .../components/distribution-meter/index.hbs | 30 ++ .../distribution-meter/meter/element.js | 29 + .../distribution-meter/meter/index.css.js | 80 +++ .../distribution-meter/meter/index.hbs | 64 +++ .../app/components/hashicorp-consul/index.hbs | 336 +++++++----- .../app/components/informed-action/skin.scss | 6 + .../consul-ui/app/components/pill/index.scss | 7 + .../app/components/popover-select/index.scss | 20 + .../app/components/shadow-host/README.mdx | 29 + .../app/components/shadow-host/index.hbs | 5 + .../app/components/shadow-host/index.js | 12 + .../app/components/shadow-template/README.mdx | 162 ++++++ .../app/components/shadow-template/debug.scss | 6 + .../app/components/shadow-template/index.hbs | 11 + .../consul-ui/app/helpers/icon-mapping.js | 28 - .../consul-ui/app/helpers/icons-debug.js | 10 + 
.../consul-ui/app/modifiers/attach-shadow.js | 23 + .../consul-ui/app/modifiers/attach-shadow.mdx | 28 + .../app/styles/base/icons/README.mdx | 106 ++-- .../app/styles/base/icons/debug.scss | 502 ++++++++++++++++++ .../base/icons/icons/aws-color/index.scss | 6 + .../base/icons/icons/aws-color/keyframes.scss | 5 + .../icons/icons/aws-color/placeholders.scss | 10 + .../icons/icons/aws-color/property-16.scss | 3 + .../icons/icons/aws-color/property-24.scss | 3 + .../base/icons/icons/aws-ec2-color/index.scss | 6 + .../icons/icons/aws-ec2-color/keyframes.scss | 5 + .../icons/aws-ec2-color/placeholders.scss | 10 + .../icons/aws-ec2-color/property-16.scss | 3 + .../icons/aws-ec2-color/property-24.scss | 3 + .../base/icons/icons/aws-ec2/index.scss | 6 + .../base/icons/icons/aws-ec2/keyframes.scss | 9 + .../icons/icons/aws-ec2/placeholders.scss | 10 + .../base/icons/icons/aws-ec2/property-16.scss | 3 + .../base/icons/icons/aws-ec2/property-24.scss | 3 + .../styles/base/icons/icons/aws/index.scss | 6 + .../base/icons/icons/aws/keyframes.scss | 9 + .../base/icons/icons/aws/placeholders.scss | 10 + .../base/icons/icons/aws/property-16.scss | 3 + .../base/icons/icons/aws/property-24.scss | 3 + .../base/icons/icons/consul-color/index.scss | 6 + .../icons/icons/consul-color/keyframes.scss | 5 + .../icons/consul-color/placeholders.scss | 10 + .../icons/icons/consul-color/property-16.scss | 3 + .../icons/icons/consul-color/property-24.scss | 3 + .../styles/base/icons/icons/consul/index.scss | 6 + .../base/icons/icons/consul/keyframes.scss | 9 + .../base/icons/icons/consul/placeholders.scss | 10 + .../base/icons/icons/consul/property-16.scss | 3 + .../base/icons/icons/consul/property-24.scss | 3 + .../app/styles/base/icons/icons/index.scss | 14 + .../icons/icons/logo-consul-color/index.scss | 6 + .../icons/logo-consul-color/keyframes.scss | 5 + .../icons/logo-consul-color/placeholders.scss | 10 + .../icons/icons/logo-nomad-color/index.scss | 6 + 
.../icons/logo-nomad-color/keyframes.scss | 5 + .../icons/logo-nomad-color/placeholders.scss | 10 + .../icons/logo-terraform-color/index.scss | 6 + .../icons/logo-terraform-color/keyframes.scss | 5 + .../logo-terraform-color/placeholders.scss | 10 + .../icons/icons/logo-vault-color/index.scss | 6 + .../icons/logo-vault-color/keyframes.scss | 5 + .../icons/logo-vault-color/placeholders.scss | 10 + .../icons/logo-vault-color/property-16.scss | 3 + .../icons/logo-vault-color/property-24.scss | 3 + ui/packages/consul-ui/app/styles/debug.scss | 1 + .../consul-ui/app/styles/tailwind.scss | 6 - ui/packages/consul-ui/docs/hds.mdx | 90 ++-- ui/packages/consul-ui/package.json | 2 +- ui/packages/consul-ui/tailwind.config.js | 39 +- ui/yarn.lock | 15 - version/version.go | 2 +- website/content/api-docs/operator/raft.mdx | 34 -- website/content/commands/operator/raft.mdx | 23 - website/content/commands/snapshot/agent.mdx | 28 +- .../configuration/gatewayclass.mdx | 26 +- .../configuration/gatewayclassconfig.mdx | 4 +- .../api-gateway/configuration/meshservice.mdx | 32 +- website/content/docs/api-gateway/install.mdx | 4 +- .../content/docs/api-gateway/tech-specs.mdx | 6 +- .../content/docs/api-gateway/usage/errors.mdx | 8 +- .../usage/reroute-http-requests.mdx | 12 +- .../usage/route-to-peered-services.mdx | 15 +- .../content/docs/api-gateway/usage/usage.mdx | 2 +- website/content/docs/connect/ca/vault.mdx | 2 - .../docs/connect/cluster-peering/index.mdx | 2 +- .../docs/connect/cluster-peering/k8s.mdx | 2 +- .../config-entries/service-resolver.mdx | 4 +- .../content/docs/connect/dataplane/index.mdx | 17 +- .../content/docs/connect/proxies/envoy.mdx | 14 +- website/content/docs/discovery/dns.mdx | 18 +- .../docs/enterprise/admin-partitions.mdx | 2 +- website/content/docs/k8s/architecture.mdx | 6 +- .../docs/k8s/connect/connect-ca-provider.mdx | 3 - website/content/docs/k8s/connect/health.mdx | 10 +- .../servers-outside-kubernetes.mdx | 12 +- .../single-dc-multi-k8s.mdx | 2 +- 
.../vault/data-integration/connect-ca.mdx | 2 - .../vault/data-integration/index.mdx | 2 +- .../snapshot-agent-config.mdx | 6 +- .../vault/data-integration/webhook-certs.mdx | 2 +- .../vault/wan-federation.mdx | 8 +- website/content/docs/k8s/helm.mdx | 30 +- .../docs/k8s/installation/install-cli.mdx | 2 +- .../content/docs/k8s/installation/install.mdx | 2 +- .../consul-api-gateway/v0_5_x.mdx | 56 -- .../docs/release-notes/consul-k8s/v1_0_x.mdx | 4 +- .../docs/release-notes/consul/v1_14_x.mdx | 4 +- website/data/docs-nav-data.json | 4 - website/public/img/dataplanes-diagram.png | Bin 170540 -> 0 bytes .../img/k8s-dataplanes-architecture.png | Bin 0 -> 191246 bytes 398 files changed, 5840 insertions(+), 5145 deletions(-) delete mode 100644 .changelog/14132.txt delete mode 100644 .changelog/14465.txt delete mode 100644 .changelog/14832.txt delete mode 100644 .changelog/14833.txt delete mode 100644 .changelog/14956.txt delete mode 100644 .changelog/15001.txt delete mode 100644 .changelog/15297.txt create mode 100644 .changelog/15503.txt create mode 100644 .changelog/15525.txt create mode 100644 .github/workflows/bot-auto-approve.yaml delete mode 100644 .github/workflows/nightly-test-1.14.x.yaml delete mode 100644 acl/MockAuthorizer.go delete mode 100644 agent/consul/operator_backend.go delete mode 100644 agent/consul/operator_backend_test.go delete mode 100644 agent/rpc/operator/service.go delete mode 100644 agent/rpc/operator/service_test.go delete mode 100644 agent/xds/testdata/clusters/ingress-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-config-entry-nil.latest.golden delete mode 100644 command/operator/raft/transferleader/transfer_leader.go delete mode 100644 command/operator/raft/transferleader/transfer_leader_test.go delete mode 
100644 proto/pboperator/operator.gen.go delete mode 100644 proto/pboperator/operator.pb.binary.go delete mode 100644 proto/pboperator/operator.pb.go delete mode 100644 proto/pboperator/operator.proto delete mode 100644 proto/pboperator/operator_grpc.pb.go create mode 100644 test/integration/connect/envoy/case-centralconf/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-features/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-router-features/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-splitter-features/config_entries.hcl create mode 100644 
test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-http-router/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-http-router/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-http/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-http/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cross-peers/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-grpc/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-http/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-multiple-services/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-sds/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-gateway-simple/config_entries.hcl create mode 100644 
test/integration/connect/envoy/case-ingress-gateway-tls/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/config_entries.hcl delete mode 100644 test/integration/connect/envoy/case-l7-intentions/acl.hcl create mode 100644 test/integration/connect/envoy/case-l7-intentions/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-terminating-gateway-hostnames/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-terminating-gateway-simple/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-terminating-gateway-subsets/config_entries.hcl create mode 100644 ui/packages/consul-ui/app/components/consul/logo/index.hbs delete mode 100644 ui/packages/consul-ui/app/components/consul/sources-select/index.hbs create mode 100644 ui/packages/consul-ui/app/components/custom-element/README.mdx create mode 100644 ui/packages/consul-ui/app/components/custom-element/index.hbs create mode 100644 ui/packages/consul-ui/app/components/custom-element/index.js create mode 100644 ui/packages/consul-ui/app/components/disclosure-card/README.mdx create mode 100644 ui/packages/consul-ui/app/components/disclosure-card/index.hbs create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/README.mdx create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/index.css.js create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/index.hbs create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/meter/element.js create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/meter/index.css.js create mode 100644 ui/packages/consul-ui/app/components/distribution-meter/meter/index.hbs create mode 100644 ui/packages/consul-ui/app/components/shadow-host/README.mdx create mode 100644 
ui/packages/consul-ui/app/components/shadow-host/index.hbs create mode 100644 ui/packages/consul-ui/app/components/shadow-host/index.js create mode 100644 ui/packages/consul-ui/app/components/shadow-template/README.mdx create mode 100644 ui/packages/consul-ui/app/components/shadow-template/debug.scss create mode 100644 ui/packages/consul-ui/app/components/shadow-template/index.hbs delete mode 100644 ui/packages/consul-ui/app/helpers/icon-mapping.js create mode 100644 ui/packages/consul-ui/app/modifiers/attach-shadow.js create mode 100644 ui/packages/consul-ui/app/modifiers/attach-shadow.mdx create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-color/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-color/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2-color/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2-color/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2/property-16.scss create mode 100644 
ui/packages/consul-ui/app/styles/base/icons/icons/aws-ec2/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/aws/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul-color/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul-color/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/consul/property-24.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-consul-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-consul-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-consul-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-nomad-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-nomad-color/keyframes.scss create mode 100644 
ui/packages/consul-ui/app/styles/base/icons/icons/logo-nomad-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-terraform-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-terraform-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-terraform-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-vault-color/index.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-vault-color/keyframes.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-vault-color/placeholders.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-vault-color/property-16.scss create mode 100644 ui/packages/consul-ui/app/styles/base/icons/icons/logo-vault-color/property-24.scss delete mode 100644 website/content/docs/release-notes/consul-api-gateway/v0_5_x.mdx delete mode 100644 website/public/img/dataplanes-diagram.png create mode 100644 website/public/img/k8s-dataplanes-architecture.png diff --git a/.changelog/14132.txt b/.changelog/14132.txt deleted file mode 100644 index 7037f479d9237..0000000000000 --- a/.changelog/14132.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:enhancement -raft: add an operator api endpoint and a command to initiate raft leadership transfer. -``` diff --git a/.changelog/14465.txt b/.changelog/14465.txt deleted file mode 100644 index 8fbdf14e6a064..0000000000000 --- a/.changelog/14465.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -dns: support RFC 2782 SRV lookups for prepared queries using format `_._tcp.query[.].`. 
-``` diff --git a/.changelog/14832.txt b/.changelog/14832.txt deleted file mode 100644 index 6fef1c257427b..0000000000000 --- a/.changelog/14832.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Give better error when client specifies wrong datacenter when auto-encrypt is enabled. -``` diff --git a/.changelog/14833.txt b/.changelog/14833.txt deleted file mode 100644 index fd8f911e27cd1..0000000000000 --- a/.changelog/14833.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: always use name "global" for proxy-defaults config entries -``` diff --git a/.changelog/14956.txt b/.changelog/14956.txt deleted file mode 100644 index fac4bc12ea006..0000000000000 --- a/.changelog/14956.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cache: refactor agent cache fetching to prevent unnecessary fetches on error -``` diff --git a/.changelog/15001.txt b/.changelog/15001.txt deleted file mode 100644 index 18d658547eb79..0000000000000 --- a/.changelog/15001.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -ingress-gateways: Don't log error when gateway is registered without a config entry -``` - diff --git a/.changelog/15297.txt b/.changelog/15297.txt deleted file mode 100644 index 66fa6450dad45..0000000000000 --- a/.changelog/15297.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:improvement -api: updated the go module directive to 1.18. -``` - -```release-note:improvement -sdk: updated the go module directive to 1.18. 
-``` diff --git a/.changelog/15503.txt b/.changelog/15503.txt new file mode 100644 index 0000000000000..05997fad59795 --- /dev/null +++ b/.changelog/15503.txt @@ -0,0 +1,3 @@ +```release-note:bug +peering: fix the limit of replication gRPC message; set to 8MB +``` diff --git a/.changelog/15525.txt b/.changelog/15525.txt new file mode 100644 index 0000000000000..d920109e64934 --- /dev/null +++ b/.changelog/15525.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: Fixed issue where using Vault as Connect CA with Vault-managed policies would error on start-up if the intermediate PKI mount existed but was empty +``` \ No newline at end of file diff --git a/.github/workflows/backport-assistant.yml b/.github/workflows/backport-assistant.yml index b68e41e612692..7eac100546c84 100644 --- a/.github/workflows/backport-assistant.yml +++ b/.github/workflows/backport-assistant.yml @@ -16,11 +16,11 @@ jobs: backport: if: github.event.pull_request.merged runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.2.5 + container: hashicorpdev/backport-assistant:0.3.0 steps: - name: Run Backport Assistant for stable-website run: | - backport-assistant backport -merge-method=squash -automerge + backport-assistant backport -merge-method=squash -gh-automerge env: BACKPORT_LABEL_REGEXP: "type/docs-(?Pcherrypick)" BACKPORT_TARGET_TEMPLATE: "stable-website" @@ -41,13 +41,13 @@ jobs: # set BACKPORT_TARGET_TEMPLATE for backport-assistant # trims backport/ from the beginning with parameter substitution export BACKPORT_TARGET_TEMPLATE="release/${latest_backport_label#backport/}.x" - backport-assistant backport -merge-method=squash -automerge + backport-assistant backport -merge-method=squash -gh-automerge env: BACKPORT_LABEL_REGEXP: "type/docs-(?Pcherrypick)" GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Run Backport Assistant for release branches run: | - backport-assistant backport -merge-method=squash -automerge + backport-assistant backport -merge-method=squash 
-gh-automerge env: BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+)" BACKPORT_TARGET_TEMPLATE: "release/{{.target}}.x" diff --git a/.github/workflows/bot-auto-approve.yaml b/.github/workflows/bot-auto-approve.yaml new file mode 100644 index 0000000000000..7731e4553ce8f --- /dev/null +++ b/.github/workflows/bot-auto-approve.yaml @@ -0,0 +1,13 @@ +name: Bot Auto Approve + +on: pull_request_target + +jobs: + auto-approve: + runs-on: ubuntu-latest + if: github.actor == 'hc-github-team-consul-core' + steps: + - uses: hmarr/auto-approve-action@v3 + with: + review-message: "Auto approved Consul Bot automated PR" + github-token: ${{ secrets.MERGE_APPROVE_TOKEN }} diff --git a/.github/workflows/nightly-test-1.14.x.yaml b/.github/workflows/nightly-test-1.14.x.yaml deleted file mode 100644 index 745ad7608ee8a..0000000000000 --- a/.github/workflows/nightly-test-1.14.x.yaml +++ /dev/null @@ -1,230 +0,0 @@ -name: Nightly Test 1.14.x -on: - schedule: - - cron: '0 4 * * *' - workflow_dispatch: {} - -env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.14.x" - BRANCH_NAME: "release-1.14.x" # Used for naming artifacts - -jobs: - frontend-test-workspace-node: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Workspace Tests - id: workspace-test - working-directory: ./ui - run: make test-workspace - - - name: Node Tests - id: node-test - working-directory: ./ui/packages/consul-ui - run: make test-node - - frontend-build-oss: - runs-on: ubuntu-latest - env: - JOBS: 2 - CONSUL_NSPACES_ENABLED: 0 - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: 
actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Ember Build OSS - id: build-oss - working-directory: ./ui/packages/consul-ui - run: make build-ci - - - name: Upload OSS Frontend - uses: actions/upload-artifact@v3 - with: - name: frontend-oss-${{ env.BRANCH_NAME }} - path: ./ui/packages/consul-ui/dist - if-no-files-found: error - - frontend-test-oss: - runs-on: ubuntu-latest - needs: [frontend-build-oss] - strategy: - matrix: - partition: [ 1, 2, 3, 4 ] - env: - CONSUL_NSPACES_ENABLED: 0 - EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary - EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Download OSS Frontend - uses: actions/download-artifact@v3 - with: - name: frontend-oss-${{ env.BRANCH_NAME }} - path: ./ui/packages/consul-ui/dist - - - name: Ember Test OSS - id: cache - working-directory: ./ui/packages/consul-ui - run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit - - frontend-build-ent: - runs-on: ubuntu-latest - env: - JOBS: 2 - CONSUL_NSPACES_ENABLED: 1 - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Ember Build ENT - id: build-oss - working-directory: 
./ui/packages/consul-ui - run: make build-ci - - - name: Upload ENT Frontend - uses: actions/upload-artifact@v3 - with: - name: frontend-ent-${{ env.BRANCH_NAME }} - path: ./ui/packages/consul-ui/dist - if-no-files-found: error - - frontend-test-ent: - runs-on: ubuntu-latest - needs: [frontend-build-ent] - strategy: - matrix: - partition: [ 1, 2, 3, 4 ] - env: - CONSUL_NSPACES_ENABLED: 1 - EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary - EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Download ENT Frontend - uses: actions/download-artifact@v3 - with: - name: frontend-ent-${{ env.BRANCH_NAME }} - path: ./ui/packages/consul-ui/dist - - - name: Ember Test ENT - id: cache - working-directory: ./ui/packages/consul-ui - run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit - - frontend-test-coverage-ent: - runs-on: ubuntu-latest - needs: [frontend-build-ent] - steps: - - uses: actions/checkout@v2 - with: - ref: ${{ env.BRANCH }} - - # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: 'yarn' - cache-dependency-path: ./ui/yarn.lock - - - name: Install - id: install - working-directory: ./ui - run: make deps - - - name: Download ENT Frontend - uses: actions/download-artifact@v3 - with: - name: frontend-ent-${{ env.BRANCH_NAME }} - path: ./ui/packages/consul-ui/dist - - - name: Run ENT Code Coverage - working-directory: ./ui/packages/consul-ui - run: make test-coverage-ci - - slack-failure-notification: - runs-on: ubuntu-latest - needs: 
[frontend-test-oss, frontend-test-ent] - if: ${{ failure() }} - steps: - - name: Slack Notification - id: slack - uses: slackapi/slack-github-action@v1.19 - with: - payload: | - { - "message": "One or more nightly UI tests have failed on branch ${{ env.BRANCH }} for Consul. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_UI_SLACK_WEBHOOK }} diff --git a/.golangci.yml b/.golangci.yml index 60cfc50595045..b2ff1231e7ac8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -83,7 +83,6 @@ linters-settings: forbidigo: # Forbid the following identifiers (list of regexp). forbid: - - '\bioutil\b(# Use io and os packages instead of ioutil)?' - '\brequire\.New\b(# Use package-level functions with explicit TestingT)?' - '\bassert\.New\b(# Use package-level functions with explicit TestingT)?' # Exclude godoc examples from forbidigo checks. diff --git a/CHANGELOG.md b/CHANGELOG.md index abef48ee23809..c433ab05bc9d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -188,7 +188,7 @@ BUG FIXES: BREAKING CHANGES: -* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change is resolved in 1.13.3. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. +* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.13 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. 
SECURITY: @@ -234,7 +234,7 @@ BUG FIXES: BREAKING CHANGES: -* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change is resolved in 1.12.6. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. +* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.12 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. SECURITY: @@ -262,7 +262,7 @@ BUG FIXES: BREAKING CHANGES: -* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change is resolved in 1.11.11. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. +* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.11 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information. 
SECURITY: diff --git a/acl/MockAuthorizer.go b/acl/MockAuthorizer.go deleted file mode 100644 index 247cdb1151360..0000000000000 --- a/acl/MockAuthorizer.go +++ /dev/null @@ -1,223 +0,0 @@ -package acl - -import "github.com/stretchr/testify/mock" - -type MockAuthorizer struct { - mock.Mock -} - -var _ Authorizer = (*MockAuthorizer)(nil) - -// ACLRead checks for permission to list all the ACLs -func (m *MockAuthorizer) ACLRead(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// ACLWrite checks for permission to manipulate ACLs -func (m *MockAuthorizer) ACLWrite(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// AgentRead checks for permission to read from agent endpoints for a -// given node. -func (m *MockAuthorizer) AgentRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// AgentWrite checks for permission to make changes via agent endpoints -// for a given node. -func (m *MockAuthorizer) AgentWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// EventRead determines if a specific event can be queried. -func (m *MockAuthorizer) EventRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// EventWrite determines if a specific event may be fired. -func (m *MockAuthorizer) EventWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IntentionDefaultAllow determines the default authorized behavior -// when no intentions match a Connect request. 
-func (m *MockAuthorizer) IntentionDefaultAllow(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IntentionRead determines if a specific intention can be read. -func (m *MockAuthorizer) IntentionRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IntentionWrite determines if a specific intention can be -// created, modified, or deleted. -func (m *MockAuthorizer) IntentionWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyList checks for permission to list keys under a prefix -func (m *MockAuthorizer) KeyList(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyRead checks for permission to read a given key -func (m *MockAuthorizer) KeyRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyWrite checks for permission to write a given key -func (m *MockAuthorizer) KeyWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyWritePrefix checks for permission to write to an -// entire key prefix. This means there must be no sub-policies -// that deny a write. -func (m *MockAuthorizer) KeyWritePrefix(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyringRead determines if the encryption keyring used in -// the gossip layer can be read. 
-func (m *MockAuthorizer) KeyringRead(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// KeyringWrite determines if the keyring can be manipulated -func (m *MockAuthorizer) KeyringWrite(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// NodeRead checks for permission to read (discover) a given node. -func (m *MockAuthorizer) NodeRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -func (m *MockAuthorizer) NodeReadAll(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// NodeWrite checks for permission to create or update (register) a -// given node. -func (m *MockAuthorizer) NodeWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -func (m *MockAuthorizer) MeshRead(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -func (m *MockAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// PeeringRead determines if the read-only Consul peering functions -// can be used. -func (m *MockAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// PeeringWrite determines if the state-changing Consul peering -// functions can be used. -func (m *MockAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// OperatorRead determines if the read-only Consul operator functions -// can be used. 
ret := m.Called(segment, ctx) -func (m *MockAuthorizer) OperatorRead(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// OperatorWrite determines if the state-changing Consul operator -// functions can be used. -func (m *MockAuthorizer) OperatorWrite(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// PreparedQueryRead determines if a specific prepared query can be read -// to show its contents (this is not used for execution). -func (m *MockAuthorizer) PreparedQueryRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// PreparedQueryWrite determines if a specific prepared query can be -// created, modified, or deleted. -func (m *MockAuthorizer) PreparedQueryWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// ServiceRead checks for permission to read a given service -func (m *MockAuthorizer) ServiceRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -func (m *MockAuthorizer) ServiceReadAll(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// ServiceWrite checks for permission to create or update a given -// service -func (m *MockAuthorizer) ServiceWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// ServiceWriteAny checks for service:write on any service -func (m *MockAuthorizer) ServiceWriteAny(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// SessionRead checks for permission to read sessions for a given node. 
-func (m *MockAuthorizer) SessionRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// SessionWrite checks for permission to create sessions for a given -// node. -func (m *MockAuthorizer) SessionWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// Snapshot checks for permission to take and restore snapshots. -func (m *MockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -func (p *MockAuthorizer) ToAllowAuthorizer() AllowAuthorizer { - return AllowAuthorizer{Authorizer: p} -} diff --git a/acl/authorizer_test.go b/acl/authorizer_test.go index 27a7aef4b9b29..03c0517a1609e 100644 --- a/acl/authorizer_test.go +++ b/acl/authorizer_test.go @@ -4,9 +4,230 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) +type mockAuthorizer struct { + mock.Mock +} + +var _ Authorizer = (*mockAuthorizer)(nil) + +// ACLRead checks for permission to list all the ACLs +func (m *mockAuthorizer) ACLRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// ACLWrite checks for permission to manipulate ACLs +func (m *mockAuthorizer) ACLWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// AgentRead checks for permission to read from agent endpoints for a +// given node. +func (m *mockAuthorizer) AgentRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// AgentWrite checks for permission to make changes via agent endpoints +// for a given node. 
+func (m *mockAuthorizer) AgentWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// EventRead determines if a specific event can be queried. +func (m *mockAuthorizer) EventRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// EventWrite determines if a specific event may be fired. +func (m *mockAuthorizer) EventWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IntentionDefaultAllow determines the default authorized behavior +// when no intentions match a Connect request. +func (m *mockAuthorizer) IntentionDefaultAllow(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IntentionRead determines if a specific intention can be read. +func (m *mockAuthorizer) IntentionRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IntentionWrite determines if a specific intention can be +// created, modified, or deleted. 
+func (m *mockAuthorizer) IntentionWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyList checks for permission to list keys under a prefix +func (m *mockAuthorizer) KeyList(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyRead checks for permission to read a given key +func (m *mockAuthorizer) KeyRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyWrite checks for permission to write a given key +func (m *mockAuthorizer) KeyWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyWritePrefix checks for permission to write to an +// entire key prefix. This means there must be no sub-policies +// that deny a write. +func (m *mockAuthorizer) KeyWritePrefix(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyringRead determines if the encryption keyring used in +// the gossip layer can be read. +func (m *mockAuthorizer) KeyringRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// KeyringWrite determines if the keyring can be manipulated +func (m *mockAuthorizer) KeyringWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// NodeRead checks for permission to read (discover) a given node. 
+func (m *mockAuthorizer) NodeRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +func (m *mockAuthorizer) NodeReadAll(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// NodeWrite checks for permission to create or update (register) a +// given node. +func (m *mockAuthorizer) NodeWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +func (m *mockAuthorizer) MeshRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +func (m *mockAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// PeeringRead determines if the read-only Consul peering functions +// can be used. +func (m *mockAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// PeeringWrite determines if the state-changing Consul peering +// functions can be used. +func (m *mockAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// OperatorRead determines if the read-only Consul operator functions +// can be used. ret := m.Called(segment, ctx) +func (m *mockAuthorizer) OperatorRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// OperatorWrite determines if the state-changing Consul operator +// functions can be used. 
+func (m *mockAuthorizer) OperatorWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// PreparedQueryRead determines if a specific prepared query can be read +// to show its contents (this is not used for execution). +func (m *mockAuthorizer) PreparedQueryRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// PreparedQueryWrite determines if a specific prepared query can be +// created, modified, or deleted. +func (m *mockAuthorizer) PreparedQueryWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// ServiceRead checks for permission to read a given service +func (m *mockAuthorizer) ServiceRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +func (m *mockAuthorizer) ServiceReadAll(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// ServiceWrite checks for permission to create or update a given +// service +func (m *mockAuthorizer) ServiceWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// ServiceWriteAny checks for service:write on any service +func (m *mockAuthorizer) ServiceWriteAny(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// SessionRead checks for permission to read sessions for a given node. +func (m *mockAuthorizer) SessionRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// SessionWrite checks for permission to create sessions for a given +// node. 
+func (m *mockAuthorizer) SessionWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// Snapshot checks for permission to take and restore snapshots. +func (m *mockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +func (p *mockAuthorizer) ToAllowAuthorizer() AllowAuthorizer { + return AllowAuthorizer{Authorizer: p} +} + func TestACL_Enforce(t *testing.T) { type testCase struct { method string @@ -443,7 +664,7 @@ func TestACL_Enforce(t *testing.T) { for _, tcase := range cases { t.Run(testName(tcase), func(t *testing.T) { - m := &MockAuthorizer{} + m := &mockAuthorizer{} if tcase.err == "" { var nilCtx *AuthorizerContext diff --git a/acl/errors_test.go b/acl/errors_test.go index 5910c08e9e326..7c651f1ec3ba6 100644 --- a/acl/errors_test.go +++ b/acl/errors_test.go @@ -16,7 +16,7 @@ func TestPermissionDeniedError(t *testing.T) { return t.expected } - auth1 := MockAuthorizer{} + auth1 := mockAuthorizer{} cases := []testCase{ { diff --git a/agent/acl_test.go b/agent/acl_test.go index 48679122c0312..79cc5f7b70be6 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -51,14 +51,6 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe dataDir := testutil.TempDir(t, "acl-agent") logBuffer := testutil.NewLogBuffer(t) - - logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ - Name: name, - Level: testutil.TestLogLevel, - Output: logBuffer, - TimeFormat: "04:05.000", - }) - loader := func(source config.Source) (config.LoadResult, error) { dataDir := fmt.Sprintf(`data_dir = "%s"`, dataDir) opts := config.LoadOpts{ @@ -71,9 +63,15 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe } return result, err } - bd, err := NewBaseDeps(loader, logBuffer, logger) + bd, err := NewBaseDeps(loader, logBuffer) require.NoError(t, err) + 
bd.Logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Name: name, + Level: testutil.TestLogLevel, + Output: logBuffer, + TimeFormat: "04:05.000", + }) bd.MetricsConfig = &lib.MetricsConfig{ Handler: metrics.NewInmemSink(1*time.Second, time.Minute), } diff --git a/agent/agent.go b/agent/agent.go index 7c17502664c1a..a095f1d5a3c05 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -5,8 +5,8 @@ import ( "crypto/tls" "encoding/json" "fmt" - "github.com/hashicorp/consul/proto/pboperator" "io" + "io/ioutil" "net" "net/http" "os" @@ -383,8 +383,6 @@ type Agent struct { rpcClientPeering pbpeering.PeeringServiceClient - rpcClientOperator pboperator.OperatorServiceClient - // routineManager is responsible for managing longer running go routines // run by the Agent routineManager *routine.Manager @@ -470,7 +468,6 @@ func New(bd BaseDeps) (*Agent, error) { } a.rpcClientPeering = pbpeering.NewPeeringServiceClient(conn) - a.rpcClientOperator = pboperator.NewOperatorServiceClient(conn) a.serviceManager = NewServiceManager(&a) @@ -2137,7 +2134,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se out := make(map[structs.ServiceID]*structs.ServiceConfigResponse) configDir := filepath.Join(a.config.DataDir, serviceConfigDir) - files, err := os.ReadDir(configDir) + files, err := ioutil.ReadDir(configDir) if err != nil { if os.IsNotExist(err) { return nil, nil @@ -2159,7 +2156,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se // Read the contents into a buffer file := filepath.Join(configDir, fi.Name()) - buf, err := os.ReadFile(file) + buf, err := ioutil.ReadFile(file) if err != nil { return nil, fmt.Errorf("failed reading service config file %q: %w", file, err) } @@ -3370,7 +3367,7 @@ func (a *Agent) persistCheckState(check *checks.CheckTTL, status, output string) tempFile := file + ".tmp" // persistCheckState is called frequently, so don't use writeFileAtomic to avoid calling fsync here - if err := 
os.WriteFile(tempFile, buf, 0600); err != nil { + if err := ioutil.WriteFile(tempFile, buf, 0600); err != nil { return fmt.Errorf("failed writing temp file %q: %s", tempFile, err) } if err := os.Rename(tempFile, file); err != nil { @@ -3385,12 +3382,12 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error { cid := check.CompoundCheckID() // Try to read the persisted state for this check file := filepath.Join(a.config.DataDir, checkStateDir, cid.StringHashSHA256()) - buf, err := os.ReadFile(file) + buf, err := ioutil.ReadFile(file) if err != nil { if os.IsNotExist(err) { // try the md5 based name. This can be removed once we no longer support upgrades from versions that use MD5 hashing oldFile := filepath.Join(a.config.DataDir, checkStateDir, cid.StringHashMD5()) - buf, err = os.ReadFile(oldFile) + buf, err = ioutil.ReadFile(oldFile) if err != nil { if os.IsNotExist(err) { return nil @@ -3592,7 +3589,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI // Load any persisted services svcDir := filepath.Join(a.config.DataDir, servicesDir) - files, err := os.ReadDir(svcDir) + files, err := ioutil.ReadDir(svcDir) if err != nil { if os.IsNotExist(err) { return nil @@ -3613,7 +3610,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI // Read the contents into a buffer file := filepath.Join(svcDir, fi.Name()) - buf, err := os.ReadFile(file) + buf, err := ioutil.ReadFile(file) if err != nil { return fmt.Errorf("failed reading service file %q: %w", file, err) } @@ -3756,7 +3753,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] // Load any persisted checks checkDir := filepath.Join(a.config.DataDir, checksDir) - files, err := os.ReadDir(checkDir) + files, err := ioutil.ReadDir(checkDir) if err != nil { if os.IsNotExist(err) { return nil @@ -3771,7 +3768,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] // Read the contents 
into a buffer file := filepath.Join(checkDir, fi.Name()) - buf, err := os.ReadFile(file) + buf, err := ioutil.ReadFile(file) if err != nil { return fmt.Errorf("failed reading check file %q: %w", file, err) } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 5801160a5401f..6e52157dfe882 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -1762,7 +1763,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { } dc1 := "dc1" - tmpFileRaw, err := os.CreateTemp("", "rexec") + tmpFileRaw, err := ioutil.TempFile("", "rexec") require.NoError(t, err) tmpFile := tmpFileRaw.Name() defer os.Remove(tmpFile) @@ -1801,7 +1802,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { contentsStr := "" // Wait for watch to be populated for i := 1; i < 7; i++ { - contents, err := os.ReadFile(tmpFile) + contents, err := ioutil.ReadFile(tmpFile) if err != nil { t.Fatalf("should be able to read file, but had: %#v", err) } diff --git a/agent/agent_test.go b/agent/agent_test.go index d32c4981d6d9e..015bb3843896b 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "io/ioutil" "math/rand" "net" "net/http" @@ -2327,7 +2328,7 @@ func testAgent_PersistService(t *testing.T, extraHCL string) { if err != nil { t.Fatalf("err: %s", err) } - content, err := os.ReadFile(file) + content, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) } @@ -2348,7 +2349,7 @@ func testAgent_PersistService(t *testing.T, extraHCL string) { if err != nil { t.Fatalf("err: %s", err) } - content, err = os.ReadFile(file) + content, err = ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) } @@ -2417,7 +2418,7 @@ func testAgent_persistedService_compat(t *testing.T, extraHCL string) { if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil { 
t.Fatalf("err: %s", err) } - if err := os.WriteFile(file, encoded, 0600); err != nil { + if err := ioutil.WriteFile(file, encoded, 0600); err != nil { t.Fatalf("err: %s", err) } @@ -2472,7 +2473,7 @@ func testAgent_persistedService_compat_hash(t *testing.T, extraHCL string) { if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil { t.Fatalf("err: %s", err) } - if err := os.WriteFile(file, encoded, 0600); err != nil { + if err := ioutil.WriteFile(file, encoded, 0600); err != nil { t.Fatalf("err: %s", err) } @@ -2491,7 +2492,7 @@ func testAgent_persistedService_compat_hash(t *testing.T, extraHCL string) { if err := os.MkdirAll(filepath.Dir(configFile), 0700); err != nil { t.Fatalf("err: %s", err) } - if err := os.WriteFile(configFile, encodedConfig, 0600); err != nil { + if err := ioutil.WriteFile(configFile, encodedConfig, 0600); err != nil { t.Fatalf("err: %s", err) } @@ -2672,7 +2673,7 @@ func TestAgent_PersistCheck(t *testing.T) { }) require.NoError(t, err) - content, err := os.ReadFile(file) + content, err := ioutil.ReadFile(file) require.NoError(t, err) require.Equal(t, expected, content) @@ -2687,7 +2688,7 @@ func TestAgent_PersistCheck(t *testing.T) { Source: "local", }) require.NoError(t, err) - content, err = os.ReadFile(file) + content, err = ioutil.ReadFile(file) require.NoError(t, err) require.Equal(t, expected, content) a.Shutdown() @@ -3718,7 +3719,7 @@ func TestAgent_persistCheckState(t *testing.T) { // Check the persisted file exists and has the content file := filepath.Join(a.Config.DataDir, checkStateDir, cid.StringHashSHA256()) - buf, err := os.ReadFile(file) + buf, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) } @@ -4796,19 +4797,19 @@ services { deadlineCh := time.After(10 * time.Second) start := time.Now() +LOOP: for { select { case evt := <-ch: // We may receive several notifications of an error until we get the // first successful reply. 
require.Equal(t, "foo", evt.CorrelationID) - if evt.Err == nil { - require.NoError(t, evt.Err) - require.NotNil(t, evt.Result) - t.Logf("took %s to get first success", time.Since(start)) - return + if evt.Err != nil { + break LOOP } - t.Logf("saw error: %v", evt.Err) + require.NoError(t, evt.Err) + require.NotNil(t, evt.Result) + t.Logf("took %s to get first success", time.Since(start)) case <-deadlineCh: t.Fatal("did not get notified successfully") } @@ -5137,9 +5138,9 @@ func TestAutoConfig_Integration(t *testing.T) { caFile := filepath.Join(cfgDir, "cacert.pem") keyFile := filepath.Join(cfgDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(cacert), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(key), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(cacert), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(key), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5266,7 +5267,7 @@ func TestAutoConfig_Integration(t *testing.T) { require.NotEqual(r, cert1, client.Agent.tlsConfigurator.Cert()) // check that the on disk certs match expectations - data, err := os.ReadFile(filepath.Join(client.DataDir, "auto-config.json")) + data, err := ioutil.ReadFile(filepath.Join(client.DataDir, "auto-config.json")) require.NoError(r, err) rdr := strings.NewReader(string(data)) @@ -5301,9 +5302,9 @@ func TestAgent_AutoEncrypt(t *testing.T) { caFile := filepath.Join(cfgDir, "cacert.pem") keyFile := filepath.Join(cfgDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(cacert), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(key), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(cacert), 0600)) + require.NoError(t, 
ioutil.WriteFile(keyFile, []byte(key), 0600)) hclConfig := TestACLConfigWithParams(nil) + ` verify_incoming = true @@ -5497,9 +5498,9 @@ func TestAgent_AutoReloadDoReload_WhenCertAndKeyUpdated(t *testing.T) { caFile := filepath.Join(certsDir, "cacert.pem") keyFile := filepath.Join(certsDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5539,8 +5540,8 @@ func TestAgent_AutoReloadDoReload_WhenCertAndKeyUpdated(t *testing.T) { ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, }) require.NoError(t, err) - require.NoError(t, os.WriteFile(certFile, []byte(cert2), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey2), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert2), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey2), 0600)) retry.Run(t, func(r *retry.R) { aeCert2 := srv.tlsConfigurator.Cert() @@ -5578,9 +5579,9 @@ func TestAgent_AutoReloadDoNotReload_WhenCaUpdated(t *testing.T) { caFile := filepath.Join(certsDir, "cacert.pem") keyFile := filepath.Join(certsDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5613,7 +5614,7 @@ func 
TestAgent_AutoReloadDoNotReload_WhenCaUpdated(t *testing.T) { ca2, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) require.NoError(t, err) - require.NoError(t, os.WriteFile(caFile, []byte(ca2), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca2), 0600)) // wait a bit to see if it get updated. time.Sleep(time.Second) @@ -5652,9 +5653,9 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { caFile := filepath.Join(certsDir, "cacert.pem") keyFile := filepath.Join(certsDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5666,7 +5667,7 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { hclConfig := TestACLConfigWithParams(nil) configFile := testutil.TempDir(t, "config") + "/config.hcl" - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5698,8 +5699,8 @@ func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { }) require.NoError(t, err) certFileNew := filepath.Join(certsDir, "cert_new.pem") - require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600)) - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5722,7 +5723,7 @@ func 
TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) { require.Equal(r, cert1Key, cert.PrivateKey) }) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) // cert should change as we did not update the associated key time.Sleep(1 * time.Second) @@ -5761,9 +5762,9 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { caFile := filepath.Join(certsDir, "cacert.pem") keyFile := filepath.Join(certsDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5775,7 +5776,7 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { hclConfig := TestACLConfigWithParams(nil) configFile := testutil.TempDir(t, "config") + "/config.hcl" - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5808,7 +5809,7 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { }) require.NoError(t, err) certFileNew := filepath.Join(certsDir, "cert_new.pem") - require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) // cert should not change as we did not update the associated key time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { @@ -5818,8 +5819,8 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { require.Equal(r, cert1Key, 
cert.PrivateKey) }) - require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600)) - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5853,7 +5854,7 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, }) require.NoError(t, err) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew2), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew2), 0600)) // cert should not change as we did not update the associated cert time.Sleep(1 * time.Second) retry.Run(t, func(r *retry.R) { @@ -5863,7 +5864,7 @@ func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) { require.Equal(r, cert2Key, cert.PrivateKey) }) - require.NoError(t, os.WriteFile(certFileNew, []byte(certNew2), 0600)) + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew2), 0600)) // cert should change as we did update the associated key time.Sleep(1 * time.Second) @@ -5901,9 +5902,9 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) { caFile := filepath.Join(certsDir, "cacert.pem") keyFile := filepath.Join(certsDir, "key.pem") - require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600)) - require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600)) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600)) + require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600)) // generate a gossip key gossipKey := make([]byte, 32) @@ -5915,7 +5916,7 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) { hclConfig := TestACLConfigWithParams(nil) configFile := 
testutil.TempDir(t, "config") + "/config.hcl" - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5951,8 +5952,8 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) { }) require.NoError(t, err) certFileNew := filepath.Join(certsDir, "cert_new.pem") - require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600)) - require.NoError(t, os.WriteFile(configFile, []byte(` + require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(` encrypt = "`+gossipKeyEncoded+`" encrypt_verify_incoming = true encrypt_verify_outgoing = true @@ -5975,7 +5976,7 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) { require.Equal(r, cert1Key, cert.PrivateKey) }) - require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600)) + require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600)) // cert should change as we did not update the associated key time.Sleep(coalesceInterval * 2) @@ -6180,7 +6181,7 @@ cloud { func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool { pool := x509.NewCertPool() - data, err := os.ReadFile("../test/ca/root.cer") + data, err := ioutil.ReadFile("../test/ca/root.cer") require.NoError(t, err) if !pool.AppendCertsFromPEM(data) { t.Fatal("could not add test ca ../test/ca/root.cer to pool") @@ -6196,7 +6197,7 @@ func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { for _, entry := range entries { filename := path.Join("../test/ca_path", entry.Name()) - data, err := os.ReadFile(filename) + data, err := ioutil.ReadFile(filename) require.NoError(t, err) if !pool.AppendCertsFromPEM(data) { diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index df4fe3d29df63..9abbd4bb7f7c2 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -3,7 +3,7 @@ 
package autoconf import ( "context" "fmt" - "os" + "io/ioutil" "sync" "time" @@ -208,7 +208,7 @@ func (ac *AutoConfig) introToken() (string, error) { token := conf.IntroToken if token == "" { // load the intro token from the file - content, err := os.ReadFile(conf.IntroTokenFile) + content, err := ioutil.ReadFile(conf.IntroTokenFile) if err != nil { return "", fmt.Errorf("Failed to read intro token from file: %w", err) } diff --git a/agent/auto-config/auto_config_test.go b/agent/auto-config/auto_config_test.go index ea23fa049aba4..c810ed926060e 100644 --- a/agent/auto-config/auto_config_test.go +++ b/agent/auto-config/auto_config_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/x509" "fmt" + "io/ioutil" "net" "os" "path/filepath" @@ -308,7 +309,7 @@ func TestInitialConfiguration_restored(t *testing.T) { } data, err := pbMarshaler.MarshalToString(response) require.NoError(t, err) - require.NoError(t, os.WriteFile(persistedFile, []byte(data), 0600)) + require.NoError(t, ioutil.WriteFile(persistedFile, []byte(data), 0600)) // recording the initial configuration even when restoring is going to update // the agent token in the token store @@ -1138,7 +1139,7 @@ func TestIntroToken(t *testing.T) { tokenFromFile := "8ae34d3a-8adf-446a-b236-69874597cb5b" tokenFromConfig := "3ad9b572-ea42-4e47-9cd0-53a398a98abf" - require.NoError(t, os.WriteFile(tokenFile.Name(), []byte(tokenFromFile), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile.Name(), []byte(tokenFromFile), 0600)) type testCase struct { config *config.RuntimeConfig diff --git a/agent/auto-config/persist.go b/agent/auto-config/persist.go index cbb0d21516e8d..9f94f445c786d 100644 --- a/agent/auto-config/persist.go +++ b/agent/auto-config/persist.go @@ -2,6 +2,7 @@ package autoconf import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -38,7 +39,7 @@ func (ac *AutoConfig) readPersistedAutoConfig() (*pbautoconf.AutoConfigResponse, path := filepath.Join(ac.config.DataDir, autoConfigFileName) 
ac.logger.Debug("attempting to restore any persisted configuration", "path", path) - content, err := os.ReadFile(path) + content, err := ioutil.ReadFile(path) if err == nil { rdr := strings.NewReader(string(content)) @@ -74,7 +75,7 @@ func (ac *AutoConfig) persistAutoConfig(resp *pbautoconf.AutoConfigResponse) err path := filepath.Join(ac.config.DataDir, autoConfigFileName) - err = os.WriteFile(path, []byte(serialized), 0660) + err = ioutil.WriteFile(path, []byte(serialized), 0660) if err != nil { return fmt.Errorf("failed to write auto-config configurations: %w", err) } diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 55b1654af26ea..ea537cc9e6f7f 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -84,8 +84,8 @@ var Counters = []prometheus.CounterDefinition{ // Constants related to refresh backoff. We probably don't ever need to // make these configurable knobs since they primarily exist to lower load. const ( - DefaultCacheRefreshBackoffMin = 3 // 3 attempts before backing off - DefaultCacheRefreshMaxWait = 1 * time.Minute // maximum backoff wait time + CacheRefreshBackoffMin = 3 // 3 attempts before backing off + CacheRefreshMaxWait = 1 * time.Minute // maximum backoff wait time // The following constants are default values for the cache entry // rate limiter settings. @@ -138,7 +138,10 @@ type Cache struct { entriesLock sync.RWMutex entries map[string]cacheEntry entriesExpiryHeap *ttlcache.ExpiryHeap - lastGoroutineID uint64 + + fetchLock sync.Mutex + lastFetchID uint64 + fetchHandles map[string]fetchHandle // stopped is used as an atomic flag to signal that the Cache has been // discarded so background fetches and expiry processing should stop. @@ -151,6 +154,11 @@ type Cache struct { rateLimitCancel context.CancelFunc } +type fetchHandle struct { + id uint64 + stopCh chan struct{} +} + // typeEntry is a single type that is registered with a Cache. 
type typeEntry struct { // Name that was used to register the Type @@ -196,13 +204,6 @@ type Options struct { EntryFetchMaxBurst int // EntryFetchRate represents the max calls/sec for a single cache entry EntryFetchRate rate.Limit - - // CacheRefreshBackoffMin is the number of attempts to wait before backing off. - // Mostly configurable just for testing. - CacheRefreshBackoffMin uint - // CacheRefreshMaxWait is the maximum backoff wait time. - // Mostly configurable just for testing. - CacheRefreshMaxWait time.Duration } // Equal return true if both options are equivalent @@ -218,12 +219,6 @@ func applyDefaultValuesOnOptions(options Options) Options { if options.EntryFetchMaxBurst == 0 { options.EntryFetchMaxBurst = DefaultEntryFetchMaxBurst } - if options.CacheRefreshBackoffMin == 0 { - options.CacheRefreshBackoffMin = DefaultCacheRefreshBackoffMin - } - if options.CacheRefreshMaxWait == 0 { - options.CacheRefreshMaxWait = DefaultCacheRefreshMaxWait - } if options.Logger == nil { options.Logger = hclog.New(nil) } @@ -239,6 +234,7 @@ func New(options Options) *Cache { types: make(map[string]typeEntry), entries: make(map[string]cacheEntry), entriesExpiryHeap: ttlcache.NewExpiryHeap(), + fetchHandles: make(map[string]fetchHandle), stopCh: make(chan struct{}), options: options, rateLimitContext: ctx, @@ -408,23 +404,11 @@ func (c *Cache) getEntryLocked( // Check if re-validate is requested. If so the first time round the // loop is not a hit but subsequent ones should be treated normally. if !tEntry.Opts.Refresh && info.MustRevalidate { - // It is important to note that this block ONLY applies when we are not - // in indefinite refresh mode (where the underlying goroutine will - // continue to re-query for data). - // - // In this mode goroutines have a 1:1 relationship to RPCs that get - // executed, and importantly they DO NOT SLEEP after executing. 
- // - // This means that a running goroutine for this cache entry extremely - // strongly implies that the RPC has not yet completed, which is why - // this check works for the revalidation-avoidance optimization here. - if entry.GoroutineID != 0 { - // There is an active goroutine performing a blocking query for - // this data, which has not returned. - // - // We can logically deduce that the contents of the cache are - // actually current, and we can simply return this while leaving - // the blocking query alone. + if entry.Fetching { + // There is an active blocking query for this data, which has not + // returned. We can logically deduce that the contents of the cache + // are actually current, and we can simply return this while + // leaving the blocking query alone. return true, true, entry } return true, false, entry @@ -554,7 +538,7 @@ RETRY_GET: // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. - waiterCh := c.fetch(key, r) + waiterCh := c.fetch(key, r, true, 0, false) // No longer our first time through first = false @@ -581,36 +565,46 @@ func makeEntryKey(t, dc, peerName, token, key string) string { return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key) } -// fetch triggers a new background fetch for the given Request. If a background -// fetch is already running or a goroutine to manage that still exists for a -// matching Request, the waiter channel for that request is returned. The -// effect of this is that there is only ever one blocking query and goroutine -// for any matching requests. -func (c *Cache) fetch(key string, r getOptions) <-chan struct{} { +// fetch triggers a new background fetch for the given Request. If a +// background fetch is already running for a matching Request, the waiter +// channel for that request is returned. The effect of this is that there +// is only ever one blocking query for any matching requests. 
+// +// If allowNew is true then the fetch should create the cache entry +// if it doesn't exist. If this is false, then fetch will do nothing +// if the entry doesn't exist. This latter case is to support refreshing. +func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ignoreExisting bool) <-chan struct{} { + // We acquire a write lock because we may have to set Fetching to true. c.entriesLock.Lock() defer c.entriesLock.Unlock() - ok, entryValid, entry := c.getEntryLocked(r.TypeEntry, key, r.Info) - switch { - case ok && entryValid: - // This handles the case where a fetch succeeded after checking for its - // existence in getWithIndex. This ensures that we don't miss updates. + // This handles the case where a fetch succeeded after checking for its existence in + // getWithIndex. This ensures that we don't miss updates. + if ok && entryValid && !ignoreExisting { ch := make(chan struct{}) close(ch) return ch + } - case ok && entry.GoroutineID != 0: - // If we already have an entry and there's a goroutine to keep it - // refreshed then don't spawn another one to do the same work. - // - // Return the currently active waiter. + // If we aren't allowing new values and we don't have an existing value, + // return immediately. We return an immediately-closed channel so nothing + // blocks. + if !ok && !allowNew { + ch := make(chan struct{}) + close(ch) + return ch + } + + // If we already have an entry and it is actively fetching, then return + // the currently active waiter. + if ok && entry.Fetching { return entry.Waiter + } - case !ok: - // If we don't have an entry, then create it. The entry must be marked - // as invalid so that it isn't returned as a valid value for a zero - // index. + // If we don't have an entry, then create it. The entry must be marked + // as invalid so that it isn't returned as a valid value for a zero index. 
+ if !ok { entry = cacheEntry{ Valid: false, Waiter: make(chan struct{}), @@ -621,100 +615,27 @@ func (c *Cache) fetch(key string, r getOptions) <-chan struct{} { } } - // Assign each background fetching goroutine a unique ID and fingerprint - // the cache entry with the same ID. This way if the cache entry is ever - // cleaned up due to expiry and later recreated the old goroutine can - // detect that and terminate rather than leak and do double work. - c.lastGoroutineID++ - entry.GoroutineID = c.lastGoroutineID + // Set that we're fetching to true, which makes it so that future + // identical calls to fetch will return the same waiter rather than + // perform multiple fetches. + entry.Fetching = true c.entries[key] = entry metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries))) metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries))) - // The actual Fetch must be performed in a goroutine. - go c.launchBackgroundFetcher(entry.GoroutineID, key, r) - - return entry.Waiter -} - -func (c *Cache) launchBackgroundFetcher(goroutineID uint64, key string, r getOptions) { - defer func() { - c.entriesLock.Lock() - defer c.entriesLock.Unlock() - entry, ok := c.entries[key] - if ok && entry.GoroutineID == goroutineID { - entry.GoroutineID = 0 - c.entries[key] = entry - } - }() - - var attempt uint - for { - shouldStop, shouldBackoff := c.runBackgroundFetcherOnce(goroutineID, key, r) - if shouldStop { - return - } - - if shouldBackoff { - attempt++ - } else { - attempt = 0 - } - // If we're over the attempt minimum, start an exponential backoff. - wait := backOffWait(c.options, attempt) - - // If we have a timer, wait for it - wait += r.TypeEntry.Opts.RefreshTimer - - select { - case <-time.After(wait): - case <-c.stopCh: - return // Check if cache was stopped - } - - // Trigger. - r.Info.MustRevalidate = false - r.Info.MinIndex = 0 - - // We acquire a write lock because we may have to set Fetching to true. 
- c.entriesLock.Lock() - - entry, ok := c.entries[key] - if !ok || entry.GoroutineID != goroutineID { - // If we don't have an existing entry, return immediately. - // - // Also if we already have an entry and it is actively fetching, then - // return immediately. - // - // If we've somehow lost control of the entry, also return. - c.entriesLock.Unlock() - return - } - - c.entries[key] = entry - metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries))) - metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries))) - c.entriesLock.Unlock() - } -} - -func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOptions) (shouldStop, shouldBackoff bool) { - // Freshly re-read this, rather than relying upon the caller to fetch it - // and pass it in. - c.entriesLock.RLock() - entry, ok := c.entries[key] - c.entriesLock.RUnlock() + tEntry := r.TypeEntry - if !ok || entry.GoroutineID != goroutineID { - // If we don't have an existing entry, return immediately. - // - // Also if something weird has happened to orphan this goroutine, also - // return immediately. - return true, false - } + // The actual Fetch must be performed in a goroutine. Ensure that we only + // have one in-flight at a time, but don't use a deferred + // context.WithCancel style termination so that these things outlive their + // requester. + // + // By the time we get here the system WANTS to make a replacement fetcher, so + // we terminate the prior one and replace it. + handle := c.getOrReplaceFetchHandle(key) + go func(handle fetchHandle) { + defer c.deleteFetchHandle(key, handle.id) - tEntry := r.TypeEntry - { // NOTE: this indentation is here to facilitate the PR review diff only // If we have background refresh and currently are in "disconnected" state, // waiting for a response might mean we mark our results as stale for up to // 10 minutes (max blocking timeout) after connection is restored. 
To reduce @@ -728,7 +649,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp c.entriesLock.Lock() defer c.entriesLock.Unlock() entry, ok := c.entries[key] - if !ok || entry.RefreshLostContact.IsZero() || entry.GoroutineID != goroutineID { + if !ok || entry.RefreshLostContact.IsZero() { return } entry.RefreshLostContact = time.Time{} @@ -752,15 +673,12 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp Index: entry.Index, } } - if err := entry.FetchRateLimiter.Wait(c.rateLimitContext); err != nil { if connectedTimer != nil { connectedTimer.Stop() } entry.Error = fmt.Errorf("rateLimitContext canceled: %s", err.Error()) - // NOTE: this can only happen when the entire cache is being - // shutdown and isn't something that can happen normally. - return true, false + return } // Start building the new entry by blocking on the fetch. result, err := r.Fetch(fOpts) @@ -768,8 +686,17 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp connectedTimer.Stop() } + // If we were stopped while waiting on a blocking query now would be a + // good time to detect that. + select { + case <-handle.stopCh: + return + default: + } + // Copy the existing entry to start. newEntry := entry + newEntry.Fetching = false // Importantly, always reset the Error. Having both Error and a Value that // are non-nil is allowed in the cache entry but it indicates that the Error @@ -825,7 +752,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp if result.Index > 0 { // Reset the attempts counter so we don't have any backoff - shouldBackoff = false + attempt = 0 } else { // Result having a zero index is an implicit error case. 
There was no // actual error but it implies the RPC found in index (nothing written @@ -840,7 +767,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp // state it can be considered a bug in the RPC implementation (to ever // return a zero index) however since it can happen this is a safety net // for the future. - shouldBackoff = true + attempt++ } // If we have refresh active, this successful response means cache is now @@ -860,7 +787,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp metrics.IncrCounterWithLabels([]string{"cache", tEntry.Name, "fetch_error"}, 1, labels) // Increment attempt counter - shouldBackoff = true + attempt++ // If we are refreshing and just failed, updated the lost contact time as // our cache will be stale until we get successfully reconnected. We only @@ -877,7 +804,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp // Set our entry c.entriesLock.Lock() - if currEntry, ok := c.entries[key]; !ok || currEntry.GoroutineID != goroutineID { + if _, ok := c.entries[key]; !ok { // This entry was evicted during our fetch. DON'T re-insert it or fall // through to the refresh loop below otherwise it will live forever! In // theory there should not be any Get calls waiting on entry.Waiter since @@ -890,7 +817,7 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp // Trigger any waiters that are around. close(entry.Waiter) - return true, false + return } // If this is a new entry (not in the heap yet), then setup the @@ -915,22 +842,79 @@ func (c *Cache) runBackgroundFetcherOnce(goroutineID uint64, key string, r getOp // request back up again shortly but in the general case this prevents // spamming the logs with tons of ACL not found errors for days. 
if tEntry.Opts.Refresh && !preventRefresh { - return false, shouldBackoff + // Check if cache was stopped + if atomic.LoadUint32(&c.stopped) == 1 { + return + } + + // If we're over the attempt minimum, start an exponential backoff. + wait := backOffWait(attempt) + + // If we have a timer, wait for it + wait += tEntry.Opts.RefreshTimer + + select { + case <-time.After(wait): + case <-handle.stopCh: + return + } + + // Trigger. The "allowNew" field is false because in the time we were + // waiting to refresh we may have expired and got evicted. If that + // happened, we don't want to create a new entry. + r.Info.MustRevalidate = false + r.Info.MinIndex = 0 + c.fetch(key, r, false, attempt, true) } + }(handle) + + return entry.Waiter +} + +func (c *Cache) getOrReplaceFetchHandle(key string) fetchHandle { + c.fetchLock.Lock() + defer c.fetchLock.Unlock() + + if prevHandle, ok := c.fetchHandles[key]; ok { + close(prevHandle.stopCh) } - return true, false + c.lastFetchID++ + + handle := fetchHandle{ + id: c.lastFetchID, + stopCh: make(chan struct{}), + } + + c.fetchHandles[key] = handle + + return handle +} + +func (c *Cache) deleteFetchHandle(key string, fetchID uint64) { + c.fetchLock.Lock() + defer c.fetchLock.Unlock() + + // Only remove a fetchHandle if it's YOUR fetchHandle. 
+ handle, ok := c.fetchHandles[key] + if !ok { + return + } + + if handle.id == fetchID { + delete(c.fetchHandles, key) + } } -func backOffWait(opts Options, failures uint) time.Duration { - if failures > opts.CacheRefreshBackoffMin { - shift := failures - opts.CacheRefreshBackoffMin - waitTime := opts.CacheRefreshMaxWait +func backOffWait(failures uint) time.Duration { + if failures > CacheRefreshBackoffMin { + shift := failures - CacheRefreshBackoffMin + waitTime := CacheRefreshMaxWait if shift < 31 { waitTime = (1 << shift) * time.Second } - if waitTime > opts.CacheRefreshMaxWait { - waitTime = opts.CacheRefreshMaxWait + if waitTime > CacheRefreshMaxWait { + waitTime = CacheRefreshMaxWait } return waitTime + lib.RandomStagger(waitTime) } diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 98b04ee9a488d..6f8805be06d88 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib/ttlcache" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" ) // Test a basic Get with no indexes (and therefore no blocking queries). 
@@ -1750,12 +1751,22 @@ func TestCache_RefreshLifeCycle(t *testing.T) { require.NoError(t, err) require.Equal(t, true, result) + waitUntilFetching := func(expectValue bool) { + retry.Run(t, func(t *retry.R) { + c.entriesLock.Lock() + defer c.entriesLock.Unlock() + entry, ok := c.entries[key] + require.True(t, ok) + if expectValue { + require.True(t, entry.Fetching) + } else { + require.False(t, entry.Fetching) + } + }) + } + // ensure that the entry is fetching again - c.entriesLock.Lock() - entry, ok := c.entries[key] - require.True(t, ok) - require.True(t, entry.GoroutineID > 0) - c.entriesLock.Unlock() + waitUntilFetching(true) requestChan := make(chan error) @@ -1789,11 +1800,7 @@ func TestCache_RefreshLifeCycle(t *testing.T) { } // ensure that the entry is fetching again - c.entriesLock.Lock() - entry, ok = c.entries[key] - require.True(t, ok) - require.True(t, entry.GoroutineID > 0) - c.entriesLock.Unlock() + waitUntilFetching(true) // background a call that will wait for a newer version - will result in an acl not found error go getError(5) @@ -1814,11 +1821,7 @@ func TestCache_RefreshLifeCycle(t *testing.T) { // ensure that the ACL not found error killed off the background refresh // but didn't remove it from the cache - c.entriesLock.Lock() - entry, ok = c.entries[key] - require.True(t, ok) - require.False(t, entry.GoroutineID > 0) - c.entriesLock.Unlock() + waitUntilFetching(false) } type fakeType struct { diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 7130381dea457..0c71e94437134 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -26,9 +26,9 @@ type cacheEntry struct { Index uint64 // Metadata that is used for internal accounting - Valid bool // True if the Value is set - GoroutineID uint64 // Nonzero if a fetch goroutine is running. 
- Waiter chan struct{} // Closed when this entry is invalidated + Valid bool // True if the Value is set + Fetching bool // True if a fetch is already active + Waiter chan struct{} // Closed when this entry is invalidated // Expiry contains information about the expiration of this // entry. This is a pointer as its shared as a value in the diff --git a/agent/cache/watch.go b/agent/cache/watch.go index a981c01e4df9c..f99f85c04ba4c 100644 --- a/agent/cache/watch.go +++ b/agent/cache/watch.go @@ -136,7 +136,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlati failures = 0 } else { failures++ - wait = backOffWait(c.options, failures) + wait = backOffWait(failures) c.options.Logger. With("error", err). @@ -223,7 +223,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlatio // as this would eliminate the single-flighting of these requests in the cache and // the efficiencies gained by it. if failures > 0 { - wait = backOffWait(c.options, failures) + wait = backOffWait(failures) } else { // Calculate when the cached data's Age will get too stale and // need to be re-queried. 
When the data's Age already exceeds the diff --git a/agent/checks/check.go b/agent/checks/check.go index b1bdad66a1a1c..e0c3df2e0aa52 100644 --- a/agent/checks/check.go +++ b/agent/checks/check.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net" "net/http" "os" @@ -859,7 +860,7 @@ func (c *CheckDocker) Start() { } if c.Logger == nil { - c.Logger = hclog.New(&hclog.LoggerOptions{Output: io.Discard}) + c.Logger = hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}) } if c.Shell == "" { diff --git a/agent/checks/docker.go b/agent/checks/docker.go index 17974b96822d2..78acf6b616a79 100644 --- a/agent/checks/docker.go +++ b/agent/checks/docker.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -105,7 +106,7 @@ func (c *DockerClient) call(method, uri string, v interface{}) (*circbuf.Buffer, if err := json.NewEncoder(&b).Encode(v); err != nil { return nil, 0, err } - req.Body = io.NopCloser(&b) + req.Body = ioutil.NopCloser(&b) req.Header.Set("Content-Type", "application/json") } diff --git a/agent/checks/grpc_test.go b/agent/checks/grpc_test.go index 6db78bfa5a0bd..63f5405a64424 100644 --- a/agent/checks/grpc_test.go +++ b/agent/checks/grpc_test.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "flag" "fmt" - "io" + "io/ioutil" "log" "net" "os" @@ -110,7 +110,7 @@ func TestGRPC_Proxied(t *testing.T) { notif := mock.NewNotify() logger := hclog.New(&hclog.LoggerOptions{ Name: uniqueID(), - Output: io.Discard, + Output: ioutil.Discard, }) statusHandler := NewStatusHandler(notif, logger, 0, 0, 0) @@ -144,7 +144,7 @@ func TestGRPC_NotProxied(t *testing.T) { notif := mock.NewNotify() logger := hclog.New(&hclog.LoggerOptions{ Name: uniqueID(), - Output: io.Discard, + Output: ioutil.Discard, }) statusHandler := NewStatusHandler(notif, logger, 0, 0, 0) diff --git a/agent/config/builder.go b/agent/config/builder.go index f77054db78ed9..c8d2d1f0c6a22 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ 
-5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io/ioutil" "net" "net/url" "os" @@ -276,7 +277,7 @@ func (b *builder) sourcesFromPath(path string, format string) ([]Source, error) // newSourceFromFile creates a Source from the contents of the file at path. func newSourceFromFile(path string, format string) (Source, error) { - data, err := os.ReadFile(path) + data, err := ioutil.ReadFile(path) if err != nil { return nil, fmt.Errorf("config: failed to read %s: %s", path, err) } diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index 9ee6a7af2bf90..c3afbd1fd0048 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "io/ioutil" "net" "os" "path/filepath" @@ -103,7 +104,7 @@ func TestNewBuilder_PopulatesSourcesFromConfigFiles_WithConfigFormat(t *testing. // TODO: this would be much nicer with gotest.tools/fs func setupConfigFiles(t *testing.T) []string { t.Helper() - path, err := os.MkdirTemp("", t.Name()) + path, err := ioutil.TempDir("", t.Name()) require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(path) }) @@ -112,13 +113,13 @@ func setupConfigFiles(t *testing.T) []string { require.NoError(t, err) for _, dir := range []string{path, subpath} { - err = os.WriteFile(filepath.Join(dir, "a.hcl"), []byte("content a"), 0644) + err = ioutil.WriteFile(filepath.Join(dir, "a.hcl"), []byte("content a"), 0644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "b.json"), []byte("content b"), 0644) + err = ioutil.WriteFile(filepath.Join(dir, "b.json"), []byte("content b"), 0644) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "c.yaml"), []byte("content c"), 0644) + err = ioutil.WriteFile(filepath.Join(dir, "c.yaml"), []byte("content c"), 0644) require.NoError(t, err) } return []string{ diff --git a/agent/config/golden_test.go b/agent/config/golden_test.go index da49f3fce4dd1..6ba2cc15ec9c4 100644 --- a/agent/config/golden_test.go +++ 
b/agent/config/golden_test.go @@ -2,6 +2,7 @@ package config import ( "flag" + "io/ioutil" "os" "path/filepath" "testing" @@ -25,11 +26,11 @@ func golden(t *testing.T, actual, filename string) string { if dir := filepath.Dir(path); dir != "." { require.NoError(t, os.MkdirAll(dir, 0755)) } - err := os.WriteFile(path, []byte(actual), 0644) + err := ioutil.WriteFile(path, []byte(actual), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(path) + expected, err := ioutil.ReadFile(path) require.NoError(t, err) return string(expected) } diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 92fc452045081..bf40e774fa8be 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -7,6 +7,7 @@ import ( "errors" "flag" "fmt" + "io/ioutil" "net" "net/netip" "os" @@ -7189,7 +7190,7 @@ func writeFile(path string, data []byte) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { panic(err) } - if err := os.WriteFile(path, data, 0640); err != nil { + if err := ioutil.WriteFile(path, data, 0640); err != nil { panic(err) } } diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 23aff3a881b0c..a4c35e4b97e78 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -78,8 +78,6 @@ "BootstrapExpect": 0, "BuildDate": "2019-11-20 05:00:00 +0000 UTC", "Cache": { - "CacheRefreshBackoffMin": 0, - "CacheRefreshMaxWait": "0s", "EntryFetchMaxBurst": 42, "EntryFetchRate": 0.334, "Logger": null diff --git a/agent/configentry/merge_service_config.go b/agent/configentry/merge_service_config.go index 2c72bcda75de1..94e9d5e869c10 100644 --- a/agent/configentry/merge_service_config.go +++ b/agent/configentry/merge_service_config.go @@ -23,6 +23,7 @@ type StateStore interface { func MergeNodeServiceWithCentralConfig( ws memdb.WatchSet, state StateStore, + args 
*structs.ServiceSpecificRequest, ns *structs.NodeService, logger hclog.Logger) (uint64, *structs.NodeService, error) { @@ -46,6 +47,8 @@ func MergeNodeServiceWithCentralConfig( configReq := &structs.ServiceConfigRequest{ Name: serviceName, + Datacenter: args.Datacenter, + QueryOptions: args.QueryOptions, MeshGateway: ns.Proxy.MeshGateway, Mode: ns.Proxy.Mode, UpstreamIDs: upstreams, diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 25eb076918159..5f637f88f978f 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -6,7 +6,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io" + "io/ioutil" "net/http" "os" "strings" @@ -70,6 +70,13 @@ type VaultProvider struct { clusterID string spiffeID *connect.SpiffeIDSigning logger hclog.Logger + + // isConsulMountedIntermediate is used to determine if we should tune the + // mount if the VaultProvider is ever reconfigured. This is at most a + // "best guess" to determine whether this instance of Consul created the + // intermediate mount but will not be able to tell if an existing mount + // was created by Consul (in a previous running instance) or was external. 
+ isConsulMountedIntermediate bool } func NewVaultProvider(logger hclog.Logger) *VaultProvider { @@ -310,9 +317,10 @@ func (v *VaultProvider) GenerateRoot() (RootResult, error) { }, }) if err != nil { - return RootResult{}, err + return RootResult{}, fmt.Errorf("failed to mount root CA backend: %w", err) } + // We want to initialize afterwards fallthrough case ErrBackendNotInitialized: uid, err := connect.CompactUID() @@ -326,7 +334,7 @@ func (v *VaultProvider) GenerateRoot() (RootResult, error) { "key_bits": v.config.PrivateKeyBits, }) if err != nil { - return RootResult{}, err + return RootResult{}, fmt.Errorf("failed to initialize root CA: %w", err) } var ok bool rootPEM, ok = resp.Data["certificate"].(string) @@ -336,7 +344,7 @@ func (v *VaultProvider) GenerateRoot() (RootResult, error) { default: if err != nil { - return RootResult{}, err + return RootResult{}, fmt.Errorf("unexpected error while setting root PKI backend: %w", err) } } @@ -381,19 +389,51 @@ func (v *VaultProvider) setupIntermediatePKIPath() error { Config: mountConfig, }) if err != nil { - return err + return fmt.Errorf("failed to mount intermediate PKI backend: %w", err) } + // Required to determine if we should tune the mount + // if the VaultProvider is ever reconfigured. + v.isConsulMountedIntermediate = true + + } else if err == ErrBackendNotInitialized { + // If this is the first time calling setupIntermediatePKIPath, the backend + // will not have been initialized. Since the mount is ready we can suppress + // this error. 
} else { - return err + return fmt.Errorf("unexpected error while fetching intermediate CA: %w", err) } } else { + v.logger.Info("Found existing Intermediate PKI path mount", + "namespace", v.config.IntermediatePKINamespace, + "path", v.config.IntermediatePKIPath, + ) + + // This codepath requires the Vault policy: + // + // path "/sys/mounts//tune" { + // capabilities = [ "update" ] + // } + // err := v.tuneMountNamespaced(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath, &mountConfig) if err != nil { - v.logger.Warn("Could not update intermediate PKI mount settings", "path", v.config.IntermediatePKIPath, "error", err) + if v.isConsulMountedIntermediate { + v.logger.Warn("Intermediate PKI path was mounted by Consul but could not be tuned", + "namespace", v.config.IntermediatePKINamespace, + "path", v.config.IntermediatePKIPath, + "error", err, + ) + } else { + v.logger.Debug("Failed to tune Intermediate PKI mount. 403 Forbidden is expected if Consul does not have tune capabilities for the Intermediate PKI mount (i.e. 
using Vault-managed policies)", + "namespace", v.config.IntermediatePKINamespace, + "path", v.config.IntermediatePKIPath, + "error", err, + ) + } + } } - // Create the role for issuing leaf certs if it doesn't exist yet + // Create the role for issuing leaf certs rolePath := v.config.IntermediatePKIPath + "roles/" + VaultCALeafCertRole _, err = v.writeNamespaced(v.config.IntermediatePKINamespace, rolePath, map[string]interface{}{ "allow_any_name": true, @@ -501,7 +541,7 @@ func (v *VaultProvider) getCA(namespace, path string) (string, error) { return "", err } - bytes, err := io.ReadAll(resp.Body) + bytes, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } @@ -530,7 +570,7 @@ func (v *VaultProvider) getCAChain(namespace, path string) (string, error) { return "", err } - raw, err := io.ReadAll(resp.Body) + raw, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } @@ -710,7 +750,7 @@ func (v *VaultProvider) SignIntermediate(csr *x509.CertificateRequest) (string, func (v *VaultProvider) CrossSignCA(cert *x509.Certificate) (string, error) { rootPEM, err := v.getCA(v.config.RootPKINamespace, v.config.RootPKIPath) if err != nil { - return "", err + return "", fmt.Errorf("failed to get root CA: %w", err) } rootCert, err := connect.ParseCert(rootPEM) if err != nil { diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 797084c2e4f63..3574d48c47285 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -4,7 +4,7 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io" + "io/ioutil" "sync/atomic" "testing" "time" @@ -340,7 +340,7 @@ func TestVaultCAProvider_Bootstrap(t *testing.T) { req := client.NewRequest("GET", "/v1/"+tc.backendPath+"ca/pem") resp, err := client.RawRequest(req) require.NoError(t, err) - bytes, err := io.ReadAll(resp.Body) + bytes, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, cert, string(bytes)+"\n") @@ 
-924,6 +924,111 @@ func TestVaultCAProvider_GenerateIntermediate(t *testing.T) { require.NotEqual(t, orig, new) } +func TestVaultCAProvider_VaultManaged(t *testing.T) { + + SkipIfVaultNotPresent(t) + + const vaultManagedPKIPolicy = ` +path "/pki-root/" { + capabilities = [ "read" ] +} + +path "/pki-root/root/sign-intermediate" { + capabilities = [ "update" ] +} + +path "/pki-intermediate/*" { + capabilities = [ "create", "read", "update", "delete", "list" ] +} + +path "auth/token/renew-self" { + capabilities = [ "update" ] +} + +path "auth/token/lookup-self" { + capabilities = [ "read" ] +} +` + + testVault, err := runTestVault(t) + if err != nil { + t.Fatalf("err: %v", err) + } + + testVault.WaitUntilReady(t) + + client := testVault.Client() + + client.SetToken("root") + + // Mount pki root externally + require.NoError(t, client.Sys().Mount("pki-root", &vaultapi.MountInput{ + Type: "pki", + Description: "root CA backend for Consul Connect", + Config: vaultapi.MountConfigInput{ + MaxLeaseTTL: "12m", + }, + })) + _, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "common_name": "testconsul", + }) + require.NoError(t, err) + + // Mount pki intermediate externally + require.NoError(t, client.Sys().Mount("pki-intermediate", &vaultapi.MountInput{ + Type: "pki", + Description: "intermediate CA backend for Consul Connect", + Config: vaultapi.MountConfigInput{ + MaxLeaseTTL: "6m", + }, + })) + + // Generate a policy and token for the VaultProvider to use + require.NoError(t, client.Sys().PutPolicy("consul-ca", vaultManagedPKIPolicy)) + tcr := &vaultapi.TokenCreateRequest{ + Policies: []string{"consul-ca"}, + } + secret, err := testVault.client.Auth().Token().Create(tcr) + require.NoError(t, err) + providerToken := secret.Auth.ClientToken + + // We want to test the provider.Configure() step + _, err = createVaultProvider(t, true, testVault.Addr, providerToken, nil) + require.NoError(t, err) +} + +func 
TestVaultCAProvider_ConsulManaged(t *testing.T) { + + SkipIfVaultNotPresent(t) + + testVault, err := runTestVault(t) + if err != nil { + t.Fatalf("err: %v", err) + } + + testVault.WaitUntilReady(t) + + client := testVault.Client() + + client.SetToken("root") + + // We do not configure any mounts and instead let Consul + // be responsible for mounting root and intermediate PKI + + // Generate a policy and token for the VaultProvider to use + require.NoError(t, client.Sys().PutPolicy("consul-ca", pkiTestPolicy)) + tcr := &vaultapi.TokenCreateRequest{ + Policies: []string{"consul-ca"}, + } + secret, err := testVault.client.Auth().Token().Create(tcr) + require.NoError(t, err) + providerToken := secret.Auth.ClientToken + + // We want to test the provider.Configure() step + _, err = createVaultProvider(t, true, testVault.Addr, providerToken, nil) + require.NoError(t, err) +} + func getIntermediateCertTTL(t *testing.T, caConf *structs.CAConfiguration) time.Duration { t.Helper() diff --git a/agent/connect/ca/testing.go b/agent/connect/ca/testing.go index 5bd8c9908800f..97c28871d4684 100644 --- a/agent/connect/ca/testing.go +++ b/agent/connect/ca/testing.go @@ -3,7 +3,7 @@ package ca import ( "errors" "fmt" - "io" + "io/ioutil" "os" "os/exec" "strings" @@ -77,7 +77,7 @@ func CASigningKeyTypeCases() []CASigningKeyTypes { // TestConsulProvider creates a new ConsulProvider, taking care to stub out it's // Logger so that logging calls don't panic. If logging output is important func TestConsulProvider(t testing.T, d ConsulProviderStateDelegate) *ConsulProvider { - logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard}) + logger := hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}) provider := &ConsulProvider{Delegate: d, logger: logger} return provider } @@ -155,8 +155,8 @@ func runTestVault(t testing.T) (*TestVaultServer, error) { } cmd := exec.Command(vaultBinaryName, args...) 
- cmd.Stdout = io.Discard - cmd.Stderr = io.Discard + cmd.Stdout = ioutil.Discard + cmd.Stderr = ioutil.Discard if err := cmd.Start(); err != nil { return nil, err } diff --git a/agent/connect/testing_ca_test.go b/agent/connect/testing_ca_test.go index 7d3cb95798a1a..fa0233c19ff84 100644 --- a/agent/connect/testing_ca_test.go +++ b/agent/connect/testing_ca_test.go @@ -2,6 +2,7 @@ package connect import ( "fmt" + "io/ioutil" "os" "os/exec" "path/filepath" @@ -33,13 +34,13 @@ func testCAAndLeaf(t *testing.T, keyType string, keyBits int) { leaf, _ := TestLeaf(t, "web", ca) // Create a temporary directory for storing the certs - td, err := os.MkdirTemp("", "consul") + td, err := ioutil.TempDir("", "consul") require.NoError(t, err) defer os.RemoveAll(td) // Write the cert - require.NoError(t, os.WriteFile(filepath.Join(td, "ca.pem"), []byte(ca.RootCert), 0644)) - require.NoError(t, os.WriteFile(filepath.Join(td, "leaf.pem"), []byte(leaf[:]), 0644)) + require.NoError(t, ioutil.WriteFile(filepath.Join(td, "ca.pem"), []byte(ca.RootCert), 0644)) + require.NoError(t, ioutil.WriteFile(filepath.Join(td, "leaf.pem"), []byte(leaf[:]), 0644)) // Use OpenSSL to verify so we have an external, known-working process // that can verify this outside of our own implementations. @@ -65,7 +66,7 @@ func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) { leaf2, _ := TestLeaf(t, "web", ca2) // Create a temporary directory for storing the certs - td, err := os.MkdirTemp("", "consul") + td, err := ioutil.TempDir("", "consul") assert.Nil(t, err) defer os.RemoveAll(td) @@ -73,9 +74,9 @@ func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) { xcbundle := []byte(ca1.RootCert) xcbundle = append(xcbundle, '\n') xcbundle = append(xcbundle, []byte(ca2.SigningCert)...) 
- assert.Nil(t, os.WriteFile(filepath.Join(td, "ca.pem"), xcbundle, 0644)) - assert.Nil(t, os.WriteFile(filepath.Join(td, "leaf1.pem"), []byte(leaf1), 0644)) - assert.Nil(t, os.WriteFile(filepath.Join(td, "leaf2.pem"), []byte(leaf2), 0644)) + assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "ca.pem"), xcbundle, 0644)) + assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "leaf1.pem"), []byte(leaf1), 0644)) + assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "leaf2.pem"), []byte(leaf2), 0644)) // OpenSSL verify the cross-signed leaf (leaf2) { diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index 1296ab3175567..2a299bc761395 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -4,7 +4,7 @@ import ( "bytes" "crypto/x509" "encoding/pem" - "io" + "io/ioutil" "net/http" "net/http/httptest" "testing" @@ -287,7 +287,7 @@ func TestConnectCARoots_PEMEncoding(t *testing.T) { resp := recorder.Result() require.Equal(t, resp.Header.Get("Content-Type"), "application/pem-certificate-chain") - data, err := io.ReadAll(resp.Body) + data, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) // expecting the root cert from dc1 and an intermediate in dc2 diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index ee9551facefd6..513b594b74fa7 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/ioutil" "os" "path/filepath" "time" @@ -124,7 +125,7 @@ func (a *ACL) fileBootstrapResetIndex() uint64 { path := filepath.Join(a.srv.config.DataDir, aclBootstrapReset) // Read the file - raw, err := os.ReadFile(path) + raw, err := ioutil.ReadFile(path) if err != nil { if !os.IsNotExist(err) { a.logger.Error("bootstrap: failed to read path", diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 35be03857853c..79846beaa5438 100644 --- a/agent/consul/acl_endpoint_test.go +++ 
b/agent/consul/acl_endpoint_test.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -60,7 +61,7 @@ func TestACLEndpoint_BootstrapTokens(t *testing.T) { require.NoError(t, err) resetPath := filepath.Join(dir, "acl-bootstrap-reset") - require.NoError(t, os.WriteFile(resetPath, []byte(fmt.Sprintf("%d", resetIdx)), 0600)) + require.NoError(t, ioutil.WriteFile(resetPath, []byte(fmt.Sprintf("%d", resetIdx)), 0600)) oldID := out.AccessorID // Finally, make sure that another attempt is rejected. @@ -2943,7 +2944,7 @@ func TestACLEndpoint_AuthMethodSet(t *testing.T) { t.Parallel() - tempDir, err := os.MkdirTemp("", "consul") + tempDir, err := ioutil.TempDir("", "consul") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(tempDir) }) _, srv, codec := testACLServerWithConfig(t, nil, false) diff --git a/agent/consul/authmethod/kubeauth/testing.go b/agent/consul/authmethod/kubeauth/testing.go index 83e5d4fa29de0..87938f406bad1 100644 --- a/agent/consul/authmethod/kubeauth/testing.go +++ b/agent/consul/authmethod/kubeauth/testing.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "encoding/pem" - "io" + "io/ioutil" "log" "net/http" "net/http/httptest" @@ -43,7 +43,7 @@ type TestAPIServer struct { func StartTestAPIServer(t testing.T) *TestAPIServer { s := &TestAPIServer{} s.srv = httptest.NewUnstartedServer(s) - s.srv.Config.ErrorLog = log.New(io.Discard, "", 0) + s.srv.Config.ErrorLog = log.New(ioutil.Discard, "", 0) s.srv.StartTLS() bs := s.srv.TLS.Certificates[0].Certificate[0] @@ -162,7 +162,7 @@ func (s *TestAPIServer) handleTokenReview(w http.ResponseWriter, req *http.Reque } defer req.Body.Close() - b, err := io.ReadAll(req.Body) + b, err := ioutil.ReadAll(req.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) return diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index ac9ea4128ddf0..1036044fabca7 100644 --- a/agent/consul/auto_config_endpoint_test.go 
+++ b/agent/consul/auto_config_endpoint_test.go @@ -8,10 +8,10 @@ import ( "encoding/base64" "encoding/pem" "fmt" + "io/ioutil" "math/rand" "net" "net/url" - "os" "path" "testing" "time" @@ -162,15 +162,15 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { c.AutoConfigAuthzAllowReuse = true cafile := path.Join(c.DataDir, "cacert.pem") - err := os.WriteFile(cafile, []byte(cacert), 0600) + err := ioutil.WriteFile(cafile, []byte(cacert), 0600) require.NoError(t, err) certfile := path.Join(c.DataDir, "cert.pem") - err = os.WriteFile(certfile, []byte(cert), 0600) + err = ioutil.WriteFile(certfile, []byte(cert), 0600) require.NoError(t, err) keyfile := path.Join(c.DataDir, "key.pem") - err = os.WriteFile(keyfile, []byte(key), 0600) + err = ioutil.WriteFile(keyfile, []byte(key), 0600) require.NoError(t, err) c.TLSConfig.InternalRPC.CAFile = cafile @@ -426,7 +426,7 @@ func TestAutoConfig_updateTLSSettingsInConfig(t *testing.T) { dir := testutil.TempDir(t, "auto-config-tls-settings") cafile := path.Join(dir, "cacert.pem") - err = os.WriteFile(cafile, []byte(cacert), 0600) + err = ioutil.WriteFile(cafile, []byte(cacert), 0600) require.NoError(t, err) type testCase struct { @@ -632,7 +632,7 @@ func TestAutoConfig_updateTLSCertificatesInConfig(t *testing.T) { // will error if it cannot load the CA certificate from disk dir := testutil.TempDir(t, "auto-config-tls-certificate") cafile := path.Join(dir, "cacert.pem") - err = os.WriteFile(cafile, []byte(cacert), 0600) + err = ioutil.WriteFile(cafile, []byte(cacert), 0600) require.NoError(t, err) // translate the roots response to protobuf to be embedded diff --git a/agent/consul/auto_encrypt_endpoint.go b/agent/consul/auto_encrypt_endpoint.go index 3adfad8ecd2b4..2f96efacfcdd3 100644 --- a/agent/consul/auto_encrypt_endpoint.go +++ b/agent/consul/auto_encrypt_endpoint.go @@ -2,7 +2,6 @@ package consul import ( "errors" - "fmt" "github.com/hashicorp/consul/agent/structs" ) @@ -25,13 +24,6 @@ func (a *AutoEncrypt) 
Sign( if !a.srv.config.AutoEncryptAllowTLS { return ErrAutoEncryptAllowTLSNotEnabled } - // There's no reason to forward the AutoEncrypt.Sign RPC to a remote datacenter because its certificates - // won't be valid in this datacenter. If the client is requesting a different datacenter, then this is a - // misconfiguration, and we can give them a useful error. - if args.Datacenter != a.srv.config.Datacenter { - return fmt.Errorf("mismatched datacenter (client_dc='%s' server_dc='%s');"+ - " check client has same datacenter set as servers", args.Datacenter, a.srv.config.Datacenter) - } if done, err := a.srv.ForwardRPC("AutoEncrypt.Sign", args, reply); done { return err } diff --git a/agent/consul/auto_encrypt_endpoint_test.go b/agent/consul/auto_encrypt_endpoint_test.go index 50d356d9f86da..a27cef26b2f5d 100644 --- a/agent/consul/auto_encrypt_endpoint_test.go +++ b/agent/consul/auto_encrypt_endpoint_test.go @@ -139,58 +139,3 @@ func TestAutoEncryptSign(t *testing.T) { }) } } - -func TestAutoEncryptSign_MismatchedDC(t *testing.T) { - t.Parallel() - - cert := "../../test/key/ourdomain.cer" - key := "../../test/key/ourdomain.key" - root := "../../test/ca/root.cer" - dir, s := testServerWithConfig(t, func(c *Config) { - c.AutoEncryptAllowTLS = true - c.PrimaryDatacenter = "dc1" - c.Bootstrap = true - c.TLSConfig.InternalRPC.CAFile = root - c.TLSConfig.InternalRPC.VerifyOutgoing = true - c.TLSConfig.InternalRPC.CertFile = cert - c.TLSConfig.InternalRPC.KeyFile = key - }) - defer os.RemoveAll(dir) - defer s.Shutdown() - testrpc.WaitForLeader(t, s.RPC, "dc1") - - // Generate a CSR and request signing - id := &connect.SpiffeIDAgent{ - Host: strings.TrimSuffix("domain", "."), - Datacenter: "different", - Agent: "uuid", - } - - // Create a new private key - pk, _, err := connect.GeneratePrivateKey() - require.NoError(t, err) - - // Create a CSR. 
- dnsNames := []string{"localhost"} - ipAddresses := []net.IP{net.ParseIP("127.0.0.1")} - csr, err := connect.CreateCSR(id, pk, dnsNames, ipAddresses) - require.NoError(t, err) - require.NotEmpty(t, csr) - args := &structs.CASignRequest{ - Datacenter: "different", - CSR: csr, - } - - cfg := tlsutil.Config{ - AutoTLS: true, - Domain: "consul", - } - codec, err := insecureRPCClient(s, cfg) - require.NoError(t, err) - - var reply structs.SignedResponse - err = msgpackrpc.CallWithCodec(codec, "AutoEncrypt.Sign", args, &reply) - codec.Close() - require.EqualError(t, err, "mismatched datacenter (client_dc='different' server_dc='dc1'); check client has same datacenter set as servers") - return -} diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index bacafa688ae86..5d5dcb8b56d58 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -764,7 +764,7 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru mergedsn := sn ns := sn.ToNodeService() if ns.IsSidecarProxy() || ns.IsGateway() { - cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, ns, c.logger) + cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, args, ns, c.logger) if err != nil { return err } @@ -968,7 +968,11 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru for _, ns := range services.Services { mergedns := ns if ns.IsSidecarProxy() || ns.IsGateway() { - cfgIndex, mergedns, err = configentry.MergeNodeServiceWithCentralConfig(ws, state, ns, c.logger) + serviceSpecificReq := structs.ServiceSpecificRequest{ + Datacenter: args.Datacenter, + QueryOptions: args.QueryOptions, + } + cfgIndex, mergedns, err = configentry.MergeNodeServiceWithCentralConfig(ws, state, &serviceSpecificReq, ns, c.logger) if err != nil { return err } diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 
858f3394176ed..4625a08cf9902 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -259,7 +259,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc for _, node := range resolvedNodes { ns := node.Service if ns.IsSidecarProxy() || ns.IsGateway() { - cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, ns, h.logger) + cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, args, ns, h.logger) if err != nil { return err } diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 2d7e06f0484b3..bf6fa95221041 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/x509" "fmt" + "io/ioutil" "os" "path/filepath" "reflect" @@ -1456,7 +1457,7 @@ func TestNewCARoot(t *testing.T) { func readTestData(t *testing.T, name string) string { t.Helper() path := filepath.Join("testdata", name) - bs, err := os.ReadFile(path) + bs, err := ioutil.ReadFile(path) if err != nil { t.Fatalf("failed reading fixture file %s: %s", name, err) } diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 602e311df8050..4e76c26d31dda 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -360,7 +360,7 @@ func (s *Server) establishStream(ctx context.Context, // send keepalive pings even if there is no active streams PermitWithoutStream: true, }), - grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(50 * 1024 * 1024)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(8*1024*1024), grpc.MaxCallRecvMsgSize(8*1024*1024)), } logger.Trace("dialing peer", "addr", addr) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index afc8af28a7fd7..cd23a43c7d58a 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -6,8 +6,8 @@ import ( 
"encoding/json" "errors" "fmt" + "io/ioutil" "net" - "os" "testing" "time" @@ -480,7 +480,7 @@ func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) { }, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.peering.11111111-2222-3333-4444-555555555555.consul, not wrong.name`) }) t.Run("bad-ca-roots", func(t *testing.T) { - wrongRoot, err := os.ReadFile("../../test/client_certs/rootca.crt") + wrongRoot, err := ioutil.ReadFile("../../test/client_certs/rootca.crt") require.NoError(t, err) testLeader_PeeringSync_failsForTLSError(t, func(token *structs.PeeringToken) { diff --git a/agent/consul/operator_backend.go b/agent/consul/operator_backend.go deleted file mode 100644 index 8305c8fd20588..0000000000000 --- a/agent/consul/operator_backend.go +++ /dev/null @@ -1,34 +0,0 @@ -package consul - -import ( - "context" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/rpc/operator" - "github.com/hashicorp/consul/proto/pboperator" - "github.com/hashicorp/raft" -) - -type OperatorBackend struct { - srv *Server -} - -// NewOperatorBackend returns a operator.Backend implementation that is bound to the given server. 
-func NewOperatorBackend(srv *Server) *OperatorBackend { - return &OperatorBackend{ - srv: srv, - } -} - -func (op *OperatorBackend) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) { - return op.srv.ResolveTokenAndDefaultMeta(token, entMeta, authzCtx) -} - -func (op *OperatorBackend) TransferLeader(_ context.Context, request *pboperator.TransferLeaderRequest) (*pboperator.TransferLeaderResponse, error) { - reply := new(pboperator.TransferLeaderResponse) - err := op.srv.attemptLeadershipTransfer(raft.ServerID(request.ID)) - reply.Success = err == nil - return reply, err -} - -var _ operator.Backend = (*OperatorBackend)(nil) diff --git a/agent/consul/operator_backend_test.go b/agent/consul/operator_backend_test.go deleted file mode 100644 index 3fdca15af973d..0000000000000 --- a/agent/consul/operator_backend_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package consul - -import ( - "context" - "github.com/hashicorp/consul/acl" - external "github.com/hashicorp/consul/agent/grpc-external" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pboperator" - "github.com/hashicorp/consul/sdk/testutil/retry" - "google.golang.org/grpc/credentials/insecure" - "testing" - "time" - - "github.com/stretchr/testify/require" - gogrpc "google.golang.org/grpc" - - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/testrpc" -) - -func TestOperatorBackend_TransferLeader(t *testing.T) { - t.Parallel() - - conf := testClusterConfig{ - Datacenter: "dc1", - Servers: 3, - ServerConf: func(config *Config) { - config.RaftConfig.HeartbeatTimeout = 2 * time.Second - config.RaftConfig.ElectionTimeout = 2 * time.Second - config.RaftConfig.LeaderLeaseTimeout = 1 * time.Second - }, - } - - nodes := newTestCluster(t, &conf) - s1 := nodes.Servers[0] - // Make sure a leader is elected - 
testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Make a write call to server2 and make sure it gets forwarded to server1 - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) - - // Dial server2 directly - conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), - gogrpc.WithTransportCredentials(insecure.NewCredentials()), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - operatorClient := pboperator.NewOperatorServiceClient(conn) - - testutil.RunStep(t, "transfer leader", func(t *testing.T) { - beforeLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, beforeLeader) - // Do the grpc Write call to server2 - req := pboperator.TransferLeaderRequest{ - ID: "", - } - reply, err := operatorClient.TransferLeader(ctx, &req) - require.NoError(t, err) - require.True(t, reply.Success) - time.Sleep(1 * time.Second) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - retry.Run(t, func(r *retry.R) { - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(r, afterLeader) - }) - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, afterLeader) - if afterLeader == beforeLeader { - t.Fatalf("leader should have changed %s == %s", afterLeader, beforeLeader) - } - }) -} - -func TestOperatorBackend_TransferLeaderWithACL(t *testing.T) { - t.Parallel() - - conf := testClusterConfig{ - Datacenter: "dc1", - Servers: 3, - ServerConf: func(config *Config) { - config.RaftConfig.HeartbeatTimeout = 2 * time.Second - config.RaftConfig.ElectionTimeout = 2 * time.Second - config.RaftConfig.LeaderLeaseTimeout = 1 * time.Second - config.ACLsEnabled = true - config.ACLInitialManagementToken = "root" - config.ACLResolverSettings.ACLDefaultPolicy = "deny" - }, - } - - nodes := newTestCluster(t, &conf) - s1 := nodes.Servers[0] - // Make sure a leader is elected - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Make a write call to 
server2 and make sure it gets forwarded to server1 - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) - - // Dial server2 directly - conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), - gogrpc.WithTransportCredentials(insecure.NewCredentials()), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - operatorClient := pboperator.NewOperatorServiceClient(conn) - - testutil.RunStep(t, "transfer leader no token", func(t *testing.T) { - beforeLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, beforeLeader) - // Do the grpc Write call to server2 - req := pboperator.TransferLeaderRequest{ - ID: "", - } - reply, err := operatorClient.TransferLeader(ctx, &req) - require.True(t, acl.IsErrPermissionDenied(err)) - require.Nil(t, reply) - time.Sleep(1 * time.Second) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - retry.Run(t, func(r *retry.R) { - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(r, afterLeader) - }) - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, afterLeader) - if afterLeader != beforeLeader { - t.Fatalf("leader should have changed %s == %s", afterLeader, beforeLeader) - } - }) - - testutil.RunStep(t, "transfer leader operator read token", func(t *testing.T) { - - beforeLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, beforeLeader) - // Do the grpc Write call to server2 - req := pboperator.TransferLeaderRequest{ - ID: "", - } - codec := rpcClient(t, s1) - rules := `operator = "read"` - tokenRead := createToken(t, codec, rules) - - ctxToken, err := external.ContextWithQueryOptions(ctx, structs.QueryOptions{Token: tokenRead}) - require.NoError(t, err) - reply, err := operatorClient.TransferLeader(ctxToken, &req) - require.True(t, acl.IsErrPermissionDenied(err)) - require.Nil(t, reply) - time.Sleep(1 * time.Second) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - 
retry.Run(t, func(r *retry.R) { - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(r, afterLeader) - }) - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, afterLeader) - if afterLeader != beforeLeader { - t.Fatalf("leader should have changed %s == %s", afterLeader, beforeLeader) - } - }) - - testutil.RunStep(t, "transfer leader operator write token", func(t *testing.T) { - - beforeLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, beforeLeader) - // Do the grpc Write call to server2 - req := pboperator.TransferLeaderRequest{ - ID: "", - } - codec := rpcClient(t, s1) - rules := `operator = "write"` - tokenWrite := createTokenWithPolicyNameFull(t, codec, "the-policy-write", rules, "root") - ctxToken, err := external.ContextWithQueryOptions(ctx, structs.QueryOptions{Token: tokenWrite.SecretID}) - require.NoError(t, err) - reply, err := operatorClient.TransferLeader(ctxToken, &req) - require.NoError(t, err) - require.True(t, reply.Success) - time.Sleep(1 * time.Second) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - retry.Run(t, func(r *retry.R) { - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(r, afterLeader) - }) - afterLeader, _ := s1.raft.LeaderWithID() - require.NotEmpty(t, afterLeader) - if afterLeader == beforeLeader { - t.Fatalf("leader should have changed %s == %s", afterLeader, beforeLeader) - } - }) -} diff --git a/agent/consul/operator_raft_endpoint.go b/agent/consul/operator_raft_endpoint.go index a0c194e7aee61..328f8ff964e02 100644 --- a/agent/consul/operator_raft_endpoint.go +++ b/agent/consul/operator_raft_endpoint.go @@ -2,7 +2,6 @@ package consul import ( "fmt" - "net" "github.com/hashicorp/raft" diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index 01ea34961b8d9..ff586157083f8 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "math" "net" "os" @@ -1389,13 +1390,13 @@ func TestRPC_AuthorizeRaftRPC(t 
*testing.T) { require.NoError(t, err) dir := testutil.TempDir(t, "certs") - err = os.WriteFile(filepath.Join(dir, "ca.pem"), []byte(caPEM), 0600) + err = ioutil.WriteFile(filepath.Join(dir, "ca.pem"), []byte(caPEM), 0600) require.NoError(t, err) intermediatePEM, intermediatePK, err := tlsutil.GenerateCert(tlsutil.CertOpts{IsCA: true, CA: caPEM, Signer: caSigner, Days: 5}) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, "intermediate.pem"), []byte(intermediatePEM), 0600) + err = ioutil.WriteFile(filepath.Join(dir, "intermediate.pem"), []byte(intermediatePEM), 0600) require.NoError(t, err) newCert := func(t *testing.T, caPEM, pk, node, name string) { @@ -1414,9 +1415,9 @@ func TestRPC_AuthorizeRaftRPC(t *testing.T) { }) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, node+"-"+name+".pem"), []byte(pem), 0600) + err = ioutil.WriteFile(filepath.Join(dir, node+"-"+name+".pem"), []byte(pem), 0600) require.NoError(t, err) - err = os.WriteFile(filepath.Join(dir, node+"-"+name+".key"), []byte(key), 0600) + err = ioutil.WriteFile(filepath.Join(dir, node+"-"+name+".key"), []byte(key), 0600) require.NoError(t, err) } diff --git a/agent/consul/server.go b/agent/consul/server.go index 24cf98e56352a..9a1363c7abe16 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net" "os" "path/filepath" @@ -21,6 +22,7 @@ import ( "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-version" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" raftboltdb "github.com/hashicorp/raft-boltdb/v2" @@ -50,7 +52,6 @@ import ( "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" "github.com/hashicorp/consul/agent/rpc/middleware" - 
"github.com/hashicorp/consul/agent/rpc/operator" "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" @@ -370,9 +371,6 @@ type Server struct { // peeringBackend is shared between the external and internal gRPC services for peering peeringBackend *PeeringBackend - // operatorBackend is shared between the external and internal gRPC services for peering - operatorBackend *OperatorBackend - // peerStreamServer is a server used to handle peering streams from external clusters. peerStreamServer *peerstream.Server @@ -388,7 +386,6 @@ type Server struct { // embedded struct to hold all the enterprise specific data EnterpriseServer - operatorServer *operator.Server } type connHandler interface { Run() error @@ -743,7 +740,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser }).Register(s.externalGRPCServer) s.peeringBackend = NewPeeringBackend(s) - s.operatorBackend = NewOperatorBackend(s) s.peerStreamServer = peerstream.NewServer(peerstream.Config{ Backend: s.peeringBackend, GetStore: func() peerstream.StateStore { return s.FSM().State() }, @@ -831,19 +827,6 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler PeeringEnabled: config.PeeringEnabled, }) s.peeringServer = p - o := operator.NewServer(operator.Config{ - Backend: s.operatorBackend, - Logger: deps.Logger.Named("grpc-api.operator"), - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - // Only forward the request if the dc in the request matches the server's datacenter. 
- if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { - return false, fmt.Errorf("requests to transfer leader cannot be forwarded to remote datacenters") - } - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - Datacenter: config.Datacenter, - }) - s.operatorServer = o register := func(srv *grpc.Server) { if config.RPCConfig.EnableStreaming { @@ -852,7 +835,6 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler deps.Logger.Named("grpc-api.subscription"))) } s.peeringServer.Register(srv) - s.operatorServer.Register(srv) s.registerEnterpriseGRPCServices(deps, srv) // Note: these external gRPC services are also exposed on the internal server to @@ -984,7 +966,7 @@ func (s *Server) setupRaft() error { peersFile := filepath.Join(path, "peers.json") peersInfoFile := filepath.Join(path, "peers.info") if _, err := os.Stat(peersInfoFile); os.IsNotExist(err) { - if err := os.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil { + if err := ioutil.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil { return fmt.Errorf("failed to write peers.info file: %v", err) } @@ -1212,25 +1194,20 @@ func (s *Server) Shutdown() error { return nil } -func (s *Server) attemptLeadershipTransfer(id raft.ServerID) (err error) { - var addr raft.ServerAddress - if id != "" { - addr, err = s.serverLookup.ServerAddr(id) - if err != nil { - return err - } - future := s.raft.LeadershipTransferToServer(id, addr) - if err := future.Error(); err != nil { - return err - } - } else { - future := s.raft.LeadershipTransfer() - if err := future.Error(); err != nil { - return err - } +func (s *Server) attemptLeadershipTransfer() (success bool) { + leadershipTransferVersion := version.Must(version.NewVersion(LeaderTransferMinVersion)) + + ok, _ := ServersInDCMeetMinimumVersion(s, s.config.Datacenter, leadershipTransferVersion) + if !ok { + return false } - return nil + future := s.raft.LeadershipTransfer() + if 
err := future.Error(); err != nil { + s.logger.Error("failed to transfer leadership, removing the server", "error", err) + return false + } + return true } // Leave is used to prepare for a graceful shutdown. @@ -1252,7 +1229,7 @@ func (s *Server) Leave() error { // removed for some reasonable period of time. isLeader := s.IsLeader() if isLeader && numPeers > 1 { - if err := s.attemptLeadershipTransfer(""); err == nil { + if s.attemptLeadershipTransfer() { isLeader = false } else { future := s.raft.RemoveServer(raft.ServerID(s.config.NodeID), 0, 0) diff --git a/agent/consul/snapshot_endpoint.go b/agent/consul/snapshot_endpoint.go index 102bc0a38a5c0..80e255bd9e141 100644 --- a/agent/consul/snapshot_endpoint.go +++ b/agent/consul/snapshot_endpoint.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net" "time" @@ -137,7 +138,7 @@ func (s *Server) dispatchSnapshotRequest(args *structs.SnapshotRequest, in io.Re // Give the caller back an empty reader since there's nothing to // stream back. - return io.NopCloser(bytes.NewReader([]byte(""))), nil + return ioutil.NopCloser(bytes.NewReader([]byte(""))), nil default: return nil, fmt.Errorf("unrecognized snapshot op %q", args.Op) diff --git a/agent/dns.go b/agent/dns.go index b35f80c630bf9..976b6dbdb97dd 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -946,11 +946,10 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi return d.nodeLookup(cfg, lookup, req, resp) case "query": - n := len(queryParts) datacenter := d.agent.config.Datacenter // ensure we have a query name - if n < 1 { + if len(queryParts) < 1 { return invalid() } @@ -958,23 +957,8 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi return invalid() } - query := "" - - // If the first and last DNS query parts begin with _, this is an RFC 2782 style SRV lookup. - // This allows for prepared query names to include "." (for backwards compatibility). 
- // Otherwise, this is a standard prepared query lookup. - if n >= 2 && strings.HasPrefix(queryParts[0], "_") && strings.HasPrefix(queryParts[n-1], "_") { - // The last DNS query part is the protocol field (ignored). - // All prior parts are the prepared query name or ID. - query = strings.Join(queryParts[:n-1], ".") - - // Strip leading underscore - query = query[1:] - } else { - // Allow a "." in the query name, just join all the parts. - query = strings.Join(queryParts, ".") - } - + // Allow a "." in the query name, just join all the parts. + query := strings.Join(queryParts, ".") err := d.preparedQueryLookup(cfg, datacenter, query, remoteAddr, req, resp, maxRecursionLevel) return ecsNotGlobalError{error: err} diff --git a/agent/dns_test.go b/agent/dns_test.go index 189859b9d00e7..2f2499a2efcbc 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -2743,16 +2743,13 @@ func TestDNS_ServiceLookup_ServiceAddress_SRV(t *testing.T) { } // Register an equivalent prepared query. - // Specify prepared query name containing "." to test - // since that is technically supported (though atypical). 
var id string - preparedQueryName := "query.name.with.dots" { args := &structs.PreparedQueryRequest{ Datacenter: "dc1", Op: structs.PreparedQueryCreate, Query: &structs.PreparedQuery{ - Name: preparedQueryName, + Name: "test", Service: structs.ServiceQuery{ Service: "db", }, @@ -2767,9 +2764,6 @@ func TestDNS_ServiceLookup_ServiceAddress_SRV(t *testing.T) { questions := []string{ "db.service.consul.", id + ".query.consul.", - preparedQueryName + ".query.consul.", - fmt.Sprintf("_%s._tcp.query.consul.", id), - fmt.Sprintf("_%s._tcp.query.consul.", preparedQueryName), } for _, question := range questions { m := new(dns.Msg) diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index 5323dab057425..08f53578fcf1c 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -74,9 +74,15 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G NodeId: string(svc.ID), } + // This is awkward because it's designed for different requests, but + // this fakes the ServiceSpecificRequest so that we can reuse code. 
_, ns, err := configentry.MergeNodeServiceWithCentralConfig( nil, store, + &structs.ServiceSpecificRequest{ + Datacenter: s.Datacenter, + QueryOptions: options, + }, svc.ToNodeService(), logger, ) diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 81961d611b717..b1254271263c6 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -569,7 +569,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) var lastSendAck time.Time - var lastSendSuccess *time.Time + var lastSendSuccess time.Time client.DrainStream(t) @@ -604,7 +604,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, + LastAck: lastSendAck, ExportedServices: []string{}, } retry.Run(t, func(r *retry.R) { @@ -641,8 +641,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, - LastNack: &lastNack, + LastAck: lastSendAck, + LastNack: lastNack, LastNackMessage: lastNackMsg, ExportedServices: []string{}, } @@ -693,10 +693,10 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, - LastNack: &lastNack, + LastAck: lastSendAck, + LastNack: lastNack, LastNackMessage: lastNackMsg, - LastRecvResourceSuccess: &lastRecvResourceSuccess, + LastRecvResourceSuccess: lastRecvResourceSuccess, ExportedServices: []string{}, } @@ -749,11 +749,11 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, - LastNack: &lastNack, + LastAck: lastSendAck, + LastNack: lastNack, LastNackMessage: lastNackMsg, - LastRecvResourceSuccess: &lastRecvResourceSuccess, 
- LastRecvError: &lastRecvError, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, LastRecvErrorMessage: lastRecvErrorMsg, ExportedServices: []string{}, } @@ -779,13 +779,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, - LastNack: &lastNack, + LastAck: lastSendAck, + LastNack: lastNack, LastNackMessage: lastNackMsg, - LastRecvResourceSuccess: &lastRecvResourceSuccess, - LastRecvError: &lastRecvError, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, LastRecvErrorMessage: lastRecvErrorMsg, - LastRecvHeartbeat: &lastRecvHeartbeat, + LastRecvHeartbeat: lastRecvHeartbeat, ExportedServices: []string{}, } @@ -807,14 +807,14 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { Connected: false, DisconnectErrorMessage: lastRecvErrorMsg, LastSendSuccess: lastSendSuccess, - LastAck: &lastSendAck, - LastNack: &lastNack, + LastAck: lastSendAck, + LastNack: lastNack, LastNackMessage: lastNackMsg, - DisconnectTime: &disconnectTime, - LastRecvResourceSuccess: &lastRecvResourceSuccess, - LastRecvError: &lastRecvError, + DisconnectTime: disconnectTime, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, LastRecvErrorMessage: lastRecvErrorMsg, - LastRecvHeartbeat: &lastRecvHeartbeat, + LastRecvHeartbeat: lastRecvHeartbeat, ExportedServices: []string{}, } @@ -1239,7 +1239,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { }) testutil.RunStep(t, "stream is disconnected due to heartbeat timeout", func(t *testing.T) { - disconnectTime := ptr(it.FutureNow(1)) + disconnectTime := it.FutureNow(1) retry.Run(t, func(r *retry.R) { status, ok := srv.StreamStatus(testPeerID) require.True(r, ok) diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 
f36ca055d1027..daf891d38adfd 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -148,32 +148,22 @@ func (t *Tracker) DeleteStatus(id string) { func (t *Tracker) IsHealthy(s Status) bool { // If stream is in a disconnected state for longer than the configured // heartbeat timeout, report as unhealthy. - if s.DisconnectTime != nil && - t.timeNow().Sub(*s.DisconnectTime) > t.heartbeatTimeout { + if !s.DisconnectTime.IsZero() && + t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout { return false } // If last Nack is after last Ack, it means the peer is unable to - // handle our replication message - if s.LastAck == nil { - s.LastAck = &time.Time{} - } - - if s.LastNack != nil && - s.LastNack.After(*s.LastAck) && - t.timeNow().Sub(*s.LastAck) > t.heartbeatTimeout { + // handle our replication message. + if s.LastNack.After(s.LastAck) && + t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout { return false } // If last recv error is newer than last recv success, we were unable // to handle the peer's replication message. - if s.LastRecvResourceSuccess == nil { - s.LastRecvResourceSuccess = &time.Time{} - } - - if s.LastRecvError != nil && - s.LastRecvError.After(*s.LastRecvResourceSuccess) && - t.timeNow().Sub(*s.LastRecvError) > t.heartbeatTimeout { + if s.LastRecvError.After(s.LastRecvResourceSuccess) && + t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout { return false } @@ -207,36 +197,36 @@ type Status struct { DisconnectErrorMessage string // If the status is not connected, DisconnectTime tracks when the stream was closed. Else it's zero. - DisconnectTime *time.Time + DisconnectTime time.Time // LastAck tracks the time we received the last ACK for a resource replicated TO the peer. - LastAck *time.Time + LastAck time.Time // LastNack tracks the time we received the last NACK for a resource replicated to the peer. 
- LastNack *time.Time + LastNack time.Time // LastNackMessage tracks the reported error message associated with the last NACK from a peer. LastNackMessage string // LastSendError tracks the time of the last error sending into the stream. - LastSendError *time.Time + LastSendError time.Time // LastSendErrorMessage tracks the last error message when sending into the stream. LastSendErrorMessage string // LastSendSuccess tracks the time we last successfully sent a resource TO the peer. - LastSendSuccess *time.Time + LastSendSuccess time.Time // LastRecvHeartbeat tracks when we last received a heartbeat from our peer. - LastRecvHeartbeat *time.Time + LastRecvHeartbeat time.Time // LastRecvResourceSuccess tracks the time we last successfully stored a resource replicated FROM the peer. - LastRecvResourceSuccess *time.Time + LastRecvResourceSuccess time.Time // LastRecvError tracks either: // - The time we failed to store a resource replicated FROM the peer. // - The time of the last error when receiving from the stream. - LastRecvError *time.Time + LastRecvError time.Time // LastRecvErrorMessage tracks the last error message when receiving from the stream. LastRecvErrorMessage string @@ -273,47 +263,47 @@ func (s *MutableStatus) Done() <-chan struct{} { func (s *MutableStatus) TrackAck() { s.mu.Lock() - s.LastAck = ptr(s.timeNow().UTC()) + s.LastAck = s.timeNow().UTC() s.mu.Unlock() } func (s *MutableStatus) TrackSendError(error string) { s.mu.Lock() - s.LastSendError = ptr(s.timeNow().UTC()) + s.LastSendError = s.timeNow().UTC() s.LastSendErrorMessage = error s.mu.Unlock() } func (s *MutableStatus) TrackSendSuccess() { s.mu.Lock() - s.LastSendSuccess = ptr(s.timeNow().UTC()) + s.LastSendSuccess = s.timeNow().UTC() s.mu.Unlock() } // TrackRecvResourceSuccess tracks receiving a replicated resource. 
func (s *MutableStatus) TrackRecvResourceSuccess() { s.mu.Lock() - s.LastRecvResourceSuccess = ptr(s.timeNow().UTC()) + s.LastRecvResourceSuccess = s.timeNow().UTC() s.mu.Unlock() } // TrackRecvHeartbeat tracks receiving a heartbeat from our peer. func (s *MutableStatus) TrackRecvHeartbeat() { s.mu.Lock() - s.LastRecvHeartbeat = ptr(s.timeNow().UTC()) + s.LastRecvHeartbeat = s.timeNow().UTC() s.mu.Unlock() } func (s *MutableStatus) TrackRecvError(error string) { s.mu.Lock() - s.LastRecvError = ptr(s.timeNow().UTC()) + s.LastRecvError = s.timeNow().UTC() s.LastRecvErrorMessage = error s.mu.Unlock() } func (s *MutableStatus) TrackNack(msg string) { s.mu.Lock() - s.LastNack = ptr(s.timeNow().UTC()) + s.LastNack = s.timeNow().UTC() s.LastNackMessage = msg s.mu.Unlock() } @@ -321,7 +311,7 @@ func (s *MutableStatus) TrackNack(msg string) { func (s *MutableStatus) TrackConnected() { s.mu.Lock() s.Connected = true - s.DisconnectTime = &time.Time{} + s.DisconnectTime = time.Time{} s.DisconnectErrorMessage = "" s.mu.Unlock() } @@ -331,7 +321,7 @@ func (s *MutableStatus) TrackConnected() { func (s *MutableStatus) TrackDisconnectedGracefully() { s.mu.Lock() s.Connected = false - s.DisconnectTime = ptr(s.timeNow().UTC()) + s.DisconnectTime = s.timeNow().UTC() s.DisconnectErrorMessage = "" s.mu.Unlock() } @@ -341,7 +331,7 @@ func (s *MutableStatus) TrackDisconnectedGracefully() { func (s *MutableStatus) TrackDisconnectedDueToError(error string) { s.mu.Lock() s.Connected = false - s.DisconnectTime = ptr(s.timeNow().UTC()) + s.DisconnectTime = s.timeNow().UTC() s.DisconnectErrorMessage = error s.mu.Unlock() } @@ -399,7 +389,3 @@ func (s *MutableStatus) GetExportedServicesCount() int { return len(s.ExportedServices) } - -func ptr[T any](x T) *T { - return &x -} diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index cfe95d4012b75..bb018b4b46f40 100644 --- 
a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -29,7 +29,7 @@ func TestTracker_IsHealthy(t *testing.T) { tracker: NewTracker(defaultIncomingHeartbeatTimeout), expectedVal: true, modifierFunc: func(status *MutableStatus) { - status.DisconnectTime = ptr(time.Now()) + status.DisconnectTime = time.Now() }, }, { @@ -37,7 +37,7 @@ func TestTracker_IsHealthy(t *testing.T) { tracker: NewTracker(1 * time.Millisecond), expectedVal: false, modifierFunc: func(status *MutableStatus) { - status.DisconnectTime = ptr(time.Now().Add(-1 * time.Minute)) + status.DisconnectTime = time.Now().Add(-1 * time.Minute) }, }, { @@ -46,8 +46,8 @@ func TestTracker_IsHealthy(t *testing.T) { expectedVal: true, modifierFunc: func(status *MutableStatus) { now := time.Now() - status.LastRecvResourceSuccess = &now - status.LastRecvError = ptr(now.Add(1 * time.Second)) + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) }, }, { @@ -56,8 +56,8 @@ func TestTracker_IsHealthy(t *testing.T) { expectedVal: true, modifierFunc: func(status *MutableStatus) { now := time.Now() - status.LastRecvResourceSuccess = &now - status.LastRecvError = ptr(now.Add(1 * time.Second)) + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) }, }, { @@ -66,8 +66,8 @@ func TestTracker_IsHealthy(t *testing.T) { expectedVal: false, modifierFunc: func(status *MutableStatus) { now := time.Now().Add(-2 * time.Second) - status.LastRecvResourceSuccess = &now - status.LastRecvError = ptr(now.Add(1 * time.Second)) + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) }, }, { @@ -76,8 +76,8 @@ func TestTracker_IsHealthy(t *testing.T) { expectedVal: true, modifierFunc: func(status *MutableStatus) { now := time.Now() - status.LastAck = &now - status.LastNack = ptr(now.Add(1 * time.Second)) + status.LastAck = now + status.LastNack = now.Add(1 * 
time.Second) }, }, { @@ -86,8 +86,8 @@ func TestTracker_IsHealthy(t *testing.T) { expectedVal: false, modifierFunc: func(status *MutableStatus) { now := time.Now().Add(-2 * time.Second) - status.LastAck = &now - status.LastNack = ptr(now.Add(1 * time.Second)) + status.LastAck = now + status.LastNack = now.Add(1 * time.Second) }, }, { @@ -148,7 +148,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { }) var sequence uint64 - var lastSuccess *time.Time + var lastSuccess time.Time testutil.RunStep(t, "stream updated", func(t *testing.T) { statusPtr.TrackAck() @@ -157,7 +157,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { status, ok := tracker.StreamStatus(peerID) require.True(t, ok) - lastSuccess = ptr(it.base.Add(time.Duration(sequence) * time.Second).UTC()) + lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ Connected: true, LastAck: lastSuccess, @@ -171,7 +171,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { expect := Status{ Connected: false, - DisconnectTime: ptr(it.base.Add(time.Duration(sequence) * time.Second).UTC()), + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), LastAck: lastSuccess, } status, ok := tracker.StreamStatus(peerID) @@ -184,9 +184,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSuccess, - DisconnectTime: &time.Time{}, + Connected: true, + LastAck: lastSuccess, + // DisconnectTime gets cleared on re-connect. 
} @@ -271,7 +271,7 @@ func TestMutableStatus_TrackConnected(t *testing.T) { s := MutableStatus{ Status: Status{ Connected: false, - DisconnectTime: ptr(time.Now()), + DisconnectTime: time.Now(), DisconnectErrorMessage: "disconnected", }, } @@ -279,7 +279,7 @@ func TestMutableStatus_TrackConnected(t *testing.T) { require.True(t, s.IsConnected()) require.True(t, s.Connected) - require.Equal(t, &time.Time{}, s.DisconnectTime) + require.Equal(t, time.Time{}, s.DisconnectTime) require.Empty(t, s.DisconnectErrorMessage) } @@ -287,7 +287,7 @@ func TestMutableStatus_TrackDisconnectedGracefully(t *testing.T) { it := incrementalTime{ base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), } - disconnectTime := ptr(it.FutureNow(1)) + disconnectTime := it.FutureNow(1) s := MutableStatus{ timeNow: it.Now, @@ -308,7 +308,7 @@ func TestMutableStatus_TrackDisconnectedDueToError(t *testing.T) { it := incrementalTime{ base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), } - disconnectTime := ptr(it.FutureNow(1)) + disconnectTime := it.FutureNow(1) s := MutableStatus{ timeNow: it.Now, diff --git a/agent/grpc-internal/client.go b/agent/grpc-internal/client.go index dfeb3dbb20e9c..18596eec715d3 100644 --- a/agent/grpc-internal/client.go +++ b/agent/grpc-internal/client.go @@ -148,7 +148,9 @@ func (c *ClientConnPool) dial(datacenter string, serverType string) (*grpc.Clien grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 30 * time.Second, Timeout: 10 * time.Second, - })) + }), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(8*1024*1024), grpc.MaxCallRecvMsgSize(8*1024*1024)), + ) if err != nil { return nil, err } diff --git a/agent/hcp/bootstrap/bootstrap.go b/agent/hcp/bootstrap/bootstrap.go index a55bbf49aee93..3de7de435032b 100644 --- a/agent/hcp/bootstrap/bootstrap.go +++ b/agent/hcp/bootstrap/bootstrap.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -218,11 +219,11 @@ func 
persistTLSCerts(dataDir string, bsCfg *hcp.BootstrapConfig) error { return err } - if err := os.WriteFile(filepath.Join(dir, certFileName), []byte(bsCfg.TLSCert), 0600); err != nil { + if err := ioutil.WriteFile(filepath.Join(dir, certFileName), []byte(bsCfg.TLSCert), 0600); err != nil { return err } - if err := os.WriteFile(filepath.Join(dir, keyFileName), []byte(bsCfg.TLSCertKey), 0600); err != nil { + if err := ioutil.WriteFile(filepath.Join(dir, keyFileName), []byte(bsCfg.TLSCertKey), 0600); err != nil { return err } @@ -255,7 +256,7 @@ func persistBootstrapConfig(dataDir, cfgJSON string) error { // Persist the important bits we got from bootstrapping. The TLS certs are // already persisted, just need to persist the config we are going to add. name := filepath.Join(dataDir, subDir, configFileName) - return os.WriteFile(name, []byte(cfgJSON), 0600) + return ioutil.WriteFile(name, []byte(cfgJSON), 0600) } func loadPersistedBootstrapConfig(rc *config.RuntimeConfig, ui UI) (string, bool) { @@ -280,7 +281,7 @@ func loadPersistedBootstrapConfig(rc *config.RuntimeConfig, ui UI) (string, bool } name := filepath.Join(rc.DataDir, subDir, configFileName) - jsonBs, err := os.ReadFile(name) + jsonBs, err := ioutil.ReadFile(name) if err != nil { ui.Warn(fmt.Sprintf("failed to read local bootstrap config file, ignoring local files: %s", err)) return "", false diff --git a/agent/hcp/manager_test.go b/agent/hcp/manager_test.go index cb4d729b7fa88..68a4505e995ba 100644 --- a/agent/hcp/manager_test.go +++ b/agent/hcp/manager_test.go @@ -1,7 +1,7 @@ package hcp import ( - "io" + "io/ioutil" "testing" "time" @@ -20,7 +20,7 @@ func TestManager_Run(t *testing.T) { client.EXPECT().PushServerStatus(mock.Anything, &ServerStatus{ID: t.Name()}).Return(nil).Once() mgr := NewManager(ManagerConfig{ Client: client, - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + Logger: hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}), StatusFn: statusF, }) mgr.testUpdateSent = 
updateCh @@ -51,7 +51,7 @@ func TestManager_SendUpdate(t *testing.T) { client.EXPECT().PushServerStatus(mock.Anything, &ServerStatus{ID: t.Name()}).Return(nil).Twice() mgr := NewManager(ManagerConfig{ Client: client, - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + Logger: hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}), StatusFn: statusF, }) mgr.testUpdateSent = updateCh @@ -81,7 +81,7 @@ func TestManager_SendUpdate_Periodic(t *testing.T) { client.EXPECT().PushServerStatus(mock.Anything, &ServerStatus{ID: t.Name()}).Return(nil).Twice() mgr := NewManager(ManagerConfig{ Client: client, - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + Logger: hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}), StatusFn: statusF, MaxInterval: time.Second, MinInterval: 100 * time.Millisecond, diff --git a/agent/http_register.go b/agent/http_register.go index caa55d5ca4bcc..6dd8b41c60049 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -99,7 +99,6 @@ func init() { registerEndpoint("/v1/internal/acl/authorize", []string{"POST"}, (*HTTPHandlers).ACLAuthorize) registerEndpoint("/v1/kv/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).KVSEndpoint) registerEndpoint("/v1/operator/raft/configuration", []string{"GET"}, (*HTTPHandlers).OperatorRaftConfiguration) - registerEndpoint("/v1/operator/raft/transfer-leader", []string{"POST"}, (*HTTPHandlers).OperatorRaftTransferLeader) registerEndpoint("/v1/operator/raft/peer", []string{"DELETE"}, (*HTTPHandlers).OperatorRaftPeer) registerEndpoint("/v1/operator/keyring", []string{"GET", "POST", "PUT", "DELETE"}, (*HTTPHandlers).OperatorKeyringEndpoint) registerEndpoint("/v1/operator/autopilot/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).OperatorAutopilotConfiguration) diff --git a/agent/http_test.go b/agent/http_test.go index 4464fe18341ec..f13e8143da1f9 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" 
"net" "net/http" "net/http/httptest" @@ -91,7 +92,7 @@ func TestHTTPServer_UnixSocket(t *testing.T) { } defer resp.Body.Close() - if body, err := io.ReadAll(resp.Body); err != nil || len(body) == 0 { + if body, err := ioutil.ReadAll(resp.Body); err != nil || len(body) == 0 { t.Fatalf("bad: %s %v", body, err) } } @@ -110,7 +111,7 @@ func TestHTTPServer_UnixSocket_FileExists(t *testing.T) { socket := filepath.Join(tempDir, "test.sock") // Create a regular file at the socket path - if err := os.WriteFile(socket, []byte("hello world"), 0644); err != nil { + if err := ioutil.WriteFile(socket, []byte("hello world"), 0644); err != nil { t.Fatalf("err: %s", err) } fi, err := os.Stat(socket) @@ -209,7 +210,7 @@ func TestSetupHTTPServer_HTTP2(t *testing.T) { t.Fatalf("err: %v", err) } defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) + body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("err: %v", err) } @@ -737,7 +738,7 @@ func testPrettyPrint(pretty string, t *testing.T) { expected, _ := json.MarshalIndent(r, "", " ") expected = append(expected, "\n"...) 
- actual, err := io.ReadAll(resp.Body) + actual, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/keyring.go b/agent/keyring.go index a430eee92cf8f..506da70f7494b 100644 --- a/agent/keyring.go +++ b/agent/keyring.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "io/ioutil" "os" "path/filepath" @@ -185,7 +186,7 @@ func loadKeyringFile(c *serf.Config) error { return err } - keyringData, err := os.ReadFile(c.KeyringFile) + keyringData, err := ioutil.ReadFile(c.KeyringFile) if err != nil { return err } diff --git a/agent/keyring_test.go b/agent/keyring_test.go index 26b29b7476181..3362a2c70b6ae 100644 --- a/agent/keyring_test.go +++ b/agent/keyring_test.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/base64" "fmt" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -267,7 +267,7 @@ func TestAgent_InitKeyring(t *testing.T) { t.Fatalf("err: %s", err) } - content, err := os.ReadFile(file) + content, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) } @@ -281,7 +281,7 @@ func TestAgent_InitKeyring(t *testing.T) { } // Content should still be the same - content, err = os.ReadFile(file) + content, err = ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/metrics_test.go b/agent/metrics_test.go index a66beae3bd1f6..4b80f9edafb7c 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -4,9 +4,9 @@ import ( "context" "crypto/x509" "fmt" + "io/ioutil" "net/http" "net/http/httptest" - "os" "path/filepath" "strings" "testing" @@ -337,7 +337,7 @@ func TestHTTPHandlers_AgentMetrics_TLSCertExpiry_Prometheus(t *testing.T) { require.NoError(t, err) caPath := filepath.Join(dir, "ca.pem") - err = os.WriteFile(caPath, []byte(caPEM), 0600) + err = ioutil.WriteFile(caPath, []byte(caPEM), 0600) require.NoError(t, err) signer, err := tlsutil.ParseSigner(caPK) @@ -353,11 +353,11 @@ func TestHTTPHandlers_AgentMetrics_TLSCertExpiry_Prometheus(t *testing.T) { 
require.NoError(t, err) certPath := filepath.Join(dir, "cert.pem") - err = os.WriteFile(certPath, []byte(pem), 0600) + err = ioutil.WriteFile(certPath, []byte(pem), 0600) require.NoError(t, err) keyPath := filepath.Join(dir, "cert.key") - err = os.WriteFile(keyPath, []byte(key), 0600) + err = ioutil.WriteFile(keyPath, []byte(key), 0600) require.NoError(t, err) hcl := fmt.Sprintf(` diff --git a/agent/nodeid.go b/agent/nodeid.go index 4e6f3e8d5f00b..27a7346700350 100644 --- a/agent/nodeid.go +++ b/agent/nodeid.go @@ -3,6 +3,7 @@ package agent import ( "crypto/sha512" "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -35,7 +36,7 @@ func newNodeIDFromConfig(config *config.RuntimeConfig, logger hclog.Logger) (typ // Load saved state, if any. Since a user could edit this, we also validate it. filename := filepath.Join(config.DataDir, "node-id") if _, err := os.Stat(filename); err == nil { - rawID, err := os.ReadFile(filename) + rawID, err := ioutil.ReadFile(filename) if err != nil { return "", err } @@ -55,7 +56,7 @@ func newNodeIDFromConfig(config *config.RuntimeConfig, logger hclog.Logger) (typ if err := lib.EnsurePath(filename, false); err != nil { return "", err } - if err := os.WriteFile(filename, []byte(id), 0600); err != nil { + if err := ioutil.WriteFile(filename, []byte(id), 0600); err != nil { return "", fmt.Errorf("failed to write NodeID to disk: %w", err) } return types.NodeID(id), nil diff --git a/agent/nodeid_test.go b/agent/nodeid_test.go index da9df2bd2358f..647aea14c53f2 100644 --- a/agent/nodeid_test.go +++ b/agent/nodeid_test.go @@ -2,7 +2,7 @@ package agent import ( "fmt" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -59,7 +59,7 @@ func TestNewNodeIDFromConfig(t *testing.T) { t.Run("invalid NodeID in file", func(t *testing.T) { cfg.NodeID = "" filename := filepath.Join(cfg.DataDir, "node-id") - err := os.WriteFile(filename, []byte("adf4238a!882b!9ddc!4a9d!5b6758e4159e"), 0600) + err := ioutil.WriteFile(filename, 
[]byte("adf4238a!882b!9ddc!4a9d!5b6758e4159e"), 0600) require.NoError(t, err) _, err = newNodeIDFromConfig(cfg, logger) @@ -70,7 +70,7 @@ func TestNewNodeIDFromConfig(t *testing.T) { t.Run("valid NodeID in file", func(t *testing.T) { cfg.NodeID = "" filename := filepath.Join(cfg.DataDir, "node-id") - err := os.WriteFile(filename, []byte("ADF4238a-882b-9ddc-4a9d-5b6758e4159e"), 0600) + err := ioutil.WriteFile(filename, []byte("ADF4238a-882b-9ddc-4a9d-5b6758e4159e"), 0600) require.NoError(t, err) nodeID, err := newNodeIDFromConfig(cfg, logger) diff --git a/agent/operator_endpoint.go b/agent/operator_endpoint.go index 10af5e31d0444..851ef52e1c31d 100644 --- a/agent/operator_endpoint.go +++ b/agent/operator_endpoint.go @@ -2,8 +2,6 @@ package agent import ( "fmt" - external "github.com/hashicorp/consul/agent/grpc-external" - "github.com/hashicorp/consul/proto/pboperator" "net/http" "strconv" "time" @@ -33,43 +31,6 @@ func (s *HTTPHandlers) OperatorRaftConfiguration(resp http.ResponseWriter, req * return reply, nil } -// OperatorRaftTransferLeader is used to transfer raft cluster leadership to another node -func (s *HTTPHandlers) OperatorRaftTransferLeader(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - - var entMeta acl.EnterpriseMeta - if err := s.parseEntMetaPartition(req, &entMeta); err != nil { - return nil, err - } - - params := req.URL.Query() - _, hasID := params["id"] - ID := "" - if hasID { - ID = params.Get("id") - } - args := pboperator.TransferLeaderRequest{ - ID: ID, - } - - var token string - s.parseToken(req, &token) - ctx, err := external.ContextWithQueryOptions(req.Context(), structs.QueryOptions{Token: token}) - if err != nil { - return nil, err - } - - result, err := s.agent.rpcClientOperator.TransferLeader(ctx, &args) - if err != nil { - return nil, err - } - if result.Success != true { - return nil, HTTPError{StatusCode: http.StatusNotFound, Reason: fmt.Sprintf("Failed to transfer Leader: %s", err.Error())} - 
} - reply := new(api.TransferLeaderResponse) - pboperator.TransferLeaderResponseToAPI(result, reply) - return reply, nil -} - // OperatorRaftPeer supports actions on Raft peers. Currently we only support // removing peers by address. func (s *HTTPHandlers) OperatorRaftPeer(resp http.ResponseWriter, req *http.Request) (interface{}, error) { diff --git a/agent/pool/peek_test.go b/agent/pool/peek_test.go index d510127993660..b9e74ad9342f0 100644 --- a/agent/pool/peek_test.go +++ b/agent/pool/peek_test.go @@ -5,7 +5,7 @@ import ( "crypto/x509" "errors" "fmt" - "io" + "io/ioutil" "net" "testing" "time" @@ -55,7 +55,7 @@ func TestPeekForTLS_not_TLS(t *testing.T) { require.NoError(t, err) require.False(t, isTLS) - all, err := io.ReadAll(wrapped) + all, err := ioutil.ReadAll(wrapped) require.NoError(t, err) require.Equal(t, tc.connData, all) }) @@ -160,7 +160,7 @@ func testPeekForTLS_withTLS(t *testing.T, connData []byte) { return } - all, err := io.ReadAll(tlsConn) + all, err := ioutil.ReadAll(tlsConn) if err != nil { serverErrCh <- err return diff --git a/agent/pool/pool.go b/agent/pool/pool.go index 593838601f6bd..c8887ae35f711 100644 --- a/agent/pool/pool.go +++ b/agent/pool/pool.go @@ -595,14 +595,14 @@ func (p *ConnPool) rpcInsecure(dc string, addr net.Addr, method string, args int var codec rpc.ClientCodec conn, _, err := p.dial(dc, addr, 0, RPCTLSInsecure) if err != nil { - return fmt.Errorf("rpcinsecure: error establishing connection: %w", err) + return fmt.Errorf("rpcinsecure error establishing connection: %w", err) } codec = msgpackrpc.NewCodecFromHandle(true, true, conn, structs.MsgpackHandle) // Make the RPC call err = msgpackrpc.CallWithCodec(codec, method, args, reply) if err != nil { - return fmt.Errorf("rpcinsecure: error making call: %w", err) + return fmt.Errorf("rpcinsecure error making call: %w", err) } return nil diff --git a/agent/proxycfg-sources/catalog/config_source.go b/agent/proxycfg-sources/catalog/config_source.go index 
0f86a3a37d856..a6d60c328f023 100644 --- a/agent/proxycfg-sources/catalog/config_source.go +++ b/agent/proxycfg-sources/catalog/config_source.go @@ -132,7 +132,16 @@ func (m *ConfigSource) startSync(closeCh <-chan chan struct{}, proxyID proxycfg. return nil, err } - _, ns, err = configentry.MergeNodeServiceWithCentralConfig(ws, store, ns, logger) + _, ns, err = configentry.MergeNodeServiceWithCentralConfig( + ws, + store, + // TODO(agentless): it doesn't seem like we actually *need* any of the + // values on this request struct - we should try to remove the parameter + // in case that changes in the future as this call-site isn't passing them. + &structs.ServiceSpecificRequest{}, + ns, + logger, + ) if err != nil { logger.Error("failed to merge with central config", "error", err.Error()) return nil, err diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index b6c1cd6e0297a..a6549af009480 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -91,9 +91,6 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) } - if resp.Entry == nil { - return nil - } gatewayConf, ok := resp.Entry.(*structs.IngressGatewayConfigEntry) if !ok { return fmt.Errorf("invalid type for config entry: %T", resp.Entry) diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index 8fbb247e084cf..00ca99ab7956b 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -3,7 +3,7 @@ package proxycfg import ( "context" "fmt" - "os" + "io/ioutil" "path" "path/filepath" "runtime" @@ -923,7 +923,7 @@ func golden(t testing.T, name string) string { t.Helper() golden := filepath.Join(projectRoot(), "../", "/xds/testdata", name+".golden") - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) diff --git 
a/agent/proxycfg/testing_ingress_gateway.go b/agent/proxycfg/testing_ingress_gateway.go index f2f5ab67f77d3..bf471051af893 100644 --- a/agent/proxycfg/testing_ingress_gateway.go +++ b/agent/proxycfg/testing_ingress_gateway.go @@ -102,47 +102,6 @@ func TestConfigSnapshotIngressGateway( }, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates)) } -// TestConfigSnapshotIngressGateway_NilConfigEntry is used to test when -// the update event for the config entry returns nil -// since this always happens on the first watch if it doesn't exist. -func TestConfigSnapshotIngressGateway_NilConfigEntry( - t testing.T, -) *ConfigSnapshot { - roots, placeholderLeaf := TestCerts(t) - - baseEvents := []UpdateEvent{ - { - CorrelationID: rootsWatchID, - Result: roots, - }, - { - CorrelationID: gatewayConfigWatchID, - Result: &structs.ConfigEntryResponse{ - Entry: nil, // The first watch on a config entry will return nil if the config entry doesn't exist. - }, - }, - { - CorrelationID: leafWatchID, - Result: placeholderLeaf, - }, - { - CorrelationID: gatewayServicesWatchID, - Result: &structs.IndexedGatewayServices{ - Services: nil, - }, - }, - } - - return testConfigSnapshotFixture(t, &structs.NodeService{ - Kind: structs.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 9999, - Address: "1.2.3.4", - Meta: nil, - TaggedAddresses: nil, - }, nil, nil, testSpliceEvents(baseEvents, nil)) -} - func TestConfigSnapshotIngressGatewaySDS_GatewayLevel_MixedTLS(t testing.T) *ConfigSnapshot { secureUID := UpstreamIDFromString("secure") secureChain := discoverychain.TestCompileConfigEntries( diff --git a/agent/remote_exec.go b/agent/remote_exec.go index 3668ef8406aa0..d097e9c2c994b 100644 --- a/agent/remote_exec.go +++ b/agent/remote_exec.go @@ -3,6 +3,7 @@ package agent import ( "encoding/json" "fmt" + "io/ioutil" "os" osexec "os/exec" "path" @@ -144,7 +145,7 @@ func (a *Agent) handleRemoteExec(msg *UserEvent) { // Check if this is a script, we may need to spill to disk var script 
string if len(spec.Script) != 0 { - tmpFile, err := os.CreateTemp("", "rexec") + tmpFile, err := ioutil.TempFile("", "rexec") if err != nil { a.logger.Debug("failed to make tmp file", "error", err) exitCode = 255 diff --git a/agent/routine-leak-checker/leak_test.go b/agent/routine-leak-checker/leak_test.go index 34f0989238851..fd64e9c05d9f2 100644 --- a/agent/routine-leak-checker/leak_test.go +++ b/agent/routine-leak-checker/leak_test.go @@ -2,7 +2,7 @@ package leakcheck import ( "crypto/x509" - "os" + "io/ioutil" "path/filepath" "testing" @@ -51,9 +51,9 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { keyPath := filepath.Join(d, "key.pem") caPath := filepath.Join(d, "cacert.pem") - require.NoError(t, os.WriteFile(certPath, []byte(certPEM), 0600)) - require.NoError(t, os.WriteFile(keyPath, []byte(keyPEM), 0600)) - require.NoError(t, os.WriteFile(caPath, []byte(caPEM), 0600)) + require.NoError(t, ioutil.WriteFile(certPath, []byte(certPEM), 0600)) + require.NoError(t, ioutil.WriteFile(keyPath, []byte(keyPEM), 0600)) + require.NoError(t, ioutil.WriteFile(caPath, []byte(caPEM), 0600)) aclParams := agent.DefaultTestACLConfigParams() aclParams.PrimaryDatacenter = "primary" diff --git a/agent/rpc/operator/service.go b/agent/rpc/operator/service.go deleted file mode 100644 index cbe876a7f1d6b..0000000000000 --- a/agent/rpc/operator/service.go +++ /dev/null @@ -1,103 +0,0 @@ -package operator - -import ( - "context" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - external "github.com/hashicorp/consul/agent/grpc-external" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pboperator" - "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" -) - -// For private/internal gRPC handlers, protoc-gen-rpc-glue generates the -// requisite methods to satisfy the structs.RPCInfo interface using fields -// from the pbcommon package. 
This service is public, so we can't use those -// fields in our proto definition. Instead, we construct our RPCInfo manually. -var writeRequest struct { - structs.WriteRequest - structs.DCSpecificRequest -} - -var readRequest struct { - structs.QueryOptions - structs.DCSpecificRequest -} - -// Server implements pboperator.OperatorService to provide RPC operations for -// managing operator operation. -type Server struct { - Config -} - -func (s *Server) TransferLeader(ctx context.Context, request *pboperator.TransferLeaderRequest) (*pboperator.TransferLeaderResponse, error) { - resp := &pboperator.TransferLeaderResponse{Success: false} - handled, err := s.ForwardRPC(&writeRequest, func(conn *grpc.ClientConn) error { - ctx := external.ForwardMetadataContext(ctx) - var err error - resp, err = pboperator.NewOperatorServiceClient(conn).TransferLeader(ctx, request) - return err - }) - if handled || err != nil { - return resp, err - } - - var authzCtx acl.AuthorizerContext - entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() - - options, err := external.QueryOptionsFromContext(ctx) - if err != nil { - return nil, err - } - - authz, err := s.Backend.ResolveTokenAndDefaultMeta(options.Token, entMeta, &authzCtx) - if err != nil { - return resp, err - } - - if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(&authzCtx); err != nil { - return resp, err - } - - return s.Backend.TransferLeader(ctx, request) -} - -type Config struct { - Backend Backend - Logger hclog.Logger - ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) - Datacenter string -} - -func NewServer(cfg Config) *Server { - requireNotNil(cfg.Backend, "Backend") - requireNotNil(cfg.Logger, "Logger") - requireNotNil(cfg.ForwardRPC, "ForwardRPC") - if cfg.Datacenter == "" { - panic("Datacenter is required") - } - return &Server{ - Config: cfg, - } -} - -func requireNotNil(v interface{}, name string) { - if v == nil { - panic(name + " is required") - } -} - -var _ 
pboperator.OperatorServiceServer = (*Server)(nil) - -func (s *Server) Register(grpcServer *grpc.Server) { - pboperator.RegisterOperatorServiceServer(grpcServer, s) -} - -// Backend defines the core integrations the Operator endpoint depends on. A -// functional implementation will integrate with various operator operation such as -// raft, autopilot operation. The only currently implemented operation is raft leader transfer -type Backend interface { - TransferLeader(ctx context.Context, request *pboperator.TransferLeaderRequest) (*pboperator.TransferLeaderResponse, error) - ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) -} diff --git a/agent/rpc/operator/service_test.go b/agent/rpc/operator/service_test.go deleted file mode 100644 index 7686c432412fd..0000000000000 --- a/agent/rpc/operator/service_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package operator - -import ( - "context" - "fmt" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pboperator" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/mock" - "google.golang.org/grpc" - "testing" - - "github.com/stretchr/testify/require" -) - -type MockBackend struct { - mock.Mock - authorizer acl.Authorizer -} - -func (m *MockBackend) TransferLeader(ctx context.Context, request *pboperator.TransferLeaderRequest) (*pboperator.TransferLeaderResponse, error) { - called := m.Called(ctx, request) - ret := called.Get(0) - if ret == nil { - return nil, called.Error(1) - } - return ret.(*pboperator.TransferLeaderResponse), called.Error(1) -} - -func (m *MockBackend) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) { - return resolver.Result{Authorizer: m.authorizer}, nil -} - -func 
TestLeaderTransfer_ACL_Deny(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("OperatorWrite", mock.Anything).Return(acl.Deny) - server := NewServer(Config{Datacenter: "dc1", Backend: &MockBackend{authorizer: &authorizer}, Logger: hclog.New(nil), ForwardRPC: doForwardRPC}) - - _, err := server.TransferLeader(context.Background(), &pboperator.TransferLeaderRequest{}) - require.Error(t, err) - require.Equal(t, "Permission denied: provided token lacks permission 'operator:write'", err.Error()) -} - -func TestLeaderTransfer_ACL_Allowed(t *testing.T) { - authorizer := &acl.MockAuthorizer{} - authorizer.On("OperatorWrite", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: authorizer} - backend.On("TransferLeader", mock.Anything, mock.Anything).Return(nil, nil) - server := NewServer(Config{Datacenter: "dc1", Backend: backend, Logger: hclog.New(nil), ForwardRPC: doForwardRPC}) - - _, err := server.TransferLeader(context.Background(), &pboperator.TransferLeaderRequest{}) - require.NoError(t, err) -} - -func TestLeaderTransfer_LeaderTransfer_Fail(t *testing.T) { - authorizer := &acl.MockAuthorizer{} - authorizer.On("OperatorWrite", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: authorizer} - backend.On("TransferLeader", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test")) - server := NewServer(Config{Datacenter: "dc1", Backend: backend, Logger: hclog.New(nil), ForwardRPC: doForwardRPC}) - - _, err := server.TransferLeader(context.Background(), &pboperator.TransferLeaderRequest{}) - require.Error(t, err) - require.Equal(t, "test", err.Error()) -} - -func TestLeaderTransfer_LeaderTransfer_Success(t *testing.T) { - authorizer := &acl.MockAuthorizer{} - authorizer.On("OperatorWrite", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: authorizer} - backend.On("TransferLeader", mock.Anything, mock.Anything).Return(&pboperator.TransferLeaderResponse{Success: true}, nil) - server := 
NewServer(Config{Datacenter: "dc1", Backend: backend, Logger: hclog.New(nil), ForwardRPC: doForwardRPC}) - - ret, err := server.TransferLeader(context.Background(), &pboperator.TransferLeaderRequest{}) - require.NoError(t, err) - require.NotNil(t, ret) - require.True(t, ret.Success) -} - -func TestLeaderTransfer_LeaderTransfer_ForwardRPC(t *testing.T) { - authorizer := &acl.MockAuthorizer{} - authorizer.On("OperatorWrite", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: authorizer} - backend.On("TransferLeader", mock.Anything, mock.Anything).Return(&pboperator.TransferLeaderResponse{}, nil) - server := NewServer(Config{Datacenter: "dc1", Backend: backend, Logger: hclog.New(nil), ForwardRPC: noopForwardRPC}) - - ret, err := server.TransferLeader(context.Background(), &pboperator.TransferLeaderRequest{}) - require.NoError(t, err) - require.NotNil(t, ret) - require.False(t, ret.Success) -} -func noopForwardRPC(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) { - return true, nil -} - -func doForwardRPC(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) { - return false, nil -} diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 45cbb98de58c1..9974d11e1a2e3 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -719,17 +719,14 @@ func (s *Server) reconcilePeering(peering *pbpeering.Peering) *pbpeering.Peering cp.State = pbpeering.PeeringState_FAILING } - latest := func(tt ...*time.Time) *time.Time { + latest := func(tt ...time.Time) time.Time { latest := time.Time{} for _, t := range tt { - if t == nil { - continue - } if t.After(latest) { - latest = *t + latest = t } } - return &latest + return latest } lastRecv := latest(streamState.LastRecvHeartbeat, streamState.LastRecvError, streamState.LastRecvResourceSuccess) @@ -738,9 +735,9 @@ func (s *Server) reconcilePeering(peering *pbpeering.Peering) *pbpeering.Peering cp.StreamStatus = &pbpeering.StreamStatus{ 
ImportedServices: streamState.ImportedServices, ExportedServices: streamState.ExportedServices, - LastHeartbeat: pbpeering.TimePtrToProto(streamState.LastRecvHeartbeat), - LastReceive: pbpeering.TimePtrToProto(lastRecv), - LastSend: pbpeering.TimePtrToProto(lastSend), + LastHeartbeat: structs.TimeToProto(streamState.LastRecvHeartbeat), + LastReceive: structs.TimeToProto(lastRecv), + LastSend: structs.TimeToProto(lastSend), } return cp diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index faae32b632176..06c7898ffc6ff 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -5,8 +5,8 @@ import ( "encoding/base64" "encoding/json" "fmt" + "io/ioutil" "net" - "os" "path" "testing" "time" @@ -69,7 +69,7 @@ func TestPeeringService_GenerateToken(t *testing.T) { signer, _, _ := tlsutil.GeneratePrivateKey() ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) cafile := path.Join(dir, "cacert.pem") - require.NoError(t, os.WriteFile(cafile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, func(c *consul.Config) { @@ -181,7 +181,7 @@ func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) { signer, _, _ := tlsutil.GeneratePrivateKey() ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) cafile := path.Join(dir, "cacert.pem") - require.NoError(t, os.WriteFile(cafile, []byte(ca), 0600)) + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, func(c *consul.Config) { diff --git a/agent/service_manager_test.go b/agent/service_manager_test.go index 6b7757a76e294..cbbd9e5e9deca 100644 --- a/agent/service_manager_test.go +++ b/agent/service_manager_test.go @@ -3,6 +3,7 @@ package agent import ( "encoding/json" "fmt" + "io/ioutil" "os" "path/filepath" "testing" @@ 
-801,7 +802,7 @@ func expectJSONFile(t *testing.T, file string, expect interface{}, fixupContentB expected, err := json.Marshal(expect) require.NoError(t, err) - content, err := os.ReadFile(file) + content, err := ioutil.ReadFile(file) require.NoError(t, err) if fixupContentBeforeCompareFn != nil { diff --git a/agent/setup.go b/agent/setup.go index b014996dfab1c..d237e2fb9ee11 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -53,7 +53,7 @@ type BaseDeps struct { type ConfigLoader func(source config.Source) (config.LoadResult, error) -func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hclog.InterceptLogger) (BaseDeps, error) { +func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error) { d := BaseDeps{} result, err := configLoader(nil) if err != nil { @@ -63,14 +63,9 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl cfg := result.RuntimeConfig logConf := cfg.Logging logConf.Name = logging.Agent - - if providedLogger != nil { - d.Logger = providedLogger - } else { - d.Logger, err = logging.Setup(logConf, logOut) - if err != nil { - return d, err - } + d.Logger, err = logging.Setup(logConf, logOut) + if err != nil { + return d, err } grpcLogInitOnce.Do(func() { diff --git a/agent/testagent.go b/agent/testagent.go index 9642fca66823b..2d34ba198b66b 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -216,13 +216,10 @@ func (a *TestAgent) Start(t *testing.T) error { } else { result.RuntimeConfig.Telemetry.Disable = true } - // Lower the maximum backoff period of a cache refresh just for - // tests see #14956 for more. 
- result.RuntimeConfig.Cache.CacheRefreshMaxWait = 1 * time.Second } return result, err } - bd, err := NewBaseDeps(loader, logOutput, logger) + bd, err := NewBaseDeps(loader, logOutput) if err != nil { return fmt.Errorf("failed to create base deps: %w", err) } diff --git a/agent/token/persistence.go b/agent/token/persistence.go index c28afd4506d13..c78e2289127ab 100644 --- a/agent/token/persistence.go +++ b/agent/token/persistence.go @@ -3,6 +3,7 @@ package token import ( "encoding/json" "fmt" + "io/ioutil" "os" "path/filepath" @@ -141,7 +142,7 @@ func readPersistedFromFile(filename string) (persistedTokens, error) { LegacyAgentMaster string `json:"agent_master"` } - buf, err := os.ReadFile(filename) + buf, err := ioutil.ReadFile(filename) switch { case os.IsNotExist(err): // non-existence is not an error we care about diff --git a/agent/token/persistence_test.go b/agent/token/persistence_test.go index fc52df7e7870b..1bfe971fdb519 100644 --- a/agent/token/persistence_test.go +++ b/agent/token/persistence_test.go @@ -1,7 +1,7 @@ package token import ( - "os" + "io/ioutil" "path/filepath" "testing" @@ -63,7 +63,7 @@ func TestStore_Load(t *testing.T) { "replication" : "lima" }` - require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) // no updates since token persistence is not enabled @@ -92,7 +92,7 @@ func TestStore_Load(t *testing.T) { } tokens := `{"agent_master": "juliett"}` - require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "juliett", store.AgentRecoveryToken()) @@ -115,7 +115,7 @@ func TestStore_Load(t *testing.T) { ACLReplicationToken: "tango", } - require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) 
require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "mike", store.AgentToken()) @@ -139,7 +139,7 @@ func TestStore_Load(t *testing.T) { ACLReplicationToken: "zulu", } - require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "uniform", store.AgentToken()) @@ -158,7 +158,7 @@ func TestStore_Load(t *testing.T) { ACLReplicationToken: "four", } - require.NoError(t, os.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) err := store.Load(cfg, logger) require.Error(t, err) require.Contains(t, err.Error(), "failed to decode tokens file") @@ -179,7 +179,7 @@ func TestStore_Load(t *testing.T) { ACLReplicationToken: "foxtrot", } - require.NoError(t, os.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) + require.NoError(t, ioutil.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) err := store.Load(cfg, logger) require.Error(t, err) require.Contains(t, err.Error(), "failed to decode tokens file") diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 1bd9ff6c19498..0e8f9163a831d 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -6,10 +6,10 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/http/httptest" "net/url" - "os" "path/filepath" "sync/atomic" "testing" @@ -49,7 +49,7 @@ func TestUIIndex(t *testing.T) { // Create file path := filepath.Join(a.Config.UIConfig.Dir, "my-file") - if err := os.WriteFile(path, []byte("test"), 0644); err != nil { + if err := ioutil.WriteFile(path, []byte("test"), 0644); err != nil { t.Fatalf("err: %v", err) } diff --git a/agent/uiserver/uiserver_test.go b/agent/uiserver/uiserver_test.go index 1c310a63ce0b6..47110da5ab680 100644 --- a/agent/uiserver/uiserver_test.go +++ b/agent/uiserver/uiserver_test.go 
@@ -3,6 +3,7 @@ package uiserver import ( "bytes" "io" + "io/ioutil" "net/http" "net/http/httptest" "os" @@ -379,7 +380,7 @@ func TestCustomDir(t *testing.T) { defer os.RemoveAll(uiDir) path := filepath.Join(uiDir, "test-file") - require.NoError(t, os.WriteFile(path, []byte("test"), 0644)) + require.NoError(t, ioutil.WriteFile(path, []byte("test"), 0644)) cfg := basicUIEnabledConfig() cfg.UIConfig.Dir = uiDir @@ -426,7 +427,7 @@ func TestCompiledJS(t *testing.T) { require.Equal(t, http.StatusOK, rec.Code) require.Equal(t, rec.Result().Header["Content-Type"][0], "application/javascript") - wantCompiled, err := os.ReadFile("testdata/compiled-metrics-providers-golden.js") + wantCompiled, err := ioutil.ReadFile("testdata/compiled-metrics-providers-golden.js") require.NoError(t, err) require.Equal(t, rec.Body.String(), string(wantCompiled)) }) diff --git a/agent/watch_handler_test.go b/agent/watch_handler_test.go index a5ff8f7c723c4..33c0909fe5e56 100644 --- a/agent/watch_handler_test.go +++ b/agent/watch_handler_test.go @@ -1,7 +1,7 @@ package agent import ( - "io" + "io/ioutil" "net/http" "net/http/httptest" "os" @@ -20,14 +20,14 @@ func TestMakeWatchHandler(t *testing.T) { script := "bash -c 'echo $CONSUL_INDEX >> handler_index_out && cat >> handler_out'" handler := makeWatchHandler(testutil.Logger(t), script) handler(100, []string{"foo", "bar", "baz"}) - raw, err := os.ReadFile("handler_out") + raw, err := ioutil.ReadFile("handler_out") if err != nil { t.Fatalf("err: %v", err) } if string(raw) != "[\"foo\",\"bar\",\"baz\"]\n" { t.Fatalf("bad: %s", raw) } - raw, err = os.ReadFile("handler_index_out") + raw, err = ioutil.ReadFile("handler_index_out") if err != nil { t.Fatalf("err: %v", err) } @@ -47,7 +47,7 @@ func TestMakeHTTPWatchHandler(t *testing.T) { if customHeader != "abc" { t.Fatalf("bad: %s", idx) } - body, err := io.ReadAll(r.Body) + body, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("err: %v", err) } diff --git a/agent/xds/clusters_test.go 
b/agent/xds/clusters_test.go index f8cde23edb034..b9355b44de020 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -430,12 +430,6 @@ func TestClustersFromSnapshot(t *testing.T) { "default", nil, nil, nil) }, }, - { - name: "ingress-gateway-nil-config-entry", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotIngressGateway_NilConfigEntry(t) - }, - }, { name: "ingress-gateway-with-tls-outgoing-min-version", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/endpoints_test.go b/agent/xds/endpoints_test.go index 725fff00f66b2..432ecfa2c6a70 100644 --- a/agent/xds/endpoints_test.go +++ b/agent/xds/endpoints_test.go @@ -368,12 +368,6 @@ func TestEndpointsFromSnapshot(t *testing.T) { "default", nil, nil, nil) }, }, - { - name: "ingress-gateway-nil-config-entry", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotIngressGateway_NilConfigEntry(t) - }, - }, { name: "ingress-gateway-no-services", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/golden_test.go b/agent/xds/golden_test.go index 11ab299d4ec0a..0efcc155b95f7 100644 --- a/agent/xds/golden_test.go +++ b/agent/xds/golden_test.go @@ -3,6 +3,7 @@ package xds import ( "flag" "fmt" + "io/ioutil" "os" "path/filepath" "testing" @@ -87,7 +88,7 @@ func golden(t *testing.T, name, subname, latestSubname, got string) string { latestExpected := "" if latestSubname != "" && subname != latestSubname { latestGolden := filepath.Join("testdata", fmt.Sprintf("%s.%s.golden", name, latestSubname)) - raw, err := os.ReadFile(latestGolden) + raw, err := ioutil.ReadFile(latestGolden) require.NoError(t, err, "%q %q %q", name, subname, latestSubname) latestExpected = string(raw) } @@ -109,11 +110,11 @@ func golden(t *testing.T, name, subname, latestSubname, got string) string { return got } - require.NoError(t, os.WriteFile(golden, []byte(got), 0644)) + require.NoError(t, 
ioutil.WriteFile(golden, []byte(got), 0644)) return got } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) if latestExpected != "" && os.IsNotExist(err) { // In readonly mode if a specific golden file isn't found, we fallback // on the latest one. @@ -126,7 +127,7 @@ func golden(t *testing.T, name, subname, latestSubname, got string) string { func loadTestResource(t *testing.T, name string) string { t.Helper() - expected, err := os.ReadFile(filepath.Join("testdata", name+".golden")) + expected, err := ioutil.ReadFile(filepath.Join("testdata", name+".golden")) require.NoError(t, err) return string(expected) } diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index 5cff623bb3bcf..ed9dca5a9f731 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -532,12 +532,6 @@ func TestListenersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, nil, nil) }, }, - { - name: "ingress-gateway-nil-config-entry", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotIngressGateway_NilConfigEntry(t) - }, - }, { name: "ingress-gateway-bind-addrs", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/routes_test.go b/agent/xds/routes_test.go index c9aaf612f2547..f4b6a75f9e6d9 100644 --- a/agent/xds/routes_test.go +++ b/agent/xds/routes_test.go @@ -82,12 +82,6 @@ func TestRoutesFromSnapshot(t *testing.T) { }, // TODO(rb): test match stanza skipped for grpc // Start ingress gateway test cases - { - name: "ingress-config-entry-nil", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotIngressGateway_NilConfigEntry(t) - }, - }, { name: "ingress-defaults-no-chain", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/testdata/clusters/ingress-gateway-nil-config-entry.latest.golden 
b/agent/xds/testdata/clusters/ingress-gateway-nil-config-entry.latest.golden deleted file mode 100644 index cd8f56517eb6f..0000000000000 --- a/agent/xds/testdata/clusters/ingress-gateway-nil-config-entry.latest.golden +++ /dev/null @@ -1,5 +0,0 @@ -{ - "versionInfo": "00000001", - "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", - "nonce": "00000001" -} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/ingress-gateway-nil-config-entry.latest.golden b/agent/xds/testdata/endpoints/ingress-gateway-nil-config-entry.latest.golden deleted file mode 100644 index 8504dae2b8400..0000000000000 --- a/agent/xds/testdata/endpoints/ingress-gateway-nil-config-entry.latest.golden +++ /dev/null @@ -1,5 +0,0 @@ -{ - "versionInfo": "00000001", - "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", - "nonce": "00000001" -} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/ingress-gateway-nil-config-entry.latest.golden b/agent/xds/testdata/listeners/ingress-gateway-nil-config-entry.latest.golden deleted file mode 100644 index 53b67bb37300e..0000000000000 --- a/agent/xds/testdata/listeners/ingress-gateway-nil-config-entry.latest.golden +++ /dev/null @@ -1,5 +0,0 @@ -{ - "versionInfo": "00000001", - "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", - "nonce": "00000001" -} \ No newline at end of file diff --git a/agent/xds/testdata/routes/ingress-config-entry-nil.latest.golden b/agent/xds/testdata/routes/ingress-config-entry-nil.latest.golden deleted file mode 100644 index 9c050cbe6b4d4..0000000000000 --- a/agent/xds/testdata/routes/ingress-config-entry-nil.latest.golden +++ /dev/null @@ -1,5 +0,0 @@ -{ - "versionInfo": "00000001", - "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - "nonce": "00000001" -} \ No newline at end of file diff --git a/api/acl.go b/api/acl.go index ceafaddc2b70a..bd6d82563278e 100644 --- a/api/acl.go +++ b/api/acl.go @@ -4,6 +4,7 @@ 
import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/url" "time" @@ -1046,7 +1047,7 @@ func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { parseQueryMeta(resp, qm) qm.RequestTime = rtt - ruleBytes, err := io.ReadAll(resp.Body) + ruleBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read translated rule body: %v", err) } @@ -1073,7 +1074,7 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { parseQueryMeta(resp, qm) qm.RequestTime = rtt - ruleBytes, err := io.ReadAll(resp.Body) + ruleBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read translated rule body: %v", err) } diff --git a/api/agent_test.go b/api/agent_test.go index 43e50fd7f13d3..5a58a5b8c0b7b 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io/ioutil" "net/http" "net/http/httptest" "net/http/httputil" @@ -103,7 +104,7 @@ func TestAPI_AgentReload(t *testing.T) { // Update the config file with a service definition config := `{"service":{"name":"redis", "port":1234, "Meta": {"some": "meta"}}}` - err = os.WriteFile(configFile.Name(), []byte(config), 0644) + err = ioutil.WriteFile(configFile.Name(), []byte(config), 0644) if err != nil { t.Fatalf("err: %v", err) } diff --git a/api/api_test.go b/api/api_test.go index 668fa1add78b9..e1fabbb13539e 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" "net/http" "net/url" @@ -617,15 +618,15 @@ func TestAPI_SetupTLSConfig(t *testing.T) { assertDeepEqual(t, expectedCaPoolByDir, cc.RootCAs, cmpCertPool) // Load certs in-memory - certPEM, err := os.ReadFile("../test/hostname/Alice.crt") + certPEM, err := ioutil.ReadFile("../test/hostname/Alice.crt") if err != nil { t.Fatalf("err: %v", err) } - keyPEM, err := os.ReadFile("../test/hostname/Alice.key") + keyPEM, err := ioutil.ReadFile("../test/hostname/Alice.key") if err != 
nil { t.Fatalf("err: %v", err) } - caPEM, err := os.ReadFile("../test/hostname/CertAuth.crt") + caPEM, err := ioutil.ReadFile("../test/hostname/CertAuth.crt") if err != nil { t.Fatalf("err: %v", err) } @@ -1190,7 +1191,7 @@ func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { for _, entry := range entries { filename := path.Join("../test/ca_path", entry.Name()) - data, err := os.ReadFile(filename) + data, err := ioutil.ReadFile(filename) require.NoError(t, err) if !pool.AppendCertsFromPEM(data) { diff --git a/api/config_entry.go b/api/config_entry.go index 7e9ceffdac303..b1827fb595c19 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -273,7 +273,7 @@ type ProxyConfigEntry struct { } func (p *ProxyConfigEntry) GetKind() string { return p.Kind } -func (p *ProxyConfigEntry) GetName() string { return ProxyConfigGlobal } +func (p *ProxyConfigEntry) GetName() string { return p.Name } func (p *ProxyConfigEntry) GetPartition() string { return p.Partition } func (p *ProxyConfigEntry) GetNamespace() string { return p.Namespace } func (p *ProxyConfigEntry) GetMeta() map[string]string { return p.Meta } diff --git a/api/go.mod b/api/go.mod index 20c8e80814b22..3419f57f73d6b 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul/api -go 1.18 +go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk @@ -10,37 +10,15 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/serf v0.10.1 - github.com/mitchellh/mapstructure v1.4.1 - github.com/stretchr/testify v1.7.0 -) - -require ( - github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.9.0 // indirect - github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-immutable-radix v1.0.0 // indirect - github.com/hashicorp/go-msgpack v0.5.3 // indirect - github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-version v1.2.1 // indirect + github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/serf v0.10.1 github.com/kr/pretty v0.2.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/miekg/dns v1.1.41 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.1 github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/stretchr/objx v0.1.0 // indirect + github.com/stretchr/testify v1.7.0 golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) diff --git a/api/go.sum b/api/go.sum index 043db4cd1efc5..73921ed6fb633 100644 --- a/api/go.sum +++ b/api/go.sum @@ -49,6 +49,7 @@ github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= 
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -78,6 +79,7 @@ github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxd github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -118,6 +120,8 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 
h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -133,6 +137,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/mock_api_test.go b/api/mock_api_test.go index fa18faa7aef2c..dc4de0a3e4e21 100644 --- a/api/mock_api_test.go +++ b/api/mock_api_test.go @@ -3,6 +3,7 @@ package api import ( "encoding/json" "io" + "io/ioutil" "net/http" "net/http/httptest" "testing" @@ -38,7 +39,7 @@ func (m *mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { var body interface{} if r.Body != nil { - bodyBytes, err := io.ReadAll(r.Body) + bodyBytes, err := ioutil.ReadAll(r.Body) if err == nil && len(bodyBytes) > 0 { body = bodyBytes diff --git a/api/operator_license.go b/api/operator_license.go index 14c548b1a3549..7b654317cacd3 100644 --- a/api/operator_license.go +++ b/api/operator_license.go @@ -1,7 +1,7 @@ package api import ( - "io" + "io/ioutil" "strings" "time" ) @@ -71,7 +71,7 @@ func (op *Operator) LicenseGetSigned(q 
*QueryOptions) (string, error) { return "", err } - data, err := io.ReadAll(resp.Body) + data, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/api/operator_raft.go b/api/operator_raft.go index 1da20e899ff5b..1b48fdcd9b895 100644 --- a/api/operator_raft.go +++ b/api/operator_raft.go @@ -36,11 +36,6 @@ type RaftConfiguration struct { Index uint64 } -// TransferLeaderResponse is returned when querying for the current Raft configuration. -type TransferLeaderResponse struct { - Success bool -} - // RaftGetConfiguration is used to query the current Raft peer set. func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { r := op.c.newRequest("GET", "/v1/operator/raft/configuration") @@ -61,26 +56,6 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e return &out, nil } -// RaftLeaderTransfer is used to transfer the current raft leader to another node -func (op *Operator) RaftLeaderTransfer(q *QueryOptions) (*TransferLeaderResponse, error) { - r := op.c.newRequest("POST", "/v1/operator/raft/transfer-leader") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out TransferLeaderResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - // RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft // quorum but no longer known to Serf or the catalog) by address in the form of // "IP:port". 
diff --git a/api/operator_raft_test.go b/api/operator_raft_test.go index ecefaa9719204..a6eada42cd340 100644 --- a/api/operator_raft_test.go +++ b/api/operator_raft_test.go @@ -36,21 +36,3 @@ func TestAPI_OperatorRaftRemovePeerByAddress(t *testing.T) { t.Fatalf("err: %v", err) } } - -func TestAPI_OperatorRaftLeaderTransfer(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - // If we get this error, it proves we sent the address all the way - // through. - operator := c.Operator() - transfer, err := operator.RaftLeaderTransfer(nil) - if err == nil || !strings.Contains(err.Error(), - "cannot find peer") { - t.Fatalf("err: %v", err) - } - if transfer != nil { - t.Fatalf("err:%v", transfer) - } -} diff --git a/api/peering.go b/api/peering.go index 34602c878da1c..8748b63fcc110 100644 --- a/api/peering.go +++ b/api/peering.go @@ -85,11 +85,11 @@ type PeeringStreamStatus struct { // ExportedServices is the list of services exported to this peering. ExportedServices []string // LastHeartbeat represents when the last heartbeat message was received. - LastHeartbeat *time.Time + LastHeartbeat time.Time // LastReceive represents when any message was last received, regardless of success or error. - LastReceive *time.Time + LastReceive time.Time // LastSend represents when any message was last sent, regardless of success or error. 
- LastSend *time.Time + LastSend time.Time } type PeeringReadResponse struct { diff --git a/command/acl/authmethod/create/authmethod_create_test.go b/command/acl/authmethod/create/authmethod_create_test.go index e5a442c61631a..03bdcd1e86252 100644 --- a/command/acl/authmethod/create/authmethod_create_test.go +++ b/command/acl/authmethod/create/authmethod_create_test.go @@ -3,7 +3,7 @@ package authmethodcreate import ( "encoding/json" "io" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -468,7 +468,7 @@ func TestAuthMethodCreateCommand_k8s(t *testing.T) { }) caFile := filepath.Join(testDir, "ca.crt") - require.NoError(t, os.WriteFile(caFile, []byte(ca.RootCert), 0600)) + require.NoError(t, ioutil.WriteFile(caFile, []byte(ca.RootCert), 0600)) t.Run("create k8s with cert file", func(t *testing.T) { name := getTestName(t) @@ -540,7 +540,7 @@ func TestAuthMethodCreateCommand_config(t *testing.T) { name := getTestName(t) configFile := filepath.Join(testDir, "config.json") jsonConfig := `{"SessionID":"foo"}` - require.NoError(t, os.WriteFile(configFile, []byte(jsonConfig), 0644)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(jsonConfig), 0644)) args := []string{ "-http-addr=" + a.HTTPAddr(), diff --git a/command/acl/authmethod/update/authmethod_update_test.go b/command/acl/authmethod/update/authmethod_update_test.go index 4afe1dfb47704..263f0b774a98c 100644 --- a/command/acl/authmethod/update/authmethod_update_test.go +++ b/command/acl/authmethod/update/authmethod_update_test.go @@ -3,7 +3,7 @@ package authmethodupdate import ( "encoding/json" "io" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -455,7 +455,7 @@ func TestAuthMethodUpdateCommand_k8s(t *testing.T) { }) ca2File := filepath.Join(testDir, "ca2.crt") - require.NoError(t, os.WriteFile(ca2File, []byte(ca2.RootCert), 0600)) + require.NoError(t, ioutil.WriteFile(ca2File, []byte(ca2.RootCert), 0600)) t.Run("update all fields with cert file", func(t *testing.T) { name := 
createAuthMethod(t) @@ -750,7 +750,7 @@ func TestAuthMethodUpdateCommand_k8s_noMerge(t *testing.T) { }) ca2File := filepath.Join(testDir, "ca2.crt") - require.NoError(t, os.WriteFile(ca2File, []byte(ca2.RootCert), 0600)) + require.NoError(t, ioutil.WriteFile(ca2File, []byte(ca2.RootCert), 0600)) t.Run("update all fields with cert file", func(t *testing.T) { name := createAuthMethod(t) @@ -849,7 +849,7 @@ func TestAuthMethodUpdateCommand_config(t *testing.T) { methodName := createAuthMethod(t) configFile := filepath.Join(testDir, "config.json") jsonConfig := `{"SessionID":"update"}` - require.NoError(t, os.WriteFile(configFile, []byte(jsonConfig), 0644)) + require.NoError(t, ioutil.WriteFile(configFile, []byte(jsonConfig), 0644)) args := []string{ "-http-addr=" + a.HTTPAddr(), diff --git a/command/acl/policy/create/policy_create_test.go b/command/acl/policy/create/policy_create_test.go index 39c837ab89d6e..4466ad2d5d621 100644 --- a/command/acl/policy/create/policy_create_test.go +++ b/command/acl/policy/create/policy_create_test.go @@ -2,7 +2,7 @@ package policycreate import ( "encoding/json" - "os" + "io/ioutil" "strings" "testing" @@ -47,7 +47,7 @@ func TestPolicyCreateCommand(t *testing.T) { cmd := New(ui) rules := []byte("service \"\" { policy = \"write\" }") - err := os.WriteFile(testDir+"/rules.hcl", rules, 0644) + err := ioutil.WriteFile(testDir+"/rules.hcl", rules, 0644) require.NoError(t, err) args := []string{ @@ -87,7 +87,7 @@ func TestPolicyCreateCommand_JSON(t *testing.T) { cmd := New(ui) rules := []byte("service \"\" { policy = \"write\" }") - err := os.WriteFile(testDir+"/rules.hcl", rules, 0644) + err := ioutil.WriteFile(testDir+"/rules.hcl", rules, 0644) require.NoError(t, err) args := []string{ diff --git a/command/acl/policy/update/policy_update_test.go b/command/acl/policy/update/policy_update_test.go index 485425be07ee3..c11d2b76e58a8 100644 --- a/command/acl/policy/update/policy_update_test.go +++ 
b/command/acl/policy/update/policy_update_test.go @@ -2,7 +2,7 @@ package policyupdate import ( "encoding/json" - "os" + "io/ioutil" "strings" "testing" @@ -47,7 +47,7 @@ func TestPolicyUpdateCommand(t *testing.T) { cmd := New(ui) rules := []byte("service \"\" { policy = \"write\" }") - err := os.WriteFile(testDir+"/rules.hcl", rules, 0644) + err := ioutil.WriteFile(testDir+"/rules.hcl", rules, 0644) assert.NoError(t, err) // Create a policy @@ -97,7 +97,7 @@ func TestPolicyUpdateCommand_JSON(t *testing.T) { cmd := New(ui) rules := []byte("service \"\" { policy = \"write\" }") - err := os.WriteFile(testDir+"/rules.hcl", rules, 0644) + err := ioutil.WriteFile(testDir+"/rules.hcl", rules, 0644) assert.NoError(t, err) // Create a policy diff --git a/command/acl/role/formatter_test.go b/command/acl/role/formatter_test.go index 0e0721dc4dd1d..b6b3bd7c2c523 100644 --- a/command/acl/role/formatter_test.go +++ b/command/acl/role/formatter_test.go @@ -3,7 +3,7 @@ package role import ( "flag" "fmt" - "os" + "io/ioutil" "path" "path/filepath" "testing" @@ -22,11 +22,11 @@ func golden(t *testing.T, name, got string) string { golden := filepath.Join("testdata", name+".golden") if *update && got != "" { - err := os.WriteFile(golden, []byte(got), 0644) + err := ioutil.WriteFile(golden, []byte(got), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) diff --git a/command/acl/rules/translate_test.go b/command/acl/rules/translate_test.go index 6772d60d69910..830cb24037899 100644 --- a/command/acl/rules/translate_test.go +++ b/command/acl/rules/translate_test.go @@ -2,7 +2,7 @@ package rules import ( "io" - "os" + "io/ioutil" "strings" "testing" @@ -52,7 +52,7 @@ func TestRulesTranslateCommand(t *testing.T) { // From a file t.Run("file", func(t *testing.T) { - err := os.WriteFile(testDir+"/rules.hcl", []byte(rules), 0644) + err := ioutil.WriteFile(testDir+"/rules.hcl", 
[]byte(rules), 0644) require.NoError(t, err) args := []string{ diff --git a/command/acl/token/formatter_test.go b/command/acl/token/formatter_test.go index 92df4105af119..aafe1fcfb268d 100644 --- a/command/acl/token/formatter_test.go +++ b/command/acl/token/formatter_test.go @@ -3,7 +3,7 @@ package token import ( "flag" "fmt" - "os" + "io/ioutil" "path" "path/filepath" "testing" @@ -23,11 +23,11 @@ func golden(t *testing.T, name, got string) string { golden := filepath.Join("testdata", name+".golden") if *update && got != "" { - err := os.WriteFile(golden, []byte(got), 0644) + err := ioutil.WriteFile(golden, []byte(got), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) diff --git a/command/agent/agent.go b/command/agent/agent.go index 8b49a1da80b3c..8b6a900a90336 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -4,7 +4,7 @@ import ( "context" "flag" "fmt" - "io" + "io/ioutil" "os" "os/signal" "path/filepath" @@ -199,7 +199,7 @@ func (c *cmd) run(args []string) int { return 1 } - bd, err := agent.NewBaseDeps(loader, logGate, nil) + bd, err := agent.NewBaseDeps(loader, logGate) if err != nil { ui.Error(err.Error()) return 1 @@ -218,7 +218,7 @@ func (c *cmd) run(args []string) int { if config.Logging.LogJSON { // Hide all non-error output when JSON logging is enabled. 
ui.Ui = &cli.BasicUI{ - BasicUi: mcli.BasicUi{ErrorWriter: c.ui.Stderr(), Writer: io.Discard}, + BasicUi: mcli.BasicUi{ErrorWriter: c.ui.Stderr(), Writer: ioutil.Discard}, } } diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index 7671ffdea342c..3ea5a8be2a333 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -147,25 +147,6 @@ http { require.True(t, proxy.HTTP.SanitizeXForwardedClientCert) }) - - // Test that if name isn't set (which isn't required for proxy-defaults because the name defaults to - // "global"), the CLI response still says "config entry written proxy-defaults/global". - t.Run("proxy defaults config entry without name set", func(t *testing.T) { - stdin := new(bytes.Buffer) - stdin.WriteString(` -kind = "proxy-defaults" -`) - - ui := cli.NewMockUi() - c := New(ui) - c.testStdin = stdin - - code := c.Run([]string{"-http-addr=" + a.HTTPAddr(), "-"}) - require.Empty(t, ui.ErrorWriter.String()) - require.Contains(t, ui.OutputWriter.String(), - `Config entry written: proxy-defaults/global`) - require.Equal(t, 0, code) - }) } func requireContainsLower(t *testing.T, haystack, needle string) { diff --git a/command/connect/ca/set/connect_ca_set.go b/command/connect/ca/set/connect_ca_set.go index 54f8854d5cc9c..29922b5b99388 100644 --- a/command/connect/ca/set/connect_ca_set.go +++ b/command/connect/ca/set/connect_ca_set.go @@ -4,7 +4,7 @@ import ( "encoding/json" "flag" "fmt" - "os" + "io/ioutil" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" @@ -66,7 +66,7 @@ func (c *cmd) Run(args []string) int { return 1 } - bytes, err := os.ReadFile(c.configFile.String()) + bytes, err := ioutil.ReadFile(c.configFile.String()) if err != nil { c.UI.Error(fmt.Sprintf("Error reading config file: %s", err)) return 1 diff --git a/command/connect/envoy/envoy_test.go b/command/connect/envoy/envoy_test.go index 
31c078dc9967c..d13b7dd2ae829 100644 --- a/command/connect/envoy/envoy_test.go +++ b/command/connect/envoy/envoy_test.go @@ -3,6 +3,7 @@ package envoy import ( "encoding/json" "flag" + "io/ioutil" "net" "net/http" "net/http/httptest" @@ -1080,7 +1081,7 @@ func TestGenerateConfig(t *testing.T) { if len(tc.Files) > 0 { for fn, fv := range tc.Files { fullname := filepath.Join(testDir, fn) - require.NoError(t, os.WriteFile(fullname, []byte(fv), 0600)) + require.NoError(t, ioutil.WriteFile(fullname, []byte(fv), 0600)) } } @@ -1131,10 +1132,10 @@ func TestGenerateConfig(t *testing.T) { // If we got the arg handling write, verify output golden := filepath.Join("testdata", tc.Name+".golden") if *update { - os.WriteFile(golden, actual, 0644) + ioutil.WriteFile(golden, actual, 0644) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) }) diff --git a/command/connect/envoy/exec_test.go b/command/connect/envoy/exec_test.go index 3765003e6adb7..9c7fc276bb836 100644 --- a/command/connect/envoy/exec_test.go +++ b/command/connect/envoy/exec_test.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "os" "os/exec" "strings" @@ -233,7 +234,7 @@ func TestHelperProcess(t *testing.T) { os.Exit(1) } - d, err := os.ReadFile(data.ConfigPath) + d, err := ioutil.ReadFile(data.ConfigPath) if err != nil { fmt.Fprintf(os.Stderr, "could not read provided --config-path file %q: %v\n", data.ConfigPath, err) os.Exit(1) diff --git a/command/flags/http.go b/command/flags/http.go index b4a2a9038c163..e82e024fbbacb 100644 --- a/command/flags/http.go +++ b/command/flags/http.go @@ -2,7 +2,7 @@ package flags import ( "flag" - "os" + "io/ioutil" "strings" "github.com/hashicorp/consul/api" @@ -131,7 +131,7 @@ func (f *HTTPFlags) ReadTokenFile() (string, error) { return "", nil } - data, err := os.ReadFile(tokenFile) + data, err := ioutil.ReadFile(tokenFile) if err != nil { 
return "", err } diff --git a/command/helpers/helpers.go b/command/helpers/helpers.go index 493c9ff4ab048..56ad6f7d367ec 100644 --- a/command/helpers/helpers.go +++ b/command/helpers/helpers.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "os" "time" @@ -14,7 +15,7 @@ import ( ) func loadFromFile(path string) (string, error) { - data, err := os.ReadFile(path) + data, err := ioutil.ReadFile(path) if err != nil { return "", fmt.Errorf("Failed to read file: %v", err) } diff --git a/command/kv/imp/kv_import.go b/command/kv/imp/kv_import.go index 0d8570dd60c54..d5796f24d2069 100644 --- a/command/kv/imp/kv_import.go +++ b/command/kv/imp/kv_import.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "os" "path" @@ -122,7 +123,7 @@ func (c *cmd) dataFromArgs(args []string) (string, error) { switch data[0] { case '@': - data, err := os.ReadFile(data[1:]) + data, err := ioutil.ReadFile(data[1:]) if err != nil { return "", fmt.Errorf("Failed to read file: %s", err) } diff --git a/command/lock/lock_test.go b/command/lock/lock_test.go index e9a03fc8809bd..c0f5381e31fa1 100644 --- a/command/lock/lock_test.go +++ b/command/lock/lock_test.go @@ -1,7 +1,7 @@ package lock import ( - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -63,7 +63,7 @@ func TestLockCommand(t *testing.T) { } // Check for the file - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -92,7 +92,7 @@ func TestLockCommand_NoShell(t *testing.T) { } // Check for the file - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -121,7 +121,7 @@ func TestLockCommand_TryLock(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -159,7 +159,7 @@ func TestLockCommand_TrySemaphore(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -197,7 +197,7 @@ func TestLockCommand_MonitorRetry_Lock_Default(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -236,7 +236,7 @@ func TestLockCommand_MonitorRetry_Semaphore_Default(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -275,7 +275,7 @@ func TestLockCommand_MonitorRetry_Lock_Arg(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } @@ -314,7 +314,7 @@ func TestLockCommand_MonitorRetry_Semaphore_Arg(t *testing.T) { if code != 0 { t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) } - _, err := os.ReadFile(filePath) + _, err := ioutil.ReadFile(filePath) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/login/login.go b/command/login/login.go index e4209eeda4afd..a8f58556accf1 100644 --- a/command/login/login.go +++ b/command/login/login.go @@ -3,7 +3,7 @@ package login import ( "flag" "fmt" - "os" + "io/ioutil" "strings" "github.com/hashicorp/consul/api" @@ -112,7 +112,7 @@ func (c *cmd) bearerTokenLogin() int { c.UI.Error("Missing required '-bearer-token-file' flag") return 1 } else { - data, err := os.ReadFile(c.bearerTokenFile) + data, err := ioutil.ReadFile(c.bearerTokenFile) if err != nil { c.UI.Error(err.Error()) return 1 diff --git a/command/login/login_test.go b/command/login/login_test.go index e7297ffe5102e..6340d93f717c5 100644 --- a/command/login/login_test.go +++ b/command/login/login_test.go @@ -2,6 +2,7 @@ package login import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -172,7 +173,7 @@ func TestLoginCommand(t *testing.T) { t.Run("bearer-token-file is empty", func(t *testing.T) { defer os.Remove(tokenSinkFile) - require.NoError(t, os.WriteFile(bearerTokenFile, []byte(""), 0600)) + require.NoError(t, ioutil.WriteFile(bearerTokenFile, []byte(""), 0600)) ui := cli.NewMockUi() cmd := New(ui) @@ -190,7 +191,7 @@ func TestLoginCommand(t *testing.T) { require.Contains(t, ui.ErrorWriter.String(), "No bearer token found in") }) - require.NoError(t, os.WriteFile(bearerTokenFile, []byte("demo-token"), 0600)) + require.NoError(t, ioutil.WriteFile(bearerTokenFile, []byte("demo-token"), 0600)) t.Run("try login with no method configured", func(t *testing.T) { defer os.Remove(tokenSinkFile) @@ -284,7 +285,7 @@ func TestLoginCommand(t *testing.T) { require.Empty(t, ui.ErrorWriter.String()) require.Empty(t, ui.OutputWriter.String()) - raw, err := os.ReadFile(tokenSinkFile) + raw, err := ioutil.ReadFile(tokenSinkFile) require.NoError(t, err) token := 
strings.TrimSpace(string(raw)) @@ -308,7 +309,7 @@ func TestLoginCommand_k8s(t *testing.T) { bearerTokenFile := filepath.Join(testDir, "bearer.token") // the "B" jwt will be the one being reviewed - require.NoError(t, os.WriteFile(bearerTokenFile, []byte(acl.TestKubernetesJWT_B), 0600)) + require.NoError(t, ioutil.WriteFile(bearerTokenFile, []byte(acl.TestKubernetesJWT_B), 0600)) // spin up a fake api server testSrv := kubeauth.StartTestAPIServer(t) @@ -371,7 +372,7 @@ func TestLoginCommand_k8s(t *testing.T) { require.Empty(t, ui.ErrorWriter.String()) require.Empty(t, ui.OutputWriter.String()) - raw, err := os.ReadFile(tokenSinkFile) + raw, err := ioutil.ReadFile(tokenSinkFile) require.NoError(t, err) token := strings.TrimSpace(string(raw)) @@ -486,7 +487,7 @@ func TestLoginCommand_jwt(t *testing.T) { // Drop a JWT on disk. jwtData, err := oidcauthtest.SignJWT(privKey, cl, privateCl) require.NoError(t, err) - require.NoError(t, os.WriteFile(bearerTokenFile, []byte(jwtData), 0600)) + require.NoError(t, ioutil.WriteFile(bearerTokenFile, []byte(jwtData), 0600)) defer os.Remove(tokenSinkFile) ui := cli.NewMockUi() @@ -505,7 +506,7 @@ func TestLoginCommand_jwt(t *testing.T) { require.Empty(t, ui.ErrorWriter.String()) require.Empty(t, ui.OutputWriter.String()) - raw, err := os.ReadFile(tokenSinkFile) + raw, err := ioutil.ReadFile(tokenSinkFile) require.NoError(t, err) token := strings.TrimSpace(string(raw)) @@ -659,7 +660,7 @@ func TestLoginCommand_aws_iam(t *testing.T) { code := cmd.Run(args) require.Equal(t, 0, code, ui.ErrorWriter.String()) - raw, err := os.ReadFile(tokenSinkFile) + raw, err := ioutil.ReadFile(tokenSinkFile) require.NoError(t, err) token := strings.TrimSpace(string(raw)) diff --git a/command/logout/logout_test.go b/command/logout/logout_test.go index e41a33ef4d0f5..c5130fdf1b756 100644 --- a/command/logout/logout_test.go +++ b/command/logout/logout_test.go @@ -225,6 +225,9 @@ func TestLogoutCommand_k8s(t *testing.T) { require.Contains(t, 
ui.ErrorWriter.String(), "403 (Permission denied: token wasn't created via login)") }) + // go to the trouble of creating a login token + // require.NoError(t, ioutil.WriteFile(bearerTokenFile, []byte(acl.TestKubernetesJWT_B), 0600)) + // spin up a fake api server testSrv := kubeauth.StartTestAPIServer(t) defer testSrv.Stop() diff --git a/command/operator/autopilot/state/operator_autopilot_state_test.go b/command/operator/autopilot/state/operator_autopilot_state_test.go index 332f53059c948..00484974d531b 100644 --- a/command/operator/autopilot/state/operator_autopilot_state_test.go +++ b/command/operator/autopilot/state/operator_autopilot_state_test.go @@ -3,7 +3,7 @@ package state import ( "encoding/json" "flag" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -25,11 +25,11 @@ func golden(t *testing.T, name, got string) string { golden := filepath.Join("testdata", name+".golden") if *update && got != "" { - err := os.WriteFile(golden, []byte(got), 0644) + err := ioutil.WriteFile(golden, []byte(got), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) @@ -111,7 +111,7 @@ func TestStateCommand_Formatter(t *testing.T) { for _, name := range cases { t.Run(name, func(t *testing.T) { statePath := filepath.Join("testdata", name, "state.json") - input, err := os.ReadFile(statePath) + input, err := ioutil.ReadFile(statePath) require.NoError(t, err) var state api.AutopilotState diff --git a/command/operator/raft/transferleader/transfer_leader.go b/command/operator/raft/transferleader/transfer_leader.go deleted file mode 100644 index b7d8c468eb44c..0000000000000 --- a/command/operator/raft/transferleader/transfer_leader.go +++ /dev/null @@ -1,90 +0,0 @@ -package transferleader - -import ( - "flag" - "fmt" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" -) - -func 
New(ui cli.Ui) *cmd { - c := &cmd{UI: ui} - c.init() - return c -} - -type cmd struct { - UI cli.Ui - flags *flag.FlagSet - http *flags.HTTPFlags - help string - id string -} - -func (c *cmd) init() { - c.flags = flag.NewFlagSet("", flag.ContinueOnError) - c.http = &flags.HTTPFlags{} - c.flags.StringVar(&c.id, "id", "", - "The ID to remove from the Raft configuration.") - flags.Merge(c.flags, c.http.ClientFlags()) - flags.Merge(c.flags, c.http.ServerFlags()) - c.help = flags.Usage(help, c.flags) -} - -func (c *cmd) Run(args []string) int { - if err := c.flags.Parse(args); err != nil { - if err == flag.ErrHelp { - return 0 - } - c.UI.Error(fmt.Sprintf("Failed to parse args: %v", err)) - return 1 - } - - // Set up a client. - client, err := c.http.APIClient() - if err != nil { - c.UI.Error(fmt.Sprintf("Error initializing client: %s", err)) - return 1 - } - - // Fetch the current configuration. - result, err := raftTransferLeader(client, c.http.Stale()) - if err != nil { - c.UI.Error(fmt.Sprintf("Error transfering leadership: %v", err)) - return 1 - } - - c.UI.Output(result) - return 0 -} - -func raftTransferLeader(client *api.Client, stale bool) (string, error) { - q := &api.QueryOptions{ - AllowStale: stale, - } - reply, err := client.Operator().RaftLeaderTransfer(q) - if err != nil { - return "", fmt.Errorf("Failed to transfer leadership %w", err) - } - if !reply.Success { - return "", fmt.Errorf("Failed to transfer leadership") - } - return "Success", nil -} - -func (c *cmd) Synopsis() string { - return synopsis -} - -func (c *cmd) Help() string { - return c.help -} - -const synopsis = "Transfer raft leadership to another node" -const help = ` -Usage: consul operator raft transfer-leader [options] - - Transfer raft leadership to another node. 
-` diff --git a/command/operator/raft/transferleader/transfer_leader_test.go b/command/operator/raft/transferleader/transfer_leader_test.go deleted file mode 100644 index b5979711989ad..0000000000000 --- a/command/operator/raft/transferleader/transfer_leader_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package transferleader - -import ( - "github.com/hashicorp/consul/agent" - "github.com/mitchellh/cli" - "strings" - "testing" -) - -func TestOperatorRaftTransferLeaderCommand_noTabs(t *testing.T) { - t.Parallel() - if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { - t.Fatal("help has tabs") - } -} - -// This only test that the command behave correctly when only one agent is present -// and no leadership transfer is possible, testing for the functionality will be done at the RPC level. -func TestOperatorRaftTransferLeaderWithSingleNode(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - a := agent.NewTestAgent(t, ``) - defer a.Shutdown() - - expected := "cannot find peer" - - // Test the transfer-leader subcommand directly - ui := cli.NewMockUi() - c := New(ui) - - args := []string{"-http-addr=" + a.HTTPAddr()} - code := c.Run(args) - if code != 1 { - t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) - } - output := strings.TrimSpace(ui.ErrorWriter.String()) - if !strings.Contains(output, expected) { - t.Fatalf("bad: %q, %q", output, expected) - } -} diff --git a/command/registry.go b/command/registry.go index 1d34f746ca906..b35ac2e4241a6 100644 --- a/command/registry.go +++ b/command/registry.go @@ -2,7 +2,6 @@ package command import ( "fmt" - "github.com/hashicorp/consul/command/operator/raft/transferleader" "os" "os/signal" "syscall" @@ -221,7 +220,6 @@ func RegisteredCommands(ui cli.Ui) map[string]mcli.CommandFactory { entry{"operator raft", func(cli.Ui) (cli.Command, error) { return operraft.New(), nil }}, entry{"operator raft list-peers", func(ui cli.Ui) (cli.Command, error) { return operraftlist.New(ui), nil }}, entry{"operator raft remove-peer", func(ui cli.Ui) (cli.Command, error) { return operraftremove.New(ui), nil }}, - entry{"operator raft transfer-leader", func(ui cli.Ui) (cli.Command, error) { return transferleader.New(ui), nil }}, entry{"peering", func(cli.Ui) (cli.Command, error) { return peering.New(), nil }}, entry{"peering delete", func(ui cli.Ui) (cli.Command, error) { return peerdelete.New(ui), nil }}, entry{"peering generate-token", func(ui cli.Ui) (cli.Command, error) { return peergenerate.New(ui), nil }}, diff --git a/command/snapshot/inspect/snapshot_inspect.go b/command/snapshot/inspect/snapshot_inspect.go index 2a09067a16b45..d0deec4435c6e 100644 --- a/command/snapshot/inspect/snapshot_inspect.go +++ b/command/snapshot/inspect/snapshot_inspect.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "os" "path" "sort" @@ -122,7 +123,7 @@ func (c *cmd) Run(args []string) int { readFile = f // Assume the meta is colocated and error if not. 
- metaRaw, err := os.ReadFile(path.Join(path.Dir(file), "meta.json")) + metaRaw, err := ioutil.ReadFile(path.Join(path.Dir(file), "meta.json")) if err != nil { c.UI.Error(fmt.Sprintf("Error reading meta.json from internal snapshot dir: %s", err)) return 1 diff --git a/command/snapshot/inspect/snapshot_inspect_test.go b/command/snapshot/inspect/snapshot_inspect_test.go index 5c302c6a7aaf6..9d4add371dbce 100644 --- a/command/snapshot/inspect/snapshot_inspect_test.go +++ b/command/snapshot/inspect/snapshot_inspect_test.go @@ -2,7 +2,7 @@ package inspect import ( "flag" - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -21,11 +21,11 @@ func golden(t *testing.T, name, got string) string { golden := filepath.Join("testdata", name+".golden") if *update && got != "" { - err := os.WriteFile(golden, []byte(got), 0644) + err := ioutil.WriteFile(golden, []byte(got), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) diff --git a/command/snapshot/restore/snapshot_restore_test.go b/command/snapshot/restore/snapshot_restore_test.go index 38a54967e1e98..8b14484608770 100644 --- a/command/snapshot/restore/snapshot_restore_test.go +++ b/command/snapshot/restore/snapshot_restore_test.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "fmt" "io" + "io/ioutil" "os" "path/filepath" "strings" @@ -134,7 +135,7 @@ func TestSnapshotRestoreCommand_TruncatedSnapshot(t *testing.T) { require.NoError(t, err) defer rc.Close() - inputData, err = io.ReadAll(rc) + inputData, err = ioutil.ReadAll(rc) require.NoError(t, err) } @@ -149,7 +150,7 @@ func TestSnapshotRestoreCommand_TruncatedSnapshot(t *testing.T) { c := New(ui) file := filepath.Join(dir, "backup.tgz") - require.NoError(t, os.WriteFile(file, data, 0644)) + require.NoError(t, ioutil.WriteFile(file, data, 0644)) args := []string{ "-http-addr=" + a.HTTPAddr(), file, diff --git a/command/snapshot/save/snapshot_save_test.go 
b/command/snapshot/save/snapshot_save_test.go index 3e964dbbf7e1f..10e8abcfea600 100644 --- a/command/snapshot/save/snapshot_save_test.go +++ b/command/snapshot/save/snapshot_save_test.go @@ -3,7 +3,7 @@ package save import ( "crypto/rand" "fmt" - "io" + "io/ioutil" "net/http" "net/http/httptest" "os" @@ -136,7 +136,7 @@ func TestSnapshotSaveCommand_TruncatedStream(t *testing.T) { require.NoError(t, err) defer rc.Close() - inputData, err = io.ReadAll(rc) + inputData, err = ioutil.ReadAll(rc) require.NoError(t, err) } diff --git a/command/tls/ca/create/tls_ca_create_test.go b/command/tls/ca/create/tls_ca_create_test.go index 72292ee5b6424..2f4ae544913ed 100644 --- a/command/tls/ca/create/tls_ca_create_test.go +++ b/command/tls/ca/create/tls_ca_create_test.go @@ -4,6 +4,7 @@ import ( "crypto" "crypto/x509" "io/fs" + "io/ioutil" "os" "strings" "testing" @@ -129,9 +130,9 @@ func expectFiles(t *testing.T, caPath, keyPath string) (*x509.Certificate, crypt t.Fatalf("private key file %s: permissions: want: %o; have: %o", keyPath, want, have) } - caData, err := os.ReadFile(caPath) + caData, err := ioutil.ReadFile(caPath) require.NoError(t, err) - keyData, err := os.ReadFile(keyPath) + keyData, err := ioutil.ReadFile(keyPath) require.NoError(t, err) ca, err := connect.ParseCert(string(caData)) diff --git a/command/tls/cert/create/tls_cert_create.go b/command/tls/cert/create/tls_cert_create.go index 75c9b1ada1fca..b1cdaa131d276 100644 --- a/command/tls/cert/create/tls_cert_create.go +++ b/command/tls/cert/create/tls_cert_create.go @@ -4,8 +4,8 @@ import ( "crypto/x509" "flag" "fmt" + "io/ioutil" "net" - "os" "strings" "github.com/hashicorp/consul/command/flags" @@ -150,12 +150,12 @@ func (c *cmd) Run(args []string) int { caFile := strings.Replace(c.ca, "#DOMAIN#", c.domain, 1) keyFile := strings.Replace(c.key, "#DOMAIN#", c.domain, 1) - cert, err := os.ReadFile(caFile) + cert, err := ioutil.ReadFile(caFile) if err != nil { c.UI.Error(fmt.Sprintf("Error reading CA: 
%s", err)) return 1 } - key, err := os.ReadFile(keyFile) + key, err := ioutil.ReadFile(keyFile) if err != nil { c.UI.Error(fmt.Sprintf("Error reading CA key: %s", err)) return 1 diff --git a/command/tls/cert/create/tls_cert_create_test.go b/command/tls/cert/create/tls_cert_create_test.go index e5134b1bd1ab0..63f84c4eec814 100644 --- a/command/tls/cert/create/tls_cert_create_test.go +++ b/command/tls/cert/create/tls_cert_create_test.go @@ -4,6 +4,7 @@ import ( "crypto" "crypto/x509" "io/fs" + "io/ioutil" "net" "os" "strings" @@ -251,9 +252,9 @@ func expectFiles(t *testing.T, certPath, keyPath string) (*x509.Certificate, cry t.Fatalf("private key file %s: permissions: want: %o; have: %o", keyPath, want, have) } - certData, err := os.ReadFile(certPath) + certData, err := ioutil.ReadFile(certPath) require.NoError(t, err) - keyData, err := os.ReadFile(keyPath) + keyData, err := ioutil.ReadFile(keyPath) require.NoError(t, err) cert, err := connect.ParseCert(string(certData)) diff --git a/command/validate/validate_test.go b/command/validate/validate_test.go index 29091f1292c6e..c8cc3bf4d128c 100644 --- a/command/validate/validate_test.go +++ b/command/validate/validate_test.go @@ -1,7 +1,7 @@ package validate import ( - "os" + "io/ioutil" "path/filepath" "strings" "testing" @@ -34,7 +34,7 @@ func TestValidateCommand_SucceedOnMinimalConfigFile(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "config.json") - err := os.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) + err := ioutil.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -49,7 +49,7 @@ func TestValidateCommand_SucceedWithMinimalJSONConfigFormat(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "json.conf") - err := os.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) + err := ioutil.WriteFile(fp, 
[]byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -64,7 +64,7 @@ func TestValidateCommand_SucceedWithMinimalHCLConfigFormat(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "hcl.conf") - err := os.WriteFile(fp, []byte("bind_addr = \"10.0.0.1\"\ndata_dir = \""+td+"\""), 0644) + err := ioutil.WriteFile(fp, []byte("bind_addr = \"10.0.0.1\"\ndata_dir = \""+td+"\""), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -79,7 +79,7 @@ func TestValidateCommand_SucceedWithJSONAsHCL(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "json.conf") - err := os.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) + err := ioutil.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -93,7 +93,7 @@ func TestValidateCommand_SucceedOnMinimalConfigDir(t *testing.T) { t.Parallel() td := testutil.TempDir(t, "consul") - err := os.WriteFile(filepath.Join(td, "config.json"), []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) + err := ioutil.WriteFile(filepath.Join(td, "config.json"), []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -108,7 +108,7 @@ func TestValidateCommand_FailForInvalidJSONConfigFormat(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "hcl.conf") - err := os.WriteFile(fp, []byte(`bind_addr = "10.0.0.1"\ndata_dir = "`+td+`"`), 0644) + err := ioutil.WriteFile(fp, []byte(`bind_addr = "10.0.0.1"\ndata_dir = "`+td+`"`), 0644) require.Nilf(t, err, "err: %s", err) cmd := New(cli.NewMockUi()) @@ -123,7 +123,7 @@ func TestValidateCommand_Quiet(t *testing.T) { td := testutil.TempDir(t, "consul") fp := filepath.Join(td, "config.json") - err := os.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", 
"data_dir":"`+td+`"}`), 0644) + err := ioutil.WriteFile(fp, []byte(`{"bind_addr":"10.0.0.1", "data_dir":"`+td+`"}`), 0644) require.Nilf(t, err, "err: %s", err) ui := cli.NewMockUi() diff --git a/command/version/formatter_test.go b/command/version/formatter_test.go index 094f8ede17be0..e532c487c02d1 100644 --- a/command/version/formatter_test.go +++ b/command/version/formatter_test.go @@ -3,7 +3,7 @@ package version import ( "flag" "fmt" - "os" + "io/ioutil" "path/filepath" "testing" "time" @@ -21,11 +21,11 @@ func golden(t *testing.T, name, got string) string { golden := filepath.Join("testdata", name+".golden") if *update && got != "" { - err := os.WriteFile(golden, []byte(got), 0644) + err := ioutil.WriteFile(golden, []byte(got), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) return string(expected) diff --git a/command/watch/watch_test.go b/command/watch/watch_test.go index 503a83dd038e7..a2d15deb29bae 100644 --- a/command/watch/watch_test.go +++ b/command/watch/watch_test.go @@ -1,6 +1,7 @@ package watch import ( + "io/ioutil" "os" "path/filepath" "strings" @@ -57,7 +58,7 @@ func TestWatchCommand_loadToken(t *testing.T) { testDir := testutil.TempDir(t, "watchtest") fullname := filepath.Join(testDir, "token.txt") - require.NoError(t, os.WriteFile(fullname, []byte(testToken), 0600)) + require.NoError(t, ioutil.WriteFile(fullname, []byte(testToken), 0600)) resetEnv := func() { os.Unsetenv("CONSUL_HTTP_TOKEN") diff --git a/connect/certgen/certgen.go b/connect/certgen/certgen.go index afc3c3a027502..f47e2c4b42478 100644 --- a/connect/certgen/certgen.go +++ b/connect/certgen/certgen.go @@ -31,6 +31,7 @@ package main // import "github.com/hashicorp/consul/connect/certgen" import ( "flag" "fmt" + "io/ioutil" "log" "os" @@ -85,7 +86,7 @@ func main() { func writeFile(name, content string) { fmt.Println("Writing ", name) - err := os.WriteFile(name, []byte(content), 0600) + 
err := ioutil.WriteFile(name, []byte(content), 0600) if err != nil { log.Fatalf("failed writing file: %s", err) } diff --git a/connect/service_test.go b/connect/service_test.go index 5405a32362b00..e72b501ed7223 100644 --- a/connect/service_test.go +++ b/connect/service_test.go @@ -7,6 +7,7 @@ import ( "crypto/x509" "fmt" "io" + "io/ioutil" "net/http" "reflect" "sort" @@ -254,7 +255,7 @@ func TestService_HTTPClient(t *testing.T) { r.Check(err) defer resp.Body.Close() - bodyBytes, err := io.ReadAll(resp.Body) + bodyBytes, err := ioutil.ReadAll(resp.Body) r.Check(err) got := string(bodyBytes) diff --git a/connect/tls.go b/connect/tls.go index b142515ecaf5e..dd7fc1869eb99 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -5,9 +5,9 @@ import ( "crypto/x509" "errors" "fmt" + "io/ioutil" "net" "net/url" - "os" "strings" "sync" @@ -89,7 +89,7 @@ func devTLSConfigFromFiles(caFile, certFile, roots := x509.NewCertPool() - bs, err := os.ReadFile(caFile) + bs, err := ioutil.ReadFile(caFile) if err != nil { return nil, err } diff --git a/go.sum b/go.sum index ceb5a6f1d2229..a1f799aa40429 100644 --- a/go.sum +++ b/go.sum @@ -396,6 +396,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -611,6 +612,7 @@ 
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -1141,6 +1143,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= diff --git a/internal/go-sso/oidcauth/oidcauthtest/testing.go b/internal/go-sso/oidcauth/oidcauthtest/testing.go index 46b1a8ff393f6..8e20fb5750ae9 100644 --- a/internal/go-sso/oidcauth/oidcauthtest/testing.go +++ b/internal/go-sso/oidcauth/oidcauthtest/testing.go @@ -14,7 +14,7 @@ import ( "encoding/json" "encoding/pem" "fmt" - "io" + "io/ioutil" "log" "net" 
"net/http" @@ -82,7 +82,7 @@ func Start(t TestingT) *Server { s.jwks = jwks s.httpServer = httptest.NewUnstartedServer(s) - s.httpServer.Config.ErrorLog = log.New(io.Discard, "", 0) + s.httpServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) s.httpServer.StartTLS() t.Cleanup(s.httpServer.Close) diff --git a/internal/testing/golden/golden.go b/internal/testing/golden/golden.go index 50f0a4f65bfd6..66ec2a0ffa871 100644 --- a/internal/testing/golden/golden.go +++ b/internal/testing/golden/golden.go @@ -2,6 +2,7 @@ package golden import ( "flag" + "io/ioutil" "os" "path/filepath" "testing" @@ -25,11 +26,11 @@ func Get(t *testing.T, actual, filename string) string { if dir := filepath.Dir(path); dir != "." { require.NoError(t, os.MkdirAll(dir, 0755)) } - err := os.WriteFile(path, []byte(actual), 0644) + err := ioutil.WriteFile(path, []byte(actual), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(path) + expected, err := ioutil.ReadFile(path) require.NoError(t, err) return string(expected) } diff --git a/internal/tools/proto-gen-rpc-glue/main_test.go b/internal/tools/proto-gen-rpc-glue/main_test.go index 9fcfb7ed4511a..77ded532aeefb 100644 --- a/internal/tools/proto-gen-rpc-glue/main_test.go +++ b/internal/tools/proto-gen-rpc-glue/main_test.go @@ -39,11 +39,11 @@ func golden(t *testing.T, actual, path string) string { if dir := filepath.Dir(path); dir != "." 
{ require.NoError(t, os.MkdirAll(dir, 0755)) } - err := os.WriteFile(path, []byte(actual), 0644) + err := ioutil.WriteFile(path, []byte(actual), 0644) require.NoError(t, err) } - expected, err := os.ReadFile(path) + expected, err := ioutil.ReadFile(path) require.NoError(t, err) return string(expected) } diff --git a/lib/file/atomic_test.go b/lib/file/atomic_test.go index 575fc6067ce69..8645b2d45ac3d 100644 --- a/lib/file/atomic_test.go +++ b/lib/file/atomic_test.go @@ -1,6 +1,7 @@ package file import ( + "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +13,7 @@ import ( // tests that it just writes the file properly. I would love to test this // better but I'm not sure how. -mitchellh func TestWriteAtomic(t *testing.T) { - td, err := os.MkdirTemp("", "lib-file") + td, err := ioutil.TempDir("", "lib-file") require.NoError(t, err) defer os.RemoveAll(td) @@ -24,7 +25,7 @@ func TestWriteAtomic(t *testing.T) { require.NoError(t, WriteAtomic(path, expected)) // Read and verify - actual, err := os.ReadFile(path) + actual, err := ioutil.ReadFile(path) require.NoError(t, err) require.Equal(t, expected, actual) } diff --git a/logging/logfile_test.go b/logging/logfile_test.go index 09313a67cb70e..96fc0e1696444 100644 --- a/logging/logfile_test.go +++ b/logging/logfile_test.go @@ -1,6 +1,7 @@ package logging import ( + "io/ioutil" "os" "path/filepath" "sort" @@ -43,7 +44,7 @@ func TestLogFile_openNew(t *testing.T) { _, err = logFile.Write([]byte(msg)) require.NoError(t, err) - content, err := os.ReadFile(logFile.FileInfo.Name()) + content, err := ioutil.ReadFile(logFile.FileInfo.Name()) require.NoError(t, err) require.Contains(t, string(content), msg) } @@ -78,11 +79,11 @@ func TestLogFile_PruneFiles(t *testing.T) { sort.Strings(logFiles) require.Len(t, logFiles, 2) - content, err := os.ReadFile(filepath.Join(tempDir, logFiles[0])) + content, err := ioutil.ReadFile(filepath.Join(tempDir, logFiles[0])) require.NoError(t, err) require.Contains(t, string(content), "Second 
File") - content, err = os.ReadFile(filepath.Join(tempDir, logFiles[1])) + content, err = ioutil.ReadFile(filepath.Join(tempDir, logFiles[1])) require.NoError(t, err) require.Contains(t, string(content), "Third File") } diff --git a/main.go b/main.go index 5138f8c2219d2..e7b04e241fb5b 100644 --- a/main.go +++ b/main.go @@ -2,7 +2,7 @@ package main import ( "fmt" - "io" + "io/ioutil" "log" "os" @@ -24,7 +24,7 @@ func main() { } func realMain() int { - log.SetOutput(io.Discard) + log.SetOutput(ioutil.Discard) ui := &cli.BasicUI{ BasicUi: mcli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}, diff --git a/proto-public/go.mod b/proto-public/go.mod index 9870dccfb5e84..36d28c5711d49 100644 --- a/proto-public/go.mod +++ b/proto-public/go.mod @@ -4,13 +4,18 @@ go 1.19 require ( github.com/golang/protobuf v1.5.0 + github.com/stretchr/testify v1.5.1 google.golang.org/grpc v1.37.1 google.golang.org/protobuf v1.27.1 ) require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.0 // indirect golang.org/x/net v0.0.0-20190311183353-d8887717615a // indirect golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a // indirect golang.org/x/text v0.3.0 // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect ) diff --git a/proto-public/go.sum b/proto-public/go.sum index 59e6e6727b784..212cecb78be42 100644 --- a/proto-public/go.sum +++ b/proto-public/go.sum @@ -3,6 +3,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -29,9 +30,12 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -82,7 +86,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/proto/pboperator/operator.gen.go b/proto/pboperator/operator.gen.go deleted file mode 100644 index 83bd446197a2b..0000000000000 --- a/proto/pboperator/operator.gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Code generated by mog. DO NOT EDIT. - -package pboperator - -import "github.com/hashicorp/consul/api" - -func TransferLeaderResponseToAPI(s *TransferLeaderResponse, t *api.TransferLeaderResponse) { - if s == nil { - return - } - t.Success = s.Success -} -func TransferLeaderResponseFromAPI(t *api.TransferLeaderResponse, s *TransferLeaderResponse) { - if s == nil { - return - } - s.Success = t.Success -} diff --git a/proto/pboperator/operator.pb.binary.go b/proto/pboperator/operator.pb.binary.go deleted file mode 100644 index 594bcde67feb2..0000000000000 --- a/proto/pboperator/operator.pb.binary.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by protoc-gen-go-binary. DO NOT EDIT. 
-// source: proto/pboperator/operator.proto - -package pboperator - -import ( - "github.com/golang/protobuf/proto" -) - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *TransferLeaderRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *TransferLeaderRequest) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *TransferLeaderResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *TransferLeaderResponse) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto/pboperator/operator.pb.go b/proto/pboperator/operator.pb.go deleted file mode 100644 index 317822eec5a94..0000000000000 --- a/proto/pboperator/operator.pb.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc (unknown) -// source: proto/pboperator/operator.proto - -package pboperator - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type TransferLeaderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (x *TransferLeaderRequest) Reset() { - *x = TransferLeaderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_pboperator_operator_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TransferLeaderRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransferLeaderRequest) ProtoMessage() {} - -func (x *TransferLeaderRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_pboperator_operator_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransferLeaderRequest.ProtoReflect.Descriptor instead. 
-func (*TransferLeaderRequest) Descriptor() ([]byte, []int) { - return file_proto_pboperator_operator_proto_rawDescGZIP(), []int{0} -} - -func (x *TransferLeaderRequest) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -// mog annotation: -// -// target=github.com/hashicorp/consul/api.TransferLeaderResponse -// output=operator.gen.go -// name=API -type TransferLeaderResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // true if the transfer is a success - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` -} - -func (x *TransferLeaderResponse) Reset() { - *x = TransferLeaderResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_pboperator_operator_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TransferLeaderResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransferLeaderResponse) ProtoMessage() {} - -func (x *TransferLeaderResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_pboperator_operator_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransferLeaderResponse.ProtoReflect.Descriptor instead. 
-func (*TransferLeaderResponse) Descriptor() ([]byte, []int) { - return file_proto_pboperator_operator_proto_rawDescGZIP(), []int{1} -} - -func (x *TransferLeaderResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -var File_proto_pboperator_operator_proto protoreflect.FileDescriptor - -var file_proto_pboperator_operator_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x22, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x6f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x27, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, - 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, 0x32, - 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x32, 0x9d, 0x01, 0x0a, 0x0f, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x89, 0x01, 0x0a, 0x0e, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x66, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 
0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, - 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x42, 0x91, 0x02, 0x0a, 0x26, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x0d, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x70, 0x62, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0xa2, 0x02, 0x04, 0x48, - 0x43, 0x49, 0x4f, 0xaa, 0x02, 0x22, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0xca, 0x02, 0x22, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0xe2, 0x02, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x25, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 
0x3a, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_proto_pboperator_operator_proto_rawDescOnce sync.Once - file_proto_pboperator_operator_proto_rawDescData = file_proto_pboperator_operator_proto_rawDesc -) - -func file_proto_pboperator_operator_proto_rawDescGZIP() []byte { - file_proto_pboperator_operator_proto_rawDescOnce.Do(func() { - file_proto_pboperator_operator_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pboperator_operator_proto_rawDescData) - }) - return file_proto_pboperator_operator_proto_rawDescData -} - -var file_proto_pboperator_operator_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_proto_pboperator_operator_proto_goTypes = []interface{}{ - (*TransferLeaderRequest)(nil), // 0: hashicorp.consul.internal.operator.TransferLeaderRequest - (*TransferLeaderResponse)(nil), // 1: hashicorp.consul.internal.operator.TransferLeaderResponse -} -var file_proto_pboperator_operator_proto_depIdxs = []int32{ - 0, // 0: hashicorp.consul.internal.operator.OperatorService.TransferLeader:input_type -> hashicorp.consul.internal.operator.TransferLeaderRequest - 1, // 1: hashicorp.consul.internal.operator.OperatorService.TransferLeader:output_type -> hashicorp.consul.internal.operator.TransferLeaderResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_proto_pboperator_operator_proto_init() } -func file_proto_pboperator_operator_proto_init() { - if File_proto_pboperator_operator_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_proto_pboperator_operator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferLeaderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache 
- case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_pboperator_operator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferLeaderResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_proto_pboperator_operator_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_proto_pboperator_operator_proto_goTypes, - DependencyIndexes: file_proto_pboperator_operator_proto_depIdxs, - MessageInfos: file_proto_pboperator_operator_proto_msgTypes, - }.Build() - File_proto_pboperator_operator_proto = out.File - file_proto_pboperator_operator_proto_rawDesc = nil - file_proto_pboperator_operator_proto_goTypes = nil - file_proto_pboperator_operator_proto_depIdxs = nil -} diff --git a/proto/pboperator/operator.proto b/proto/pboperator/operator.proto deleted file mode 100644 index 71d0a6caf36b8..0000000000000 --- a/proto/pboperator/operator.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package hashicorp.consul.internal.operator; - -// Operator defines a set of operators operation applicable to Consul -service OperatorService { - //Transfer raft leadership to another node - rpc TransferLeader(TransferLeaderRequest) returns (TransferLeaderResponse) {} -} - -message TransferLeaderRequest { - string ID = 1; -} - -// -// mog annotation: -// -// target=github.com/hashicorp/consul/api.TransferLeaderResponse -// output=operator.gen.go -// name=API -message TransferLeaderResponse { - // true if the transfer is a success - bool success = 1; -} diff --git a/proto/pboperator/operator_grpc.pb.go b/proto/pboperator/operator_grpc.pb.go deleted file mode 100644 index b8dac9336c429..0000000000000 --- 
a/proto/pboperator/operator_grpc.pb.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc (unknown) -// source: proto/pboperator/operator.proto - -package pboperator - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// OperatorServiceClient is the client API for OperatorService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type OperatorServiceClient interface { - // Transfer raft leadership to another node - TransferLeader(ctx context.Context, in *TransferLeaderRequest, opts ...grpc.CallOption) (*TransferLeaderResponse, error) -} - -type operatorServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewOperatorServiceClient(cc grpc.ClientConnInterface) OperatorServiceClient { - return &operatorServiceClient{cc} -} - -func (c *operatorServiceClient) TransferLeader(ctx context.Context, in *TransferLeaderRequest, opts ...grpc.CallOption) (*TransferLeaderResponse, error) { - out := new(TransferLeaderResponse) - err := c.cc.Invoke(ctx, "/hashicorp.consul.internal.operator.OperatorService/TransferLeader", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// OperatorServiceServer is the server API for OperatorService service. 
-// All implementations should embed UnimplementedOperatorServiceServer -// for forward compatibility -type OperatorServiceServer interface { - // Transfer raft leadership to another node - TransferLeader(context.Context, *TransferLeaderRequest) (*TransferLeaderResponse, error) -} - -// UnimplementedOperatorServiceServer should be embedded to have forward compatible implementations. -type UnimplementedOperatorServiceServer struct { -} - -func (UnimplementedOperatorServiceServer) TransferLeader(context.Context, *TransferLeaderRequest) (*TransferLeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TransferLeader not implemented") -} - -// UnsafeOperatorServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to OperatorServiceServer will -// result in compilation errors. -type UnsafeOperatorServiceServer interface { - mustEmbedUnimplementedOperatorServiceServer() -} - -func RegisterOperatorServiceServer(s grpc.ServiceRegistrar, srv OperatorServiceServer) { - s.RegisterService(&OperatorService_ServiceDesc, srv) -} - -func _OperatorService_TransferLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TransferLeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OperatorServiceServer).TransferLeader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hashicorp.consul.internal.operator.OperatorService/TransferLeader", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OperatorServiceServer).TransferLeader(ctx, req.(*TransferLeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// OperatorService_ServiceDesc is the grpc.ServiceDesc for OperatorService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var OperatorService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "hashicorp.consul.internal.operator.OperatorService", - HandlerType: (*OperatorServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "TransferLeader", - Handler: _OperatorService_TransferLeader_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/pboperator/operator.proto", -} diff --git a/proto/pbpeering/peering.go b/proto/pbpeering/peering.go index dea15031efb9b..c50e827cd94fa 100644 --- a/proto/pbpeering/peering.go +++ b/proto/pbpeering/peering.go @@ -156,9 +156,9 @@ func StreamStatusToAPI(status *StreamStatus) api.PeeringStreamStatus { return api.PeeringStreamStatus{ ImportedServices: status.ImportedServices, ExportedServices: status.ExportedServices, - LastHeartbeat: TimePtrFromProto(status.LastHeartbeat), - LastReceive: TimePtrFromProto(status.LastReceive), - LastSend: TimePtrFromProto(status.LastSend), + LastHeartbeat: structs.TimeFromProto(status.LastHeartbeat), + LastReceive: structs.TimeFromProto(status.LastReceive), + LastSend: structs.TimeFromProto(status.LastSend), } } @@ -166,9 +166,9 @@ func StreamStatusFromAPI(status api.PeeringStreamStatus) *StreamStatus { return &StreamStatus{ ImportedServices: status.ImportedServices, ExportedServices: status.ExportedServices, - LastHeartbeat: TimePtrToProto(status.LastHeartbeat), - LastReceive: TimePtrToProto(status.LastReceive), - LastSend: TimePtrToProto(status.LastSend), + LastHeartbeat: structs.TimeToProto(status.LastHeartbeat), + LastReceive: structs.TimeToProto(status.LastReceive), + LastSend: structs.TimeToProto(status.LastSend), } } diff --git a/sdk/freeport/ephemeral_linux.go b/sdk/freeport/ephemeral_linux.go index 22cf4caee674d..55200ad2acced 100644 --- a/sdk/freeport/ephemeral_linux.go +++ b/sdk/freeport/ephemeral_linux.go @@ -5,7 +5,7 @@ package freeport import ( "fmt" - "os" + 
"io/ioutil" "regexp" "strconv" ) @@ -15,7 +15,7 @@ const ephemeralPortRangeProcFile = "/proc/sys/net/ipv4/ip_local_port_range" var ephemeralPortRangePatt = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s*$`) func getEphemeralPortRange() (int, int, error) { - out, err := os.ReadFile(ephemeralPortRangeProcFile) + out, err := ioutil.ReadFile(ephemeralPortRangeProcFile) if err != nil { return 0, 0, err } diff --git a/sdk/go.mod b/sdk/go.mod index b7c2eb014260c..0cfd6e0c24836 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -1,24 +1,18 @@ module github.com/hashicorp/consul/sdk -go 1.18 +go 1.12 require ( + github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.2.1 + github.com/kr/pretty v0.2.0 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect github.com/pkg/errors v0.8.1 github.com/stretchr/testify v1.4.0 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.9.0 // indirect - github.com/kr/pretty v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.4 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/sdk/testutil/io.go b/sdk/testutil/io.go index fb1a54f8a8471..b46425a50abfc 100644 --- a/sdk/testutil/io.go +++ b/sdk/testutil/io.go @@ -1,6 +1,7 @@ package testutil import ( + "io/ioutil" "os" "strings" "testing" @@ -18,7 +19,7 @@ func TempDir(t testing.TB, name string) string { } name = t.Name() + "-" + name name = strings.Replace(name, "/", "_", -1) - d, err := os.MkdirTemp("", name) + d, err := ioutil.TempDir("", name) if err != nil 
{ t.Fatalf("err: %s", err) } @@ -44,7 +45,7 @@ func TempFile(t testing.TB, name string) *os.File { } name = t.Name() + "-" + name name = strings.Replace(name, "/", "_", -1) - f, err := os.CreateTemp("", name) + f, err := ioutil.TempFile("", name) if err != nil { t.Fatalf("err: %s", err) } diff --git a/sdk/testutil/server.go b/sdk/testutil/server.go index de42a8e41afd4..7425c37f9d8aa 100644 --- a/sdk/testutil/server.go +++ b/sdk/testutil/server.go @@ -17,6 +17,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net" "net/http" "os" @@ -266,7 +267,7 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er // Use test name for tmpdir if available prefix = strings.Replace(t.Name(), "/", "_", -1) } - tmpdir, err := os.MkdirTemp("", prefix) + tmpdir, err := ioutil.TempDir("", prefix) if err != nil { return nil, errors.Wrap(err, "failed to create tempdir") } @@ -290,7 +291,7 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er t.Logf("CONFIG JSON: %s", string(b)) configFile := filepath.Join(tmpdir, "config.json") - if err := os.WriteFile(configFile, b, 0644); err != nil { + if err := ioutil.WriteFile(configFile, b, 0644); err != nil { os.RemoveAll(tmpdir) return nil, errors.Wrap(err, "failed writing config content") } diff --git a/sdk/testutil/server_methods.go b/sdk/testutil/server_methods.go index d25e66bc8d632..2695e88fb0925 100644 --- a/sdk/testutil/server_methods.go +++ b/sdk/testutil/server_methods.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "log" "net/http" "testing" @@ -52,7 +53,7 @@ func (s *TestServer) GetKV(t testing.TB, key string) []byte { resp := s.get(t, "/v1/kv/"+key) defer resp.Body.Close() - raw, err := io.ReadAll(resp.Body) + raw, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read body: %s", err) } @@ -92,7 +93,7 @@ func (s *TestServer) ListKV(t testing.TB, prefix string) []string { resp := s.get(t, "/v1/kv/"+prefix+"?keys") defer 
resp.Body.Close() - raw, err := io.ReadAll(resp.Body) + raw, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("failed to read body: %s", err) } diff --git a/snapshot/archive.go b/snapshot/archive.go index b0e9ca5211ba5..2c5efb381273d 100644 --- a/snapshot/archive.go +++ b/snapshot/archive.go @@ -18,6 +18,7 @@ import ( "fmt" "hash" "io" + "io/ioutil" "time" "github.com/hashicorp/raft" @@ -201,7 +202,7 @@ func read(in io.Reader, metadata *raft.SnapshotMeta, snap io.Writer) error { // turn made the snapshot verification fail. By explicitly reading the // whole thing first we ensure that we calculate the correct hash // independent of how json.Decode works internally. - buf, err := io.ReadAll(io.TeeReader(archive, metaHash)) + buf, err := ioutil.ReadAll(io.TeeReader(archive, metaHash)) if err != nil { return fmt.Errorf("failed to read snapshot metadata: %v", err) } diff --git a/snapshot/archive_test.go b/snapshot/archive_test.go index 6a6e17d37fb44..67582af0998a1 100644 --- a/snapshot/archive_test.go +++ b/snapshot/archive_test.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "fmt" "io" + "io/ioutil" "os" "reflect" "strings" @@ -74,7 +75,7 @@ func TestArchive_GoodData(t *testing.T) { defer f.Close() var metadata raft.SnapshotMeta - err = read(f, &metadata, io.Discard) + err = read(f, &metadata, ioutil.Discard) if err != nil { t.Fatalf("case %d: should've read the snapshot, but didn't: %v", i, err) } @@ -103,7 +104,7 @@ func TestArchive_BadData(t *testing.T) { defer f.Close() var metadata raft.SnapshotMeta - err = read(f, &metadata, io.Discard) + err = read(f, &metadata, ioutil.Discard) if err == nil || !strings.Contains(err.Error(), c.Error) { t.Fatalf("case %d (%s): %v", i, c.Name, err) } diff --git a/snapshot/snapshot.go b/snapshot/snapshot.go index 691f4bc8e1144..9437cfa11cd02 100644 --- a/snapshot/snapshot.go +++ b/snapshot/snapshot.go @@ -7,6 +7,7 @@ import ( "compress/gzip" "fmt" "io" + "io/ioutil" "os" "github.com/hashicorp/go-hclog" @@ -46,7 
+47,7 @@ func New(logger hclog.Logger, r *raft.Raft) (*Snapshot, error) { // Make a scratch file to receive the contents so that we don't buffer // everything in memory. This gets deleted in Close() since we keep it // around for re-reading. - archive, err := os.CreateTemp("", "snapshot") + archive, err := ioutil.TempFile("", "snapshot") if err != nil { return nil, fmt.Errorf("failed to create snapshot file: %v", err) } @@ -133,7 +134,7 @@ func Verify(in io.Reader) (*raft.SnapshotMeta, error) { // Read the archive, throwing away the snapshot data. var metadata raft.SnapshotMeta - if err := read(decomp, &metadata, io.Discard); err != nil { + if err := read(decomp, &metadata, ioutil.Discard); err != nil { return nil, fmt.Errorf("failed to read snapshot file: %v", err) } @@ -150,7 +151,7 @@ func Verify(in io.Reader) (*raft.SnapshotMeta, error) { // The docs for gzip.Reader say: "Clients should treat data returned by Read as // tentative until they receive the io.EOF marking the end of the data." func concludeGzipRead(decomp *gzip.Reader) error { - extra, err := io.ReadAll(decomp) // ReadAll consumes the EOF + extra, err := ioutil.ReadAll(decomp) // ReadAll consumes the EOF if err != nil { return err } else if len(extra) != 0 { @@ -174,7 +175,7 @@ func Read(logger hclog.Logger, in io.Reader) (*os.File, *raft.SnapshotMeta, erro // Make a scratch file to receive the contents of the snapshot data so // we can avoid buffering in memory. 
- snap, err := os.CreateTemp("", "snapshot") + snap, err := ioutil.TempFile("", "snapshot") if err != nil { return nil, nil, fmt.Errorf("failed to create temp snapshot file: %v", err) } diff --git a/test/integration/connect/envoy/case-centralconf/config_entries.hcl b/test/integration/connect/envoy/case-centralconf/config_entries.hcl new file mode 100644 index 0000000000000..993ad5ecb26e0 --- /dev/null +++ b/test/integration/connect/envoy/case-centralconf/config_entries.hcl @@ -0,0 +1,19 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + config { + envoy_prometheus_bind_addr = "0.0.0.0:1234" + } + } + bootstrap { + kind = "service-defaults" + name = "s1" + protocol = "http" + } + bootstrap { + kind = "service-defaults" + name = "s2" + protocol = "http" + } +} diff --git a/test/integration/connect/envoy/case-centralconf/setup.sh b/test/integration/connect/envoy/case-centralconf/setup.sh index f0c2593f7d7a8..dce23006bcfd8 100644 --- a/test/integration/connect/envoy/case-centralconf/setup.sh +++ b/test/integration/connect/envoy/case-centralconf/setup.sh @@ -2,28 +2,12 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - envoy_prometheus_bind_addr = "0.0.0.0:1234" -} -' - -upsert_config_entry primary ' -kind = "service-defaults" -name = "s1" -protocol = "http" -' - -upsert_config_entry primary ' -kind = "service-defaults" -name = "s2" -protocol = "http" -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-defaults s1 +wait_for_config_entry service-defaults s2 register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap s2 19001 primary - diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl new file mode 100644 index 
0000000000000..e1f1178887d9d --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh index 46ec303e2ff7c..820506ea9be33 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh @@ -2,29 +2,10 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl new file mode 100644 index 0000000000000..65163b1115db6 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl @@ -0,0 +1,31 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + 
failover = { + "*" = { + targets = [{peer = "primary-to-alpha"}] + } + } + } + + bootstrap { + kind = "service-resolver" + name = "virtual-s2" + + redirect = { + service = "s2" + peer = "primary-to-alpha" + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh index db5571d0c7d62..c65cc31e49ef2 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh @@ -2,34 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -failover = { - "*" = { - targets = [{ peer = "primary-to-alpha" }] - } -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "virtual-s2" -redirect = { - service = "s2" - peer = "primary-to-alpha" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap s2 19001 primary + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/config_entries.hcl new file mode 100644 index 0000000000000..55441ae3d08c8 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/config_entries.hcl @@ -0,0 +1,23 @@ +config_entries { + bootstrap { + kind = "service-defaults" + name = "s2" + + protocol = "http" + + mesh_gateway { + mode = "none" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + failover = { + "*" = { + datacenters = ["secondary"] + } + } + } +} diff --git 
a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/setup.sh index f5734d4b7c849..c6380b061a638 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-none/primary/setup.sh @@ -2,24 +2,9 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "service-defaults" -name = "s2" -protocol = "http" -mesh_gateway { - mode = "none" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -failover = { - "*" = { - datacenters = ["secondary"] - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry service-defaults s2 +wait_for_config_entry service-resolver s2 # also wait for replication to make it to the remote dc wait_for_config_entry service-defaults s2 secondary diff --git a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/config_entries.hcl new file mode 100644 index 0000000000000..2a4d06ce31137 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/config_entries.hcl @@ -0,0 +1,23 @@ +config_entries { + bootstrap { + kind = "service-defaults" + name = "s2" + + protocol = "http" + + mesh_gateway { + mode = "remote" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + failover = { + "*" = { + datacenters = ["secondary"] + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/setup.sh index ccd9a81a87f76..c6380b061a638 100644 --- 
a/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-dc-failover-gateways-remote/primary/setup.sh @@ -2,24 +2,9 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "service-defaults" -name = "s2" -protocol = "http" -mesh_gateway { - mode = "remote" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -failover = { - "*" = { - datacenters = ["secondary"] - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry service-defaults s2 +wait_for_config_entry service-resolver s2 # also wait for replication to make it to the remote dc wait_for_config_entry service-defaults s2 secondary diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl new file mode 100644 index 0000000000000..80c6489671d80 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + default_subset = "v2" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh index 2dfca49e9c392..519bba361cfbf 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh @@ -2,27 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = 
"s2" -default_subset = "v2" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-resolver-features/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-features/config_entries.hcl new file mode 100644 index 0000000000000..17a3002ec10b9 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-features/config_entries.hcl @@ -0,0 +1,27 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + default_subset = "v2" + connect_timeout = "30s" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-features/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-features/setup.sh index 2695c0621f10a..519bba361cfbf 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-features/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-features/setup.sh @@ -2,28 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -default_subset = "v2" -connect_timeout = "30s" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 register_services primary diff --git 
a/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/config_entries.hcl new file mode 100644 index 0000000000000..e4b047726f661 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/config_entries.hcl @@ -0,0 +1,23 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + default_subset = "test" + + subsets = { + "test" = { + only_passing = true + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/setup.sh index 902dfeeb6a156..c15ca43ccf305 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-subset-onlypassing/setup.sh @@ -2,25 +2,6 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -default_subset = "test" -subsets = { - "test" = { - only_passing = true - } -} -' - register_services primary # wait for service registration @@ -31,6 +12,10 @@ wait_for_agent_service_register s2-v1 # force s2-v1 into a warning state set_ttl_check_state service:s2-v1 warn +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 + gen_envoy_bootstrap s1 19000 gen_envoy_bootstrap s2 19001 gen_envoy_bootstrap s2-v1 19002 diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl new file mode 100644 index 0000000000000..d7226df6e117b --- /dev/null +++ 
b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl @@ -0,0 +1,35 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s3" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + redirect { + service = "s3" + service_subset = "v2" + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh index ba7ee3bb6ed4f..606d5a791ed0b 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh @@ -2,35 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s3" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -redirect { - service = "s3" - service_subset = "v2" -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 +wait_for_config_entry service-resolver s3 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl new file mode 100644 index 0000000000000..56972b39264fc --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl @@ -0,0 +1,37 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + 
name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s3" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + failover = { + "*" = { + service = "s3" + service_subset = "v1" + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh index 953919d108eac..606d5a791ed0b 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh @@ -2,37 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s3" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -failover = { - "*" = { - service = "s3" - service_subset = "v1" - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 +wait_for_config_entry service-resolver s3 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/config_entries.hcl new file mode 100644 index 0000000000000..549db0c60e7f2 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/config_entries.hcl @@ -0,0 +1,19 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + redirect { 
+ service = "s3" + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/setup.sh index 87bc93016b8fa..ceae3b110f9cf 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-http/setup.sh @@ -2,21 +2,9 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -redirect { - service = "s3" -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/config_entries.hcl new file mode 100644 index 0000000000000..4cc1777abb380 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/config_entries.hcl @@ -0,0 +1,19 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + redirect { + service = "s3" + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/setup.sh index 4d13925e2b1ca..ceae3b110f9cf 100644 --- a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/setup.sh +++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/setup.sh @@ -2,21 +2,9 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -redirect { - 
service = "s3" -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-router-features/config_entries.hcl b/test/integration/connect/envoy/case-cfg-router-features/config_entries.hcl new file mode 100644 index 0000000000000..800988207272b --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-router-features/config_entries.hcl @@ -0,0 +1,327 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + default_subset = "v1" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } + + bootstrap { + kind = "service-router" + name = "s2" + + routes = [ + { + match { http { path_exact = "/exact/debug" } } + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { path_exact = "/exact-alt/debug" } } + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { path_prefix = "/prefix/" } } + destination { + service_subset = "v2" + prefix_rewrite = "/" + } + }, + { + match { http { path_prefix = "/prefix-alt/" } } + destination { + service_subset = "v1" + prefix_rewrite = "/" + } + }, + { + match { http { + path_regex = "/deb[ug]{2}" + header = [{ + name = "x-test-debug" + exact = "regex-path" + }] + } } + destination { + service_subset = "v2" + retry_on_connect_failure = true # TODO: test + retry_on = ["reset"] # TODO: test + retry_on_status_codes = [500, 512] # TODO: test + } + }, + { + match { http { + path_exact = "/hdr-present/debug" + header = [ + { + name = "x-test-debug" + present = true + }, + ] + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = 
"/hdr-present/debug" + header = [ + { + name = "x-test-debug" + present = true + invert = true + }, + ] + } }, + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-exact/debug" + header = [ + { + name = "x-test-debug" + exact = "exact" + }, + ] + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-exact/debug" + header = [ + { + name = "x-test-debug" + exact = "exact-alt" + }, + ] + } }, + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-prefix/debug" + header = [ + { + name = "x-test-debug" + prefix = "prefi" + }, + ] } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-prefix/debug" + header = [ + { + name = "x-test-debug" + prefix = "alt-prefi" + }, + ] } }, + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-suffix/debug" + header = [ + { + name = "x-test-debug" + suffix = "uffix" + }, + ] + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-suffix/debug" + header = [ + { + name = "x-test-debug" + suffix = "uffix-alt" + }, + ] + } }, + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-regex/debug" + header = [ + { + name = "x-test-debug" + regex = "reg[ex]{2}" + }, + ] + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-regex/debug" + header = [ + { + name = "x-test-debug" + regex = "reg[ex]{3}" + }, + ] + } }, + destination { + service_subset = "v1" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/hdr-invert/debug" + header = [ + { + name = "x-test-debug" + exact = "not-this" + invert = 
true + }, + ], + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/qp-present/debug" + query_param = [ + { + name = "env" + present = true + }, + ], + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/qp-exact/debug" + query_param = [ + { + name = "env" + exact = "dump" + }, + ], + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/qp-regex/debug" + query_param = [ + { + name = "env" + regex = "du[mp]{2}" + }, + ], + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/method-match/debug" + methods = ["GET", "PUT"] + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + } + }, + { + match { http { + path_exact = "/header-manip/debug" + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/debug" + request_headers { + set { + x-foo = "request-bar" + } + remove = ["x-bad-req"] + } + } + }, + { + match { http { + path_exact = "/header-manip/echo" + } }, + destination { + service_subset = "v2" + prefix_rewrite = "/" + response_headers { + add { + x-foo = "response-bar" + } + remove = ["x-bad-resp"] + } + } + }, + ] + } +} diff --git a/test/integration/connect/envoy/case-cfg-router-features/setup.sh b/test/integration/connect/envoy/case-cfg-router-features/setup.sh index f4b559c1481ee..0319800fca2f5 100644 --- a/test/integration/connect/envoy/case-cfg-router-features/setup.sh +++ b/test/integration/connect/envoy/case-cfg-router-features/setup.sh @@ -2,327 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -default_subset = "v1" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - 
filter = "Service.Meta.version == v2" - } -} -' - -upsert_config_entry primary ' -kind = "service-router" -name = "s2" -routes = [ - { - match { http { path_exact = "/exact/debug" } } - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { path_exact = "/exact-alt/debug" } } - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { path_prefix = "/prefix/" } } - destination { - service_subset = "v2" - prefix_rewrite = "/" - } - }, - { - match { http { path_prefix = "/prefix-alt/" } } - destination { - service_subset = "v1" - prefix_rewrite = "/" - } - }, - { - match { http { - path_regex = "/deb[ug]{2}" - header = [{ - name = "x-test-debug" - exact = "regex-path" - }] - } } - destination { - service_subset = "v2" - retry_on_connect_failure = true # TODO: test - retry_on = ["reset"] # TODO: test - retry_on_status_codes = [500, 512] # TODO: test - } - }, - { - match { http { - path_exact = "/hdr-present/debug" - header = [ - { - name = "x-test-debug" - present = true - }, - ] - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-present/debug" - header = [ - { - name = "x-test-debug" - present = true - invert = true - }, - ] - } }, - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-exact/debug" - header = [ - { - name = "x-test-debug" - exact = "exact" - }, - ] - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-exact/debug" - header = [ - { - name = "x-test-debug" - exact = "exact-alt" - }, - ] - } }, - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-prefix/debug" - header = [ - { - name = "x-test-debug" - prefix = "prefi" - }, - ] } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } 
- }, - { - match { http { - path_exact = "/hdr-prefix/debug" - header = [ - { - name = "x-test-debug" - prefix = "alt-prefi" - }, - ] } }, - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-suffix/debug" - header = [ - { - name = "x-test-debug" - suffix = "uffix" - }, - ] - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-suffix/debug" - header = [ - { - name = "x-test-debug" - suffix = "uffix-alt" - }, - ] - } }, - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-regex/debug" - header = [ - { - name = "x-test-debug" - regex = "reg[ex]{2}" - }, - ] - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-regex/debug" - header = [ - { - name = "x-test-debug" - regex = "reg[ex]{3}" - }, - ] - } }, - destination { - service_subset = "v1" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/hdr-invert/debug" - header = [ - { - name = "x-test-debug" - exact = "not-this" - invert = true - }, - ], - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/qp-present/debug" - query_param = [ - { - name = "env" - present = true - }, - ], - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/qp-exact/debug" - query_param = [ - { - name = "env" - exact = "dump" - }, - ], - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/qp-regex/debug" - query_param = [ - { - name = "env" - regex = "du[mp]{2}" - }, - ], - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/method-match/debug" - methods = ["GET", "PUT"] - } }, - 
destination { - service_subset = "v2" - prefix_rewrite = "/debug" - } - }, - { - match { http { - path_exact = "/header-manip/debug" - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/debug" - request_headers { - set { - x-foo = "request-bar" - } - remove = ["x-bad-req"] - } - } - }, - { - match { http { - path_exact = "/header-manip/echo" - } }, - destination { - service_subset = "v2" - prefix_rewrite = "/" - response_headers { - add { - x-foo = "response-bar" - } - remove = ["x-bad-resp"] - } - } - }, -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 +wait_for_config_entry service-router s2 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/config_entries.hcl new file mode 100644 index 0000000000000..64d011702016c --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer_name = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/setup.sh b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/setup.sh index ff7dcb8114eae..820506ea9be33 100644 --- a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/alpha/setup.sh @@ -2,30 +2,10 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = 
"default" -services = [ - { - name = "s2" - consumers = [ - { - peer_name = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/config_entries.hcl new file mode 100644 index 0000000000000..a3970b05488a3 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/config_entries.hcl @@ -0,0 +1,53 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-splitter" + name = "split-s2" + splits = [ + { + Weight = 50 + Service = "local-s2" + ResponseHeaders { + Set { + "x-test-split" = "primary" + } + } + }, + { + Weight = 50 + Service = "peer-s2" + ResponseHeaders { + Set { + "x-test-split" = "alpha" + } + } + }, + ] + } + + bootstrap { + kind = "service-resolver" + name = "local-s2" + redirect = { + service = "s2" + } + } + + bootstrap { + kind = "service-resolver" + name = "peer-s2" + + redirect = { + service = "s2" + peer = "primary-to-alpha" + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/setup.sh b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/setup.sh index e59202e811d5e..c65cc31e49ef2 100644 --- a/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/setup.sh +++ b/test/integration/connect/envoy/case-cfg-splitter-cluster-peering/primary/setup.sh @@ -2,57 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-splitter" -name = "split-s2" 
-splits = [ - { - Weight = 50 - Service = "local-s2" - ResponseHeaders { - Set { - "x-test-split" = "primary" - } - } - }, - { - Weight = 50 - Service = "peer-s2" - ResponseHeaders { - Set { - "x-test-split" = "alpha" - } - } - }, -] -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "local-s2" -redirect = { - service = "s2" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "peer-s2" -redirect = { - service = "s2" - peer = "primary-to-alpha" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap s2 19001 primary + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cfg-splitter-features/config_entries.hcl b/test/integration/connect/envoy/case-cfg-splitter-features/config_entries.hcl new file mode 100644 index 0000000000000..1ea93fb5fcb18 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-splitter-features/config_entries.hcl @@ -0,0 +1,65 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } + + bootstrap { + kind = "service-splitter" + name = "s2" + + splits = [ + { + weight = 50, + service_subset = "v2" + request_headers { + set { + x-split-leg = "v2" + } + remove = ["x-bad-req"] + } + response_headers { + add { + x-svc-version = "v2" + } + remove = ["x-bad-resp"] + } + }, + { + weight = 50, + service_subset = "v1" + request_headers { + set { + x-split-leg = "v1" + } + remove = ["x-bad-req"] + } + response_headers { + add { + x-svc-version = "v1" + } + remove = ["x-bad-resp"] + } + }, + ] + } +} diff --git a/test/integration/connect/envoy/case-cfg-splitter-features/setup.sh b/test/integration/connect/envoy/case-cfg-splitter-features/setup.sh index c04afa2b6a21c..fe0967a383883 
100644 --- a/test/integration/connect/envoy/case-cfg-splitter-features/setup.sh +++ b/test/integration/connect/envoy/case-cfg-splitter-features/setup.sh @@ -2,65 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' - -upsert_config_entry primary ' -kind = "service-splitter" -name = "s2" -splits = [ - { - weight = 50, - service_subset = "v2" - request_headers { - set { - x-split-leg = "v2" - } - remove = ["x-bad-req"] - } - response_headers { - add { - x-svc-version = "v2" - } - remove = ["x-bad-resp"] - } - }, - { - weight = 50, - service_subset = "v1" - request_headers { - set { - x-split-leg = "v1" - } - remove = ["x-bad-req"] - } - response_headers { - add { - x-svc-version = "v1" - } - remove = ["x-bad-resp"] - } - }, -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 +wait_for_config_entry service-splitter s2 register_services primary diff --git a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/config_entries.hcl new file mode 100644 index 0000000000000..6c186ecae0c86 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/config_entries.hcl @@ -0,0 +1,34 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s1" + consumers = [ + { + peer_name = "alpha-to-primary" + } + ] + }, + { + name = "s2" + consumers = [ + { + peer_name = "alpha-to-primary" + } + ] + } + ] + } + ] 
+} diff --git a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/setup.sh b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/setup.sh index 42177898e0653..e6d27d5d8dfad 100644 --- a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/alpha/setup.sh @@ -2,36 +2,8 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s1" - consumers = [ - { - peer_name = "alpha-to-primary" - } - ] - }, - { - name = "s2" - consumers = [ - { - peer_name = "alpha-to-primary" - } - ] - } -] -' +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha register_services alpha diff --git a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/config_entries.hcl new file mode 100644 index 0000000000000..0b38ad6ed4d1e --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/config_entries.hcl @@ -0,0 +1,88 @@ +config_entries { + + bootstrap { + kind = "proxy-defaults" + name = "global" + config { + protocol = "http" + } + } + + bootstrap { + kind = "ingress-gateway" + name = "ingress-gateway" + listeners = [ + { + protocol = "http" + port = 9999 + services = [ + { + name = "peer-s2" + } + ] + }, + { + protocol = "http" + port = 10000 + services = [ + { + name = "peer-s1" + } + ] + }, + { + protocol = "http" + port = 10001 + services = [ + { + name = "s1" + } + ] + }, + { + protocol = "http" + port = 10002 + services = [ + { + name = "split" + } + ] + } + ] + } + + bootstrap { + kind = "service-resolver" + name = "peer-s1" 
+ + redirect = { + service = "s1" + peer = "primary-to-alpha" + } + } + + bootstrap { + kind = "service-resolver" + name = "peer-s2" + + redirect = { + service = "s2" + peer = "primary-to-alpha" + } + } + + bootstrap { + kind = "service-splitter" + name = "split" + splits = [ + { + Weight = 50 + Service = "peer-s1" + }, + { + Weight = 50 + Service = "peer-s2" + }, + ] + } +} diff --git a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/setup.sh b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/setup.sh index 54a201cad0056..b92dfc15e6779 100644 --- a/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/setup.sh +++ b/test/integration/connect/envoy/case-cfg-splitter-peering-ingress-gateways/primary/setup.sh @@ -2,91 +2,13 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - protocol = "http" - port = 9999 - services = [ - { - name = "peer-s2" - } - ] - }, - { - protocol = "http" - port = 10000 - services = [ - { - name = "peer-s1" - } - ] - }, - { - protocol = "http" - port = 10001 - services = [ - { - name = "s1" - } - ] - }, - { - protocol = "http" - port = 10002 - services = [ - { - name = "split" - } - ] - } -] -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "peer-s1" -redirect = { - service = "s1" - peer = "primary-to-alpha" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "peer-s2" -redirect = { - service = "s2" - peer = "primary-to-alpha" -} -' - -upsert_config_entry primary ' -kind = "service-splitter" -name = "split" -splits = [ - { - Weight = 50 - Service = "peer-s1" - }, - { - Weight = 50 - Service = "peer-s2" - }, -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway 
+wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver peer-s1 +wait_for_config_entry service-resolver peer-s2 register_services primary gen_envoy_bootstrap ingress-gateway 20000 primary true -gen_envoy_bootstrap s1 19000 primary +gen_envoy_bootstrap s1 19000 primary \ No newline at end of file diff --git a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/config_entries.hcl new file mode 100644 index 0000000000000..996df1d21367c --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/config_entries.hcl @@ -0,0 +1,32 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "mesh" + peering { + peer_through_mesh_gateways = true + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/setup.sh b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/setup.sh index 4c6fa1594e4dc..6d341b20a2b64 100644 --- a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/alpha/setup.sh @@ -2,37 +2,11 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "mesh" -peering { - peer_through_mesh_gateways = true -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + 
+wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha +wait_for_config_entry mesh mesh alpha diff --git a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/config_entries.hcl b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/config_entries.hcl new file mode 100644 index 0000000000000..6baeb569f25e7 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/config_entries.hcl @@ -0,0 +1,18 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "mesh" + peering { + peer_through_mesh_gateways = true + } + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/setup.sh b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/setup.sh index 5b0775f03ae23..3aa37f8cb0e95 100644 --- a/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/setup.sh +++ b/test/integration/connect/envoy/case-cross-peer-control-plane-mgw/primary/setup.sh @@ -2,21 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' -upsert_config_entry primary ' -kind = "mesh" -peering { - peer_through_mesh_gateways = true -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap mesh-gateway 19001 primary true + +wait_for_config_entry proxy-defaults global +wait_for_config_entry mesh mesh alpha diff --git a/test/integration/connect/envoy/case-cross-peers-http-router/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-http-router/alpha/config_entries.hcl new file mode 100644 index 0000000000000..54941a9032d43 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-http-router/alpha/config_entries.hcl @@ -0,0 +1,39 @@ +config_entries { + bootstrap = [ + { + kind = 
"proxy-defaults" + name = "global" + + config { + protocol = "http" + } + }, + { + kind = "service-router" + name = "s2" + routes = [ + { + match { http { path_prefix = "/s3/" } } + destination { + service = "s3" + prefix_rewrite = "/" + } + }, + ] + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-http-router/alpha/setup.sh b/test/integration/connect/envoy/case-cross-peers-http-router/alpha/setup.sh index d25c61e21bb7c..75b38c6f26c09 100644 --- a/test/integration/connect/envoy/case-cross-peers-http-router/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-http-router/alpha/setup.sh @@ -2,45 +2,12 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry alpha ' -kind = "service-router" -name = "s2" -routes = [ - { - match { http { path_prefix = "/s3/" } } - destination { - service = "s3" - prefix_rewrite = "/" - } - }, -] -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true gen_envoy_bootstrap s3 19004 alpha + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry service-router s2 alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cross-peers-http-router/primary/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-http-router/primary/config_entries.hcl new file mode 100644 index 0000000000000..3bba325301fd3 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-http-router/primary/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries { + bootstrap 
= [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-http-router/primary/setup.sh b/test/integration/connect/envoy/case-cross-peers-http-router/primary/setup.sh index ad9c9691cc368..38122406d2c7c 100644 --- a/test/integration/connect/envoy/case-cross-peers-http-router/primary/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-http-router/primary/setup.sh @@ -2,15 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap mesh-gateway 19001 primary true + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cross-peers-http/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-http/alpha/config_entries.hcl new file mode 100644 index 0000000000000..a46dc7ee2dfea --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-http/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-http/alpha/setup.sh b/test/integration/connect/envoy/case-cross-peers-http/alpha/setup.sh index 2a8edf8c7e5ab..820506ea9be33 100644 --- a/test/integration/connect/envoy/case-cross-peers-http/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-http/alpha/setup.sh @@ -2,30 +2,10 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = 
"s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cross-peers-http/primary/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-http/primary/config_entries.hcl new file mode 100644 index 0000000000000..ecb777e7f96a8 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-http/primary/config_entries.hcl @@ -0,0 +1,13 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + # This shouldn't affect the imported listener's protocol, which should be http. + protocol = "tcp" + } + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-http/primary/setup.sh b/test/integration/connect/envoy/case-cross-peers-http/primary/setup.sh index 7f205c5e6ffb4..38122406d2c7c 100644 --- a/test/integration/connect/envoy/case-cross-peers-http/primary/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-http/primary/setup.sh @@ -2,16 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - # This should not affect the imported listener protocol, which should be http. 
- protocol = "tcp" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap mesh-gateway 19001 primary true + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/config_entries.hcl new file mode 100644 index 0000000000000..4356f4ba8c2fe --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/config_entries.hcl @@ -0,0 +1,33 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "service-resolver" + name = "s2" + redirect { + service = "s3" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/setup.sh b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/setup.sh index 556d8fa408ca9..a2645baf6f15c 100644 --- a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/alpha/setup.sh @@ -2,39 +2,12 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "service-resolver" -name = "s2" -redirect { - service = "s3" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true gen_envoy_bootstrap s3 19004 alpha + +wait_for_config_entry proxy-defaults global alpha 
+wait_for_config_entry service-resolver s2 alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/config_entries.hcl new file mode 100644 index 0000000000000..b3a8d917152b1 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/setup.sh b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/setup.sh index 8bb0836c71cb9..38122406d2c7c 100644 --- a/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers-resolver-redirect-tcp/primary/setup.sh @@ -2,15 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap mesh-gateway 19001 primary true + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cross-peers/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers/alpha/config_entries.hcl new file mode 100644 index 0000000000000..e1f1178887d9d --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git 
a/test/integration/connect/envoy/case-cross-peers/alpha/setup.sh b/test/integration/connect/envoy/case-cross-peers/alpha/setup.sh index 29e3c01415e13..820506ea9be33 100644 --- a/test/integration/connect/envoy/case-cross-peers/alpha/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers/alpha/setup.sh @@ -2,30 +2,10 @@ set -euo pipefail -upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cross-peers/primary/config_entries.hcl b/test/integration/connect/envoy/case-cross-peers/primary/config_entries.hcl new file mode 100644 index 0000000000000..b3a8d917152b1 --- /dev/null +++ b/test/integration/connect/envoy/case-cross-peers/primary/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + ] +} diff --git a/test/integration/connect/envoy/case-cross-peers/primary/setup.sh b/test/integration/connect/envoy/case-cross-peers/primary/setup.sh index 8bb0836c71cb9..38122406d2c7c 100644 --- a/test/integration/connect/envoy/case-cross-peers/primary/setup.sh +++ b/test/integration/connect/envoy/case-cross-peers/primary/setup.sh @@ -2,15 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - register_services primary gen_envoy_bootstrap s1 19000 primary gen_envoy_bootstrap mesh-gateway 19001 primary true + +wait_for_config_entry proxy-defaults global diff --git 
a/test/integration/connect/envoy/case-ingress-gateway-grpc/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-grpc/config_entries.hcl new file mode 100644 index 0000000000000..1d20009408a61 --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-grpc/config_entries.hcl @@ -0,0 +1,24 @@ +config_entries { + bootstrap { + kind = "service-defaults" + name = "s1" + protocol = "grpc" + } + bootstrap { + kind = "ingress-gateway" + name = "ingress-gateway" + + listeners = [ + { + port = 9999 + protocol = "grpc" + services = [ + { + name = "s1" + hosts = ["localhost:9999"] + } + ] + } + ] + } +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-grpc/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-grpc/setup.sh index 3271072c4cf92..93bfefee4840a 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-grpc/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-grpc/setup.sh @@ -2,28 +2,8 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "service-defaults" -name = "s1" -protocol = "grpc" -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - port = 9999 - protocol = "grpc" - services = [ - { - name = "s1" - hosts = ["localhost:9999"] - } - ] - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway register_services primary diff --git a/test/integration/connect/envoy/case-ingress-gateway-http/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-http/config_entries.hcl new file mode 100644 index 0000000000000..10c9395100d3a --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-http/config_entries.hcl @@ -0,0 +1,81 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + config { + protocol = "http" + } + }, + { + kind = "ingress-gateway" + name = "ingress-gateway" + + listeners = [ + { + port = 9999 + 
protocol = "http" + services = [ + { + name = "router" + request_headers { + add { + x-foo = "bar-req" + x-existing-1 = "appended-req" + } + set { + x-existing-2 = "replaced-req" + x-client-ip = "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + } + remove = ["x-bad-req"] + } + response_headers { + add { + x-foo = "bar-resp" + x-existing-1 = "appended-resp" + } + set { + x-existing-2 = "replaced-resp" + } + remove = ["x-bad-resp"] + } + } + ] + } + ] + }, + { + kind = "service-router" + // This is a "virtual" service name and will not have a backing + // service definition. It must match the name defined in the ingress + // configuration. + name = "router" + routes = [ + { + match { + http { + path_prefix = "/s1/" + } + } + + destination { + service = "s1" + prefix_rewrite = "/" + } + }, + { + match { + http { + path_prefix = "/s2/" + } + } + + destination { + service = "s2" + prefix_rewrite = "/" + } + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-http/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-http/setup.sh index 34fa9592d6e0c..fd46866e434c4 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-http/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-http/setup.sh @@ -2,82 +2,10 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - port = 9999 - protocol = "http" - services = [ - { - name = "router" - request_headers { - add { - x-foo = "bar-req" - x-existing-1 = "appended-req" - } - set { - x-existing-2 = "replaced-req" - x-client-ip = "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - } - remove = ["x-bad-req"] - } - response_headers { - add { - x-foo = "bar-resp" - x-existing-1 = "appended-resp" - } - set { - x-existing-2 = "replaced-resp" - } - remove = ["x-bad-resp"] - } - } - ] - } -] -' - -upsert_config_entry 
primary ' -kind = "service-router" -// This is a "virtual" service name and will not have a backing -// service definition. It must match the name defined in the ingress -// configuration. -name = "router" -routes = [ - { - match { - http { - path_prefix = "/s1/" - } - } - destination { - service = "s1" - prefix_rewrite = "/" - } - }, - { - match { - http { - path_prefix = "/s2/" - } - } - destination { - service = "s2" - prefix_rewrite = "/" - } - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-router router register_services primary diff --git a/test/integration/connect/envoy/case-ingress-gateway-multiple-services/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-multiple-services/config_entries.hcl new file mode 100644 index 0000000000000..c1c7c58320f8e --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-multiple-services/config_entries.hcl @@ -0,0 +1,45 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + config { + protocol = "http" + } + }, + { + kind = "ingress-gateway" + name = "ingress-gateway" + + Defaults { + MaxConnections = 10 + MaxPendingRequests = 20 + MaxConcurrentRequests = 30 + } + listeners = [ + { + port = 9999 + protocol = "http" + services = [ + { + name = "*" + } + ] + }, + { + port = 9998 + protocol = "http" + services = [ + { + name = "s1" + hosts = ["test.example.com"] + MaxConnections = 100 + MaxPendingRequests = 200 + MaxConcurrentRequests = 300 + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-multiple-services/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-multiple-services/setup.sh index b0466de546aaa..dd732cc0ad867 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-multiple-services/setup.sh +++ 
b/test/integration/connect/envoy/case-ingress-gateway-multiple-services/setup.sh @@ -2,47 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -Defaults { - MaxConnections = 10 - MaxPendingRequests = 20 - MaxConcurrentRequests = 30 -} -listeners = [ - { - port = 9999 - protocol = "http" - services = [ - { - name = "*" - } - ] - }, - { - port = 9998 - protocol = "http" - services = [ - { - name = "s1" - hosts = ["test.example.com"] - MaxConnections = 100 - MaxPendingRequests = 200 - MaxConcurrentRequests = 300 - } - ] - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway +wait_for_config_entry proxy-defaults global register_services primary diff --git a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/config_entries.hcl new file mode 100644 index 0000000000000..64d011702016c --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer_name = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/setup.sh index ff7dcb8114eae..820506ea9be33 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/alpha/setup.sh @@ -2,30 +2,10 @@ set -euo pipefail 
-upsert_config_entry alpha ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "tcp" -} -' - -upsert_config_entry alpha ' -kind = "exported-services" -name = "default" -services = [ - { - name = "s2" - consumers = [ - { - peer_name = "alpha-to-primary" - } - ] - } -] -' - register_services alpha gen_envoy_bootstrap s2 19002 alpha gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/config_entries.hcl new file mode 100644 index 0000000000000..8be1a8ccad3e9 --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/config_entries.hcl @@ -0,0 +1,47 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + + bootstrap { + kind = "ingress-gateway" + name = "ingress-gateway" + listeners = [ + { + protocol = "tcp" + port = 10000 + services = [ + { + name = "s2" + } + ] + } + ] + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + failover = { + "*" = { + targets = [{peer = "primary-to-alpha"}] + } + } + } + + bootstrap { + kind = "service-resolver" + name = "virtual-s2" + + redirect = { + service = "s2" + peer = "primary-to-alpha" + } + } +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/setup.sh index 327bd980b7849..5577a4e8dc595 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-peering-failover/primary/setup.sh @@ -2,50 +2,12 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" 
-config { - protocol = "tcp" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - protocol = "tcp" - port = 10000 - services = [ - { - name = "s2" - } - ] - } -] -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -failover = { - "*" = { - targets = [{ peer = "primary-to-alpha" }] - } -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "virtual-s2" -redirect = { - service = "s2" - peer = "primary-to-alpha" -} -' +wait_for_config_entry ingress-gateway ingress-gateway +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 +wait_for_config_entry service-resolver virtual-s2 register_services primary gen_envoy_bootstrap ingress-gateway 20000 primary true -gen_envoy_bootstrap s2 19001 primary +gen_envoy_bootstrap s2 19001 primary \ No newline at end of file diff --git a/test/integration/connect/envoy/case-ingress-gateway-sds/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-sds/config_entries.hcl new file mode 100644 index 0000000000000..16db5bec949ca --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-sds/config_entries.hcl @@ -0,0 +1,60 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + config { + protocol = "http" + } + }, + { + kind = "ingress-gateway" + name = "ingress-gateway" + + listeners = [ + { + port = 9999 + protocol = "http" + services = [ + { + name = "*" + } + ] + tls { + sds { + cluster_name = "sds-cluster" + cert_resource = "wildcard.ingress.consul" + } + } + }, + { + port = 9998 + protocol = "http" + services = [ + { + name = "s1" + hosts = ["foo.example.com"] + tls { + sds { + cluster_name = "sds-cluster" + cert_resource = "foo.example.com" + } + } + }, + { + # Route to s2 on a differet domain with different cert + name = "s2" + hosts = ["www.example.com"] + tls { + sds { + cluster_name = "sds-cluster" + cert_resource = "www.example.com" + 
} + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-sds/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-sds/setup.sh index c651fdc2b1f71..dd732cc0ad867 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-sds/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-sds/setup.sh @@ -2,62 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - port = 9999 - protocol = "http" - services = [ - { - name = "*" - } - ] - tls { - sds { - cluster_name = "sds-cluster" - cert_resource = "wildcard.ingress.consul" - } - } - }, - { - port = 9998 - protocol = "http" - services = [ - { - name = "s1" - hosts = ["foo.example.com"] - tls { - sds { - cluster_name = "sds-cluster" - cert_resource = "foo.example.com" - } - } - }, - { - # Route to s2 on a differet domain with different cert - name = "s2" - hosts = ["www.example.com"] - tls { - sds { - cluster_name = "sds-cluster" - cert_resource = "www.example.com" - } - } - } - ] - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway +wait_for_config_entry proxy-defaults global register_services primary diff --git a/test/integration/connect/envoy/case-ingress-gateway-simple/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-simple/config_entries.hcl new file mode 100644 index 0000000000000..88a76594a80fd --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-simple/config_entries.hcl @@ -0,0 +1,24 @@ +config_entries { + bootstrap { + kind = "ingress-gateway" + name = "ingress-gateway" + Defaults { + MaxConnections = 10 + MaxPendingRequests = 20 + MaxConcurrentRequests = 30 + } + listeners = [ + { + port = 9999 + protocol = "tcp" + services = [ + { + name = "s1" + MaxConnections = 
100 + MaxPendingRequests = 200 + } + ] + } + ] + } +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-simple/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-simple/setup.sh index 2c0378325d2a0..93bfefee4840a 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-simple/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-simple/setup.sh @@ -2,28 +2,8 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -Defaults { - MaxConnections = 10 - MaxPendingRequests = 20 - MaxConcurrentRequests = 30 -} -listeners = [ - { - port = 9999 - protocol = "tcp" - services = [ - { - name = "s1" - MaxConnections = 100 - MaxPendingRequests = 200 - } - ] - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway register_services primary diff --git a/test/integration/connect/envoy/case-ingress-gateway-tls/config_entries.hcl b/test/integration/connect/envoy/case-ingress-gateway-tls/config_entries.hcl new file mode 100644 index 0000000000000..66e3bf1adddb0 --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-gateway-tls/config_entries.hcl @@ -0,0 +1,41 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + config { + protocol = "http" + } + }, + { + kind = "ingress-gateway" + name = "ingress-gateway" + + tls { + enabled = true + } + + listeners = [ + { + port = 9998 + protocol = "http" + services = [ + { + name = "s1" + } + ] + }, + { + port = 9999 + protocol = "http" + services = [ + { + name = "s1" + hosts = ["test.example.com"] + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-ingress-gateway-tls/setup.sh b/test/integration/connect/envoy/case-ingress-gateway-tls/setup.sh index b23971f0bc60b..93bfefee4840a 100644 --- a/test/integration/connect/envoy/case-ingress-gateway-tls/setup.sh +++ b/test/integration/connect/envoy/case-ingress-gateway-tls/setup.sh @@ -2,42 
+2,8 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -tls { - enabled = true -} -listeners = [ - { - port = 9998 - protocol = "http" - services = [ - { - name = "s1" - } - ] - }, - { - port = 9999 - protocol = "http" - services = [ - { - name = "s1" - hosts = ["test.example.com"] - } - ] - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway register_services primary diff --git a/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/config_entries.hcl b/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/config_entries.hcl new file mode 100644 index 0000000000000..20d449c4e657e --- /dev/null +++ b/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/config_entries.hcl @@ -0,0 +1,61 @@ +config_entries { + bootstrap { + kind = "ingress-gateway" + name = "ingress-gateway" + + listeners = [ + { + protocol = "tcp" + port = 9999 + services = [ + { + name = "s2" + } + ] + }, + { + protocol = "tcp" + port = 10000 + services = [ + { + name = "s1" + } + ] + } + ] + } + + bootstrap { + kind = "proxy-defaults" + name = "global" + mesh_gateway { + mode = "local" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + redirect { + service = "s2" + datacenter = "secondary" + } + } + + bootstrap { + kind = "service-defaults" + name = "s1" + mesh_gateway { + mode = "remote" + } + } + + bootstrap { + kind = "service-resolver" + name = "s1" + redirect { + service = "s1" + datacenter = "secondary" + } + } +} diff --git a/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/setup.sh b/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/setup.sh index d9c269b790db6..1d8641064ec95 100644 --- 
a/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/setup.sh +++ b/test/integration/connect/envoy/case-ingress-mesh-gateways-resolver/primary/setup.sh @@ -2,67 +2,13 @@ set -eEuo pipefail -upsert_config_entry primary ' -kind = "ingress-gateway" -name = "ingress-gateway" -listeners = [ - { - protocol = "tcp" - port = 9999 - services = [ - { - name = "s2" - } - ] - }, - { - protocol = "tcp" - port = 10000 - services = [ - { - name = "s1" - } - ] - } -] -' - -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -mesh_gateway { - mode = "local" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -redirect { - service = "s2" - datacenter = "secondary" -} -' - -upsert_config_entry primary ' -kind = "service-defaults" -name = "s1" -mesh_gateway { - mode = "remote" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s1" -redirect { - service = "s1" - datacenter = "secondary" -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry ingress-gateway ingress-gateway +wait_for_config_entry proxy-defaults global register_services primary gen_envoy_bootstrap mesh-gateway 19002 primary true gen_envoy_bootstrap ingress-gateway 20000 primary true retry_default docker_consul primary curl -s "http://localhost:8500/v1/catalog/service/consul?dc=secondary" >/dev/null + diff --git a/test/integration/connect/envoy/case-l7-intentions/acl.hcl b/test/integration/connect/envoy/case-l7-intentions/acl.hcl deleted file mode 100644 index 5d6141ec7b603..0000000000000 --- a/test/integration/connect/envoy/case-l7-intentions/acl.hcl +++ /dev/null @@ -1,3 +0,0 @@ -acl { - default_policy = "deny" -} diff --git a/test/integration/connect/envoy/case-l7-intentions/config_entries.hcl b/test/integration/connect/envoy/case-l7-intentions/config_entries.hcl new file mode 100644 index 0000000000000..e16a540a7b588 --- /dev/null +++ b/test/integration/connect/envoy/case-l7-intentions/config_entries.hcl 
@@ -0,0 +1,97 @@ +enable_central_service_config = true + +acl { + default_policy = "deny" +} + +config_entries { + bootstrap { + kind = "service-defaults" + name = "s2" + protocol = "http" + } + + # TODO: test header invert + bootstrap { + kind = "service-intentions" + name = "s2" + + sources { + name = "s1" + permissions = [ + // paths + { + action = "allow" + http { path_exact = "/exact" } + }, + { + action = "allow" + http { path_prefix = "/prefix" } + }, + { + action = "allow" + http { path_regex = "/reg[ex]{2}" } + }, + // headers + { + action = "allow" + http { + path_exact = "/hdr-present" + header = [{ + name = "x-test-debug" + present = true + }] + } + }, + { + action = "allow" + http { + path_exact = "/hdr-exact" + header = [{ + name = "x-test-debug" + exact = "exact" + }] + } + }, + { + action = "allow" + http { + path_exact = "/hdr-prefix" + header = [{ + name = "x-test-debug" + prefix = "prefi" + }] + } + }, + { + action = "allow" + http { + path_exact = "/hdr-suffix" + header = [{ + name = "x-test-debug" + suffix = "uffix" + }] + } + }, + { + action = "allow" + http { + path_exact = "/hdr-regex" + header = [{ + name = "x-test-debug" + regex = "reg[ex]{2}" + }] + } + }, + // methods + { + action = "allow" + http { + path_exact = "/method-match" + methods = ["GET", "PUT"] + } + } + ] + } + } +} diff --git a/test/integration/connect/envoy/case-l7-intentions/setup.sh b/test/integration/connect/envoy/case-l7-intentions/setup.sh index 5a2b734394c82..33e147b099c36 100644 --- a/test/integration/connect/envoy/case-l7-intentions/setup.sh +++ b/test/integration/connect/envoy/case-l7-intentions/setup.sh @@ -2,93 +2,9 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "service-defaults" -name = "s2" -protocol = "http" -' - -upsert_config_entry primary ' -kind = "service-intentions" -name = "s2" -sources { - name = "s1" - permissions = [ - // paths - { - action = "allow" - http { path_exact = "/exact" } - }, - { - action = "allow" - http { path_prefix = 
"/prefix" } - }, - { - action = "allow" - http { path_regex = "/reg[ex]{2}" } - }, - // headers - { - action = "allow" - http { - path_exact = "/hdr-present" - header = [{ - name = "x-test-debug" - present = true - }] - } - }, - { - action = "allow" - http { - path_exact = "/hdr-exact" - header = [{ - name = "x-test-debug" - exact = "exact" - }] - } - }, - { - action = "allow" - http { - path_exact = "/hdr-prefix" - header = [{ - name = "x-test-debug" - prefix = "prefi" - }] - } - }, - { - action = "allow" - http { - path_exact = "/hdr-suffix" - header = [{ - name = "x-test-debug" - suffix = "uffix" - }] - } - }, - { - action = "allow" - http { - path_exact = "/hdr-regex" - header = [{ - name = "x-test-debug" - regex = "reg[ex]{2}" - }] - } - }, - // methods - { - action = "allow" - http { - path_exact = "/method-match" - methods = ["GET", "PUT"] - } - } - ] -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry service-defaults s2 +wait_for_config_entry service-intentions s2 register_services primary diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl b/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl new file mode 100644 index 0000000000000..2cf6a2e28db04 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries { + bootstrap { + kind = "terminating-gateway" + name = "terminating-gateway" + + services = [ + { + name = "l2" + } + ] + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh b/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh index ee8c69d7c5ddd..c187c8df289cf 100644 --- a/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh +++ b/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh @@ -5,15 +5,8 @@ set -eEuo pipefail # Copy lambda config files into the register dir find ${CASE_DIR} -maxdepth 1 -name '*_l*.json' -type f -exec cp -f {} workdir/${CLUSTER}/register \; 
-upsert_config_entry primary ' -kind = "terminating-gateway" -name = "terminating-gateway" -services = [ - { - name = "l2" - } -] -' +# wait for tgw config entry +wait_for_config_entry terminating-gateway terminating-gateway register_services primary register_lambdas primary diff --git a/test/integration/connect/envoy/case-terminating-gateway-hostnames/config_entries.hcl b/test/integration/connect/envoy/case-terminating-gateway-hostnames/config_entries.hcl new file mode 100644 index 0000000000000..63e2106bf8420 --- /dev/null +++ b/test/integration/connect/envoy/case-terminating-gateway-hostnames/config_entries.hcl @@ -0,0 +1,17 @@ +config_entries { + bootstrap { + kind = "terminating-gateway" + name = "terminating-gateway" + + services = [ + { + name = "s4" + } + ] + } + bootstrap { + kind = "service-defaults" + name = "s4" + protocol = "http" + } +} diff --git a/test/integration/connect/envoy/case-terminating-gateway-hostnames/setup.sh b/test/integration/connect/envoy/case-terminating-gateway-hostnames/setup.sh index 6b1e41575dff6..df98bbbbfb356 100644 --- a/test/integration/connect/envoy/case-terminating-gateway-hostnames/setup.sh +++ b/test/integration/connect/envoy/case-terminating-gateway-hostnames/setup.sh @@ -2,21 +2,8 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "terminating-gateway" -name = "terminating-gateway" -services = [ - { - name = "s4" - } -] -' - -upsert_config_entry primary ' -kind = "service-defaults" -name = "s4" -protocol = "http" -' +# wait for bootstrap to apply config entries +wait_for_config_entry terminating-gateway terminating-gateway register_services primary diff --git a/test/integration/connect/envoy/case-terminating-gateway-simple/config_entries.hcl b/test/integration/connect/envoy/case-terminating-gateway-simple/config_entries.hcl new file mode 100644 index 0000000000000..0f4309d6dea37 --- /dev/null +++ b/test/integration/connect/envoy/case-terminating-gateway-simple/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries 
{ + bootstrap { + kind = "terminating-gateway" + name = "terminating-gateway" + + services = [ + { + name = "s2" + } + ] + } +} diff --git a/test/integration/connect/envoy/case-terminating-gateway-simple/setup.sh b/test/integration/connect/envoy/case-terminating-gateway-simple/setup.sh index 11a2b40305a0a..df98bbbbfb356 100644 --- a/test/integration/connect/envoy/case-terminating-gateway-simple/setup.sh +++ b/test/integration/connect/envoy/case-terminating-gateway-simple/setup.sh @@ -2,15 +2,8 @@ set -euo pipefail -upsert_config_entry primary ' -kind = "terminating-gateway" -name = "terminating-gateway" -services = [ - { - name = "s2" - } -] -' +# wait for bootstrap to apply config entries +wait_for_config_entry terminating-gateway terminating-gateway register_services primary diff --git a/test/integration/connect/envoy/case-terminating-gateway-subsets/config_entries.hcl b/test/integration/connect/envoy/case-terminating-gateway-subsets/config_entries.hcl new file mode 100644 index 0000000000000..0459ba42850fe --- /dev/null +++ b/test/integration/connect/envoy/case-terminating-gateway-subsets/config_entries.hcl @@ -0,0 +1,37 @@ +config_entries { + bootstrap { + kind = "terminating-gateway" + name = "terminating-gateway" + + services = [ + { + name = "s2" + } + ] + } + + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "http" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + default_subset = "v1" + + subsets = { + "v1" = { + filter = "Service.Meta.version == v1" + } + "v2" = { + filter = "Service.Meta.version == v2" + } + } + } +} diff --git a/test/integration/connect/envoy/case-terminating-gateway-subsets/setup.sh b/test/integration/connect/envoy/case-terminating-gateway-subsets/setup.sh index fdd49572ba8d0..850b47c68c9a7 100644 --- a/test/integration/connect/envoy/case-terminating-gateway-subsets/setup.sh +++ b/test/integration/connect/envoy/case-terminating-gateway-subsets/setup.sh @@ -2,37 +2,10 @@ set -euo 
pipefail -upsert_config_entry primary ' -kind = "terminating-gateway" -name = "terminating-gateway" -services = [ - { - name = "s2" - } -] -' - -upsert_config_entry primary ' -kind = "proxy-defaults" -name = "global" -config { - protocol = "http" -} -' - -upsert_config_entry primary ' -kind = "service-resolver" -name = "s2" -default_subset = "v1" -subsets = { - "v1" = { - filter = "Service.Meta.version == v1" - } - "v2" = { - filter = "Service.Meta.version == v2" - } -} -' +# wait for bootstrap to apply config entries +wait_for_config_entry terminating-gateway terminating-gateway +wait_for_config_entry proxy-defaults global +wait_for_config_entry service-resolver s2 register_services primary diff --git a/test/integration/connect/envoy/helpers.bash b/test/integration/connect/envoy/helpers.bash index 9991fb6974090..d0837a3ba9cf7 100755 --- a/test/integration/connect/envoy/helpers.bash +++ b/test/integration/connect/envoy/helpers.bash @@ -754,13 +754,6 @@ function wait_for_config_entry { retry_default read_config_entry "$@" >/dev/null } -function upsert_config_entry { - local DC="$1" - local BODY="$2" - - echo "$BODY" | docker_consul "$DC" config write - -} - function delete_config_entry { local KIND=$1 local NAME=$2 diff --git a/test/integration/connect/envoy/main_test.go b/test/integration/connect/envoy/main_test.go index ed511d3323560..6b60efab5d6be 100644 --- a/test/integration/connect/envoy/main_test.go +++ b/test/integration/connect/envoy/main_test.go @@ -4,6 +4,7 @@ package envoy import ( + "io/ioutil" "os" "os/exec" "sort" @@ -56,7 +57,7 @@ func discoverCases() ([]string, error) { return nil, err } - dirs, err := os.ReadDir(cwd) + dirs, err := ioutil.ReadDir(cwd) if err != nil { return nil, err } diff --git a/test/integration/connect/envoy/test-sds-server/sds.go b/test/integration/connect/envoy/test-sds-server/sds.go index cf878805aadd1..30020daa3da43 100644 --- a/test/integration/connect/envoy/test-sds-server/sds.go +++ 
b/test/integration/connect/envoy/test-sds-server/sds.go @@ -2,6 +2,7 @@ package main import ( "context" + "io/ioutil" "net" "os" "os/signal" @@ -99,12 +100,12 @@ func loadCertsFromPath(cache *cache.LinearCache, log hclog.Logger, dir string) e } certName := strings.TrimSuffix(entry.Name(), ".crt") - cert, err := os.ReadFile(filepath.Join(dir, entry.Name())) + cert, err := ioutil.ReadFile(filepath.Join(dir, entry.Name())) if err != nil { return err } keyFile := certName + ".key" - key, err := os.ReadFile(filepath.Join(dir, keyFile)) + key, err := ioutil.ReadFile(filepath.Join(dir, keyFile)) if err != nil { return err } diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index 167ac1f929f64..8ff66bebef43d 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -447,6 +447,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1187,6 +1188,7 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/test/integration/consul-container/libs/agent/container.go b/test/integration/consul-container/libs/agent/container.go index 3fe4b70bdcf38..198c73f8edceb 100644 --- a/test/integration/consul-container/libs/agent/container.go +++ b/test/integration/consul-container/libs/agent/container.go @@ -10,6 +10,7 @@ import ( "time" dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" @@ -63,7 +64,7 @@ func NewConsulContainer(ctx context.Context, config Config, network string, inde // Inject new Agent name config.Cmd = append(config.Cmd, "-node", name) - tmpDirData, err := os.MkdirTemp("", name) + tmpDirData, err := ioutils.TempDir("", name) if err != nil { return nil, err } @@ -77,7 +78,7 @@ func NewConsulContainer(ctx context.Context, config Config, network string, inde return nil, err } - tmpCertData, err := os.MkdirTemp("", fmt.Sprintf("%s-certs", name)) + tmpCertData, err := ioutils.TempDir("", fmt.Sprintf("%s-certs", name)) if err != nil { return nil, err } @@ -373,7 +374,7 @@ func readLicense() (string, error) { } func createConfigFile(JSON string) (string, error) { - tmpDir, err := os.MkdirTemp("", "consul-container-test-config") + tmpDir, err := ioutils.TempDir("", "consul-container-test-config") if err != nil { return "", err } diff 
--git a/test/integration/consul-container/test/metrics/leader_test.go b/test/integration/consul-container/test/metrics/leader_test.go index 399816b7017fd..964b1bd7b76f5 100644 --- a/test/integration/consul-container/test/metrics/leader_test.go +++ b/test/integration/consul-container/test/metrics/leader_test.go @@ -3,7 +3,7 @@ package metrics import ( "context" "fmt" - "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -87,7 +87,7 @@ func getMetrics(t *testing.T, addr string, port int, path string) (string, error if err != nil { return "", fmt.Errorf("error get metrics: %v", err) } - body, err := io.ReadAll(resp.Body) + body, err := ioutil.ReadAll(resp.Body) if err != nil { return "nil", fmt.Errorf("error read metrics: %v", err) } diff --git a/tlsutil/config.go b/tlsutil/config.go index e67b70d1b9cb4..027db6617a8ce 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -4,6 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" "os" "path/filepath" @@ -515,7 +516,7 @@ func LoadCAs(caFile, caPath string) ([]string, error) { pems := []string{} readFn := func(path string) error { - pem, err := os.ReadFile(path) + pem, err := ioutil.ReadFile(path) if err != nil { return fmt.Errorf("Error loading from %s: %s", path, err) } diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index 7ce7893bfbe68..7c4068a1a3998 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -5,6 +5,7 @@ import ( "crypto/x509" "fmt" "io" + "io/ioutil" "net" "os" "path" @@ -1489,7 +1490,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { dir := testutil.TempDir(t, "ca") caPath := filepath.Join(dir, "ca.pem") - err = os.WriteFile(caPath, []byte(caPEM), 0600) + err = ioutil.WriteFile(caPath, []byte(caPEM), 0600) require.NoError(t, err) // Cert and key are not used, but required to get past validation. 
@@ -1501,10 +1502,10 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { }) require.NoError(t, err) certFile := filepath.Join("cert.pem") - err = os.WriteFile(certFile, []byte(pub), 0600) + err = ioutil.WriteFile(certFile, []byte(pub), 0600) require.NoError(t, err) keyFile := filepath.Join("cert.key") - err = os.WriteFile(keyFile, []byte(pk), 0600) + err = ioutil.WriteFile(keyFile, []byte(pk), 0600) require.NoError(t, err) cfg := Config{ @@ -1549,7 +1550,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { dir := testutil.TempDir(t, "other") caPath := filepath.Join(dir, "ca.pem") - err = os.WriteFile(caPath, []byte(caPEM), 0600) + err = ioutil.WriteFile(caPath, []byte(caPEM), 0600) require.NoError(t, err) signer, err := ParseSigner(caPK) @@ -1737,7 +1738,7 @@ func startTLSServer(tlsConfigServer *tls.Config) (net.Conn, <-chan error, <-chan // server read any data from the client until error or // EOF, which will allow the client to Close(), and // *then* we Close() the server. 
- io.Copy(io.Discard, tlsServer) + io.Copy(ioutil.Discard, tlsServer) tlsServer.Close() }() return clientConn, errc, certc @@ -1746,14 +1747,14 @@ func startTLSServer(tlsConfigServer *tls.Config) (net.Conn, <-chan error, <-chan func loadFile(t *testing.T, path string) string { t.Helper() - data, err := os.ReadFile(path) + data, err := ioutil.ReadFile(path) require.NoError(t, err) return string(data) } func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool { pool := x509.NewCertPool() - data, err := os.ReadFile("../test/ca/root.cer") + data, err := ioutil.ReadFile("../test/ca/root.cer") if err != nil { t.Fatal("could not open test file ../test/ca/root.cer for reading") } @@ -1773,7 +1774,7 @@ func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { for _, entry := range entries { filename := path.Join("../test/ca_path", entry.Name()) - data, err := os.ReadFile(filename) + data, err := ioutil.ReadFile(filename) if err != nil { t.Fatalf("could not open test file %s for reading", filename) } diff --git a/ui/packages/consul-peerings/app/components/consul/peer/bento-box/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/bento-box/index.hbs index a4414b3293916..8066b55cfa215 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/bento-box/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/bento-box/index.hbs @@ -2,7 +2,7 @@
Status
@@ -10,15 +10,14 @@
Latest heartbeat
{{#if @peering.LastHeartbeat}} {{#let (smart-date-format @peering.LastHeartbeat) as |smartDate|}} {{#if smartDate.isNearDate}} {{smartDate.relative}} @@ -33,7 +32,7 @@
Latest receipt
{{#if @peering.LastReceive}} @@ -51,7 +50,7 @@
Latest send
{{#if @peering.LastSend}} diff --git a/ui/packages/consul-peerings/app/templates/dc/peers/show/exported.hbs b/ui/packages/consul-peerings/app/templates/dc/peers/show/exported.hbs index 86a020f994b35..10af30503c305 100644 --- a/ui/packages/consul-peerings/app/templates/dc/peers/show/exported.hbs +++ b/ui/packages/consul-peerings/app/templates/dc/peers/show/exported.hbs @@ -53,12 +53,10 @@ @items={{search.data.items}} as |service index| > -
  • +
  • - <:status as |search|> + + <:status as |search|> - {{#let - (t - (concat 'components.consul.auth-method.search-bar.' search.status.key '.name') - default=(array - (concat 'common.search.' search.status.key) (concat 'common.consul.' search.status.key) - ) +{{#let + + (t (concat "components.consul.auth-method.search-bar." search.status.key ".name") + default=(array + (concat "common.search." search.status.key) + (concat "common.consul." search.status.key) ) - (t - (concat - 'components.consul.auth-method.search-bar.' - search.status.key - '.options.' - search.status.value - ) - default=(array - (concat 'common.search.' search.status.value) - (concat 'common.consul.' search.status.value) - (concat 'common.brand.' search.status.value) - ) + ) + + (t (concat "components.consul.auth-method.search-bar." search.status.key ".options." search.status.value) + default=(array + (concat "common.search." search.status.value) + (concat "common.consul." search.status.value) + (concat "common.brand." search.status.value) ) - as |key value| - }} - + ) + +as |key value|}} +
    {{key}}
    {{value}}
    - {{/let}} +{{/let}} - - <:search as |search|> - - + <:search as |search|> + - - - {{t 'common.search.searchproperty'}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + {{t "common.search.searchproperty"}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} {{#each @filter.searchproperty.default as |prop|}} {{/each}} - {{/let}} + {{/let}} + + + + + <:filter as |search|> + + + + {{t "components.consul.auth-method.search-bar.kind.name"}} + - - - - <:filter as |search|> - - - - {{t 'components.consul.auth-method.search-bar.kind.name'}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - {{#if (env 'CONSUL_SSO_ENABLED')}} - - {{/if}} - {{/let}} - - - - - - {{t 'components.consul.auth-method.search-bar.locality.name'}} - - - + {{#let components.Optgroup components.Option as |Optgroup Option|}} - {{#each (array 'local' 'global') as |option|}} - - {{/each}} + + + {{#if (env 'CONSUL_SSO_ENABLED')}} + + {{/if}} {{/let}} - - - - <:sort as |search|> - - - - {{#let - (from-entries - (array - (array 'MethodName:asc' (t 'common.sort.alpha.asc')) - (array 'MethodName:desc' (t 'common.sort.alpha.desc')) - (array 'TokenTTL:desc' (t 'common.sort.duration.asc')) - (array 'TokenTTL:asc' (t 'common.sort.duration.desc')) - ) - ) - as |selectable| - }} - {{get selectable @sort.value}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - {{/let}} - - - + + + + + + {{t "components.consul.auth-method.search-bar.locality.name"}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each (array "local" "global") as |option|}} + + {{/each}} + {{/let}} + + + + <:sort as |search|> + + + + {{#let (from-entries (array + (array "MethodName:asc" (t "common.sort.alpha.asc")) + (array "MethodName:desc" (t "common.sort.alpha.desc")) + (array "TokenTTL:desc" (t "common.sort.duration.asc")) + (array "TokenTTL:asc" (t 
"common.sort.duration.desc")) + )) + as |selectable| + }} + {{get selectable @sort.value}} + {{/let}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + + {{/let}} + + +
    diff --git a/ui/packages/consul-ui/app/components/consul/auth-method/type/index.hbs b/ui/packages/consul-ui/app/components/consul/auth-method/type/index.hbs index 7d4e4aa87df8b..6aadacc3ca526 100644 --- a/ui/packages/consul-ui/app/components/consul/auth-method/type/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/auth-method/type/index.hbs @@ -1,8 +1,3 @@ -{{#let (icon-mapping @item.Type) as |flightIcon|}} - - {{#if flightIcon}} - - {{/if}} - {{t (concat "common.brand." @item.Type)}} - -{{/let}} + + {{t (concat "common.brand." @item.Type)}} + diff --git a/ui/packages/consul-ui/app/components/consul/external-source/index.hbs b/ui/packages/consul-ui/app/components/consul/external-source/index.hbs index 19d33a8ab6c76..a2ee22a0016a5 100644 --- a/ui/packages/consul-ui/app/components/consul/external-source/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/external-source/index.hbs @@ -5,10 +5,9 @@
    - Registered via {{t (concat "common.brand." externalSource)}}
    @@ -33,10 +32,9 @@ {{else if externalSource}} - {{#if @label}} {{@label}} {{else}} diff --git a/ui/packages/consul-ui/app/components/consul/external-source/index.scss b/ui/packages/consul-ui/app/components/consul/external-source/index.scss index b79530dfebb7d..0b6656edef5ef 100644 --- a/ui/packages/consul-ui/app/components/consul/external-source/index.scss +++ b/ui/packages/consul-ui/app/components/consul/external-source/index.scss @@ -6,6 +6,29 @@ --icon-size: icon-300; } +.consul-external-source.kubernetes::before { + @extend %with-logo-kubernetes-color-icon, %as-pseudo; +} +.consul-external-source.terraform::before { + @extend %with-logo-terraform-color-icon, %as-pseudo; +} +.consul-external-source.nomad::before { + @extend %with-logo-nomad-color-icon, %as-pseudo; +} +.consul-external-source.consul::before, +.consul-external-source.consul-api-gateway::before { + @extend %with-logo-consul-color-icon, %as-pseudo; +} +.consul-external-source.vault::before { + @extend %with-vault-300; +} +.consul-external-source.lambda::before, +.consul-external-source.aws::before { + @extend %with-aws-300; +} +.consul-external-source.leader::before { + @extend %with-star-outline-mask, %as-pseudo; +} .consul-external-source.jwt::before { @extend %with-logo-jwt-color-icon, %as-pseudo; } diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs index 9dbd2a8e50930..e8190f4838251 100644 --- a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs @@ -1,187 +1,171 @@ - - Source -   - Destination - - Permissions - - Permissions intercept an Intention's traffic using Layer 7 criteria, such as path - prefixes and http headers. - - -   - - - -
    - {{#if (eq item.SourceName '*')}} - All Services (*) - {{else}} - {{item.SourceName}} - {{/if}} - {{! TODO: slugify }} - - - - - - - {{capitalize (or item.Action 'App aware')}} - - - - {{#if (eq item.DestinationName '*')}} - All Services (*) - {{else}} - {{item.DestinationName}} - {{/if}} - {{#if (or (can 'use nspaces') (can 'use partitions'))}} - {{! TODO: slugify }} - - {{or - item.DestinationPartition - 'default' - }} - / - {{or - item.DestinationNS - 'default' - }} - +as |item index|> + + Source +   + Destination + + Permissions + + Permissions intercept an Intention's traffic using Layer 7 criteria, such as path prefixes and http headers. + + +   + + + + + {{#if (eq item.SourceName '*') }} + All Services (*) + {{else}} + {{item.SourceName}} + {{/if}} + {{! TODO: slugify }} + + + + + + + {{capitalize (or item.Action "App aware")}} + + + + {{#if (eq item.DestinationName '*') }} + All Services (*) + {{else}} + {{item.DestinationName}} + {{/if}} + {{#if (or (can 'use nspaces') (can 'use partitions'))}} + {{! TODO: slugify }} + + {{or item.DestinationPartition 'default'}} / {{or item.DestinationNS 'default'}} + + {{/if}} + + + + {{#if (gt item.Permissions.length 0)}} + {{pluralize item.Permissions.length 'Permission'}} + {{/if}} + + + {{#if item.IsManagedByCRD}} + {{/if}} - - - - {{#if (gt item.Permissions.length 0)}} - {{pluralize item.Permissions.length 'Permission'}} - {{/if}} - - - {{#if item.IsManagedByCRD}} - - {{/if}} - - - - - - More - - - {{#if (can 'write intention' item=item)}} -
  • - Edit -
  • -
  • - -
    - - <:header> - Confirm Delete - - <:body> -

    - Are you sure you want to delete this intention? -

    - - <:actions as |Actions|> - - - Delete - - - - - Cancel - - - -
    -
    -
  • - {{else if (can 'view CRD intention' item=item)}} -
  • -
    - - <:header> - - + + + + + + More + + + {{#if (can "write intention" item=item)}} +
  • + Edit +
  • +
  • + +
    + + <:header> + Confirm Delete + + <:body> +

    + Are you sure you want to delete this intention? +

    + + <:actions as |Actions|> + + + Delete + + + + + Cancel + + + +
    +
    +
  • + {{else if (can "view CRD intention" item=item)}} +
  • +
    + + <:header> Managed by CRD - - - <:body> -

    - This intention is being managed through an Intention Custom Resource in your - Kubernetes cluster. It is view only in the UI. -

    - - <:actions as |Actions|> - - - View - - - - - Cancel - - - -
    -
    -
  • - {{else}} -
  • - - View - -
  • - {{/if}} - - - + + <:body> +

    + This intention is being managed through an Intention Custom Resource in your Kubernetes cluster. It is view only in the UI. +

    + + <:actions as |Actions|> + + + View + + + + + Cancel + + + + +
    + + {{else}} +
  • + + View + +
  • + {{/if}} + + + diff --git a/ui/packages/consul-ui/app/components/consul/logo/index.hbs b/ui/packages/consul-ui/app/components/consul/logo/index.hbs new file mode 100644 index 0000000000000..f6152df80639f --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/logo/index.hbs @@ -0,0 +1,4 @@ + + Consul + + \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/consul/service-instance/search-bar/index.hbs b/ui/packages/consul-ui/app/components/consul/service-instance/search-bar/index.hbs index 919926b04f3d9..b8fdbcf9af1d9 100644 --- a/ui/packages/consul-ui/app/components/consul/service-instance/search-bar/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service-instance/search-bar/index.hbs @@ -1,160 +1,156 @@ - - <:status as |search|> + + <:status as |search|> - {{#let - (t - (concat 'components.consul.service-instance.search-bar.' search.status.key '.name') - default=(array - (concat 'common.search.' search.status.key) (concat 'common.consul.' search.status.key) - ) +{{#let + + (t (concat "components.consul.service-instance.search-bar." search.status.key ".name") + default=(array + (concat "common.search." search.status.key) + (concat "common.consul." search.status.key) ) - (t - (concat - 'components.consul.service-instance.search-bar.' - search.status.key - '.options.' - search.status.value - ) - default=(array - (concat 'common.search.' search.status.value) - (concat 'common.consul.' search.status.value) - (concat 'common.brand.' search.status.value) - ) + ) + + (t (concat "components.consul.service-instance.search-bar." search.status.key ".options." search.status.value) + default=(array + (concat "common.search." search.status.value) + (concat "common.consul." search.status.value) + (concat "common.brand." search.status.value) ) - as |key value| - }} - + ) + +as |key value|}} +
    {{key}}
    {{value}}
    - {{/let}} +{{/let}} - - <:search as |search|> - - {{#if @filter.searchproperty}} + + <:search as |search|> + +{{#if @filter.searchproperty}} - + as |components|> + - {{t 'common.search.searchproperty'}} + {{t "common.search.searchproperty"}} - - {{#let components.Option as |Option|}} - {{#each @filter.searchproperty.default as |prop|}} - - {{/each}} - {{/let}} + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each @filter.searchproperty.default as |prop|}} + + {{/each}} + {{/let}} - {{/if}} - - - <:filter as |search|> - - - - {{t 'common.consul.status'}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - {{#each (array 'passing' 'warning' 'critical' 'empty') as |state|}} - - {{/each}} - {{/let}} - - - {{#if (gt @sources.length 0)}} + {{/if}} + + + <:filter as |search|> + + + {{t "common.consul.status"}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each (array "passing" "warning" "critical" "empty") as |state|}} + + {{/each}} + {{/let}} + + + {{#if (gt @sources.length 0)}} + - + as |components|> + + + {{t "common.search.source"}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each @sources as |source|}} + + {{/each}} + {{/let}} + + + {{/if}} + + <:sort as |search|> + + + + {{#let (from-entries (array + (array "Name:asc" (t "common.sort.alpha.asc")) + (array "Name:desc" (t "common.sort.alpha.desc")) + (array "Status:asc" (t "common.sort.status.asc")) + (array "Status:desc" (t "common.sort.status.desc")) + )) + as |selectable| + }} + {{get selectable @sort.value}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + - {{/if}} - - <:sort as |search|> - - - - {{#let - (from-entries - (array - (array 'Name:asc' (t 'common.sort.alpha.asc')) - (array 'Name:desc' (t 'common.sort.alpha.desc')) - (array 'Status:asc' (t 'common.sort.status.asc')) - (array 'Status:desc' (t 
'common.sort.status.desc')) - ) - ) - as |selectable| - }} - {{get selectable @sort.value}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - {{/let}} - - - +
    diff --git a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs index 5c76cf501ca29..c88651b5eb8ed 100644 --- a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs @@ -126,11 +126,33 @@ @multiple={{true}} as |components| > - + + + {{t 'common.search.source'}} + + + + {{#let components.Option as |Option|}} + {{#if (gt @sources.length 0)}} + + {{#each this.sortedSources as |source|}} + + {{/each}} + {{/if}} + {{/let}} + {{/if}} @@ -183,4 +205,4 @@ -
    + \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.js b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.js index d4747b1855a2b..700434c900d39 100644 --- a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.js +++ b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.js @@ -11,7 +11,6 @@ export default class ConsulServiceSearchBar extends Component { get sortedSources() { const sources = this.args.sources || []; - sources.unshift(['consul']); if (sources.includes('consul-api-gateway')) { return [...sources.filter((s) => s !== 'consul-api-gateway'), 'consul-api-gateway']; diff --git a/ui/packages/consul-ui/app/components/consul/sources-select/index.hbs b/ui/packages/consul-ui/app/components/consul/sources-select/index.hbs deleted file mode 100644 index 8109ef18e015d..0000000000000 --- a/ui/packages/consul-ui/app/components/consul/sources-select/index.hbs +++ /dev/null @@ -1,25 +0,0 @@ -{{#if (gt @sources.length 0)}} - - - {{t 'common.search.source'}} - - - - {{#let @components.Option as |Option|}} - {{#each @sources as |source|}} - {{#let (icon-mapping source) as |flightIcon|}} - - {{/let}} - {{/each}} - {{/let}} - -{{/if}} diff --git a/ui/packages/consul-ui/app/components/custom-element/README.mdx b/ui/packages/consul-ui/app/components/custom-element/README.mdx new file mode 100644 index 0000000000000..4aa9d64c80225 --- /dev/null +++ b/ui/packages/consul-ui/app/components/custom-element/README.mdx @@ -0,0 +1,87 @@ +# CustomElement + +A renderless component to aid with the creation of HTML custom elements a.k.a +WebComponents. + +All of the CustomElement component arguments are only used at construction +time (within the components constructor) therefore they are, by design, static. +You shouldn't be dynamically updating these values at all. 
They are only for +type checking and documention purposes and therefore once defined/hardcoded +they should only change if you as the author wishes to change them. + +The component is built from various other components, also see their documentaton +for further details (``, ``). + +```hbs preview-template + + + + +``` + +## Arguments + +All `descriptions` in attributes will be compiled out at build time as well as the `@description` attribute itself. + +| Attribute | Type | Default | Description | +| :------------ | :------------- | :------ | :------------------------------------------------------------------------- | +| element | string | | The custom tag to be used for the custom element. Must include a dash | +| description | string | | Short 1 line description for the element. Think "git commit title" style | +| attrs | attrInfo[] | | An array of attributes that can be used on the element | +| slots | slotsInfo[] | | An array of slots that can be used for the element (100% compiled out) | +| cssprops | cssPropsInfo[] | | An array of CSS properties that are relevant to the component | +| cssparts | cssPartsInfo[] | | An array of CSS parts that can be used for the element (100% compiled out) | +| args | argsInfo[] | | An array of Glimmer arguments used for the component (100% compiled out) | + +## Exports + +### custom + +| Attribute | Type | Description | +| :--------- | :------- | :---------------------------------------------------------------------------------- | +| connect | function | A did-insert-able callback for tagging an element to be used for the custom element | +| disconnect | function | A will-destroy-able callback for destroying an element used for the custom element | + +### element + +| Attribute | Type | Description | +| :--------- | :------- | :------------------------------------------------------------------------------- | +| attributes | object | An object containing a reference to all the custom elements' observed properties | +| * | | All 
other properties proxy through to the CustomElements class | + + diff --git a/ui/packages/consul-ui/app/components/custom-element/index.hbs b/ui/packages/consul-ui/app/components/custom-element/index.hbs new file mode 100644 index 0000000000000..a040d5eca9a6b --- /dev/null +++ b/ui/packages/consul-ui/app/components/custom-element/index.hbs @@ -0,0 +1,11 @@ + + {{yield + (hash + root=(fn this.setHost (fn shadow.host)) + connect=(fn this.setHost (fn shadow.host)) + Template=shadow.Template + disconnect=(fn this.disconnect) + ) + this.element + }} + diff --git a/ui/packages/consul-ui/app/components/custom-element/index.js b/ui/packages/consul-ui/app/components/custom-element/index.js new file mode 100644 index 0000000000000..7e1605a68a225 --- /dev/null +++ b/ui/packages/consul-ui/app/components/custom-element/index.js @@ -0,0 +1,189 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { tracked } from '@glimmer/tracking'; +import { assert } from '@ember/debug'; + +const ATTRIBUTE_CHANGE = 'custom-element.attributeChange'; +const elements = new Map(); +const proxies = new WeakMap(); + +const typeCast = (attributeInfo, value) => { + let type = attributeInfo.type; + const d = attributeInfo.default; + value = value == null ? attributeInfo.default : value; + if (type.indexOf('|') !== -1) { + assert( + `"${value} is not of type '${type}'"`, + type + .split('|') + .map((item) => item.replaceAll('"', '').trim()) + .includes(value) + ); + type = 'string'; + } + switch (type) { + case '': + case '': + case '': + case 'number': { + const num = parseFloat(value); + if (isNaN(num)) { + return typeof d === 'undefined' ? 
0 : d; + } else { + return num; + } + } + case '': + return parseInt(value); + case '': + case 'string': + return (value || '').toString(); + } +}; + +const attributeChangingElement = (name, Cls = HTMLElement, attributes = {}, cssprops = {}) => { + const attrs = Object.keys(attributes); + + const customClass = class extends Cls { + static get observedAttributes() { + return attrs; + } + + attributeChangedCallback(name, oldValue, newValue) { + const prev = typeCast(attributes[name], oldValue); + const value = typeCast(attributes[name], newValue); + + const cssProp = cssprops[`--${name}`]; + if (typeof cssProp !== 'undefined' && cssProp.track === `[${name}]`) { + this.style.setProperty(`--${name}`, value); + } + + if (typeof super.attributeChangedCallback === 'function') { + super.attributeChangedCallback(name, prev, value); + } + + this.dispatchEvent( + new CustomEvent(ATTRIBUTE_CHANGE, { + detail: { + name: name, + previousValue: prev, + value: value, + }, + }) + ); + } + }; + customElements.define(name, customClass); + return () => {}; +}; + +const infoFromArray = (arr, keys) => { + return (arr || []).reduce((prev, info) => { + let key; + const obj = {}; + keys.forEach((item, i) => { + if (item === '_') { + key = i; + return; + } + obj[item] = info[i]; + }); + prev[info[key]] = obj; + return prev; + }, {}); +}; +const debounceRAF = (cb, prev) => { + if (typeof prev !== 'undefined') { + cancelAnimationFrame(prev); + } + return requestAnimationFrame(cb); +}; +const createElementProxy = ($element, component) => { + return new Proxy($element, { + get: (target, prop, receiver) => { + switch (prop) { + case 'attrs': + return component.attributes; + default: + if (typeof target[prop] === 'function') { + // need to ensure we use a MultiWeakMap here + // if(this.methods.has(prop)) { + // return this.methods.get(prop); + // } + const method = target[prop].bind(target); + // this.methods.set(prop, method); + return method; + } + } + }, + }); +}; + +export default class 
CustomElementComponent extends Component { + @tracked $element; + @tracked _attributes = {}; + + __attributes; + _attchange; + + constructor(owner, args) { + super(...arguments); + if (!elements.has(args.element)) { + const cb = attributeChangingElement( + args.element, + args.class, + infoFromArray(args.attrs, ['_', 'type', 'default', 'description']), + infoFromArray(args.cssprops, ['_', 'type', 'track', 'description']) + ); + elements.set(args.element, cb); + } + } + + get attributes() { + return this._attributes; + } + + get element() { + if (this.$element) { + if (proxies.has(this.$element)) { + return proxies.get(this.$element); + } + const proxy = createElementProxy(this.$element, this); + proxies.set(this.$element, proxy); + return proxy; + } + return undefined; + } + + @action + setHost(attachShadow, $element) { + attachShadow($element); + this.$element = $element; + this.$element.addEventListener(ATTRIBUTE_CHANGE, this.attributeChange); + + (this.args.attrs || []).forEach((entry) => { + const value = $element.getAttribute(entry[0]); + $element.attributeChangedCallback(entry[0], value, value); + }); + } + + @action + disconnect() { + this.$element.removeEventListener(ATTRIBUTE_CHANGE, this.attributeChange); + } + + @action + attributeChange(e) { + e.stopImmediatePropagation(); + // currently if one single attribute changes + // they all change + this.__attributes = { + ...this.__attributes, + [e.detail.name]: e.detail.value, + }; + this._attchange = debounceRAF(() => { + // tell glimmer we changed the attrs + this._attributes = this.__attributes; + }, this._attchange); + } +} diff --git a/ui/packages/consul-ui/app/components/disclosure-card/README.mdx b/ui/packages/consul-ui/app/components/disclosure-card/README.mdx new file mode 100644 index 0000000000000..098456452961a --- /dev/null +++ b/ui/packages/consul-ui/app/components/disclosure-card/README.mdx @@ -0,0 +1,125 @@ + +# DisclosureCard + + +```hbs preview-template + +
    + +
    + Use the component +
    + + + +
    +

    api-service-1

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {{if disclosure.expanded "View less" "View more"}} + + +
    + +
    +``` + +## Attributes + + + + +## Arguments + + + + +## Slots + + + + +## CSS Parts + + + + +## CSS Properties + + + + +## Contextual Components + + + diff --git a/ui/packages/consul-ui/app/components/disclosure-card/index.hbs b/ui/packages/consul-ui/app/components/disclosure-card/index.hbs new file mode 100644 index 0000000000000..9ce8bb5ed0fc6 --- /dev/null +++ b/ui/packages/consul-ui/app/components/disclosure-card/index.hbs @@ -0,0 +1,90 @@ + + + + + +
    +
    + + +
    +
    + + +
    +
    + + {{yield disclosure}} + +
    +
    +
    diff --git a/ui/packages/consul-ui/app/components/distribution-meter/README.mdx b/ui/packages/consul-ui/app/components/distribution-meter/README.mdx new file mode 100644 index 0000000000000..2fd1c1016a8fd --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/README.mdx @@ -0,0 +1,83 @@ +--- +type: custom-element +--- + +# DistributionMeter + + + +A meter-like component to show a distribution of values. + + +```hbs preview-template +
    +
    + Provide a widget so we can try switching between all types of meter +
    + +
    +
    + + + {{#let + (group-by "MeshStatus" (or source.data (array))) + as |grouped|}} + + {{#each (array 'passing' 'warning' 'critical') as |status|}} + {{#let + (concat (percentage-of (get grouped (concat status '.length')) source.data.length) '%') + as |percentage|}} + + {{/let}} + {{/each}} + + {{/let}} + +
    +``` + +## Attributes + + +| Attribute | Type | Default | Description | +| :-------- | :--------------------------------- | :------ | :------------------------------------ | +| type | "linear" \| "radial" \| "circular" | linear | The type of distribution meter to use | + + + +## Contextual Components + + + +### DistributionMeter::Meter + +#### Attributes + +| Attribute | Type | Default | Description | +| :---------- | :----- | :------ | :----------------------------------------- | +| percentage | number | 0 | The percentage to be used for the meter | +| description | string | | Textual value to describe the meters value | + + +#### CSS Properties + +| Property | Type | Tracks | Description | +| :---------------------- | :--------- | :----------- | :---------------------------------------------------------------- | +| --percentage | percentage | [percentage] | Read-only alias of the percentage attribute | +| --aggregated-percentage | percentage | | Aggregated percentage of all meters within the distribution meter | + + diff --git a/ui/packages/consul-ui/app/components/distribution-meter/index.css.js b/ui/packages/consul-ui/app/components/distribution-meter/index.css.js new file mode 100644 index 0000000000000..336511f6817fa --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/index.css.js @@ -0,0 +1,32 @@ +export default (css) => { + return css` + :host { + display: block; + width: 100%; + height: 100%; + } + dl { + position: relative; + height: 100%; + } + :host([type='linear']) { + height: 3px; + } + :host([type='radial']), + :host([type='circular']) { + height: 300px; + } + :host([type='linear']) dl { + background-color: currentColor; + color: rgb(var(--tone-gray-100)); + border-radius: var(--decor-radius-999); + transition-property: transform; + transition-timing-function: ease-out; + transition-duration: 0.1s; + } + :host([type='linear']) dl:hover { + transform: scaleY(3); + box-shadow: var(--decor-elevation-200); + } + `; +}; diff 
--git a/ui/packages/consul-ui/app/components/distribution-meter/index.hbs b/ui/packages/consul-ui/app/components/distribution-meter/index.hbs new file mode 100644 index 0000000000000..7e5a71ffc00cb --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/index.hbs @@ -0,0 +1,30 @@ + + + +
    + +
    +
    + {{yield (hash + Meter=(component 'distribution-meter/meter' + type=element.attrs.type + ) + )}} +
    +
    diff --git a/ui/packages/consul-ui/app/components/distribution-meter/meter/element.js b/ui/packages/consul-ui/app/components/distribution-meter/meter/element.js new file mode 100644 index 0000000000000..5c058fdf0c154 --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/meter/element.js @@ -0,0 +1,29 @@ +const parseFloatWithDefault = (val, d = 0) => { + const num = parseFloat(val); + return isNaN(num) ? d : num; +}; + +export default (Component) => { + return class extends Component { + attributeChangedCallback(name, prev, value) { + const target = this; + switch (name) { + case 'percentage': { + let prevSibling = target; + while (prevSibling) { + const nextSibling = prevSibling.nextElementSibling; + const aggregatedPercentage = nextSibling + ? parseFloatWithDefault(nextSibling.style.getPropertyValue('--aggregated-percentage')) + : 0; + const perc = + parseFloatWithDefault(prevSibling.getAttribute('percentage')) + aggregatedPercentage; + prevSibling.style.setProperty('--aggregated-percentage', perc); + prevSibling.setAttribute('aggregated-percentage', perc); + prevSibling = prevSibling.previousElementSibling; + } + break; + } + } + } + }; +}; diff --git a/ui/packages/consul-ui/app/components/distribution-meter/meter/index.css.js b/ui/packages/consul-ui/app/components/distribution-meter/meter/index.css.js new file mode 100644 index 0000000000000..7f105a8d9b8fb --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/meter/index.css.js @@ -0,0 +1,80 @@ +export default (css) => { + return css` + /*@import '~/styles/base/decoration/visually-hidden.css';*/ + + :host(.critical) { + color: rgb(var(--tone-red-500)); + } + :host(.warning) { + color: rgb(var(--tone-orange-500)); + } + :host(.passing) { + color: rgb(var(--tone-green-500)); + } + + :host { + position: absolute; + top: 0; + height: 100%; + + transition-timing-function: ease-out; + transition-duration: 0.5s; + } + dt, + dd meter { + animation-name: visually-hidden; + 
animation-fill-mode: forwards; + animation-play-state: paused; + } + + :host(.type-linear) { + transition-property: width; + width: calc(var(--aggregated-percentage) * 1%); + height: 100%; + background-color: currentColor; + border-radius: var(--decor-radius-999); + } + + :host svg { + height: 100%; + } + :host(.type-radial), + :host(.type-circular) { + transition-property: none; + } + :host(.type-radial) dd, + :host(.type-circular) dd { + width: 100%; + height: 100%; + } + :host(.type-radial) circle, + :host(.type-circular) circle { + transition-timing-function: ease-out; + transition-duration: 0.5s; + pointer-events: stroke; + transition-property: stroke-dashoffset, stroke-width; + transform: rotate(-90deg); + transform-origin: 50%; + fill: transparent; + stroke: currentColor; + stroke-dasharray: 100, 100; + stroke-dashoffset: calc(calc(100 - var(--aggregated-percentage)) * 1px); + } + :host([aggregated-percentage='100']) circle { + stroke-dasharray: 0 !important; + } + :host([aggregated-percentage='0']) circle { + stroke-dasharray: 0, 100 !important; + } + :host(.type-radial) circle, + :host(.type-circular) svg { + pointer-events: none; + } + :host(.type-radial) circle { + stroke-width: 32; + } + :host(.type-circular) circle { + stroke-width: 14; + } + `; +}; diff --git a/ui/packages/consul-ui/app/components/distribution-meter/meter/index.hbs new file mode 100644 index 0000000000000..a557e1701caa9 --- /dev/null +++ b/ui/packages/consul-ui/app/components/distribution-meter/meter/index.hbs @@ -0,0 +1,64 @@ + + + +
    {{element.attrs.description}}
    +
    + + {{concat element.attrs.percentage '%'}} + + {{#if (or (eq @type 'circular') (eq @type 'radial'))}} + + {{/if}} +
    +
    +
    + +
    + diff --git a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs index 6942abded6b04..672985310daa0 100644 --- a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs +++ b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs @@ -1,83 +1,88 @@ - + <:notifications as |app|> - {{#each flashMessages.queue as |flash|}} - - {{#if flash.dom}} - {{{flash.dom}}} +{{#each flashMessages.queue as |flash|}} + + {{#if flash.dom}} + {{{flash.dom}}} + {{else}} +{{#let (lowercase flash.type) (lowercase flash.action) as |status type|}} + + + + {{capitalize status}}! + + + +

    + {{#if (eq type 'logout')}} + {{#if (eq status 'success') }} + You are now logged out. + {{else}} + There was an error logging out. + {{/if}} + {{else if (eq type 'authorize')}} + {{#if (eq status 'success') }} + You are now logged in. + {{else}} + There was an error, please check your SecretID/Token + {{/if}} {{else}} - {{#let (lowercase flash.type) (lowercase flash.action) as |status type|}} - - - - {{capitalize status}}! - - - -

    - {{#if (eq type 'logout')}} - {{#if (eq status 'success')}} - You are now logged out. - {{else}} - There was an error logging out. - {{/if}} - {{else if (eq type 'authorize')}} - {{#if (eq status 'success')}} - You are now logged in. - {{else}} - There was an error, please check your SecretID/Token - {{/if}} - {{else}} - {{#if (or (eq type 'use') (eq flash.model 'token'))}} - - {{else if (eq flash.model 'intention')}} - - {{else if (eq flash.model 'role')}} - - {{else if (eq flash.model 'policy')}} - - {{/if}} - {{/if}} -

    -
    -
    - {{/let}} + {{#if (or (eq type 'use') (eq flash.model 'token'))}} + + {{else if (eq flash.model 'intention')}} + + {{else if (eq flash.model 'role')}} + + {{else if (eq flash.model 'policy')}} + + {{/if}} {{/if}} -
    - {{/each}} +

    + + +{{/let}} + {{/if}} +
    +{{/each}} <:home-nav> - - - + <:main-nav> @@ -94,110 +99,147 @@ @partition={{@partition}} @nspace={{@nspace}} @partitions={{this.partitions}} - @onchange={{action (mut this.partitions) value='data'}} + @onchange={{action (mut this.partitions) value="data"}} /> - {{#if (can 'access overview')}} +{{#if (can 'access overview')}}
  • - + Overview
  • - {{/if}} - {{#if (can 'read services')}} +{{/if}} +{{#if (can "read services")}}
  • - Services + Services
  • - {{/if}} - {{#if (can 'read nodes')}} +{{/if}} +{{#if (can "read nodes")}}
  • - Nodes + Nodes
  • - {{/if}} - {{#if (can 'read kv')}} +{{/if}} +{{#if (can "read kv")}}
  • - Key/Value + Key/Value
  • - {{/if}} - {{#if (can 'read intentions')}} -
  • - Intentions +{{/if}} +{{#if (can "read intentions")}} +
  • + Intentions
  • - {{/if}} - - +{{/if}} + + <:complementary-nav>
      - -
    • - - - Help - - - - - Consul v{{env 'CONSUL_VERSION'}} - - - - Documentation - - - - +
    • + + + Help + + + + + Consul v{{env 'CONSUL_VERSION'}} + + - HashiCorp Learn - - - - - - - -
    • -
    • - - Settings - -
    • - - - + + Documentation + + + + + HashiCorp Learn + + + + + + + + +
    • + + Settings + +
    • + + +
    <:main> - {{yield - (hash login=(if this.tokenSelector this.tokenSelector (hash open=undefined close=undefined))) - }} + {{yield (hash + login=(if this.tokenSelector this.tokenSelector (hash open=undefined close=undefined)) + )}} <:content-info> @@ -207,4 +249,4 @@ {{{concat ''}}} -
    +
    \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/informed-action/skin.scss b/ui/packages/consul-ui/app/components/informed-action/skin.scss index 25b7fba8d8e73..72b3624449f0e 100644 --- a/ui/packages/consul-ui/app/components/informed-action/skin.scss +++ b/ui/packages/consul-ui/app/components/informed-action/skin.scss @@ -66,6 +66,12 @@ background-color: rgb(var(--tone-yellow-050)); } } + /* brands */ + &.kubernetes { + header::before { + @extend %with-logo-kubernetes-color-icon, %without-mask, %as-pseudo; + } + } /**/ > ul > .action > * { color: rgb(var(--tone-blue-500)); diff --git a/ui/packages/consul-ui/app/components/pill/index.scss b/ui/packages/consul-ui/app/components/pill/index.scss index 242924aa59dff..c528bd9ff3358 100644 --- a/ui/packages/consul-ui/app/components/pill/index.scss +++ b/ui/packages/consul-ui/app/components/pill/index.scss @@ -30,3 +30,10 @@ span.policy-service-identity::before { %pill.oidc::before { @extend %with-logo-oidc-color-icon, %as-pseudo; } +%pill.kubernetes::before { + @extend %with-logo-kubernetes-color-icon, %as-pseudo; +} +%pill.aws-iam::before { + --icon-name: icon-aws-color; + content: ''; +} diff --git a/ui/packages/consul-ui/app/components/popover-select/index.scss b/ui/packages/consul-ui/app/components/popover-select/index.scss index ac45555434021..dad07b2cdd684 100644 --- a/ui/packages/consul-ui/app/components/popover-select/index.scss +++ b/ui/packages/consul-ui/app/components/popover-select/index.scss @@ -56,10 +56,30 @@ @extend %with-user-team-mask, %as-pseudo; color: rgb(var(--tone-gray-500)); } +%popover-select .lambda button::before, +%popover-select .aws button::before { + @extend %with-aws-300; +} +%popover-select .kubernetes button::before { + @extend %with-logo-kubernetes-color-icon, %as-pseudo; +} %popover-select .jwt button::before { @extend %with-logo-jwt-color-icon, %as-pseudo; } %popover-select .oidc button::before { @extend %with-logo-oidc-color-icon, %as-pseudo; } 
+%popover-select .consul button::before, +%popover-select .consul-api-gateway button::before { + @extend %with-logo-consul-color-icon, %as-pseudo; +} +%popover-select .nomad button::before { + @extend %with-logo-nomad-color-icon, %as-pseudo; +} +%popover-select .vault button::before { + @extend %with-vault-300; +} +%popover-select .terraform button::before { + @extend %with-logo-terraform-color-icon, %as-pseudo; +} /**/ diff --git a/ui/packages/consul-ui/app/components/shadow-host/README.mdx new file mode 100644 index 0000000000000..c83c58eb94394 --- /dev/null +++ b/ui/packages/consul-ui/app/components/shadow-host/README.mdx @@ -0,0 +1,29 @@ +# ShadowHost + +`ShadowHost` is a small renderless utility component for easily attaching +ShadowDOM to any applicable DOM node. It mainly exists to provide a context for +passing around a reference to the element to be used for the shadow template, +but named appropriately for recognition. + +If you are looking to write a custom element, please use the `CustomElement` +component. If you are simply attaching ShadowDOM to a native HTML element then +this is the component for you. + +```hbs preview-template + +
    + +

    hi

    +
    +
    +
    +``` + +## Exports + +| Attribute | Type | Description | +| :-------- | :---------------------- | :------------------------------------------------------------------------------- | +| host | function | A did-insert-able callback for tagging an element to be used for the shadow root | +| Template | ShadowTemplateComponent | ShadowTemplate component pre-configured with the shadow host | diff --git a/ui/packages/consul-ui/app/components/shadow-host/index.hbs b/ui/packages/consul-ui/app/components/shadow-host/index.hbs new file mode 100644 index 0000000000000..3c70ea8f0bd65 --- /dev/null +++ b/ui/packages/consul-ui/app/components/shadow-host/index.hbs @@ -0,0 +1,5 @@ +{{yield (hash + host=(fn this.attachShadow) + root=this.shadowRoot + Template=(component 'shadow-template' shadowRoot=this.shadowRoot) +)}} diff --git a/ui/packages/consul-ui/app/components/shadow-host/index.js b/ui/packages/consul-ui/app/components/shadow-host/index.js new file mode 100644 index 0000000000000..d13e34c199ee6 --- /dev/null +++ b/ui/packages/consul-ui/app/components/shadow-host/index.js @@ -0,0 +1,12 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { tracked } from '@glimmer/tracking'; + +export default class ShadowHostComponent extends Component { + @tracked shadowRoot; + + @action + attachShadow($element) { + this.shadowRoot = $element.attachShadow({ mode: 'open' }); + } +} diff --git a/ui/packages/consul-ui/app/components/shadow-template/README.mdx b/ui/packages/consul-ui/app/components/shadow-template/README.mdx new file mode 100644 index 0000000000000..64d352a7d20ee --- /dev/null +++ b/ui/packages/consul-ui/app/components/shadow-template/README.mdx @@ -0,0 +1,162 @@ +# ShadowTemplate + +A component to aid creating ShadowDOM based components (when required), heavily +inspired by the upcoming Declarative Shadow DOM spec, a new way to implement and +use Shadow DOM directly in HTML. 
+ +Instead of passing `shadowroot="open|closed"` as you would with Declarative +Shadow DOM we have a `@shadowRoot` argument to which you would pass the actual +Shadow DOM element (which itself either open or closed). You can get a reference +to this by using the `{{attach-shadow}}` modifier. + +Additionally a `@styles` argument is made available for you to optionally +pass completely isolated, scoped, constructable stylesheets to be used for the +Shadow DOM tree (you can also continue to use `