From 1e6d99644705bcbd71d28bc7a77b2bbdd141394d Mon Sep 17 00:00:00 2001 From: Satish Matti Date: Tue, 1 Oct 2019 09:09:50 -0700 Subject: [PATCH] Add v2 frontend namer workflow --- cmd/glbc/main.go | 14 +- pkg/context/context.go | 6 +- pkg/controller/controller.go | 92 ++++- pkg/controller/controller_test.go | 354 +++++++++++++++++-- pkg/controller/translator/translator_test.go | 2 +- pkg/firewalls/controller_test.go | 2 +- pkg/flags/flags.go | 2 + pkg/loadbalancers/interfaces.go | 12 +- pkg/loadbalancers/l7s.go | 77 +++- pkg/loadbalancers/l7s_test.go | 334 ++++++++++++++++- pkg/loadbalancers/loadbalancers_test.go | 28 +- pkg/loadbalancers/url_maps_test.go | 2 +- pkg/neg/controller_test.go | 2 +- pkg/neg/manager_test.go | 2 +- pkg/neg/readiness/reflector_test.go | 2 +- pkg/neg/syncers/syncer_test.go | 2 +- pkg/neg/syncers/transaction_test.go | 2 +- pkg/sync/interfaces.go | 9 +- pkg/sync/sync.go | 9 +- pkg/test/utils.go | 30 ++ pkg/utils/common/common.go | 29 ++ pkg/utils/common/finalizer.go | 91 +++-- pkg/utils/namer/frontendnamer.go | 136 ++++++- pkg/utils/namer/frontendnamer_test.go | 154 +++++++- pkg/utils/namer/utils.go | 15 +- pkg/utils/namer/utils_test.go | 33 +- pkg/utils/utils.go | 13 +- 27 files changed, 1330 insertions(+), 124 deletions(-) diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index 9b6e326a06..fa37f1ec5b 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -24,6 +24,8 @@ import ( "time" flag "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/ingress-gce/pkg/frontendconfig" "k8s.io/klog" @@ -130,6 +132,16 @@ func main() { klog.V(0).Infof("Cluster name: %+v", namer.UID()) } + var kubeSystemUID types.UID + if flags.F.EnableV2FrontendNamer { + // Get kube-system UID that will be used for v2 frontend naming scheme. 
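+ // The kube-system namespace UID does not change for the lifetime of a cluster,
+ // which makes it a convenient cluster identifier for the v2 naming scheme. It is
+ // threaded through ControllerContext into the frontend namer factory; roughly (a
+ // sketch of the wiring added later in this patch):
+ //
+ //   namerFactory := namer.NewFrontendNamerFactory(clusterNamer, kubeSystemUID)
+ //   feNamer := namerFactory.Namer(ing) // picks the v1 or v2 namer per ingress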
+ ksNameSpace, err := kubeClient.CoreV1().Namespaces().Get("kube-system", metav1.GetOptions{}) + if err != nil { + klog.Fatalf("kubeClient.CoreV1().Namespaces().Get(%q, _): %v, want nil", "kube-system", err) + } + kubeSystemUID = ksNameSpace.GetUID() + } + cloud := app.NewGCEClient() defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient) ctxConfig := ingctx.ControllerContextConfig{ @@ -141,7 +153,7 @@ func main() { FrontendConfigEnabled: flags.F.EnableFrontendConfig, EnableCSM: flags.F.EnableCSM, } - ctx := ingctx.NewControllerContext(kubeClient, dynamicClient, backendConfigClient, frontendConfigClient, cloud, namer, ctxConfig) + ctx := ingctx.NewControllerContext(kubeClient, dynamicClient, backendConfigClient, frontendConfigClient, cloud, namer, kubeSystemUID, ctxConfig) go app.RunHTTPServer(ctx.HealthCheck) if !flags.F.LeaderElection.LeaderElect { diff --git a/pkg/context/context.go b/pkg/context/context.go index dbb07462d5..a39458e61a 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -19,6 +19,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" informerv1 "k8s.io/client-go/informers/core/v1" @@ -51,7 +52,8 @@ type ControllerContext struct { Cloud *gce.Cloud - ClusterNamer *namer.Namer + ClusterNamer *namer.Namer + KubeSystemUID types.UID ControllerContextConfig @@ -92,12 +94,14 @@ func NewControllerContext( frontendConfigClient frontendconfigclient.Interface, cloud *gce.Cloud, namer *namer.Namer, + kubeSystemUID types.UID, config ControllerContextConfig) *ControllerContext { context := &ControllerContext{ KubeClient: kubeClient, Cloud: cloud, ClusterNamer: namer, + KubeSystemUID: kubeSystemUID, ControllerContextConfig: config, IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, config.Namespace, config.ResyncPeriod, utils.NewNamespaceIndexer()), ServiceInformer: informerv1.NewServiceInformer(kubeClient, config.Namespace, config.ResyncPeriod, utils.NewNamespaceIndexer()), diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index b71a2fe1bc..d868096600 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -115,7 +115,7 @@ func NewLoadBalancerController( hasSynced: ctx.HasSynced, nodes: NewNodeController(ctx, instancePool), instancePool: instancePool, - l7Pool: loadbalancers.NewLoadBalancerPool(ctx.Cloud, ctx.ClusterNamer, ctx, namer.NewFrontendNamerFactory(ctx.ClusterNamer)), + l7Pool: loadbalancers.NewLoadBalancerPool(ctx.Cloud, ctx.ClusterNamer, ctx, namer.NewFrontendNamerFactory(ctx.ClusterNamer, ctx.KubeSystemUID)), backendSyncer: backends.NewBackendSyncer(backendPool, healthChecker, ctx.Cloud), negLinker: backends.NewNEGLinker(backendPool, negtypes.NewAdapter(ctx.Cloud), ctx.Cloud), igLinker: backends.NewInstanceGroupLinker(instancePool, backendPool), @@ -330,9 +330,10 @@ func (lbc *LoadBalancerController) Stop(deleteAll bool) error { // TODO(rramkumar): Do we need deleteAll? Can we get rid of its' flag? if deleteAll { klog.Infof("Shutting down cluster manager.") - if err := lbc.l7Pool.Shutdown(); err != nil { + if err := lbc.l7Pool.Shutdown(lbc.ctx.Ingresses().List()); err != nil { return err } + // The backend pool will also delete instance groups. return lbc.backendSyncer.Shutdown() } @@ -453,29 +454,46 @@ func (lbc *LoadBalancerController) SyncLoadBalancer(state interface{}) error { return nil } -// GCLoadBalancers implements Controller. 
-func (lbc *LoadBalancerController) GCLoadBalancers(toKeep []*v1beta1.Ingress) error { +// GCV1LoadBalancers implements Controller. +func (lbc *LoadBalancerController) GCV1LoadBalancers(toKeep []*v1beta1.Ingress) error { + // Filter GCE ingresses that use v1 naming scheme. // Only GCE ingress associated resources are managed by this controller. - GCEIngresses := operator.Ingresses(toKeep).Filter(utils.IsGCEIngress).AsList() - return lbc.l7Pool.GC(common.ToIngressKeys(GCEIngresses)) + V1GCEIngresses := operator.Ingresses(toKeep).Filter(func(ing *v1beta1.Ingress) bool { + return utils.IsGCEIngress(ing) && !namer.IsV2NamingScheme(ing) + }).AsList() + return lbc.l7Pool.GC(common.ToIngressKeys(V1GCEIngresses)) } -// MaybeRemoveFinalizers cleans up Finalizers if needed. -func (lbc *LoadBalancerController) MaybeRemoveFinalizers(toCleanup []*v1beta1.Ingress) error { +// EnsureDeleteV1Finalizers ensures that v1 finalizers are removed. +func (lbc *LoadBalancerController) EnsureDeleteV1Finalizers(toCleanup []*v1beta1.Ingress) error { if !flags.F.FinalizerRemove { klog.V(4).Infof("Removing finalizers not enabled") return nil } for _, ing := range toCleanup { + if namer.IsV2NamingScheme(ing) { + // Skip if ingress uses v2 naming policy. + continue + } ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) - if err := common.RemoveFinalizer(ing, ingClient); err != nil { - klog.Errorf("Failed to remove Finalizer from Ingress %s/%s: %v", ing.Namespace, ing.Name, err) + if err := common.EnsureDeleteV1Finalizer(ing, ingClient); err != nil { + klog.Errorf("Failed to ensure delete for V1 Finalizer on Ingress %s/%s: %v", ing.Namespace, ing.Name, err) return err } } return nil } +// ensureDeleteV2Finalizer ensures that v2 finalizer is removed for given ingress. +func (lbc *LoadBalancerController) ensureDeleteV2Finalizer(ing *v1beta1.Ingress) error { + if !flags.F.FinalizerRemove { + klog.V(4).Infof("Removing finalizers not enabled") + return nil + } + ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) + return common.EnsureDeleteV2Finalizer(ing, ingClient) +} + // PostProcess implements Controller. func (lbc *LoadBalancerController) PostProcess(state interface{}) error { // We expect state to be a syncState @@ -505,17 +523,41 @@ func (lbc *LoadBalancerController) sync(key string) error { allIngresses := lbc.ctx.Ingresses().List() // Determine if the ingress needs to be GCed. if !ingExists || utils.NeedsCleanup(ing) { - // GC will find GCE resources that were used for this ingress and delete them. + // GC workflow for ingress with v2 naming policy. + // 1. GC load balancer for ingress being synced. + // 2. GC load balancers for v1 naming policy. + // 3. GC backends. + // 4. Ensure delete finalizers for v1 naming scheme. + // 5. Ensure delete finalizers for ingress being synced. + if namer.IsV2NamingScheme(ing) { + if err := lbc.l7Pool.V2GC(ing); err != nil { + return fmt.Errorf("error running load balancer garbage collection routine for v2 naming scheme: %v", err) + } + if err := lbc.ingSyncer.GC(allIngresses); err != nil { + return fmt.Errorf("error running garbage collection: %v", err) + } + return lbc.ensureDeleteV2Finalizer(ing) + } return lbc.ingSyncer.GC(allIngresses) } // Get ingress and DeepCopy for assurance that we don't pollute other goroutines with changes. 
ing = ing.DeepCopy() ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) - if flags.F.FinalizerAdd { - if err := common.AddFinalizer(ing, ingClient); err != nil { - klog.Errorf("Failed to add Finalizer to Ingress %q: %v", key, err) + if flags.F.FinalizerAdd && !common.HasFinalizer(ing.ObjectMeta) { + // Determine if we need to use v1 frontend naming scheme. + if isV1NamingScheme, err := lbc.needsToUseV1NamingScheme(ing); err != nil { return err + } else if isV1NamingScheme { + if err := common.EnsureV1Finalizer(ing, ingClient); err != nil { + klog.Errorf("Failed to ensure V1 Finalizer on Ingress %q: %v", key, err) + return err + } + } else { + if err := common.EnsureV2Finalizer(ing, ingClient); err != nil { + klog.Errorf("Failed to ensure V2 Finalizer on Ingress %q: %v", key, err) + return err + } } } @@ -649,3 +691,25 @@ func (lbc *LoadBalancerController) ToSvcPorts(ings []*v1beta1.Ingress) []utils.S } return knownPorts } + +// needsToUseV1NamingScheme returns true if given ingress needs to use legacy(v1) frontend naming scheme. +func (lbc *LoadBalancerController) needsToUseV1NamingScheme(ing *v1beta1.Ingress) (bool, error) { + // Ingress frontend naming scheme is determined based on the following logic, + // V2 frontend namer is disabled : v1 frontend naming scheme + // V2 frontend namer is enabled + // - VIP does not exists : v2 frontend naming scheme + // - VIP exists + // - GCE URL Map exists : v1 frontend naming scheme + // - GCE URL Map does not exists : v2 frontend naming scheme + if !flags.F.EnableV2FrontendNamer { + return true, nil + } else if utils.HasVIP(ing) { + if urlMapExists, err := lbc.l7Pool.HasUrlMap(ing); err != nil { + return false, err + } else if urlMapExists { + return true, nil + } + return false, nil + } + return false, nil +} diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 881bf85b73..9618c01823 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -19,12 +19,14 @@ package controller import ( "fmt" "reflect" + "sort" "strings" "testing" "time" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/google/go-cmp/cmp" + "google.golang.org/api/compute/v1" api_v1 "k8s.io/api/core/v1" "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +36,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/ingress-gce/pkg/annotations" backendconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned/fake" + "k8s.io/ingress-gce/pkg/common/operator" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/flags" @@ -52,19 +55,6 @@ var ( clusterUID = "aaaaa" ) -// saveFinalizerFlags captures current value of finalizer flags and -// restore them after a test is finished. -type saveFinalizerFlags struct{ add, remove bool } - -func (s *saveFinalizerFlags) save() { - s.add = flags.F.FinalizerAdd - s.remove = flags.F.FinalizerRemove -} -func (s *saveFinalizerFlags) reset() { - flags.F.FinalizerAdd = s.add - flags.F.FinalizerRemove = s.remove -} - // newLoadBalancerController create a loadbalancer controller. 
func newLoadBalancerController() *LoadBalancerController { kubeClient := fake.NewSimpleClientset() @@ -82,11 +72,11 @@ func newLoadBalancerController() *LoadBalancerController { HealthCheckPath: "/", DefaultBackendHealthCheckPath: "/healthz", } - ctx := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, fakeGCE, namer, ctxConfig) + ctx := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, fakeGCE, namer, "", ctxConfig) lbc := NewLoadBalancerController(ctx, stopCh) // TODO(rramkumar): Fix this so we don't have to override with our fake lbc.instancePool = instances.NewNodePool(instances.NewFakeInstanceGroups(sets.NewString(), namer), namer) - lbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer)) + lbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer, "")) lbc.instancePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}}) lbc.hasSynced = func() bool { return true } @@ -138,11 +128,15 @@ func setDeletionTimestamp(lbc *LoadBalancerController, ing *v1beta1.Ingress) { func deleteIngress(lbc *LoadBalancerController, ing *v1beta1.Ingress) { if len(ing.GetFinalizers()) == 0 { - lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, &meta_v1.DeleteOptions{}) - lbc.ctx.IngressInformer.GetIndexer().Delete(ing) + deleteIngressWithFinalizer(lbc, ing) } } +func deleteIngressWithFinalizer(lbc *LoadBalancerController, ing *v1beta1.Ingress) { + lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, &meta_v1.DeleteOptions{}) + lbc.ctx.IngressInformer.GetIndexer().Delete(ing) +} + // getKey returns the key for an ingress. func getKey(ing *v1beta1.Ingress, t *testing.T) string { key, err := common.KeyFunc(ing) @@ -187,9 +181,11 @@ func TestIngressSyncError(t *testing.T) { // Ingresses that need to be deleted, and keep the ones that don't, depending // on whether Finalizer Adds and/or Removes are enabled. func TestIngressCreateDeleteFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) testCases := []struct { enableFinalizerAdd bool enableFinalizerRemove bool @@ -308,9 +304,11 @@ func TestIngressCreateDeleteFinalizer(t *testing.T) { // a good ingress config status is updated and LB is deleted after class change. // Note: This test cannot be run in parallel as it stubs global flags. func TestIngressClassChangeWithFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) flags.F.FinalizerAdd = true flags.F.FinalizerRemove = true lbc := newLoadBalancerController() @@ -363,9 +361,11 @@ func TestIngressClassChangeWithFinalizer(t *testing.T) { // multiple ingresses with shared resources are added or deleted. 
// Note: This test cannot be run in parallel as it stubs global flags. func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) flags.F.FinalizerAdd = true flags.F.FinalizerRemove = true lbc := newLoadBalancerController() @@ -432,9 +432,9 @@ func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { // to existing ingressesToCleanup when lbc is upgraded to enable finalizer. // Note: This test cannot be run in parallel as it stubs global flags. func TestEnableFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) lbc := newLoadBalancerController() svc := test.NewService(types.NamespacedName{Name: "my-service", Namespace: "namespace1"}, api_v1.ServiceSpec{ Type: api_v1.ServiceTypeNodePort, @@ -669,3 +669,299 @@ func TestToRuntimeInfoCerts(t *testing.T) { t.Errorf("lbInfo.TLS = %v, want %v", lbInfo.TLS, tlsCerts) } } + +// TestIngressTagging asserts that appropriate finalizer that defines frontend naming scheme, +// is added to ingress being synced. +func TestIngressTagging(t *testing.T) { + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + defer flagSaver.Reset(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + testCases := []struct { + enableFinalizerAdd bool + enableV2Namer bool + vipExists bool + urlMapExists bool + expectedFinalizer string + }{ + {enableFinalizerAdd: false, expectedFinalizer: ""}, + {enableFinalizerAdd: true, enableV2Namer: false, expectedFinalizer: common.FinalizerKey}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: false, expectedFinalizer: common.FinalizerKeyV2}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: true, urlMapExists: false, expectedFinalizer: common.FinalizerKeyV2}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: true, urlMapExists: true, expectedFinalizer: common.FinalizerKey}, + } + + for idx, tc := range testCases { + var desc string + switch idx { + case 0: + desc = fmt.Sprintf("enableFinalizerAdd %t", tc.enableFinalizerAdd) + case 1: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t ", tc.enableFinalizerAdd, tc.enableV2Namer) + case 2: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t vipExists %t", tc.enableFinalizerAdd, tc.enableV2Namer, tc.vipExists) + default: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t vipExists %t urlMapExists %t", tc.enableFinalizerAdd, tc.enableV2Namer, tc.vipExists, tc.urlMapExists) + } + t.Run(desc, func(t *testing.T) { + flags.F.FinalizerAdd = tc.enableFinalizerAdd + flags.F.EnableV2FrontendNamer = tc.enableV2Namer + + lbc := newLoadBalancerController() + svc := test.NewService(types.NamespacedName{Name: "my-service", Namespace: "default"}, api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeNodePort, + Ports: 
[]api_v1.ServicePort{{Port: 80}}, + }) + addService(lbc, svc) + + defaultBackend := backend("my-service", intstr.FromInt(80)) + ing := test.NewIngress(types.NamespacedName{Name: "my-ingress", Namespace: "default"}, + v1beta1.IngressSpec{ + Backend: &defaultBackend, + }) + if tc.vipExists { + ing.Status.LoadBalancer.Ingress = []api_v1.LoadBalancerIngress{{IP: "0.0.0.0"}} + } + addIngress(lbc, ing) + // Create URL map if enabled. + if tc.urlMapExists { + lbName := lbc.ctx.ClusterNamer.LoadBalancer(common.IngressKeyFunc(ing)) + lbc.ctx.Cloud.CreateURLMap(&compute.UrlMap{Name: lbc.ctx.ClusterNamer.UrlMap(lbName)}) + } + + ingStoreKey := getKey(ing, t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + updatedIng, err := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Get(ing.Name, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("Get(%v) = %v, want nil", ingStoreKey, err) + } + ingFinalizers := updatedIng.GetFinalizers() + // Verify that no finalizer is added when finalizer flag is disabled. + if !flags.F.FinalizerAdd { + if l := len(ingFinalizers); l != 0 { + t.Fatalf("len(updatedIng.GetFinalizers()) = %d, want 0; updatedIng = %+v", l, updatedIng) + } + return + } + // Verify that appropriate finalizer is added + if l := len(ingFinalizers); l != 1 { + t.Fatalf("len(updatedIng.GetFinalizers()) = %d, want 1; updatedIng = %+v", l, updatedIng) + } + if diff := cmp.Diff(tc.expectedFinalizer, ingFinalizers[0]); diff != "" { + t.Fatalf("Got diff for Finalizer (-want +got):\n%s", diff) + } + }) + } +} + +// TestGC asserts that GC workflow runs as expected during a controller sync. +func TestGC(t *testing.T) { + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + flags.F.FinalizerRemove = true + const namespace = "namespace" + for _, tc := range []struct { + desc string + v1IngressNames []string + v2IngressNames []string + expectedIngresses []string + // If enabled, multiple ingresses are deleted on API server before ingress controller + // sync is performed. Also, the sync runs on the first ingress of given list. 
+ deleteMultiple bool + }{ + { + desc: "empty", + v1IngressNames: []string{}, + v2IngressNames: []string{}, + expectedIngresses: []string{}, + deleteMultiple: false, + }, + { + desc: "v1 ingresses only", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4"}, + deleteMultiple: false, + }, + { + desc: "v2 ingresses only", + v1IngressNames: []string{}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v2-ing3", "namespace/v2-ing4"}, + deleteMultiple: false, + }, + { + desc: "both ingresses", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4", "namespace/v2-ing3", "namespace/v2-ing4"}, + deleteMultiple: false, + }, + { + desc: "v1 ingresses only with multiple deletes", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4"}, + deleteMultiple: true, + }, + { + desc: "v2 ingresses only with multiple deletes", + v1IngressNames: []string{}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v2-ing2", "namespace/v2-ing3", "namespace/v2-ing4"}, + deleteMultiple: true, + }, + { + desc: "both ingresses with multiple deletes", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4", "namespace/v2-ing2", "namespace/v2-ing3", "namespace/v2-ing4"}, + deleteMultiple: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // expected ingresses after creation. + expectedIngressKeys := make([]string, 0) + for _, ingName := range tc.v1IngressNames { + expectedIngressKeys = append(expectedIngressKeys, fmt.Sprintf("%s/%s", namespace, ingName)) + } + for _, ingName := range tc.v2IngressNames { + expectedIngressKeys = append(expectedIngressKeys, fmt.Sprintf("%s/%s", namespace, ingName)) + } + lbc := newLoadBalancerController() + // Create ingresses and run sync on them. + var v1Ingresses, v2Ingresses []*v1beta1.Ingress + for _, ing := range tc.v1IngressNames { + v1Ingresses = append(v1Ingresses, ensureIngress(t, lbc, namespace, ing, namer_util.V1NamingScheme)) + } + for _, ing := range tc.v2IngressNames { + v2Ingresses = append(v2Ingresses, ensureIngress(t, lbc, namespace, ing, namer_util.V2NamingScheme)) + } + + allIngressKeys := lbc.ctx.Ingresses().ListKeys() + sort.Strings(allIngressKeys) + if diff := cmp.Diff(expectedIngressKeys, allIngressKeys); diff != "" { + t.Fatalf("Got diff for ingresses (-want +got):\n%s", diff) + } + + // delete multiple ingresses during a single sync. + if tc.deleteMultiple { + timestamp := meta_v1.NewTime(time.Now()) + // Add deletion timestamp to mock ingress deletions. + // Delete half of the v1 ingresses. + for i := 0; i < len(v1Ingresses)/2; i++ { + v1Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v1Ingresses[i]) + } + // Delete half of the v2 ingresses. + for i := 0; i < len(v2Ingresses)/2; i++ { + v2Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v2Ingresses[i]) + } + + // Run sync on v2 ingress when available. 
+ if len(v2Ingresses) > 0 { + // Sync on the first ingress with v2 naming policy. + // Note that only this ingress is cleaned up. + ingStoreKey := getKey(v2Ingresses[0], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } else if len(v1Ingresses) > 0 { + ingStoreKey := getKey(v1Ingresses[len(v1Ingresses)-1], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } + } else { + // Delete half of the v1 ingresses. + for i := 0; i < len(v1Ingresses)/2; i++ { + // Add deletion stamp to indicate that ingress is deleted. + timestamp := meta_v1.NewTime(time.Now()) + v1Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v1Ingresses[i]) + ingStoreKey := getKey(v1Ingresses[i], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } + + // Delete half of the v2 ingresses. + for i := 0; i < len(v2Ingresses)/2; i++ { + // Add deletion stamp to indicate that ingress is deleted. + timestamp := meta_v1.NewTime(time.Now()) + v2Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v2Ingresses[i]) + ingStoreKey := getKey(v2Ingresses[i], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } + } + + // Update ingress store with any potential changes made by controller. + // This step is needed only because we use a mock controller setup. + for _, ing := range v1Ingresses { + lbc.ctx.IngressInformer.GetIndexer().Update(getUpdatedIngress(t, lbc, ing)) + } + for _, ing := range v2Ingresses { + lbc.ctx.IngressInformer.GetIndexer().Update(getUpdatedIngress(t, lbc, ing)) + } + + // Assert that controller returns same ingresses as expected ingresses. + // Controller sync removes finalizers from all deletion candidates. + // Filter ingresses with finalizer for un-deleted ingresses. + allIngresses := operator.Ingresses(lbc.ctx.Ingresses().List()).Filter(func(ing *v1beta1.Ingress) bool { + return common.HasFinalizer(ing.ObjectMeta) + }).AsList() + allIngressKeys = common.ToIngressKeys(allIngresses) + sort.Strings(allIngressKeys) + if diff := cmp.Diff(tc.expectedIngresses, allIngressKeys); diff != "" { + t.Fatalf("Got diff for Ingresses after delete (-want +got):\n%s", diff) + } + }) + } +} + +// ensureIngress creates an ingress and syncs it with ingress controller. +// This returns updated ingress after sync. 
+func ensureIngress(t *testing.T, lbc *LoadBalancerController, namespace, name string, scheme namer_util.Scheme) *v1beta1.Ingress { + serviceName := fmt.Sprintf("service-for-%s", name) + svc := test.NewService(types.NamespacedName{Name: serviceName, Namespace: namespace}, api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeNodePort, + Ports: []api_v1.ServicePort{{Port: 80}}, + }) + addService(lbc, svc) + + defaultBackend := backend(serviceName, intstr.FromInt(80)) + ing := test.NewIngress(types.NamespacedName{Name: name, Namespace: namespace}, + v1beta1.IngressSpec{ + Backend: &defaultBackend, + }) + if scheme == namer_util.V1NamingScheme { + ing.ObjectMeta.Finalizers = []string{common.FinalizerKey} + } else if scheme == namer_util.V2NamingScheme { + ing.ObjectMeta.Finalizers = []string{common.FinalizerKeyV2} + } + addIngress(lbc, ing) + + ingStoreKey := getKey(ing, t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + return getUpdatedIngress(t, lbc, ing) +} + +func getUpdatedIngress(t *testing.T, lbc *LoadBalancerController, ing *v1beta1.Ingress) *v1beta1.Ingress { + updatedIng, err := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Get(ing.Name, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("lbc.ctx.KubeClient...Get(%v) = %v, want nil", getKey(ing, t), err) + } + return updatedIng +} diff --git a/pkg/controller/translator/translator_test.go b/pkg/controller/translator/translator_test.go index 5cc3cb2688..6adc54baaa 100644 --- a/pkg/controller/translator/translator_test.go +++ b/pkg/controller/translator/translator_test.go @@ -65,7 +65,7 @@ func fakeTranslator() *Translator { HealthCheckPath: "/", DefaultBackendHealthCheckPath: "/healthz", } - ctx := context.NewControllerContext(client, nil, backendConfigClient, nil, nil, defaultNamer, ctxConfig) + ctx := context.NewControllerContext(client, nil, backendConfigClient, nil, nil, defaultNamer, "", ctxConfig) gce := &Translator{ ctx: ctx, } diff --git a/pkg/firewalls/controller_test.go b/pkg/firewalls/controller_test.go index 88b4656350..61b70c5232 100644 --- a/pkg/firewalls/controller_test.go +++ b/pkg/firewalls/controller_test.go @@ -46,7 +46,7 @@ func newFirewallController() *FirewallController { DefaultBackendSvcPort: test.DefaultBeSvcPort, } - ctx := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, fakeGCE, defaultNamer, ctxConfig) + ctx := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, fakeGCE, defaultNamer, "", ctxConfig) fwc := NewFirewallController(ctx, []string{"30000-32767"}) fwc.hasSynced = func() bool { return true } diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 23e9d76852..2315f75254 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -87,6 +87,7 @@ var ( EnableCSM bool CSMServiceNEGSkipNamespaces []string EnableNonGCPMode bool + EnableV2FrontendNamer bool LeaderElection LeaderElectionConfiguration }{} @@ -204,6 +205,7 @@ L7 load balancing. CSV values accepted. 
Example: -node-port-ranges=80,8080,400-5 flag.BoolVar(&F.EnableCSM, "enable-csm", false, "Enable CSM(Istio) support") flag.StringSliceVar(&F.CSMServiceNEGSkipNamespaces, "csm-service-skip-namespaces", []string{}, "Only for CSM mode, skip the NEG creation for Services in the given namespaces.") flag.BoolVar(&F.EnableNonGCPMode, "enable-non-gcp-mode", false, "Set to true when running on a non-GCP cluster.") + flag.BoolVar(&F.EnableV2FrontendNamer, "enable-v2-frontend-namer", false, "Enable v2 ingress frontend naming policy.") } type RateLimitSpecs struct { diff --git a/pkg/loadbalancers/interfaces.go b/pkg/loadbalancers/interfaces.go index 6a3b0c753b..6ddda5c498 100644 --- a/pkg/loadbalancers/interfaces.go +++ b/pkg/loadbalancers/interfaces.go @@ -18,6 +18,7 @@ package loadbalancers import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/api/networking/v1beta1" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/loadbalancers/features" ) @@ -25,9 +26,18 @@ import ( // LoadBalancerPool is an interface to manage the cloud resources associated // with a gce loadbalancer. type LoadBalancerPool interface { + // Ensure ensures a loadbalancer and its resources given the RuntimeInfo. Ensure(ri *L7RuntimeInfo) (*L7, error) + // Delete deletes a loadbalancer by name. Delete(name string, versions *features.ResourceVersions, scope meta.KeyType) error + // V2GC garbage collects loadbalancer associated with given ingress using v2 naming scheme. + V2GC(ing *v1beta1.Ingress) error + // GC garbage collects loadbalancers not in the input list using v1 naming scheme. GC(names []string) error - Shutdown() error + // Shutdown deletes all loadbalancers for given list of ingresses. + Shutdown(ings []*v1beta1.Ingress) error + // List returns a list of urlMaps (the top level LB resource) that belong to the cluster. List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, error) + // HasUrlMap returns true if an URL map exists in GCE for given ingress. + HasUrlMap(ing *v1beta1.Ingress) (bool, error) } diff --git a/pkg/loadbalancers/l7s.go b/pkg/loadbalancers/l7s.go index a8a8ee05be..2aea4b5929 100644 --- a/pkg/loadbalancers/l7s.go +++ b/pkg/loadbalancers/l7s.go @@ -18,13 +18,17 @@ package loadbalancers import ( "fmt" + "net/http" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/loadbalancers/features" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/common" namer_util "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" "k8s.io/legacy-cloud-providers/gce" @@ -52,7 +56,7 @@ func NewLoadBalancerPool(cloud *gce.Cloud, v1NamerHelper namer_util.V1FrontendNa } } -// Ensure ensures a loadbalancer and its resources given the RuntimeInfo +// Ensure implements LoadBalancerPool. func (l *L7s) Ensure(ri *L7RuntimeInfo) (*L7, error) { lb := &L7{ runtimeInfo: ri, @@ -69,7 +73,7 @@ func (l *L7s) Ensure(ri *L7RuntimeInfo) (*L7, error) { return lb, nil } -// Delete deletes a load balancer by name. +// Delete implements LoadBalancerPool. 
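+// Delete remains the v1 deletion path: the caller passes a precomputed load
+// balancer name. Ingresses tagged with the v2 finalizer are torn down via
+// v2Delete/V2GC below, which derive frontend names from the ingress itself.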
func (l *L7s) Delete(name string, versions *features.ResourceVersions, scope meta.KeyType) error { lb := &L7{ runtimeInfo: &L7RuntimeInfo{}, @@ -86,7 +90,24 @@ func (l *L7s) Delete(name string, versions *features.ResourceVersions, scope met return nil } -// List returns a list of urlMaps (the top level LB resource) that belong to the cluster +// v2Delete deletes a loadbalancer frontend resources by ingress using v2 naming scheme. +func (l *L7s) v2Delete(ing *v1beta1.Ingress, versions *features.ResourceVersions, scope meta.KeyType) error { + lb := &L7{ + runtimeInfo: &L7RuntimeInfo{Ingress: ing}, + cloud: l.cloud, + namer: l.namerFactory.Namer(ing), + scope: scope, + } + + klog.V(3).Infof("Deleting lb (using v2 naming scheme) %v", lb) + + if err := lb.Cleanup(versions); err != nil { + return err + } + return nil +} + +// List implements LoadBalancerPool. func (l *L7s) List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, error) { var result []*composite.UrlMap urlMaps, err := composite.ListUrlMaps(l.cloud, key, version) @@ -103,7 +124,18 @@ func (l *L7s) List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, er return result, nil } -// GC garbage collects loadbalancers not in the input list. +// V2GC implements LoadBalancerPool. +func (l *L7s) V2GC(ing *v1beta1.Ingress) error { + ingKey := common.NamespacedName(ing) + klog.V(2).Infof("V2GC(%v)", ingKey) + if err := l.v2Delete(ing, features.VersionsFromIngress(ing), features.ScopeFromIngress(ing)); err != nil { + return err + } + klog.V(2).Infof("V2GC(%v) ok", ingKey) + return nil +} + +// GC implements LoadBalancerPool. // TODO(shance): Update to handle regional and global LB with same name func (l *L7s) GC(names []string) error { klog.V(2).Infof("GC(%v)", names) @@ -142,7 +174,7 @@ func (l *L7s) GC(names []string) error { return nil } -// gc is a helper for GC +// gc is a helper for GC. // TODO(shance): get versions from description func (l *L7s) gc(urlMaps []*composite.UrlMap, knownLoadBalancers sets.String, versions *features.ResourceVersions) []error { var errors []error @@ -171,11 +203,40 @@ func (l *L7s) gc(urlMaps []*composite.UrlMap, knownLoadBalancers sets.String, ve return nil } -// Shutdown logs whether or not the pool is empty. -func (l *L7s) Shutdown() error { +// Shutdown implements LoadBalancerPool. +func (l *L7s) Shutdown(ings []*v1beta1.Ingress) error { + // Delete ingresses that use v1 naming scheme. if err := l.GC([]string{}); err != nil { - return err + return fmt.Errorf("error deleting load-balancers for v1 naming policy: %v", err) + } + // Delete ingresses that use v2 naming policy. + var errs []error + for _, ing := range ings { + if namer_util.IsV2NamingScheme(ing) { + if err := l.V2GC(ing); err != nil { + errs = append(errs, err) + } + } + } + if errs != nil { + return fmt.Errorf("error deleting load-balancers for v2 naming policy: %v", utils.JoinErrs(errs)) } klog.V(2).Infof("Loadbalancer pool shutdown.") return nil } + +// HasUrlMap implements LoadBalancerPool. 
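+// The controller uses HasUrlMap while tagging ingresses: an ingress that already
+// has a VIP and an existing URL map keeps the v1 naming scheme (see
+// needsToUseV1NamingScheme in controller.go). A minimal sketch of that caller:
+//
+//	if exists, err := lbc.l7Pool.HasUrlMap(ing); err == nil && exists {
+//		// Frontend resources already exist; stay on the v1 naming scheme.
+//	}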
+func (l *L7s) HasUrlMap(ing *v1beta1.Ingress) (bool, error) { + namer := l.namerFactory.Namer(ing) + key, err := composite.CreateKey(l.cloud, namer.UrlMap(), features.ScopeFromIngress(ing)) + if err != nil { + return false, err + } + if _, err := composite.GetUrlMap(l.cloud, key, features.VersionsFromIngress(ing).UrlMap); err != nil { + if utils.IsHTTPErrorCode(err, http.StatusNotFound) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/pkg/loadbalancers/l7s_test.go b/pkg/loadbalancers/l7s_test.go index c7f04fb111..5e2938d077 100644 --- a/pkg/loadbalancers/l7s_test.go +++ b/pkg/loadbalancers/l7s_test.go @@ -25,16 +25,20 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/loadbalancers/features" + "k8s.io/ingress-gce/pkg/utils/common" namer_util "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/legacy-cloud-providers/gce" ) const ( testClusterName = "0123456789abcedf" + kubeSystemUID = "ksuid123" // resourceLeakLimit is the limit when ingress namespace and name are too long and ingress // GC will leak the LB resources because the cluster uid got truncated. @@ -164,7 +168,7 @@ func TestGC(t *testing.T) { } otherNamer := namer_util.NewNamer("clusteruid", "fw1") - otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer) + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "") otherKeys := []string{ "a/a", "namespace/name", @@ -301,11 +305,327 @@ func TestGCToLeakLB(t *testing.T) { } } +// TestV2GC asserts that GC workflow for v2 naming scheme deletes load balancer +// associated with given v2 ingress. This also checks that other v2 ingress +// associated LBs or v1 ingress associated LBs are not deleted. 
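+// The cases below vary namespace and name lengths via slices of longName to
+// exercise the 63-character GCE resource name limit and truncation around it.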
+func TestV2GC(t *testing.T) { + t.Parallel() + pool := newTestLoadBalancerPool() + l7sPool := pool.(*L7s) + cloud := l7sPool.cloud + feNamerFactory := l7sPool.namerFactory + testCases := []struct { + desc string + ingressToDelete *v1beta1.Ingress + addV1Ingresses bool + gcpLBs []*v1beta1.Ingress + expectedLBs []*v1beta1.Ingress + }{ + { + desc: "empty", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + addV1Ingresses: false, + gcpLBs: []*v1beta1.Ingress{}, + expectedLBs: []*v1beta1.Ingress{}, + }, + { + desc: "simple case v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + }, + }, + { + desc: "simple case both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:11], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:11], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "63 characters v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:19], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:19], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "63 characters both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), 
+ }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longNameSpace v2 only", + ingressToDelete: newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longNameSpace both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longName v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longName both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + 
newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longNameSpace and longName v2 only", + ingressToDelete: newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longNameSpace and longName both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + } + + // Add LBs owned by another cluster. + otherNamer := namer_util.NewNamer("clusteruid", "fw1") + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "ksuid234") + otherIngresses := []*v1beta1.Ingress{ + // ingresses with v1 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + // ingresses with v2 naming scheme. 
+ newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:16], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:17], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + } + versions := features.GAResourceVersions + + for _, ing := range otherIngresses { + createFakeLoadbalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope) + } + + for _, tc := range testCases { + desc := fmt.Sprintf("%s namespaceLength %d nameLength %d", tc.desc, len(tc.ingressToDelete.Namespace), len(tc.ingressToDelete.Name)) + t.Run(desc, func(t *testing.T) { + for _, ing := range tc.gcpLBs { + createFakeLoadbalancer(cloud, feNamerFactory.Namer(ing), versions, defaultScope) + } + + err := l7sPool.V2GC(tc.ingressToDelete) + if err != nil { + t.Errorf("l7sPool.V2GC(%q) = %v, want nil for case %q", common.NamespacedName(tc.ingressToDelete), err, tc.desc) + } + + // Check if LBs associated with other ingresses are not deleted. + for _, ing := range otherIngresses { + if err := checkFakeLoadBalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope, true); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(ing)) + } + } + + // Check if the total number of url maps is as expected. + urlMaps, _ := l7sPool.cloud.ListURLMaps() + if ingCount := len(tc.expectedLBs) + len(otherIngresses); ingCount != len(urlMaps) { + t.Errorf("len(l7sPool.cloud.ListURLMaps()) = %d, want %d for case %q", len(urlMaps), ingCount, tc.desc) + } + + // Check if the load balancer associated with ingress to be deleted is actually GCed. + if err := checkFakeLoadBalancer(cloud, feNamerFactory.Namer(tc.ingressToDelete), versions, defaultScope, false); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(tc.ingressToDelete)) + } + + // Check if all expected LBs exist. + for _, ing := range tc.expectedLBs { + feNamer := feNamerFactory.Namer(ing) + if err := checkFakeLoadBalancer(cloud, feNamer, versions, defaultScope, true); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(ing)) + } + removeFakeLoadBalancer(cloud, feNamer, versions, defaultScope) + } + }) + } +} + +// TestDoNotLeakV2LB asserts that GC workflow for v2 naming scheme does not leak +// GCE resources for different namespaced names. +func TestDoNotLeakV2LB(t *testing.T) { + t.Parallel() + pool := newTestLoadBalancerPool() + l7sPool := pool.(*L7s) + cloud := l7sPool.cloud + feNamerFactory := l7sPool.namerFactory + + type testCase struct { + desc string + ing *v1beta1.Ingress + } + var testCases []testCase + + for i := 3; i <= len(longName)*2; i++ { + ing := newIngressWithFinalizer(longName[:i/2], longName[:(i-i/2)], common.FinalizerKeyV2) + testCases = append(testCases, testCase{fmt.Sprintf("IngressKeyLength: %d", i), ing}) + } + + // Add LBs owned by another cluster. + otherNamer := namer_util.NewNamer("clusteruid", "fw1") + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "ksuid234") + otherIngresses := []*v1beta1.Ingress{ + // ingresses with v1 naming scheme. 
+ newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + // ingresses with v2 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:16], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:17], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + } + versions := features.GAResourceVersions + + for _, ing := range otherIngresses { + createFakeLoadbalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope) + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + feNamer := feNamerFactory.Namer(tc.ing) + createFakeLoadbalancer(l7sPool.cloud, feNamer, versions, defaultScope) + err := l7sPool.V2GC(tc.ing) + if err != nil { + t.Errorf("l7sPool.V2GC(%q) = %v, want nil for case %q", common.NamespacedName(tc.ing), err, tc.desc) + } + + if err := checkFakeLoadBalancer(l7sPool.cloud, feNamer, versions, defaultScope, false); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q", err, tc.desc) + } + urlMaps, _ := l7sPool.cloud.ListURLMaps() + if ingCount := len(otherIngresses); ingCount != len(urlMaps) { + t.Errorf("len(l7sPool.cloud.ListURLMaps()) = %d, want %d for case %q", len(urlMaps), ingCount, tc.desc) + } + removeFakeLoadBalancer(l7sPool.cloud, feNamer, versions, defaultScope) + }) + } +} + func newTestLoadBalancerPool() LoadBalancerPool { namer := namer_util.NewNamer(testClusterName, "fw1") fakeGCECloud := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) ctx := &context.ControllerContext{} - return NewLoadBalancerPool(fakeGCECloud, namer, ctx, namer_util.NewFrontendNamerFactory(namer)) + return NewLoadBalancerPool(fakeGCECloud, namer, ctx, namer_util.NewFrontendNamerFactory(namer, kubeSystemUID)) } func createFakeLoadbalancer(cloud *gce.Cloud, namer namer_util.IngressFrontendNamer, versions *features.ResourceVersions, scope meta.KeyType) { @@ -384,3 +704,13 @@ func generateKeyWithLength(length int) string { length = length - 1 return fmt.Sprintf("%s/%s", longName[:length/2], longName[:length-length/2]) } + +func newIngressWithFinalizer(namespace, name, finalizer string) *v1beta1.Ingress { + return &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Finalizers: []string{finalizer}, + }, + } +} diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index 9c530303c8..afa2a88d24 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -98,7 +98,7 @@ func newTestJig(t *testing.T) *testJig { mockGCE.MockGlobalForwardingRules.InsertHook = InsertGlobalForwardingRuleHook ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(namer, "").Namer(ing) return &testJig{ pool: newFakeLoadBalancerPool(fakeGCE, t, namer), @@ -130,7 +130,7 @@ func newFakeLoadBalancerPool(cloud *gce.Cloud, t *testing.T, namer *namer_util.N nodePool := instances.NewNodePool(fakeIGs, namer) nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) - return NewLoadBalancerPool(cloud, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer)) + return NewLoadBalancerPool(cloud, namer, 
events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer, "")) } func newILBIngress() *v1beta1.Ingress { @@ -324,7 +324,7 @@ func TestCertUpdate(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -362,7 +362,7 @@ func TestMultipleSecretsWithSameCert(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) lbInfo := &L7RuntimeInfo{ AllowHTTP: false, @@ -391,7 +391,7 @@ func TestCertCreationWithCollision(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -452,7 +452,7 @@ func TestMultipleCertRetentionAfterRestart(t *testing.T) { cert2 := createCert("key2", "cert2", "name2") cert3 := createCert("key3", "cert3", "name3") ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(cert1.CertHash) certName2 := feNamer.SSLCertName(cert2.CertHash) certName3 := feNamer.SSLCertName(cert3.CertHash) @@ -500,7 +500,7 @@ func TestUpgradeToNewCertNames(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) lbInfo := &L7RuntimeInfo{ AllowHTTP: false, UrlMap: gceUrlMap, @@ -563,7 +563,7 @@ func TestMaxCertsUpload(t *testing.T) { expectCerts := make(map[string]string) expectCertsExtra := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) for ix := 0; ix < FakeCertQuota; ix++ { str := strconv.Itoa(ix) @@ -626,7 +626,7 @@ func TestIdenticalHostnameCerts(t *testing.T) { var tlsCerts []*TLSCerts expectCerts := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) contents := "" for ix := 0; ix < 3; ix++ { @@ -719,7 +719,7 @@ func TestPreSharedToSecretBasedCertUpdate(t 
*testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -1216,7 +1216,7 @@ func TestSecretBasedAndPreSharedCerts(t *testing.T) { gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) namer := namer_util.NewNamer(clusterName, "fw1") ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -1282,7 +1282,7 @@ func TestMaxSecretBasedAndPreSharedCerts(t *testing.T) { expectCerts := make(map[string]string) expectCertsExtra := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) for ix := 0; ix < TargetProxyCertLimit; ix++ { str := strconv.Itoa(ix) @@ -1373,7 +1373,7 @@ func TestSecretBasedToPreSharedCertUpdate(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) lbInfo := &L7RuntimeInfo{ @@ -1429,7 +1429,7 @@ func TestSecretBasedToPreSharedCertUpdateWithErrors(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) lbInfo := &L7RuntimeInfo{ diff --git a/pkg/loadbalancers/url_maps_test.go b/pkg/loadbalancers/url_maps_test.go index fe21ffae90..8843d14b4b 100644 --- a/pkg/loadbalancers/url_maps_test.go +++ b/pkg/loadbalancers/url_maps_test.go @@ -81,7 +81,7 @@ func TestToComputeURLMap(t *testing.T) { }, } - namerFactory := namer_util.NewFrontendNamerFactory(namer) + namerFactory := namer_util.NewFrontendNamerFactory(namer, "") feNamer := namerFactory.NamerForLbName("ns/lb-name") gotComputeURLMap := toCompositeURLMap(gceURLMap, feNamer, meta.GlobalKey("ns-lb-name")) if !mapsEqual(gotComputeURLMap, wantComputeMap) { diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go index 333f65efeb..47b12ed96f 100644 --- a/pkg/neg/controller_test.go +++ b/pkg/neg/controller_test.go @@ -78,7 +78,7 @@ func newTestController(kubeClient kubernetes.Interface) *Controller { DefaultBackendSvcPort: defaultBackend, EnableCSM: true, } - context := context.NewControllerContext(kubeClient, dynamicClient, 
backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, ctxConfig) + context := context.NewControllerContext(kubeClient, dynamicClient, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, "", ctxConfig) controller := NewController( negtypes.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network"), context, diff --git a/pkg/neg/manager_test.go b/pkg/neg/manager_test.go index fba869fd7a..bdab5a1da0 100644 --- a/pkg/neg/manager_test.go +++ b/pkg/neg/manager_test.go @@ -70,7 +70,7 @@ func NewTestSyncerManager(kubeClient kubernetes.Interface) *syncerManager { ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, ctxConfig) + context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, "", ctxConfig) manager := newSyncerManager( namer, diff --git a/pkg/neg/readiness/reflector_test.go b/pkg/neg/readiness/reflector_test.go index b2f5ee75c3..cdfd63fe4f 100644 --- a/pkg/neg/readiness/reflector_test.go +++ b/pkg/neg/readiness/reflector_test.go @@ -62,7 +62,7 @@ func fakeContext() *context.ControllerContext { } fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) negtypes.MockNetworkEndpointAPIs(fakeGCE) - context := context.NewControllerContext(kubeClient, nil, nil, nil, fakeGCE, namer, ctxConfig) + context := context.NewControllerContext(kubeClient, nil, nil, nil, fakeGCE, namer, "", ctxConfig) return context } diff --git a/pkg/neg/syncers/syncer_test.go b/pkg/neg/syncers/syncer_test.go index eab1269799..8651055748 100644 --- a/pkg/neg/syncers/syncer_test.go +++ b/pkg/neg/syncers/syncer_test.go @@ -88,7 +88,7 @@ func newSyncerTester() *syncerTester { ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, nil, namer, ctxConfig) + context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, nil, namer, "", ctxConfig) negSyncerKey := negtypes.NegSyncerKey{ Namespace: testServiceNamespace, Name: testServiceName, diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go index 7a86ad2b0f..87d0582e4d 100644 --- a/pkg/neg/syncers/transaction_test.go +++ b/pkg/neg/syncers/transaction_test.go @@ -1078,7 +1078,7 @@ func newTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud) (negty ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, nil, namer, ctxConfig) + context := context.NewControllerContext(kubeClient, nil, backendConfigClient, nil, nil, namer, "", ctxConfig) svcPort := negtypes.NegSyncerKey{ Namespace: testNamespace, Name: testService, diff --git a/pkg/sync/interfaces.go b/pkg/sync/interfaces.go index f91185746e..e933faea57 100644 --- a/pkg/sync/interfaces.go +++ b/pkg/sync/interfaces.go @@ -40,10 +40,11 @@ type Controller interface { GCBackends(toKeep []*v1beta1.Ingress) error // SyncLoadBalancer syncs the front-end load balancer resources for a GCLB given some existing state. SyncLoadBalancer(state interface{}) error - // GCLoadBalancers garbage collects front-end load balancer resources for all ingresses given a list of ingresses to exclude from GC. 
-	GCLoadBalancers(toKeep []*v1beta1.Ingress) error
+	// GCV1LoadBalancers garbage collects front-end load balancer resources for all ingresses,
+	// given a list of ingresses with the v1 naming scheme to exclude from GC.
+	GCV1LoadBalancers(toKeep []*v1beta1.Ingress) error
 	// PostProcess allows for doing some post-processing after an Ingress is synced to a GCLB.
 	PostProcess(state interface{}) error
-	// MaybeRemoveFinalizers removes finalizers from a list of ingresses one by one after all of its associated resources are deleted.
-	MaybeRemoveFinalizers(toCleanup []*v1beta1.Ingress) error
+	// EnsureDeleteV1Finalizers ensures that v1 finalizers are removed for the given list of ingresses.
+	EnsureDeleteV1Finalizers(toCleanup []*v1beta1.Ingress) error
 }
diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go
index 08db148558..e137ba147c 100644
--- a/pkg/sync/sync.go
+++ b/pkg/sync/sync.go
@@ -65,17 +65,18 @@ func (s *IngressSyncer) GC(ings []*v1beta1.Ingress) error {
 	// Partition GC state into ingresses those need cleanup and those don't.
 	// An Ingress is considered to exist and not considered for cleanup, if:
 	// 1) It is a GCLB Ingress.
-	// 2) It is not a candidate for deletion.
+	// 2) It is not a deletion candidate. A deletion candidate is an ingress
+	// with a deletion timestamp and a finalizer.
 	toCleanup, toKeep := operator.Ingresses(ings).Partition(utils.NeedsCleanup)
 	toKeepIngresses := toKeep.AsList()
-	lbErr := s.controller.GCLoadBalancers(toKeepIngresses)
+	lbErr := s.controller.GCV1LoadBalancers(toKeepIngresses)
 	beErr := s.controller.GCBackends(toKeepIngresses)
 	if lbErr != nil {
-		return fmt.Errorf("error running load balancer garbage collection routine: %v", lbErr)
+		return fmt.Errorf("error running load balancer garbage collection routine for v1 naming scheme: %v", lbErr)
 	}
 	if beErr != nil {
 		return fmt.Errorf("error running backend garbage collection routine: %v", beErr)
 	}
 
-	return s.controller.MaybeRemoveFinalizers(toCleanup.AsList())
+	return s.controller.EnsureDeleteV1Finalizers(toCleanup.AsList())
 }
diff --git a/pkg/test/utils.go b/pkg/test/utils.go
index 4fcb4bdb16..52e09195eb 100644
--- a/pkg/test/utils.go
+++ b/pkg/test/utils.go
@@ -12,6 +12,12 @@ import (
 	"k8s.io/ingress-gce/pkg/utils"
 )
 
+const (
+	FinalizerAddFlag          = Flag("enable-finalizer-add")
+	FinalizerRemoveFlag       = Flag("enable-finalizer-remove")
+	EnableV2FrontendNamerFlag = Flag("enable-v2-frontend-namer")
+)
+
 var (
 	BackendPort      = intstr.IntOrString{Type: intstr.Int, IntVal: 80}
 	DefaultBeSvcPort = utils.ServicePort{
@@ -80,3 +86,27 @@ func DecodeIngress(data []byte) (*v1beta1.Ingress, error) {
 
 	return obj.(*v1beta1.Ingress), nil
 }
+
+// Flag is a type representing a controller flag.
+type Flag string
+
+// FlagSaver is a utility type that captures the value of a flag and resets it back to the saved value.
+type FlagSaver struct{ flags map[Flag]bool }
+
+// NewFlagSaver returns a flag saver by initializing the map.
+func NewFlagSaver() FlagSaver {
+	return FlagSaver{make(map[Flag]bool)}
+}
+
+// Save captures the value of the given flag.
+func (s *FlagSaver) Save(key Flag, flagPointer *bool) {
+	s.flags[key] = *flagPointer
+}
+
+// Reset resets the value of the given flag to a previously saved value.
+// This does nothing if the flag value was not captured.
+func (s *FlagSaver) Reset(key Flag, flagPointer *bool) {
+	if val, ok := s.flags[key]; ok {
+		*flagPointer = val
+	}
+}
diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go
index f908792268..0fb6663e3e 100644
--- a/pkg/utils/common/common.go
+++ b/pkg/utils/common/common.go
@@ -17,6 +17,9 @@ limitations under the License.
 package common
 
 import (
+	"crypto/sha256"
+	"strconv"
+
 	"k8s.io/api/networking/v1beta1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/cache"
@@ -27,6 +30,32 @@ var (
 	KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )
 
+// charMap is a lookup table holding the string forms of the digits 0-9 and the
+// lowercase letters a-z. It preserves entropy when converting hash bytes to a string.
+var charMap []string
+
+// WARNING: PLEASE DO NOT CHANGE THE HASH FUNCTION.
+func init() {
+	for i := 0; i < 10; i++ {
+		charMap = append(charMap, strconv.Itoa(i))
+	}
+	for i := 0; i < 26; i++ {
+		charMap = append(charMap, string('a'+rune(i)))
+	}
+}
+
+// ContentHash creates a content hash string of length n from s using sha256.
+// WARNING: PLEASE DO NOT CHANGE THE HASH FUNCTION.
+func ContentHash(s string, n int) string {
+	var h string
+	bytes := sha256.Sum256(([]byte)(s))
+	for i := 0; i < n && i < len(bytes); i++ {
+		idx := int(bytes[i]) % len(charMap)
+		h += charMap[idx]
+	}
+	return h
+}
+
 // NamespacedName returns namespaced name string of a given ingress.
 // Note: This is used for logging.
 func NamespacedName(ing *v1beta1.Ingress) string {
diff --git a/pkg/utils/common/finalizer.go b/pkg/utils/common/finalizer.go
index d8fd7c4fd0..d66ec21fb1 100644
--- a/pkg/utils/common/finalizer.go
+++ b/pkg/utils/common/finalizer.go
@@ -23,52 +23,91 @@ import (
 	"k8s.io/kubernetes/pkg/util/slice"
 )
 
-// FinalizerKey is the string representing the Ingress finalizer.
-const FinalizerKey = "networking.gke.io/ingress-finalizer"
+const (
+	// FinalizerKey is the string representing the Ingress finalizer.
+	FinalizerKey = "networking.gke.io/ingress-finalizer"
+	// FinalizerKeyV2 is the string representing the v2 Ingress finalizer.
+	// An Ingress with the V2 finalizer uses the V2 frontend naming scheme.
+	FinalizerKeyV2 = "networking.gke.io/ingress-finalizer-V2"
+)
+
+// IsDeletionCandidate is true if the passed in meta has a deletion timestamp and contains an ingress finalizer.
+func IsDeletionCandidate(m meta_v1.ObjectMeta) bool {
+	return IsDeletionCandidateForGivenFinalizer(m, FinalizerKey) || IsDeletionCandidateForGivenFinalizer(m, FinalizerKeyV2)
+}
+
+// IsDeletionCandidateForGivenFinalizer is true if the passed in meta has a deletion timestamp and contains the specified finalizer.
+func IsDeletionCandidateForGivenFinalizer(m meta_v1.ObjectMeta, key string) bool {
+	return m.DeletionTimestamp != nil && hasGivenFinalizer(m, key)
+}
 
-// IsDeletionCandidate is true if the passed in meta contains the specified finalizer.
-func IsDeletionCandidate(m meta_v1.ObjectMeta, key string) bool {
-	return m.DeletionTimestamp != nil && HasFinalizer(m, key)
+// HasFinalizer is true if the passed in meta has an ingress finalizer.
+func HasFinalizer(m meta_v1.ObjectMeta) bool {
+	return HasV1Finalizer(m) || HasV2Finalizer(m)
 }
 
-// NeedToAddFinalizer is true if the passed in meta does not contain the specified finalizer.
-func NeedToAddFinalizer(m meta_v1.ObjectMeta, key string) bool {
-	return m.DeletionTimestamp == nil && !HasFinalizer(m, key)
+// HasV1Finalizer is true if the passed in meta has a v1 finalizer.
+func HasV1Finalizer(m meta_v1.ObjectMeta) bool { + return hasGivenFinalizer(m, FinalizerKey) } -// HasFinalizer is true if the passed in meta has the specified finalizer. -func HasFinalizer(m meta_v1.ObjectMeta, key string) bool { +// HasV2Finalizer is true if the passed in meta has a v2 finalizer. +func HasV2Finalizer(m meta_v1.ObjectMeta) bool { + return hasGivenFinalizer(m, FinalizerKeyV2) +} + +// hasGivenFinalizer is true if the passed in meta has the specified finalizer. +func hasGivenFinalizer(m meta_v1.ObjectMeta, key string) bool { return slice.ContainsString(m.Finalizers, key, nil) } -// AddFinalizer tries to add a finalizer to an Ingress. If a finalizer -// already exists, it does nothing. -func AddFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { - ingKey := FinalizerKey - if NeedToAddFinalizer(ing.ObjectMeta, ingKey) { +// EnsureV1Finalizer ensures that v1 finalizer exists on given Ingress. +func EnsureV1Finalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { + return ensureGivenFinalizer(ing, ingClient, FinalizerKey) +} + +// EnsureV2Finalizer ensures that v2 finalizer exists on given Ingress. +func EnsureV2Finalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { + return ensureGivenFinalizer(ing, ingClient, FinalizerKeyV2) +} + +// ensureGivenFinalizer ensures that the specified finalizer exists on given Ingress. +func ensureGivenFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface, finalizerKey string) error { + if needToAddFinalizer(ing.ObjectMeta, finalizerKey) { updated := ing.DeepCopy() - updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, ingKey) + updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, finalizerKey) if _, err := ingClient.Update(updated); err != nil { return fmt.Errorf("error updating Ingress %s/%s: %v", ing.Namespace, ing.Name, err) } - klog.V(3).Infof("Added finalizer %q for Ingress %s/%s", ingKey, ing.Namespace, ing.Name) + klog.V(3).Infof("Added finalizer %q for Ingress %s/%s", finalizerKey, ing.Namespace, ing.Name) } - return nil } -// RemoveFinalizer tries to remove a Finalizer from an Ingress. If a -// finalizer is not on the Ingress, it does nothing. -func RemoveFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { - ingKey := FinalizerKey - if HasFinalizer(ing.ObjectMeta, ingKey) { +// needToAddFinalizer is true if the passed in meta does not contain the specified finalizer. +func needToAddFinalizer(m meta_v1.ObjectMeta, key string) bool { + return m.DeletionTimestamp == nil && !hasGivenFinalizer(m, key) +} + +// EnsureDeleteV1Finalizer ensures that v1 finalizer is deleted from given Ingress. +func EnsureDeleteV1Finalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { + return ensureDeleteGivenFinalizer(ing, ingClient, FinalizerKey) +} + +// EnsureDeleteV2Finalizer ensures that v2 finalizer is deleted from given Ingress. +func EnsureDeleteV2Finalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { + return ensureDeleteGivenFinalizer(ing, ingClient, FinalizerKeyV2) +} + +// ensureDeleteGivenFinalizer ensures that the specified finalizer is deleted from given Ingress. 
+func ensureDeleteGivenFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface, finalizerKey string) error {
+	if hasGivenFinalizer(ing.ObjectMeta, finalizerKey) {
 		updated := ing.DeepCopy()
-		updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, ingKey, nil)
+		updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, finalizerKey, nil)
 		if _, err := ingClient.Update(updated); err != nil {
 			return fmt.Errorf("error updating Ingress %s/%s: %v", ing.Namespace, ing.Name, err)
 		}
-		klog.V(3).Infof("Removed finalizer %q for Ingress %s/%s", ingKey, ing.Namespace, ing.Name)
+		klog.V(3).Infof("Removed finalizer %q for Ingress %s/%s", finalizerKey, ing.Namespace, ing.Name)
 	}
-
 	return nil
 }
diff --git a/pkg/utils/namer/frontendnamer.go b/pkg/utils/namer/frontendnamer.go
index 653d0cfc2f..9d839d4dc6 100644
--- a/pkg/utils/namer/frontendnamer.go
+++ b/pkg/utils/namer/frontendnamer.go
@@ -14,13 +14,42 @@ limitations under the License.
 package namer
 
 import (
+	"fmt"
+	"strings"
+
 	"k8s.io/api/networking/v1beta1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/ingress-gce/pkg/utils/common"
+	"k8s.io/klog"
 )
 
 const (
+	// V1NamingScheme is the v1 frontend naming scheme.
 	V1NamingScheme = Scheme("v1")
+	// V2NamingScheme is the v2 frontend naming scheme.
 	V2NamingScheme = Scheme("v2")
+	// schemaVersionV2 is the suffix appended to the resource prefix for the v2 naming scheme.
+	schemaVersionV2 = "2"
+	// maximumAllowedCombinedLength is the maximum combined length of namespace and
+	// name portions in the resource name.
+	// This is computed by subtracting: k8s1 - 4, dashes - 5, resource prefix - 2,
+	// clusterUID - 8, suffix hash - 8.
+	// maximumAllowedCombinedLength = maximum name length of a GCE resource (63) - 27
+	maximumAllowedCombinedLength = 36
+	// urlMapPrefixV2 is the URL map prefix for the v2 naming scheme.
+	urlMapPrefixV2 = "um"
+	// forwardingRulePrefixV2 is the HTTP forwarding rule prefix for the v2 naming scheme.
+	forwardingRulePrefixV2 = "fr"
+	// httpsForwardingRulePrefixV2 is the HTTPS forwarding rule prefix for the v2 naming scheme.
+	httpsForwardingRulePrefixV2 = "fs"
+	// targetHTTPProxyPrefixV2 is the target HTTP proxy prefix for the v2 naming scheme.
+	targetHTTPProxyPrefixV2 = "tp"
+	// targetHTTPSProxyPrefixV2 is the target HTTPS proxy prefix for the v2 naming scheme.
+	targetHTTPSProxyPrefixV2 = "ts"
+	// sslCertPrefixV2 is the SSL certificate prefix for the v2 naming scheme.
+	sslCertPrefixV2 = "cr"
+	// clusterUIDLength is the length of the cluster UID included in resource names.
+	clusterUIDLength = 8
 )
 
 // Scheme is ingress frontend name scheme.
@@ -79,14 +108,110 @@ func (ln *V1IngressFrontendNamer) LbName() string {
 	return ln.lbName
 }
 
+// V2IngressFrontendNamer implements IngressFrontendNamer.
+type V2IngressFrontendNamer struct {
+	ing *v1beta1.Ingress
+	// prefix for all resource names (ex.: "k8s").
+	prefix string
+	// Load balancer name to be included in the resource name.
+	lbName string
+	// clusterUID is an 8-character hash included in resource names.
+	// This is immutable after the cluster is created. The kube-system UID, which is
+	// immutable, is used as the cluster UID for the v2 naming scheme.
+	clusterUID string
+}
+
+// newV2IngressFrontendNamer returns a v2 frontend namer for the given ingress, cluster UID and prefix.
+func newV2IngressFrontendNamer(ing *v1beta1.Ingress, clusterUID string, prefix string) IngressFrontendNamer {
+	namer := &V2IngressFrontendNamer{ing: ing, prefix: prefix, clusterUID: clusterUID}
+	namer.setLbName()
+	return namer
+}
+
+// ForwardingRule returns the name of the forwarding rule for the given protocol.
+func (vn *V2IngressFrontendNamer) ForwardingRule(protocol NamerProtocol) string {
+	switch protocol {
+	case HTTPProtocol:
+		return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, forwardingRulePrefixV2, vn.lbName)
+	case HTTPSProtocol:
+		return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, httpsForwardingRulePrefixV2, vn.lbName)
+	default:
+		klog.Fatalf("invalid ForwardingRule protocol: %q", protocol)
+		return "invalid"
+	}
+}
+
+// TargetProxy returns the name of the target proxy for the given protocol.
+func (vn *V2IngressFrontendNamer) TargetProxy(protocol NamerProtocol) string {
+	switch protocol {
+	case HTTPProtocol:
+		return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, targetHTTPProxyPrefixV2, vn.lbName)
+	case HTTPSProtocol:
+		return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, targetHTTPSProxyPrefixV2, vn.lbName)
+	default:
+		klog.Fatalf("invalid TargetProxy protocol: %q", protocol)
+		return "invalid"
+	}
+}
+
+// UrlMap returns the name of the URL map.
+func (vn *V2IngressFrontendNamer) UrlMap() string {
+	return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, urlMapPrefixV2, vn.lbName)
+}
+
+// SSLCertName returns the name of the certificate.
+func (vn *V2IngressFrontendNamer) SSLCertName(secretHash string) string {
+	return fmt.Sprintf("%s%s-%s-%s-%s-%s", vn.prefix, schemaVersionV2, sslCertPrefixV2, vn.clusterUID, vn.lbNameToHash(), secretHash)
+}
+
+// IsCertNameForLB returns true if the certName belongs to this cluster's ingress.
+// It checks that the cert name prefix contains the hashed lbName.
+func (vn *V2IngressFrontendNamer) IsCertNameForLB(certName string) bool {
+	prefix := fmt.Sprintf("%s%s-%s-%s-%s", vn.prefix, schemaVersionV2, sslCertPrefixV2, vn.clusterUID, vn.lbNameToHash())
+	return strings.HasPrefix(certName, prefix)
+}
+
+// IsLegacySSLCert always returns false, as the v2 naming scheme does not use legacy certs.
+func (vn *V2IngressFrontendNamer) IsLegacySSLCert(certName string) bool {
+	return false
+}
+
+// LbName returns the load balancer name.
+func (vn *V2IngressFrontendNamer) LbName() string {
+	return vn.lbName
+}
+
+// setLbName sets the load balancer name.
+func (vn *V2IngressFrontendNamer) setLbName() {
+	truncFields := TrimFieldsEvenly(maximumAllowedCombinedLength, vn.ing.Namespace, vn.ing.Name)
+	truncNamespace := truncFields[0]
+	truncName := truncFields[1]
+	suffix := vn.suffix(vn.clusterUID, vn.ing.Namespace, vn.ing.Name)
+	vn.lbName = fmt.Sprintf("%s-%s-%s-%s", vn.clusterUID, truncNamespace, truncName, suffix)
+}
+
+// suffix returns a hash string of length 8 of a string concatenated from the
+// uid, namespace and name. These fields uniquely identify an ingress/load balancer.
+func (vn *V2IngressFrontendNamer) suffix(uid, namespace, name string) string {
+	lbString := strings.Join([]string{uid, namespace, name}, ";")
+	return common.ContentHash(lbString, 8)
+}
+
+// lbNameToHash returns a hash string of length 16 of lbName.
+func (vn *V2IngressFrontendNamer) lbNameToHash() string {
+	return common.ContentHash(vn.lbName, 16)
+}
+
 // FrontendNamerFactory implements IngressFrontendNamerFactory.
type FrontendNamerFactory struct { - namer *Namer + namer *Namer + clusterUID string } -// NewFrontendNamerFactory returns IngressFrontendNamerFactory given a v1 namer. -func NewFrontendNamerFactory(namer *Namer) IngressFrontendNamerFactory { - return &FrontendNamerFactory{namer: namer} +// NewFrontendNamerFactory returns IngressFrontendNamerFactory given a v1 namer and uid. +func NewFrontendNamerFactory(namer *Namer, uid types.UID) IngressFrontendNamerFactory { + clusterUID := common.ContentHash(string(uid), clusterUIDLength) + return &FrontendNamerFactory{namer: namer, clusterUID: clusterUID} } // Namer implements IngressFrontendNamerFactory. @@ -95,8 +220,7 @@ func (rn *FrontendNamerFactory) Namer(ing *v1beta1.Ingress) IngressFrontendNamer case V1NamingScheme: return newV1IngressFrontendNamer(ing, rn.namer) case V2NamingScheme: - // TODO(smatti): return V2 Namer. - return nil + return newV2IngressFrontendNamer(ing, rn.clusterUID, rn.namer.prefix) default: return newV1IngressFrontendNamer(ing, rn.namer) } diff --git a/pkg/utils/namer/frontendnamer_test.go b/pkg/utils/namer/frontendnamer_test.go index b72a1de868..fbcadb2157 100644 --- a/pkg/utils/namer/frontendnamer_test.go +++ b/pkg/utils/namer/frontendnamer_test.go @@ -26,7 +26,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const clusterUID = "uid1" +const ( + clusterUID = "uid1" + kubeSystemUID = "ksuid123" +) func newIngress(namespace, name string) *v1beta1.Ingress { return &v1beta1.Ingress{ @@ -237,3 +240,152 @@ func TestV1IngressFrontendNamer(t *testing.T) { } } } + +// TestV2IngressFrontendNamer tests v2 frontend namer workflow. +func TestV2IngressFrontendNamer(t *testing.T) { + longString := "01234567890123456789012345678901234567890123456789" + testCases := []struct { + desc string + namespace string + name string + // Expected values. 
+ lbName string + targetHTTPProxy string + targetHTTPSProxy string + sslCert string + forwardingRuleHTTP string + forwardingRuleHTTPS string + urlMap string + }{ + { + "simple case", + "namespace", + "name", + "ksuid123-namespace-name-wddys49o", + "%s2-tp-ksuid123-namespace-name-wddys49o", + "%s2-ts-ksuid123-namespace-name-wddys49o", + "%s2-cr-ksuid123-lb75yy4dn9xa8ib0-%s", + "%s2-fr-ksuid123-namespace-name-wddys49o", + "%s2-fs-ksuid123-namespace-name-wddys49o", + "%s2-um-ksuid123-namespace-name-wddys49o", + }, + { + "62 characters", + longString[:23], + longString[:24], + "ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-cr-ksuid123-dlz1pf382qzmdac9-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-um-ksuid123-012345678901234567-012345678901234567-sce8socf", + }, + { + "63 characters", + longString[:24], + longString[:24], + "ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-cr-ksuid123-8ohtur3hfgw0qe3f-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-um-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + }, + { + "64 characters", + longString[:24], + longString[:25], + "ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-cr-ksuid123-1vm2cunkgqy1exss-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-um-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + }, + { + "long namespace", + longString, + "0", + "ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-tp-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-ts-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-cr-ksuid123-wfasfag894xpvmnr-%s", + "%s2-fr-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-fs-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-um-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + }, + { + "long name", + "0", + longString, + "ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-tp-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-ts-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-cr-ksuid123-50ba1vszcf9aeh73-%s", + "%s2-fr-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-fs-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-um-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + }, + { + "long name and namespace", + longString, + longString, + "ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-cr-ksuid123-7trbunxyryt39g1b-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-um-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + }, + } + for _, prefix := 
range []string{"k8s", "mci"} { + oldNamer := NewNamerWithPrefix(prefix, clusterUID, "") + secretHash := fmt.Sprintf("%x", sha256.Sum256([]byte("test123")))[:16] + for _, tc := range testCases { + tc.desc = fmt.Sprintf("%s namespaceLength %d nameLength %d prefix %s", tc.desc, len(tc.namespace), len(tc.name), prefix) + t.Run(tc.desc, func(t *testing.T) { + ing := newIngress(tc.namespace, tc.name) + namer := newV2IngressFrontendNamer(ing, kubeSystemUID, oldNamer.prefix) + tc.targetHTTPProxy = fmt.Sprintf(tc.targetHTTPProxy, prefix) + tc.targetHTTPSProxy = fmt.Sprintf(tc.targetHTTPSProxy, prefix) + tc.sslCert = fmt.Sprintf(tc.sslCert, prefix, secretHash) + tc.forwardingRuleHTTP = fmt.Sprintf(tc.forwardingRuleHTTP, prefix) + tc.forwardingRuleHTTPS = fmt.Sprintf(tc.forwardingRuleHTTPS, prefix) + tc.urlMap = fmt.Sprintf(tc.urlMap, prefix) + + // Test behavior of v2 Namer. + if diff := cmp.Diff(tc.lbName, namer.LbName()); diff != "" { + t.Errorf("namer.GetLbName() mismatch (-want +got):\n%s", diff) + } + name := namer.TargetProxy(HTTPProtocol) + if diff := cmp.Diff(tc.targetHTTPProxy, name); diff != "" { + t.Errorf("namer.TargetProxy(HTTPProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.TargetProxy(HTTPSProtocol) + if diff := cmp.Diff(tc.targetHTTPSProxy, name); diff != "" { + t.Errorf("namer.TargetProxy(HTTPSProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.SSLCertName(secretHash) + if diff := cmp.Diff(tc.sslCert, name); diff != "" { + t.Errorf("namer.SSLCertName(%q) mismatch (-want +got):\n%s", secretHash, diff) + } + name = namer.ForwardingRule(HTTPProtocol) + if diff := cmp.Diff(tc.forwardingRuleHTTP, name); diff != "" { + t.Errorf("namer.ForwardingRule(HTTPProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.ForwardingRule(HTTPSProtocol) + if diff := cmp.Diff(tc.forwardingRuleHTTPS, name); diff != "" { + t.Errorf("namer.ForwardingRule(HTTPSProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.UrlMap() + if diff := cmp.Diff(tc.urlMap, name); diff != "" { + t.Errorf("namer.UrlMap() mismatch (-want +got):\n%s", diff) + } + }) + } + } +} diff --git a/pkg/utils/namer/utils.go b/pkg/utils/namer/utils.go index 3d286b0518..b818445a45 100644 --- a/pkg/utils/namer/utils.go +++ b/pkg/utils/namer/utils.go @@ -58,12 +58,21 @@ func TrimFieldsEvenly(max int, fields ...string) []string { // frontendNamingScheme returns naming scheme for given ingress. func frontendNamingScheme(ing *v1beta1.Ingress) Scheme { - // TODO(smatti): return V2NamingScheme if ingress has V2 finalizer switch { - case common.HasFinalizer(ing.ObjectMeta, common.FinalizerKey): + case common.HasV2Finalizer(ing.ObjectMeta): + return V2NamingScheme + case common.HasV1Finalizer(ing.ObjectMeta): return V1NamingScheme default: - klog.V(3).Infof("No finalizer found on Ingress %v, using Legacy naming scheme", ing) + klog.V(3).Infof("No finalizer found on Ingress %v, using V1 naming scheme", ing) return V1NamingScheme } } + +// IsV2NamingScheme returns true if given ingress uses v2 naming scheme. +func IsV2NamingScheme(ing *v1beta1.Ingress) bool { + if ing == nil { + return false + } + return frontendNamingScheme(ing) == V2NamingScheme +} diff --git a/pkg/utils/namer/utils_test.go b/pkg/utils/namer/utils_test.go index 088b7cc293..c8efe97361 100644 --- a/pkg/utils/namer/utils_test.go +++ b/pkg/utils/namer/utils_test.go @@ -13,7 +13,13 @@ limitations under the License. 
package namer -import "testing" +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/ingress-gce/pkg/utils/common" +) func TestTrimFieldsEvenly(t *testing.T) { t.Parallel() @@ -87,3 +93,28 @@ func TestTrimFieldsEvenly(t *testing.T) { } } } + +// TestFrontendNamingScheme asserts that correct naming scheme is returned for given ingress. +func TestFrontendNamingScheme(t *testing.T) { + testCases := []struct { + finalizer string + expectScheme Scheme + }{ + {"", V1NamingScheme}, + {common.FinalizerKey, V1NamingScheme}, + {common.FinalizerKeyV2, V2NamingScheme}, + } + for _, tc := range testCases { + desc := fmt.Sprintf("Finalizer %q", tc.finalizer) + t.Run(desc, func(t *testing.T) { + ing := newIngress("namespace", "name") + if tc.finalizer != "" { + ing.ObjectMeta.Finalizers = []string{tc.finalizer} + } + + if diff := cmp.Diff(tc.expectScheme, frontendNamingScheme(ing)); diff != "" { + t.Fatalf("Got diff for Frontend naming scheme (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 460bce0a01..dcc88b645f 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -383,5 +383,16 @@ func ServiceKeyFunc(namespace, name string) string { // NeedsCleanup returns true if the ingress needs to have its associated resources deleted. func NeedsCleanup(ing *v1beta1.Ingress) bool { - return common.IsDeletionCandidate(ing.ObjectMeta, common.FinalizerKey) || !IsGLBCIngress(ing) + return common.IsDeletionCandidate(ing.ObjectMeta) || !IsGLBCIngress(ing) +} + +// HasVIP returns true if given ingress has a vip. +func HasVIP(ing *v1beta1.Ingress) bool { + if ing == nil { + return false + } + if lbIPs := ing.Status.LoadBalancer.Ingress; len(lbIPs) == 0 || lbIPs[0].IP == "" { + return false + } + return true }
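
Illustrative usage (a minimal sketch, not part of the patch itself): the snippet below shows how a caller could exercise the frontend namer factory introduced above. The ingress metadata, the cluster namer arguments ("uid1", "fw1"), and the kube-system UID "ksuid123" are placeholder assumptions; the V2 finalizer on the ingress is what selects the v2 naming scheme.

package main

import (
	"fmt"

	"k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/ingress-gce/pkg/utils/common"
	namer_util "k8s.io/ingress-gce/pkg/utils/namer"
)

func main() {
	// Placeholder ingress carrying the V2 finalizer; in the controller this
	// would come from the informer cache.
	ing := &v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "default",
			Name:       "my-ing",
			Finalizers: []string{common.FinalizerKeyV2},
		},
	}

	// The cluster namer and kube-system UID are normally taken from the
	// controller context; both values here are made up for the sketch.
	clusterNamer := namer_util.NewNamer("uid1", "fw1")
	factory := namer_util.NewFrontendNamerFactory(clusterNamer, types.UID("ksuid123"))

	// The factory inspects the ingress finalizers: FinalizerKeyV2 selects the
	// v2 scheme, FinalizerKey (or none) falls back to the v1 scheme.
	feNamer := factory.Namer(ing)
	fmt.Println(feNamer.UrlMap())
	fmt.Println(feNamer.ForwardingRule(namer_util.HTTPProtocol))
	fmt.Println(feNamer.SSLCertName(common.ContentHash("cert contents", 16)))
}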