From a39607ff32b32b028fec74f6d75c421fc317031a Mon Sep 17 00:00:00 2001 From: Pavithra Ramesh Date: Sat, 4 Jan 2020 15:42:46 -0800 Subject: [PATCH] unit tests for L4 ILB Most of the tests in l4_test.go are from gce_loadbalancer_internal_test.go --- pkg/backends/syncer_test.go | 10 +- pkg/controller/service_controller_test.go | 200 ++++++++ pkg/loadbalancers/fakes.go | 7 + pkg/loadbalancers/l4_test.go | 597 ++++++++++++++++++++++ pkg/test/utils.go | 97 ++++ pkg/utils/utils_test.go | 26 + 6 files changed, 935 insertions(+), 2 deletions(-) create mode 100644 pkg/controller/service_controller_test.go create mode 100644 pkg/loadbalancers/l4_test.go diff --git a/pkg/backends/syncer_test.go b/pkg/backends/syncer_test.go index 77c9ccacad..f549980917 100644 --- a/pkg/backends/syncer_test.go +++ b/pkg/backends/syncer_test.go @@ -105,7 +105,12 @@ func (p *portset) check(fakeGCE *gce.Cloud) error { return fmt.Errorf("backend for port %+v should exist, but got: %v", sp.NodePort, err) } } else { - if bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)); !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)) + if err == nil || !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + if sp.PrimaryIPNEGEnabled { + // It is expected that these Backends should not get cleaned up in the GC loop. + continue + } return fmt.Errorf("backend for port %+v should not exist, but got %v", sp, bs) } } @@ -333,7 +338,7 @@ func TestGC(t *testing.T) { } } -// Test GC with both ELB and ILBs +// Test GC with both ELB and ILBs. Add in an L4 ILB NEG which should not be deleted as part of GC. 
func TestGCMixed(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) @@ -345,6 +350,7 @@ func TestGCMixed(t *testing.T) { {NodePort: 84, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, + {ID: utils.ServicePortID{Service: types.NamespacedName{Name: "testsvc"}}, PrimaryIPNEGEnabled: true, BackendNamer: defaultNamer}, } ps := newPortset(svcNodePorts) if err := ps.add(svcNodePorts); err != nil { diff --git a/pkg/controller/service_controller_test.go b/pkg/controller/service_controller_test.go new file mode 100644 index 0000000000..ed9f5db32d --- /dev/null +++ b/pkg/controller/service_controller_test.go @@ -0,0 +1,200 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "k8s.io/ingress-gce/pkg/loadbalancers" + "k8s.io/ingress-gce/pkg/neg/types" + "testing" + "time" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + api_v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + //"k8s.io/ingress-gce/pkg/loadbalancers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/context" + "k8s.io/ingress-gce/pkg/test" + "k8s.io/ingress-gce/pkg/utils/common" + "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/legacy-cloud-providers/gce" +) + +func newServiceController() *L4Controller { + kubeClient := fake.NewSimpleClientset() + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + (fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = loadbalancers.InsertForwardingRuleHook + + namer := namer.NewNamer(clusterUID, "") + + stopCh := make(chan struct{}) + ctxConfig := context.ControllerContextConfig{ + Namespace: api_v1.NamespaceAll, + ResyncPeriod: 1 * time.Minute, + } + ctx := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig) + return NewL4Controller(ctx, stopCh) +} + +func addILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Create(svc) + l4c.ctx.ServiceInformer.GetIndexer().Add(svc) +} + +func updateILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Update(svc) + l4c.ctx.ServiceInformer.GetIndexer().Update(svc) +} + +func deleteILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{}) + l4c.ctx.ServiceInformer.GetIndexer().Delete(svc) +} + +func addNEG(l4c *L4Controller, svc *api_v1.Service) { + // Also create a fake NEG for this service since the sync code will try to 
link the backend service to NEG + negName := l4c.ctx.ClusterNamer.PrimaryIPNEG(svc.Namespace, svc.Name) + neg := &composite.NetworkEndpointGroup{Name: negName} + key := meta.ZonalKey(negName, types.TestZone1) + composite.CreateNetworkEndpointGroup(l4c.ctx.Cloud, key, neg) +} + +func getKeyForSvc(svc *api_v1.Service, t *testing.T) string { + key, err := common.KeyFunc(svc) + if err != nil { + t.Fatalf("Failed to get key for service %v, err : %v", svc, err) + } + return key +} + +func validateSvcStatus(svc *api_v1.Service, expectStatus bool, t *testing.T) { + if common.HasGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2) != expectStatus { + t.Fatalf("Expected L4 finalizer present to be %v, but it was %v", expectStatus, !expectStatus) + } + if len(svc.Status.LoadBalancer.Ingress) == 0 || svc.Status.LoadBalancer.Ingress[0].IP == "" { + if expectStatus { + t.Fatalf("Invalid LoadBalancer status field in service - %+v", svc.Status.LoadBalancer) + } + } + if len(svc.Status.LoadBalancer.Ingress) > 0 && !expectStatus { + t.Fatalf("Expected LoadBalancer status to be empty, Got %v", svc.Status.LoadBalancer) + } +} + +// TestProcessCreateOrUpdate verifies the processing loop in L4Controller. +// This test adds a new service, then performs a valid update and then modifies the service type to External and ensures +// that the status field is as expected in each case. +func TestProcessCreateOrUpdate(t *testing.T) { + l4c := newServiceController() + newSvc := test.NewL4ILBService(false, 8080) + addILBService(l4c, newSvc) + addNEG(l4c, newSvc) + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that it contains the finalizer as well as Status field. 
+ svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, true, t) + + // set the TrafficPolicy of the service to Local + newSvc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal + updateILBService(l4c, newSvc) + err = l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that it contains the finalizer as well as Status field. + svc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, true, t) + + // Remove the Internal LoadBalancer annotation, this should trigger a cleanup. + delete(newSvc.Annotations, gce.ServiceAnnotationLoadBalancerType) + updateILBService(l4c, newSvc) + err = l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that it contains the finalizer as well as Status field. + svc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, false, t) +} + +func TestProcessDeletion(t *testing.T) { + l4c := newServiceController() + newSvc := test.NewL4ILBService(false, 8080) + addILBService(l4c, newSvc) + addNEG(l4c, newSvc) + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that it contains the finalizer as well as Status field. 
+ svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, true, t) + + // Mark the service for deletion by updating timestamp. Use svc instead of newSvc since that has the finalizer. + // TODO use patch instead of update here as well. + svc.DeletionTimestamp = &v1.Time{} + updateILBService(l4c, svc) + err = l4c.sync(getKeyForSvc(svc, t)) + if err != nil { + t.Errorf("Failed to sync updated service %s, err %v", svc.Name, err) + } + svc, err = l4c.client.CoreV1().Services(svc.Namespace).Get(svc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, false, t) + deleteILBService(l4c, svc) + svc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svc.Name, v1.GetOptions{}) + if svc != nil { + t.Errorf("Expected service to be deleted, but was found - %v", svc) + } +} + +func TestProcessCreateLegacyService(t *testing.T) { + l4c := newServiceController() + newSvc := test.NewL4ILBService(false, 8080) + // Set the legacy finalizer + newSvc.Finalizers = append(newSvc.Finalizers, common.LegacyILBFinalizer) + addILBService(l4c, newSvc) + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that the status field is not updated. 
+ svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, false, t) +} diff --git a/pkg/loadbalancers/fakes.go b/pkg/loadbalancers/fakes.go index dcab2ee54b..4ca9a41528 100644 --- a/pkg/loadbalancers/fakes.go +++ b/pkg/loadbalancers/fakes.go @@ -43,3 +43,10 @@ func InsertGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *com } return false, nil } + +func InsertForwardingRuleHook(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule, m *cloud.MockForwardingRules) (b bool, e error) { + if obj.IPAddress == "" { + obj.IPAddress = "10.0.0.1" + } + return false, nil +} diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go new file mode 100644 index 0000000000..679a90d432 --- /dev/null +++ b/pkg/loadbalancers/l4_test.go @@ -0,0 +1,597 @@ +package loadbalancers + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +import ( + "context" + "fmt" + "k8s.io/apimachinery/pkg/types" + "k8s.io/ingress-gce/pkg/backends" + "strings" + "testing" + + "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/utils" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" + servicehelper "k8s.io/cloud-provider/service/helpers" + "k8s.io/ingress-gce/pkg/test" + namer_util "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/legacy-cloud-providers/gce" +) + +const ( + clusterUID = "aaaaa" +) + +// Does not work since update hooks are not set correctly. +func TestEnsureInternalBackendServiceUpdates(t *testing.T) { + t.Parallel() + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + (fakeGCE.Compute().(*cloud.MockGCE)).MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + bsName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + _, err := l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(svc.Spec.SessionAffinity), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + + // Update the Internal Backend Service with a new ServiceAffinity + _, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + key := meta.RegionalKey(bsName, l.cloud.Region()) + bs, err := composite.GetBackendService(l.cloud, 
key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to get backend service %s - err %v", bsName, err) + } + if bs.SessionAffinity != strings.ToUpper(string(v1.ServiceAffinityNone)) { + t.Errorf("Expected session affinity '%s' in %+v, Got '%s'", strings.ToUpper(string(v1.ServiceAffinityNone)), bs, bs.SessionAffinity) + } + // Change the Connection Draining timeout to a different value manually. Also update session Affinity to trigger + // an update in the Ensure method. The timeout value should not be reconciled. + newTimeout := int64(backends.DefaultConnectionDrainingTimeoutSeconds * 2) + bs.ConnectionDraining.DrainingTimeoutSec = newTimeout + bs.SessionAffinity = strings.ToUpper(string(v1.ServiceAffinityClientIP)) + err = composite.UpdateBackendService(l.cloud, key, bs) + if err != nil { + t.Errorf("Failed to update backend service with new connection draining timeout - err %v", err) + } + bs, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + if bs.SessionAffinity != strings.ToUpper(string(v1.ServiceAffinityNone)) { + t.Errorf("Backend service did not get updated.") + } + if bs.ConnectionDraining.DrainingTimeoutSec != newTimeout { + t.Errorf("Connection Draining timeout got reconciled to %d, expected %d", bs.ConnectionDraining.DrainingTimeoutSec, newTimeout) + } +} + +func TestEnsureInternalLoadBalancer(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding 
nodes %v", err) + } + + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) +} + +func TestEnsureInternalLoadBalancerTypeChange(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Now add the latest annotation and change scheme to external + svc.Annotations[gce.ServiceAnnotationLoadBalancerType] = "" + // This will be invoked by service_controller + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDeleted(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } 
+ status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Delete the loadbalancer + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Delete the loadbalancer + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) + + // Deleting the loadbalancer and resources again should not cause an error. 
+ err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + healthCheckNodePort := int32(10101) + svc.Spec.HealthCheckNodePort = healthCheckNodePort + svc.Spec.Type = v1.ServiceTypeLoadBalancer + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal + + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + key, err := composite.CreateKey(l.cloud, lbName, meta.Global) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + hc, err := composite.GetHealthCheck(l.cloud, key, meta.VersionGA) + if err != nil || hc == nil { + t.Errorf("Failed to get healthcheck, err %v", err) + } + if hc.HttpHealthCheck.Port != int64(healthCheckNodePort) { + t.Errorf("Unexpected port in healthcheck, expected %d, Got %d", healthCheckNodePort, hc.HttpHealthCheck.Port) + } +} + +func TestEnsureInternalLoadBalancerEnableGlobalAccess(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := 
namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Change service to include the global access annotation + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true" + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + betaRuleDescString := fmt.Sprintf(`{"networking.gke.io/service-name":"%s","networking.gke.io/api-version":"beta"}`, types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}.String()) + key, err := composite.CreateKey(l.cloud, lbName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + if fwdRule.Description != betaRuleDescString { + t.Errorf("Expected description %s, Got %s", betaRuleDescString, fwdRule.Description) + } + // remove the annotation + delete(svc.Annotations, gce.ServiceAnnotationILBAllowGlobalAccess) + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure 
loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + gaRuleDescString := fmt.Sprintf(`{"networking.gke.io/service-name":"%s","networking.gke.io/api-version":"ga"}`, types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}.String()) + // Fetch the beta version of the rule to make sure GlobalAccess field is off. Calling the GA API will always show + // this field as false. + fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + + if fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected true value for AllowGlobalAccess") + } + if fwdRule.Description != gaRuleDescString { + t.Errorf("Expected description %s, Got %s", gaRuleDescString, fwdRule.Description) + } + // Delete the service + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDisableGlobalAccess(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true" + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty oadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, 
nodeNames) + key, err := composite.CreateKey(l.cloud, lbName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + + // disable global access - setting the annotation to false or removing annotation will disable it + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "false" + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected 'true' value for AllowGlobalAccess") + } + + // Delete the service + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := gce.NewFakeGCECloud(vals) + fakeGCE.AlphaFeatureGate = gce.NewAlphaFeatureGate([]string{gce.AlphaFeatureILBCustomSubnet}) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err 
!= nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + + frName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error looking up forwarding rule - err %v", err) + } + if fwdRule.Subnetwork != "" { + t.Errorf("Unexpected subnet value %s in ILB ForwardingRule", fwdRule.Subnetwork) + } + + // Change service to include the global access annotation and request static ip + requestedIP := "4.5.6.7" + svc.Annotations[gce.ServiceAnnotationILBSubnet] = "test-subnet" + svc.Spec.LoadBalancerIP = requestedIP + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + if status.Ingress[0].IP != requestedIP { + t.Fatalf("Reserved IP %s not propagated, Got '%s'", requestedIP, status.Ingress[0].IP) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error looking up forwarding rule - err %v", err) + } + if !strings.HasSuffix(fwdRule.Subnetwork, "test-subnet") { + t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule, expected 'test-subnet'", fwdRule.Subnetwork) + } + + // Change to a different subnet + svc.Annotations[gce.ServiceAnnotationILBSubnet] = "another-subnet" + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + if status.Ingress[0].IP != requestedIP { + t.Errorf("Reserved IP %s not 
propagated, Got %s", requestedIP, status.Ingress[0].IP) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error looking up forwarding rule - err %v", err) + } + if !strings.HasSuffix(fwdRule.Subnetwork, "another-subnet") { + t.Errorf("Unexpected subnet value' %s' in ILB ForwardingRule, expected 'another-subnet'", fwdRule.Subnetwork) + } + // remove the annotation - ILB should revert to default subnet. + delete(svc.Annotations, gce.ServiceAnnotationILBSubnet) + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error %v", err) + } + if fwdRule.Subnetwork != "" { + t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule.", fwdRule.Subnetwork) + } + // Delete the loadbalancer + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error deleting loadbalancer - err %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func assertInternalLbResources(t *testing.T, apiService *v1.Service, l *L4, nodeNames []string) { + // Check that Firewalls are created for the LoadBalancer and the HealthCheck + sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService) + resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), resourceName) + fwNames := []string{ + resourceName, + hcFwName, + } + + for _, fwName := range fwNames { + firewall, err := l.cloud.GetFirewall(fwName) + if err != nil { + t.Fatalf("Failed to fetch firewall rule %s - err %v", fwName, 
err) + } + if !utils.EqualStringSets(nodeNames, firewall.TargetTags) { + t.Fatalf("Expected firewall rule target tags '%v', Got '%v'", nodeNames, firewall.TargetTags) + } + if len(firewall.SourceRanges) == 0 { + t.Fatalf("Unexpected empty source range for firewall rule %v", firewall) + } + } + + // Check that HealthCheck is created + healthcheck, err := composite.GetHealthCheck(l.cloud, meta.GlobalKey(hcName), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch healthcheck %s - err %v", hcName, err) + } + if healthcheck.Name != hcName { + t.Errorf("Unexpected name for healthcheck '%s' - expected '%s'", healthcheck.Name, hcName) + } + + // Check that BackendService exists + backendServiceName := resourceName + key := meta.RegionalKey(backendServiceName, l.cloud.Region()) + backendServiceLink := cloud.SelfLink(meta.VersionGA, l.cloud.ProjectID(), "backendServices", key) + + bs, err := composite.GetBackendService(l.cloud, key, meta.VersionGA) + if err != nil { + t.Fatalf("Failed to fetch backend service %s - err %v", backendServiceName, err) + + } + if bs.Protocol != "TCP" { + t.Fatalf("Unexpected protocol '%s' for backend service %v", bs.Protocol, bs) + } + if !utils.EqualStringSets(bs.HealthChecks, []string{healthcheck.SelfLink}) { + t.Errorf("Unexpected healthcheck reference '%v' in backend service, expected '%s'", bs.HealthChecks, + healthcheck.SelfLink) + } + // Check that ForwardingRule is created + fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(resourceName, l.cloud.Region()), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch forwarding rule %s - err %v", resourceName, err) + + } + if fwdRule.Name != resourceName { + t.Errorf("Unexpected name for healthcheck '%s' - expected '%s'", healthcheck.Name, hcName) + } + if fwdRule.IPProtocol != "TCP" { + t.Errorf("Unexpected protocol '%s' for forwarding rule %v", fwdRule.IPProtocol, fwdRule) + } + if fwdRule.BackendService != backendServiceLink { + t.Errorf("Unexpected 
backend service link '%s' for forwarding rule, expected '%s'", fwdRule.BackendService, backendServiceLink) + } + if fwdRule.Subnetwork != l.cloud.NetworkURL() { + t.Errorf("Unexpected subnetwork '%s' in forwarding rule, expected '%s'", + fwdRule.Subnetwork, l.cloud.NetworkURL()) + } +} + +func assertInternalLbResourcesDeleted(t *testing.T, apiService *v1.Service, firewallsDeleted bool, l *L4) { + lbName := l.cloud.GetLoadBalancerName(context.TODO(), "", apiService) + sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService) + resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), resourceName) + + if firewallsDeleted { + // Check that Firewalls are deleted for the LoadBalancer and the HealthCheck + fwNames := []string{ + resourceName, + hcFwName, + } + + for _, fwName := range fwNames { + firewall, err := l.cloud.GetFirewall(fwName) + if err == nil || firewall != nil { + t.Errorf("Expected error when looking up firewall rule after deletion") + } + } + } + + // Check forwarding rule is deleted + fwdRule, err := l.cloud.GetRegionForwardingRule(lbName, l.cloud.Region()) + if err == nil || fwdRule != nil { + t.Errorf("Expected error when looking up forwarding rule after deletion") + } + + // Check that HealthCheck is deleted + healthcheck, err := l.cloud.GetHealthCheck(hcName) + if err == nil || healthcheck != nil { + t.Errorf("Expected error when looking up healthcheck after deletion") + } +} diff --git a/pkg/test/utils.go b/pkg/test/utils.go index 3636a8ac66..27b20cf7f6 100644 --- a/pkg/test/utils.go +++ b/pkg/test/utils.go @@ -1,6 +1,9 @@ package test import ( + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" api_v1 "k8s.io/api/core/v1" "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -10,12 +13,15 
@@ import ( "k8s.io/ingress-gce/pkg/annotations" backendconfig "k8s.io/ingress-gce/pkg/apis/backendconfig/v1beta1" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/legacy-cloud-providers/gce" ) const ( FinalizerAddFlag = flag("enable-finalizer-add") FinalizerRemoveFlag = flag("enable-finalizer-remove") EnableV2FrontendNamerFlag = flag("enable-v2-frontend-namer") + testServiceName = "ilbtest" + testServiceNamespace = "default" ) var ( @@ -57,6 +63,27 @@ func NewService(name types.NamespacedName, spec api_v1.ServiceSpec) *api_v1.Serv } } +func NewL4ILBService(onlyLocal bool, port int) *api_v1.Service { + svc := &api_v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Annotations: map[string]string{gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal)}, + }, + Spec: api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeLoadBalancer, + SessionAffinity: api_v1.ServiceAffinityClientIP, + Ports: []api_v1.ServicePort{ + {Name: "testport", Port: int32(port), Protocol: "TCP"}, + }, + }, + } + if onlyLocal { + svc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal + } + return svc +} + // NewBackendConfig returns a BackendConfig with the given spec. func NewBackendConfig(name types.NamespacedName, spec backendconfig.BackendConfigSpec) *backendconfig.BackendConfig { return &backendconfig.BackendConfig{ @@ -110,3 +137,73 @@ func (s *FlagSaver) Reset(key flag, flagPointer *bool) { *flagPointer = val } } + +func CreateAndInsertNodes(gce *gce.Cloud, nodeNames []string, zoneName string) ([]*api_v1.Node, error) { + nodes := []*api_v1.Node{} + + for _, name := range nodeNames { + // Inserting the same node name twice causes an error - here we check if + // the instance exists already before insertion. 
+ exists, err := GCEInstanceExists(name, gce) + if err != nil { + return nil, err + } + if !exists { + err := gce.InsertInstance( + gce.ProjectID(), + zoneName, + &compute.Instance{ + Name: name, + Tags: &compute.Tags{ + Items: []string{name}, + }, + }, + ) + if err != nil { + return nodes, err + } + } + + nodes = append( + nodes, + &api_v1.Node{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + api_v1.LabelHostname: name, + api_v1.LabelZoneFailureDomain: zoneName, + }, + }, + Status: api_v1.NodeStatus{ + NodeInfo: api_v1.NodeSystemInfo{ + KubeProxyVersion: "v1.7.2", + }, + }, + }, + ) + + } + return nodes, nil +} + +func GCEInstanceExists(name string, g *gce.Cloud) (bool, error) { + zones, err := g.GetAllCurrentZones() + if err != nil { + return false, err + } + for _, zone := range zones.List() { + ctx, cancel := cloud.ContextWithCallTimeout() + defer cancel() + if _, err := g.Compute().Instances().Get(ctx, meta.ZonalKey(name, zone)); err != nil { + if utils.IsNotFoundError(err) { + return false, nil + } else { + return false, err + } + } else { + // instance has been found + return true, nil + } + } + return false, nil +} diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index 583e4af086..9b29480040 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/flags" @@ -780,5 +782,29 @@ func TestIsLegacyL4ILBService(t *testing.T) { if IsLegacyL4ILBService(svc) { t.Errorf("Expected False for Legacy service %s, got True", svc.Name) } +} +func TestGetPortRanges(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + Desc string + Input []int + Result []string + }{ + {Desc: "All Unique", Input: []int{8, 66, 23, 13, 89}, Result: []string{"8", "13", "23", "66", "89"}}, + {Desc: "All Unique Sorted", Input: []int{1, 7, 9, 16, 
26}, Result: []string{"1", "7", "9", "16", "26"}},
+		{Desc: "Ranges", Input: []int{56, 78, 67, 79, 21, 80, 12}, Result: []string{"12", "21", "56", "67", "78-80"}},
+		{Desc: "Ranges Sorted", Input: []int{5, 7, 90, 1002, 1003, 1004, 1005, 2501}, Result: []string{"5", "7", "90", "1002-1005", "2501"}},
+		{Desc: "Ranges Duplicates", Input: []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}, Result: []string{"15", "37", "900", "2002-2004"}},
+		{Desc: "Duplicates", Input: []int{10, 10, 10, 10, 10}, Result: []string{"10"}},
+		{Desc: "Only ranges", Input: []int{18, 19, 20, 21, 22, 55, 56, 77, 78, 79, 3504, 3505, 3506}, Result: []string{"18-22", "55-56", "77-79", "3504-3506"}},
+		{Desc: "Single Range", Input: []int{6000, 6001, 6002, 6003, 6004, 6005}, Result: []string{"6000-6005"}},
+		{Desc: "One value", Input: []int{12}, Result: []string{"12"}},
+		{Desc: "Empty", Input: []int{}, Result: nil},
+	} {
+		result := GetPortRanges(tc.Input)
+		if diff := cmp.Diff(tc.Result, result); diff != "" {
+			t.Errorf("GetPortRanges(%s) mismatch, (-want +got): \n%s", tc.Desc, diff)
+		}
+	}
}