diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e584b2c3c1..7f6e5f0058 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -359,36 +359,14 @@ func (lbc *LoadBalancerController) SyncBackends(state interface{}) error { } ingSvcPorts := syncState.urlMap.AllServicePorts() - // Create instance groups and set named ports. - igs, err := lbc.instancePool.EnsureInstanceGroupsAndPorts(lbc.ctx.ClusterNamer.InstanceGroup(), nodePorts(ingSvcPorts)) - if err != nil { - return err - } - - nodeNames, err := utils.GetReadyNodeNames(listers.NewNodeLister(lbc.nodeLister)) - if err != nil { - return err - } - // Add/remove instances to the instance groups. - if err = lbc.instancePool.Sync(nodeNames); err != nil { - return err - } - - // TODO: Remove this after deprecation - ing := syncState.ing - if utils.IsGCEMultiClusterIngress(syncState.ing) { - // Add instance group names as annotation on the ingress and return. - if ing.Annotations == nil { - ing.Annotations = map[string]string{} - } - if err = setInstanceGroupsAnnotation(ing.Annotations, igs); err != nil { - return err - } - if err = updateAnnotations(lbc.ctx.KubeClient, ing.Name, ing.Namespace, ing.Annotations); err != nil { + // Only sync instance group when IG is used for this ingress + if len(nodePorts(ingSvcPorts)) > 0 { + if err := lbc.syncInstanceGroup(syncState.ing, ingSvcPorts); err != nil { + klog.Errorf("Failed to sync instance group for ingress %v: %v", syncState.ing, err) return err } - // This short-circuit will stop the syncer from moving to next step. - return ingsync.ErrSkipBackendsSync + } else { + klog.V(2).Infof("Skip syncing instance groups for ingress %v/%v", syncState.ing.Namespace, syncState.ing.Name) } // Sync the backends @@ -398,6 +376,10 @@ func (lbc *LoadBalancerController) SyncBackends(state interface{}) error { // Get the zones our groups live in. 
zones, err := lbc.Translator.ListZones() + if err != nil { + klog.Errorf("lbc.Translator.ListZones() = %v", err) + return err + } var groupKeys []backends.GroupKey for _, zone := range zones { groupKeys = append(groupKeys, backends.GroupKey{Zone: zone}) @@ -421,6 +403,43 @@ func (lbc *LoadBalancerController) SyncBackends(state interface{}) error { return nil } +// syncInstanceGroup creates instance groups, syncs instances, sets named ports and updates instance group annotation +func (lbc *LoadBalancerController) syncInstanceGroup(ing *v1beta1.Ingress, ingSvcPorts []utils.ServicePort) error { + nodePorts := nodePorts(ingSvcPorts) + klog.V(2).Infof("Syncing Instance Group for ingress %v/%v with nodeports %v", ing.Namespace, ing.Name, nodePorts) + igs, err := lbc.instancePool.EnsureInstanceGroupsAndPorts(lbc.ctx.ClusterNamer.InstanceGroup(), nodePorts) + if err != nil { + return err + } + + nodeNames, err := utils.GetReadyNodeNames(listers.NewNodeLister(lbc.nodeLister)) + if err != nil { + return err + } + // Add/remove instances to the instance groups. + if err = lbc.instancePool.Sync(nodeNames); err != nil { + return err + } + + // TODO: Remove this after deprecation + if utils.IsGCEMultiClusterIngress(ing) { + klog.Warningf("kubemci is used for ingress %v", ing) + // Add instance group names as annotation on the ingress and return. + if ing.Annotations == nil { + ing.Annotations = map[string]string{} + } + if err = setInstanceGroupsAnnotation(ing.Annotations, igs); err != nil { + return err + } + if err = updateAnnotations(lbc.ctx.KubeClient, ing.Name, ing.Namespace, ing.Annotations); err != nil { + return err + } + // This short-circuit will stop the syncer from moving to next step. + return ingsync.ErrSkipBackendsSync + } + return nil +} + // GCBackends implements Controller. func (lbc *LoadBalancerController) GCBackends(toKeep []*v1beta1.Ingress) error { // Only GCE ingress associated resources are managed by this controller. 
diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 816fb1a12b..caf9d07c83 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -19,6 +19,7 @@ package controller import ( context2 "context" "fmt" + "net/http" "reflect" "sort" "strings" @@ -54,6 +55,7 @@ import ( var ( nodePortCounter = 30000 clusterUID = "aaaaa" + fakeZone = "zone-a" ) // newLoadBalancerController create a loadbalancer controller. @@ -77,7 +79,7 @@ func newLoadBalancerController() *LoadBalancerController { // TODO(rramkumar): Fix this so we don't have to override with our fake lbc.instancePool = instances.NewNodePool(instances.NewFakeInstanceGroups(sets.NewString(), namer), namer) lbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer, "")) - lbc.instancePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}}) + lbc.instancePool.Init(&instances.FakeZoneLister{Zones: []string{fakeZone}}) lbc.hasSynced = func() bool { return true } @@ -176,6 +178,38 @@ func TestIngressSyncError(t *testing.T) { } } +// TestNEGOnlyIngress asserts that `sync` will not create IG when there is only NEG backends for the ingress +func TestNEGOnlyIngress(t *testing.T) { + lbc := newLoadBalancerController() + + svc := test.NewService(types.NamespacedName{Name: "my-service", Namespace: "default"}, api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeNodePort, + Ports: []api_v1.ServicePort{{Port: 80}}, + }) + negAnnotation := annotations.NegAnnotation{Ingress: true} + svc.Annotations = map[string]string{ + annotations.NEGAnnotationKey: negAnnotation.String(), + } + addService(lbc, svc) + someBackend := backend("my-service", intstr.FromInt(80)) + ing := test.NewIngress(types.NamespacedName{Name: "my-ingress", Namespace: "default"}, + v1beta1.IngressSpec{ + Backend: &someBackend, + }) + addIngress(lbc, ing) + + ingStoreKey := getKey(ing, t) + err := lbc.sync(ingStoreKey) + if 
err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + ig, err := lbc.instancePool.Get(lbc.ctx.ClusterNamer.InstanceGroup(), fakeZone) + if err == nil || !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + t.Errorf("lbc.instancePool.Get(%v) = %v, %v, want nil, http.StatusNotFound", fakeZone, ig, err) + } +} + // TestIngressCreateDeleteFinalizer asserts that `sync` will will not return an // error for a good ingress config. It also tests garbage collection for // Ingresses that need to be deleted, and keep the ones that don't, depending