diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index c4f0b46b7f..bdf8c323a4 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -90,7 +90,7 @@ func main() { cloud := app.NewGCEClient() defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient) - clusterManager, err := controller.NewClusterManager(cloud, namer, *defaultBackendServicePort, flags.F.HealthCheckPath) + clusterManager, err := controller.NewClusterManager(cloud, namer, flags.F.HealthCheckPath) if err != nil { glog.Fatalf("Error creating cluster manager: %v", err) } @@ -98,7 +98,7 @@ func main() { enableNEG := cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureNetworkEndpointGroup) stopCh := make(chan struct{}) ctx := context.NewControllerContext(kubeClient, flags.F.WatchNamespace, flags.F.ResyncPeriod, enableNEG) - lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager, enableNEG) + lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager, enableNEG, *defaultBackendServicePort) if err != nil { glog.Fatalf("Error creating load balancer controller: %v", err) } diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go index a744861c5d..477e8da028 100644 --- a/pkg/backends/backends.go +++ b/pkg/backends/backends.go @@ -80,11 +80,7 @@ type Backends struct { healthChecker healthchecks.HealthChecker snapshotter storage.Snapshotter prober ProbeProvider - // ignoredPorts are a set of ports excluded from GC, even - // after the Ingress has been deleted. Note that invoking - // a Delete() on these ports will still delete the backend. - ignoredPorts sets.String - namer *utils.Namer + namer *utils.Namer } // BackendService embeds both the GA and alpha compute BackendService types @@ -195,20 +191,14 @@ func NewBackendPool( healthChecker healthchecks.HealthChecker, nodePool instances.NodePool, namer *utils.Namer, - ignorePorts []int64, resyncWithCloud bool) *Backends { - ignored := []string{} - for _, p := range ignorePorts { - ignored = append(ignored, portKey(p)) - } backendPool := &Backends{ cloud: cloud, negGetter: negGetter, nodePool: nodePool, healthChecker: healthChecker, namer: namer, - ignoredPorts: sets.NewString(ignored...), } if !resyncWithCloud { backendPool.snapshotter = storage.NewInMemoryPool() @@ -319,20 +309,6 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp utils. // Uses the given instance groups if non-nil, else creates instance groups. func (b *Backends) Ensure(svcPorts []utils.ServicePort, igs []*compute.InstanceGroup) error { glog.V(3).Infof("Sync: backends %v", svcPorts) - // Ideally callers should pass the instance groups to prevent recomputing them here. - // Igs can be nil in scenarios where we do not have instance groups such as - // while syncing default backend service. 
- if igs == nil { - ports := []int64{} - for _, p := range svcPorts { - ports = append(ports, p.NodePort) - } - var err error - igs, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, ports) - if err != nil { - return err - } - } // create backends for new ports, perform an edge hop for existing ports for _, port := range svcPorts { if err := b.ensureBackendService(port, igs); err != nil { @@ -608,7 +584,7 @@ func (b *Backends) GC(svcNodePorts []utils.ServicePort) error { return err } nodePort := int64(p) - if knownPorts.Has(portKey(nodePort)) || b.ignoredPorts.Has(portKey(nodePort)) { + if knownPorts.Has(portKey(nodePort)) { continue } glog.V(3).Infof("GCing backend for port %v", p) diff --git a/pkg/backends/backends_test.go b/pkg/backends/backends_test.go index a80cb501af..96b42763ee 100644 --- a/pkg/backends/backends_test.go +++ b/pkg/backends/backends_test.go @@ -64,7 +64,7 @@ func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithClo nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) healthCheckProvider := healthchecks.NewFakeHealthCheckProvider() healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", defaultNamer) - bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, syncWithCloud) + bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, syncWithCloud) probes := map[utils.ServicePort]*api_v1.Probe{{NodePort: 443, Protocol: annotations.ProtocolHTTPS}: existingProbe} bp.Init(NewFakeProbeProvider(probes)) @@ -85,12 +85,16 @@ func TestBackendPoolAdd(t *testing.T) { for _, sp := range testCases { // For simplicity, these tests use 80/443 as nodeports t.Run(fmt.Sprintf("Port:%v Protocol:%v", sp.NodePort, sp.Protocol), func(t *testing.T) { + igs, err := pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + if err != nil { + t.Fatalf("Did not expect error when ensuring IG for ServicePort %+v: %v", sp, err) + } // Add a backend for a port, then re-add the same port and // make sure it corrects a broken link from the backend to // the instance group. - err := pool.Ensure([]utils.ServicePort{sp}, nil) + err = pool.Ensure([]utils.ServicePort{sp}, igs) if err != nil { - t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", sp, err) + t.Fatalf("Did not expect error when ensuring a ServicePort %+v: %v", sp, err) } beName := defaultNamer.Backend(sp.NodePort) @@ -105,6 +109,9 @@ func TestBackendPoolAdd(t *testing.T) { // Check that the instance group has the new port. 
ig, err := fakeIGs.GetInstanceGroup(defaultNamer.InstanceGroup(), defaultZone) + if err != nil { + t.Fatalf("Did not expect error when getting IG's: %v", err) + } var found bool for _, port := range ig.NamedPorts { if port.Port == sp.NodePort { @@ -306,7 +313,8 @@ func TestBackendPoolChaosMonkey(t *testing.T) { pool, _ := newTestJig(f, fakeIGs, false) sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP} - pool.Ensure([]utils.ServicePort{sp}, nil) + igs, _ := pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + pool.Ensure([]utils.ServicePort{sp}, igs) beName := defaultNamer.Backend(sp.NodePort) be, _ := f.GetGlobalBackendService(beName) @@ -319,7 +327,8 @@ func TestBackendPoolChaosMonkey(t *testing.T) { f.calls = []int{} f.UpdateGlobalBackendService(be) - pool.Ensure([]utils.ServicePort{sp}, nil) + igs, _ = pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + pool.Ensure([]utils.ServicePort{sp}, igs) for _, call := range f.calls { if call == utils.Create { t.Fatalf("Unexpected create for existing backend service") @@ -426,7 +435,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) { nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) hcp := healthchecks.NewFakeHealthCheckProvider() healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer) - bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, false) + bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, false) probes := map[utils.ServicePort]*api_v1.Probe{} bp.Init(NewFakeProbeProvider(probes)) @@ -490,8 +499,9 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer) pool, _ := newTestJig(f, fakeIGs, false) - // This will add the instance group k8s-ig to the instance pool - pool.Ensure([]utils.ServicePort{{NodePort: 80}}, nil) + sp := utils.ServicePort{NodePort: 80} + igs, _ := pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + pool.Ensure([]utils.ServicePort{sp}, igs) be, err := f.GetGlobalBackendService(defaultNamer.Backend(80)) if err != nil { @@ -509,7 +519,8 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { } // Make sure repeated adds don't clobber the inserted instance group - pool.Ensure([]utils.ServicePort{{NodePort: 80}}, nil) + igs, _ = pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + pool.Ensure([]utils.ServicePort{sp}, igs) be, err = f.GetGlobalBackendService(defaultNamer.Backend(80)) if err != nil { t.Fatalf("%v", err) @@ -549,7 +560,8 @@ func TestBackendCreateBalancingMode(t *testing.T) { return nil } - pool.Ensure([]utils.ServicePort{sp}, nil) + igs, _ := pool.nodePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) + pool.Ensure([]utils.ServicePort{sp}, igs) be, err := f.GetGlobalBackendService(defaultNamer.Backend(sp.NodePort)) if err != nil { t.Fatalf("%v", err) @@ -604,7 +616,7 @@ func TestLinkBackendServiceToNEG(t *testing.T) { nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) hcp := healthchecks.NewFakeHealthCheckProvider() healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer) - bp := NewBackendPool(f, fakeNEG, healthChecks, nodePool, defaultNamer, []int64{}, false) + bp := NewBackendPool(f, fakeNEG, healthChecks, nodePool, defaultNamer, false) svcPort := 
utils.ServicePort{ NodePort: 30001, diff --git a/pkg/controller/cluster_manager.go b/pkg/controller/cluster_manager.go index 9494069ded..8ff59eef53 100644 --- a/pkg/controller/cluster_manager.go +++ b/pkg/controller/cluster_manager.go @@ -40,12 +40,11 @@ const ( // ClusterManager manages cluster resource pools. type ClusterManager struct { - ClusterNamer *utils.Namer - defaultBackendNodePort utils.ServicePort - instancePool instances.NodePool - backendPool backends.BackendPool - l7Pool loadbalancers.LoadBalancerPool - firewallPool firewalls.SingleFirewallPool + ClusterNamer *utils.Namer + instancePool instances.NodePool + backendPool backends.BackendPool + l7Pool loadbalancers.LoadBalancerPool + firewallPool firewalls.SingleFirewallPool // TODO: Refactor so we simply init a health check pool. // Currently health checks are tied to backends because each backend needs @@ -110,11 +109,6 @@ func (c *ClusterManager) EnsureLoadBalancer(lb *loadbalancers.L7RuntimeInfo, lbS } func (c *ClusterManager) EnsureInstanceGroupsAndPorts(nodeNames []string, servicePorts []utils.ServicePort) ([]*compute.InstanceGroup, error) { - if len(servicePorts) != 0 { - // Add the default backend node port to the list of named ports for instance groups. - servicePorts = append(servicePorts, c.defaultBackendNodePort) - } - // Convert to slice of NodePort int64s. ports := []int64{} for _, p := range uniq(servicePorts) { @@ -187,7 +181,6 @@ func (c *ClusterManager) GC(lbNames []string, nodePorts []utils.ServicePort) err func NewClusterManager( cloud *gce.GCECloud, namer *utils.Namer, - defaultBackendNodePort utils.ServicePort, defaultHealthCheckPath string) (*ClusterManager, error) { // Names are fundamental to the cluster, the uid allocator makes sure names don't collide. @@ -204,12 +197,10 @@ func NewClusterManager( cluster.healthCheckers = []healthchecks.HealthChecker{healthChecker, defaultBackendHealthChecker} // TODO: This needs to change to a consolidated management of the default backend. - cluster.backendPool = backends.NewBackendPool(cloud, cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{defaultBackendNodePort.NodePort}, true) - defaultBackendPool := backends.NewBackendPool(cloud, cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false) - cluster.defaultBackendNodePort = defaultBackendNodePort + cluster.backendPool = backends.NewBackendPool(cloud, cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, true) // L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs. - cluster.l7Pool = loadbalancers.NewLoadBalancerPool(cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer) + cluster.l7Pool = loadbalancers.NewLoadBalancerPool(cloud, cluster.ClusterNamer) cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer, gce.LoadBalancerSrcRanges(), flags.F.NodePortRanges.Values()) return &cluster, nil } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 97e3a5801e..3fff25896a 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -84,6 +84,8 @@ type LoadBalancerController struct { hasSynced func() bool // negEnabled indicates whether NEG feature is enabled. negEnabled bool + // defaultBackendSvcPort is the ServicePort for the system default backend. + defaultBackendSvcPort utils.ServicePort } // NewLoadBalancerController creates a controller for gce loadbalancers. 
@@ -91,22 +93,23 @@ type LoadBalancerController struct { // - clusterManager: A ClusterManager capable of creating all cloud resources // required for L7 loadbalancing. // - resyncPeriod: Watchers relist from the Kubernetes API server this often. -func NewLoadBalancerController(kubeClient kubernetes.Interface, stopCh chan struct{}, ctx *context.ControllerContext, clusterManager *ClusterManager, negEnabled bool) (*LoadBalancerController, error) { +func NewLoadBalancerController(kubeClient kubernetes.Interface, stopCh chan struct{}, ctx *context.ControllerContext, clusterManager *ClusterManager, negEnabled bool, defaultBackendSvcPort utils.ServicePort) (*LoadBalancerController, error) { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(glog.Infof) broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ Interface: kubeClient.Core().Events(""), }) lbc := LoadBalancerController{ - client: kubeClient, - ctx: ctx, - ingLister: StoreToIngressLister{Store: ctx.IngressInformer.GetStore()}, - nodeLister: ctx.NodeInformer.GetIndexer(), - nodes: NewNodeController(ctx, clusterManager), - CloudClusterManager: clusterManager, - stopCh: stopCh, - hasSynced: ctx.HasSynced, - negEnabled: negEnabled, + client: kubeClient, + ctx: ctx, + ingLister: StoreToIngressLister{Store: ctx.IngressInformer.GetStore()}, + nodeLister: ctx.NodeInformer.GetIndexer(), + nodes: NewNodeController(ctx, clusterManager), + CloudClusterManager: clusterManager, + stopCh: stopCh, + hasSynced: ctx.HasSynced, + negEnabled: negEnabled, + defaultBackendSvcPort: defaultBackendSvcPort, } lbc.ingQueue = utils.NewPeriodicTaskQueue("ingresses", lbc.sync) @@ -284,7 +287,7 @@ func (lbc *LoadBalancerController) sync(key string) (retErr error) { } func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ingress, nodeNames []string, gceSvcPorts []utils.ServicePort) error { - urlMap := lbc.Translator.TranslateIngress(ing) + urlMap := lbc.Translator.TranslateIngress(ing, lbc.defaultBackendSvcPort) ingSvcPorts := urlMap.AllServicePorts() igs, err := lbc.CloudClusterManager.EnsureInstanceGroupsAndPorts(nodeNames, ingSvcPorts) if err != nil { @@ -310,6 +313,7 @@ func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ing if err != nil { return err } + lb.UrlMap = urlMap // Create the backend services and higher-level LB resources. if err = lbc.CloudClusterManager.EnsureLoadBalancer(lb, ingSvcPorts, igs); err != nil { @@ -348,7 +352,7 @@ func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ing return fmt.Errorf("unable to get loadbalancer: %v", err) } - if err := l7.UpdateUrlMap(urlMap); err != nil { + if err := l7.UpdateUrlMap(); err != nil { return fmt.Errorf("update URL Map error: %v", err) } @@ -445,7 +449,7 @@ func updateAnnotations(client kubernetes.Interface, name, namespace string, anno func (lbc *LoadBalancerController) ToSvcPorts(ings *extensions.IngressList) []utils.ServicePort { var knownPorts []utils.ServicePort for _, ing := range ings.Items { - urlMap := lbc.Translator.TranslateIngress(&ing) + urlMap := lbc.Translator.TranslateIngress(&ing, lbc.defaultBackendSvcPort) knownPorts = append(knownPorts, urlMap.AllServicePorts()...) 
} return knownPorts diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index c4275277f2..9350ce2d4e 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -55,7 +55,7 @@ func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalanc kubeClient := fake.NewSimpleClientset() stopCh := make(chan struct{}) ctx := context.NewControllerContext(kubeClient, api_v1.NamespaceAll, 1*time.Second, true) - lb, err := NewLoadBalancerController(kubeClient, stopCh, ctx, cm.ClusterManager, true) + lb, err := NewLoadBalancerController(kubeClient, stopCh, ctx, cm.ClusterManager, true, testDefaultBeNodePort) if err != nil { t.Fatalf("%v", err) } @@ -155,6 +155,7 @@ func gceURLMapFromPrimitive(primitiveMap utils.PrimitivePathMap, pm *nodePortMan } urlMap.PutPathRulesForHost(hostname, pathRules) } + urlMap.DefaultBackend = testDefaultBeNodePort return urlMap } @@ -326,14 +327,14 @@ func TestLbFaultyUpdate(t *testing.T) { t.Fatalf("cm.fakeLbs.CheckURLMap(...) = %v, want nil", err) } - // Change the urlmap directly through the lb pool, resync, and + // Change the urlmap directly, resync, and // make sure the controller corrects it. - forcedUpdate := gceURLMapFromPrimitive(utils.PrimitivePathMap{ + l7.RuntimeInfo().UrlMap = gceURLMapFromPrimitive(utils.PrimitivePathMap{ "foo.example.com": { "/foo1": "foo2svc", }, }, pm) - l7.UpdateUrlMap(forcedUpdate) + l7.UpdateUrlMap() if err := lbc.sync(ingStoreKey); err != nil { t.Fatalf("lbc.sync() = err %v", err) diff --git a/pkg/controller/fakes.go b/pkg/controller/fakes.go index 69ee569e31..ebc5ca132c 100644 --- a/pkg/controller/fakes.go +++ b/pkg/controller/fakes.go @@ -64,14 +64,8 @@ func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager backendPool := backends.NewBackendPool( fakeBackends, fakeNEG, - healthChecker, nodePool, namer, []int64{}, false) - l7Pool := loadbalancers.NewLoadBalancerPool( - fakeLbs, - // TODO: change this - backendPool, - testDefaultBeNodePort, - namer, - ) + healthChecker, nodePool, namer, false) + l7Pool := loadbalancers.NewLoadBalancerPool(fakeLbs, namer) frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(false, false), namer, testSrcRanges, testNodePortRanges) cm := &ClusterManager{ ClusterNamer: namer, diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index 4c58079c9c..623e29693c 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -137,7 +137,7 @@ PortLoop: } // TranslateIngress converts an Ingress into our internal UrlMap representation. -func (t *GCE) TranslateIngress(ing *extensions.Ingress) *utils.GCEURLMap { +func (t *GCE) TranslateIngress(ing *extensions.Ingress, glbcDefaultBackend utils.ServicePort) *utils.GCEURLMap { urlMap := utils.NewGCEURLMap() for _, rule := range ing.Spec.Rules { if rule.HTTP == nil { @@ -166,16 +166,21 @@ func (t *GCE) TranslateIngress(ing *extensions.Ingress) *utils.GCEURLMap { } urlMap.PutPathRulesForHost(host, pathRules) } + // Note that the url map is always populated with some default backend, + // whether it be the one specified in the Ingress, or the system default. 
if ing.Spec.Backend != nil { svcPort, err := t.getServiceNodePort(*ing.Spec.Backend, ing.Namespace) if err != nil { msg := fmt.Sprintf("%v", err) msg = fmt.Sprintf("failed to identify user specified default backend, %v, using system default", msg) t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeWarning, "Service", msg) + urlMap.DefaultBackend = glbcDefaultBackend glog.Infof("%v", err) } else { urlMap.DefaultBackend = svcPort } + } else { + urlMap.DefaultBackend = glbcDefaultBackend } return urlMap } diff --git a/pkg/loadbalancers/l7.go b/pkg/loadbalancers/l7.go index 9c2ff335c4..bcaa929263 100644 --- a/pkg/loadbalancers/l7.go +++ b/pkg/loadbalancers/l7.go @@ -84,6 +84,8 @@ type L7RuntimeInfo struct { // The name of a Global Static IP. If specified, the IP associated with // this name is used in the Forwarding Rules for this loadbalancer. StaticIPName string + // UrlMap is our internal representation of a url map. + UrlMap *utils.GCEURLMap } // String returns the load balancer name @@ -117,22 +119,25 @@ type L7 struct { // to create - update - delete and storing the old certs in a list // prevents leakage if there's a failure along the way. oldSSLCerts []*compute.SslCertificate - // glbcDefaultBacked is the backend to use if no path rules match. - // TODO: Expose this to users. - glbcDefaultBackend *compute.BackendService // namer is used to compute names of the various sub-components of an L7. namer *utils.Namer } +// RuntimeInfo returns the L7RuntimeInfo associated with the L7 load balancer. +func (l *L7) RuntimeInfo() *L7RuntimeInfo { + return l.runtimeInfo +} + // UrlMap returns the UrlMap associated with the L7 load balancer. func (l *L7) UrlMap() *compute.UrlMap { return l.um } -func (l *L7) checkUrlMap(backend *compute.BackendService) (err error) { - if l.glbcDefaultBackend == nil { - return fmt.Errorf("cannot create urlmap without default backend") +func (l *L7) checkUrlMap() (err error) { + if l.runtimeInfo.UrlMap == nil { + return fmt.Errorf("cannot create urlmap without internal representation") } + defaultBackendName := l.namer.Backend(l.runtimeInfo.UrlMap.DefaultBackend.NodePort) urlMapName := l.namer.UrlMap(l.Name) urlMap, _ := l.cloud.GetUrlMap(urlMapName) if urlMap != nil { @@ -141,10 +146,10 @@ func (l *L7) checkUrlMap(backend *compute.BackendService) (err error) { return nil } - glog.V(3).Infof("Creating url map %v for backend %v", urlMapName, l.glbcDefaultBackend.Name) + glog.V(3).Infof("Creating url map %v for backend %v", urlMapName, defaultBackendName) newUrlMap := &compute.UrlMap{ Name: urlMapName, - DefaultService: l.glbcDefaultBackend.SelfLink, + DefaultService: utils.BackendServiceRelativeResourcePath(defaultBackendName), } if err = l.cloud.CreateUrlMap(newUrlMap); err != nil { return err @@ -597,7 +602,7 @@ func (l *L7) checkStaticIP() (err error) { } func (l *L7) edgeHop() error { - if err := l.checkUrlMap(l.glbcDefaultBackend); err != nil { + if err := l.checkUrlMap(); err != nil { return err } if l.runtimeInfo.AllowHTTP { @@ -703,22 +708,19 @@ func getNameForPathMatcher(hostRule string) string { // and remove the mapping. When a new path is added to a host (happens // more frequently than service deletion) we just need to lookup the 1 // pathmatcher of the host. 
-func (l *L7) UpdateUrlMap(ingressRules *utils.GCEURLMap) error { +func (l *L7) UpdateUrlMap() error { if l.um == nil { - return fmt.Errorf("cannot add url without an urlmap") + return fmt.Errorf("cannot update GCE urlmap without an existing GCE urlmap.") } - - // All UrlMaps must have a default backend. If the Ingress has a default - // backend, it applies to all host rules as well as to the urlmap itself. - // If it doesn't the urlmap might have a stale default, so replace it with - // glbc's default backend. - defaultBackendName := l.namer.Backend(ingressRules.DefaultBackend.NodePort) - if defaultBackendName != "" { - l.um.DefaultService = utils.BackendServiceRelativeResourcePath(defaultBackendName) - } else { - l.um.DefaultService = l.glbcDefaultBackend.SelfLink + if l.runtimeInfo.UrlMap == nil { + return fmt.Errorf("cannot update GCE urlmap without internal representation") } + urlMap := l.runtimeInfo.UrlMap + + defaultBackendName := l.namer.Backend(urlMap.DefaultBackend.NodePort) + l.um.DefaultService = utils.BackendServiceRelativeResourcePath(defaultBackendName) + // Every update replaces the entire urlmap. // TODO: when we have multiple loadbalancers point to a single gce url map // this needs modification. For now, there is a 1:1 mapping of urlmaps to @@ -727,7 +729,7 @@ func (l *L7) UpdateUrlMap(ingressRules *utils.GCEURLMap) error { l.um.HostRules = []*compute.HostRule{} l.um.PathMatchers = []*compute.PathMatcher{} - for hostname, rules := range ingressRules.AllRules() { + for hostname, rules := range urlMap.AllRules() { // Create a host rule // Create a path matcher // Add all given endpoint:backends to pathRules in path matcher diff --git a/pkg/loadbalancers/l7s.go b/pkg/loadbalancers/l7s.go index c53540cd73..4ec2bab044 100644 --- a/pkg/loadbalancers/l7s.go +++ b/pkg/loadbalancers/l7s.go @@ -22,10 +22,8 @@ import ( "github.com/golang/glog" - compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/ingress-gce/pkg/backends" "k8s.io/ingress-gce/pkg/storage" "k8s.io/ingress-gce/pkg/utils" ) @@ -34,17 +32,7 @@ import ( type L7s struct { cloud LoadBalancers snapshotter storage.Snapshotter - // TODO: Remove this field and always ask the BackendPool using the NodePort. - glbcDefaultBackend *compute.BackendService - defaultBackendPool backends.BackendPool - defaultBackendNodePort utils.ServicePort - namer *utils.Namer -} - -// GLBCDefaultBackend returns the BackendService used when no path -// rules match. -func (l *L7s) GLBCDefaultBackend() *compute.BackendService { - return l.glbcDefaultBackend + namer *utils.Namer } // Namer returns the namer associated with the L7s. @@ -55,27 +43,16 @@ func (l *L7s) Namer() *utils.Namer { // NewLoadBalancerPool returns a new loadbalancer pool. // - cloud: implements LoadBalancers. Used to sync L7 loadbalancer resources // with the cloud. -// - defaultBackendPool: a BackendPool used to manage the GCE BackendService for -// the default backend. -// - defaultBackendNodePort: The nodePort of the Kubernetes service representing -// the default backend. 
-func NewLoadBalancerPool( - cloud LoadBalancers, - defaultBackendPool backends.BackendPool, - defaultBackendNodePort utils.ServicePort, namer *utils.Namer) LoadBalancerPool { - return &L7s{cloud, storage.NewInMemoryPool(), nil, defaultBackendPool, defaultBackendNodePort, namer} +func NewLoadBalancerPool(cloud LoadBalancers, namer *utils.Namer) LoadBalancerPool { + return &L7s{cloud, storage.NewInMemoryPool(), namer} } func (l *L7s) create(ri *L7RuntimeInfo) (*L7, error) { - if l.glbcDefaultBackend == nil { - glog.Warningf("Creating l7 without a default backend") - } return &L7{ - runtimeInfo: ri, - Name: l.namer.LoadBalancer(ri.Name), - cloud: l.cloud, - glbcDefaultBackend: l.glbcDefaultBackend, - namer: l.namer, + runtimeInfo: ri, + Name: l.namer.LoadBalancer(ri.Name), + cloud: l.cloud, + namer: l.namer, }, nil } @@ -142,19 +119,6 @@ func (l *L7s) Delete(name string) error { func (l *L7s) Sync(lbs []*L7RuntimeInfo) error { glog.V(3).Infof("Syncing loadbalancers %v", lbs) - if len(lbs) != 0 { - // Lazily create a default backend so we don't tax users who don't care - // about Ingress by consuming 1 of their 3 GCE BackendServices. This - // BackendService is GC'd when there are no more Ingresses. - if err := l.defaultBackendPool.Ensure([]utils.ServicePort{l.defaultBackendNodePort}, nil); err != nil { - return err - } - defaultBackend, err := l.defaultBackendPool.Get(l.defaultBackendNodePort.NodePort, false) - if err != nil { - return err - } - l.glbcDefaultBackend = defaultBackend.Ga - } // create new loadbalancers, validate existing for _, ri := range lbs { if err := l.Add(ri); err != nil { @@ -184,15 +148,6 @@ func (l *L7s) GC(names []string) error { return err } } - // Tear down the default backend when there are no more loadbalancers. - // This needs to happen after we've deleted all url-maps that might be - // using it. 
- if len(names) == 0 { - if err := l.defaultBackendPool.Delete(l.defaultBackendNodePort.NodePort); err != nil { - return err - } - l.glbcDefaultBackend = nil - } return nil } @@ -201,9 +156,6 @@ func (l *L7s) Shutdown() error { if err := l.GC([]string{}); err != nil { return err } - if err := l.defaultBackendPool.Shutdown(); err != nil { - return err - } glog.V(2).Infof("Loadbalancer pool shutdown.") return nil } diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index 6569733c97..4bfab5985e 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -27,10 +27,7 @@ import ( "strings" "k8s.io/ingress-gce/pkg/annotations" - "k8s.io/ingress-gce/pkg/backends" - "k8s.io/ingress-gce/pkg/healthchecks" "k8s.io/ingress-gce/pkg/instances" - "k8s.io/ingress-gce/pkg/neg" "k8s.io/ingress-gce/pkg/utils" ) @@ -39,36 +36,34 @@ const ( ) var ( - testDefaultBeNodePort = utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP} + testDefaultBeNodePort = utils.ServicePort{NodePort: 30000, Protocol: annotations.ProtocolHTTP} ) func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T, namer *utils.Namer) LoadBalancerPool { - fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil }, false) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), namer) - fakeHCP := healthchecks.NewFakeHealthCheckProvider() - fakeNEG := neg.NewFakeNetworkEndpointGroupCloud("test-subnet", "test-network") - healthChecker := healthchecks.NewHealthChecker(fakeHCP, "/", namer) nodePool := instances.NewNodePool(fakeIGs, namer) nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) - backendPool := backends.NewBackendPool( - fakeBackends, fakeNEG, healthChecker, nodePool, namer, []int64{}, false) - return NewLoadBalancerPool(f, backendPool, testDefaultBeNodePort, namer) + return NewLoadBalancerPool(f, namer) } func TestCreateHTTPLoadBalancer(t *testing.T) { // This should NOT create the forwarding rule and target proxy // associated with the HTTPS branch of this loadbalancer. + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ Name: namer.LoadBalancer("test"), AllowHTTP: true, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - // Run Sync - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil || l7 == nil { @@ -78,9 +73,6 @@ func TestCreateHTTPLoadBalancer(t *testing.T) { if err != nil { t.Fatalf("f.GetUrlMap(%q) = _, %v; want nil", f.UMName(), err) } - if um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("got um.DefaultService = %v; want %v", um.DefaultService, pool.(*L7s).GLBCDefaultBackend().SelfLink) - } tp, err := f.GetTargetHttpProxy(f.TPName(false)) if err != nil { t.Fatalf("f.GetTargetHttpProxy(%q) = _, %v; want nil", f.TPName(false), err) @@ -100,24 +92,27 @@ func TestCreateHTTPLoadBalancer(t *testing.T) { func TestCreateHTTPSLoadBalancer(t *testing.T) { // This should NOT create the forwarding rule and target proxy // associated with the HTTP branch of this loadbalancer. 
+ gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ Name: namer.LoadBalancer("test"), AllowHTTP: false, TLS: []*TLSCerts{createCert("key", "cert", "name")}, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } + l7, err := pool.Get(lbInfo.Name) if err != nil || l7 == nil { t.Fatalf("Expected l7 not created") } um, err := f.GetUrlMap(f.UMName()) - if err != nil || - um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("%v", err) - } tps, err := f.GetTargetHttpsProxy(f.TPName(true)) if err != nil || tps.UrlMap != um.SelfLink { t.Fatalf("%v", err) @@ -131,6 +126,8 @@ func TestCreateHTTPSLoadBalancer(t *testing.T) { // Tests that a certificate is created from the provided Key/Cert combo // and the proxy is updated to another cert when the provided cert changes func TestCertUpdate(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbName := namer.LoadBalancer("test") certName1 := namer.SSLCertName(lbName, GetCertHash("cert")) @@ -140,13 +137,16 @@ func TestCertUpdate(t *testing.T) { Name: lbName, AllowHTTP: false, TLS: []*TLSCerts{createCert("key", "cert", "name")}, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) // Sync first cert - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } // Verify certs t.Logf("lbName=%q, name=%q", lbName, certName1) @@ -155,13 +155,17 @@ func TestCertUpdate(t *testing.T) { // Sync with different cert lbInfo.TLS = []*TLSCerts{createCert("key2", "cert2", "name")} - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } expectCerts = map[string]string{certName2: lbInfo.TLS[0].Cert} verifyCertAndProxyLink(expectCerts, expectCerts, f, t) } // Test that multiple secrets with the same certificate value don't cause a sync error. 
func TestMultipleSecretsWithSameCert(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbName := namer.LoadBalancer("test") @@ -172,6 +176,7 @@ func TestMultipleSecretsWithSameCert(t *testing.T) { createCert("key", "cert", "secret-a"), createCert("key", "cert", "secret-b"), }, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) @@ -187,6 +192,8 @@ func TestMultipleSecretsWithSameCert(t *testing.T) { // Tests that controller can overwrite existing, unused certificates func TestCertCreationWithCollision(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbName := namer.LoadBalancer("test") certName1 := namer.SSLCertName(lbName, GetCertHash("cert")) @@ -196,6 +203,7 @@ func TestCertCreationWithCollision(t *testing.T) { Name: lbName, AllowHTTP: false, TLS: []*TLSCerts{createCert("key", "cert", "name")}, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) @@ -210,7 +218,10 @@ func TestCertCreationWithCollision(t *testing.T) { }) // Sync first cert - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } + expectCerts := map[string]string{certName1: lbInfo.TLS[0].Cert} verifyCertAndProxyLink(expectCerts, expectCerts, f, t) @@ -224,13 +235,17 @@ func TestCertCreationWithCollision(t *testing.T) { // Sync with different cert lbInfo.TLS = []*TLSCerts{createCert("key2", "cert2", "name")} - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } expectCerts = map[string]string{certName2: "xyz"} // xyz instead of cert2 because the name collided and cert did not get updated. verifyCertAndProxyLink(expectCerts, expectCerts, f, t) } func TestMultipleCertRetentionAfterRestart(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") cert1 := createCert("key", "cert", "name") cert2 := createCert("key2", "cert2", "name2") @@ -246,6 +261,7 @@ func TestMultipleCertRetentionAfterRestart(t *testing.T) { Name: lbName, AllowHTTP: false, TLS: []*TLSCerts{cert1}, + UrlMap: gceUrlMap, } expectCerts[certName1] = cert1.Cert @@ -278,11 +294,14 @@ func TestMultipleCertRetentionAfterRestart(t *testing.T) { //TestUpgradeToNewCertNames verifies that certs uploaded using the old naming convention // are picked up and deleted when upgrading to the new scheme. 
func TestUpgradeToNewCertNames(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbName := namer.LoadBalancer("test") lbInfo := &L7RuntimeInfo{ Name: lbName, AllowHTTP: false, + UrlMap: gceUrlMap, } oldCertName := "k8s-ssl-" + lbInfo.Name tlsCert := createCert("key", "cert", "name") @@ -312,7 +331,9 @@ func TestUpgradeToNewCertNames(t *testing.T) { t.Fatalf("Expected cert with name %s, Got %s", oldCertName, proxyCerts[0].Name) } // Sync should replace this oldCert with one following the new naming scheme - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } // We expect to see only the new cert linked to the proxy and available in the load balancer. expectCerts := map[string]string{newCertName: tlsCert.Cert} verifyCertAndProxyLink(expectCerts, expectCerts, f, t) @@ -320,6 +341,8 @@ func TestUpgradeToNewCertNames(t *testing.T) { // Tests uploading 10 certs which is the global limit today. Ensures that creation of the 11th cert fails. func TestMaxCertsUpload(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) var tlsCerts []*TLSCerts expectCerts := make(map[string]string) namer := utils.NewNamer("uid1", "fw1") @@ -335,10 +358,15 @@ func TestMaxCertsUpload(t *testing.T) { Name: lbName, AllowHTTP: false, TLS: tlsCerts, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } + verifyCertAndProxyLink(expectCerts, expectCerts, f, t) failCert := createCert("key100", "cert100", "name100") lbInfo.TLS = append(lbInfo.TLS, failCert) @@ -350,6 +378,8 @@ func TestMaxCertsUpload(t *testing.T) { // specified, to the targetproxy. The targetproxy will present the first occurring cert for a given hostname to the client. // This test verifies this behavior. 
func TestIdenticalHostnameCerts(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) var tlsCerts []*TLSCerts expectCerts := make(map[string]string) namer := utils.NewNamer("uid1", "fw1") @@ -367,12 +397,15 @@ func TestIdenticalHostnameCerts(t *testing.T) { Name: lbName, AllowHTTP: false, TLS: tlsCerts, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) // Sync multiple times to make sure ordering is preserved for i := 0; i < 10; i++ { - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } verifyCertAndProxyLink(expectCerts, expectCerts, f, t) // Fetch the target proxy certs and go through in order verifyProxyCertsInOrder(" foo.com", f, t) @@ -381,10 +414,13 @@ func TestIdenticalHostnameCerts(t *testing.T) { } func TestIdenticalHostnameCertsPreShared(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ Name: namer.LoadBalancer("test"), AllowHTTP: false, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) @@ -410,7 +446,9 @@ func TestIdenticalHostnameCertsPreShared(t *testing.T) { lbInfo.TLSName = preSharedCert1.Name + "," + preSharedCert2.Name + "," + preSharedCert3.Name // Sync multiple times to make sure ordering is preserved for i := 0; i < 10; i++ { - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } verifyCertAndProxyLink(expectCerts, expectCerts, f, t) // Fetch the target proxy certs and go through in order verifyProxyCertsInOrder(" foo.com", f, t) @@ -421,6 +459,8 @@ func TestIdenticalHostnameCertsPreShared(t *testing.T) { // TestPreSharedToSecretBasedCertUpdate updates from pre-shared cert // to secret based cert and verifies the pre-shared cert is retained. func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbName := namer.LoadBalancer("test") certName1 := namer.SSLCertName(lbName, GetCertHash("cert")) @@ -429,6 +469,7 @@ func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { lbInfo := &L7RuntimeInfo{ Name: lbName, AllowHTTP: false, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) @@ -450,7 +491,9 @@ func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { lbInfo.TLSName = preSharedCert1.Name + "," + preSharedCert2.Name // Sync pre-shared certs. - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } expectCerts := map[string]string{preSharedCert1.Name: preSharedCert1.Certificate, preSharedCert2.Name: preSharedCert2.Certificate} verifyCertAndProxyLink(expectCerts, expectCerts, f, t) @@ -458,7 +501,9 @@ func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { // Updates from pre-shared cert to secret based cert. 
lbInfo.TLS = []*TLSCerts{createCert("key", "cert", "name")} lbInfo.TLSName = "" - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } expectCerts[certName1] = lbInfo.TLS[0].Cert // fakeLoadBalancer contains the preshared certs as well, but proxy will use only certName1 expectCertsProxy := map[string]string{certName1: lbInfo.TLS[0].Cert} @@ -557,13 +602,15 @@ func verifyCertAndProxyLink(expectCerts map[string]string, expectCertsProxy map[ func TestCreateHTTPSLoadBalancerAnnotationCert(t *testing.T) { // This should NOT create the forwarding rule and target proxy // associated with the HTTP branch of this loadbalancer. + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) tlsName := "external-cert-name" - namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ Name: namer.LoadBalancer("test"), AllowHTTP: false, TLSName: tlsName, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) @@ -571,16 +618,14 @@ func TestCreateHTTPSLoadBalancerAnnotationCert(t *testing.T) { Name: tlsName, }) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil || l7 == nil { t.Fatalf("Expected l7 not created") } um, err := f.GetUrlMap(f.UMName()) - if err != nil || - um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("%v", err) - } tps, err := f.GetTargetHttpsProxy(f.TPName(true)) if err != nil || tps.UrlMap != um.SelfLink { t.Fatalf("%v", err) @@ -595,25 +640,26 @@ func TestCreateBothLoadBalancers(t *testing.T) { // This should create 2 forwarding rules and target proxies // but they should use the same urlmap, and have the same // static ip. 
+ gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ Name: namer.LoadBalancer("test"), AllowHTTP: true, TLS: []*TLSCerts{{Key: "key", Cert: "cert"}}, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil || l7 == nil { t.Fatalf("Expected l7 not created") } um, err := f.GetUrlMap(f.UMName()) - if err != nil || - um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("%v", err) - } tps, err := f.GetTargetHttpsProxy(f.TPName(true)) if err != nil || tps.UrlMap != um.SelfLink { t.Fatalf("%v", err) @@ -650,17 +696,20 @@ func TestUpdateUrlMap(t *testing.T) { um2.DefaultBackend = utils.ServicePort{NodePort: 30004} namer := utils.NewNamer("uid1", "fw1") - lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true} + lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true, UrlMap: um1} f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil { t.Fatalf("%v", err) } for _, ir := range []*utils.GCEURLMap{um1, um2} { - if err := l7.UpdateUrlMap(ir); err != nil { + lbInfo.UrlMap = ir + if err := l7.UpdateUrlMap(); err != nil { t.Fatalf("%v", err) } } @@ -688,18 +737,22 @@ func TestUpdateUrlMapNoChanges(t *testing.T) { um2.DefaultBackend = utils.ServicePort{NodePort: 30003} namer := utils.NewNamer("uid1", "fw1") - lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true} + lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true, UrlMap: um1} f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil { t.Fatalf("%v", err) } for _, ir := range []*utils.GCEURLMap{um1, um2} { - if err := l7.UpdateUrlMap(ir); err != nil { + lbInfo.UrlMap = ir + if err := l7.UpdateUrlMap(); err != nil { t.Fatalf("%v", err) } + } for _, call := range f.calls { if call == "UpdateUrlMap" { @@ -728,23 +781,24 @@ func TestNameParsing(t *testing.T) { } func TestClusterNameChange(t *testing.T) { + gceUrlMap := utils.NewGCEURLMap() + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{ - Name: namer.LoadBalancer("test"), - TLS: []*TLSCerts{{Key: "key", Cert: "cert"}}, + Name: namer.LoadBalancer("test"), + TLS: []*TLSCerts{{Key: "key", Cert: "cert"}}, + UrlMap: gceUrlMap, } f := NewFakeLoadBalancers(lbInfo.Name, namer) pool := newFakeLoadBalancerPool(f, t, namer) - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err := pool.Get(lbInfo.Name) if err != nil || l7 == nil { t.Fatalf("Expected l7 not created") } um, err := 
f.GetUrlMap(f.UMName()) - if err != nil || - um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("%v", err) - } tps, err := f.GetTargetHttpsProxy(f.TPName(true)) if err != nil || tps.UrlMap != um.SelfLink { t.Fatalf("%v", err) @@ -759,7 +813,9 @@ func TestClusterNameChange(t *testing.T) { f.name = fmt.Sprintf("%v--%v", lbInfo.Name, newName) // Now the components should get renamed with the next suffix. - pool.Sync([]*L7RuntimeInfo{lbInfo}) + if err := pool.Sync([]*L7RuntimeInfo{lbInfo}); err != nil { + t.Fatalf("pool.Sync() = err %v", err) + } l7, err = pool.Get(lbInfo.Name) if err != nil || namer.ParseName(l7.Name).ClusterName != newName { t.Fatalf("Expected L7 name to change.") @@ -768,11 +824,6 @@ func TestClusterNameChange(t *testing.T) { if err != nil || namer.ParseName(um.Name).ClusterName != newName { t.Fatalf("Expected urlmap name to change.") } - if err != nil || - um.DefaultService != pool.(*L7s).GLBCDefaultBackend().SelfLink { - t.Fatalf("%v", err) - } - tps, err = f.GetTargetHttpsProxy(f.TPName(true)) if err != nil || tps.UrlMap != um.SelfLink { t.Fatalf("%v", err)
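
A minimal sketch (not part of the patch) of how callers wire things up after this change: the system default backend ServicePort now travels from main.go into NewLoadBalancerController, the translator stamps it on GCEURLMap.DefaultBackend, and the L7 pool reads everything it needs from L7RuntimeInfo.UrlMap instead of a stored glbcDefaultBackend. All identifiers below are taken from this diff; the hostname, path, and node port values are illustrative assumptions.

// Sketch only: identifiers come from this patch; values are illustrative.
package main

import (
	"k8s.io/ingress-gce/pkg/annotations"
	"k8s.io/ingress-gce/pkg/loadbalancers"
	"k8s.io/ingress-gce/pkg/utils"
)

// buildRuntimeInfo mirrors what the controller now does: the translator
// always populates DefaultBackend (either the Ingress' spec.backend or the
// system default passed down from main.go), and the resulting GCEURLMap is
// attached to L7RuntimeInfo so checkUrlMap() and UpdateUrlMap() can derive
// the GCE default service via namer.Backend(DefaultBackend.NodePort).
func buildRuntimeInfo(namer *utils.Namer, defaultBackend utils.ServicePort) *loadbalancers.L7RuntimeInfo {
	urlMap := utils.NewGCEURLMap()
	urlMap.DefaultBackend = defaultBackend
	urlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{
		{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}},
	})
	return &loadbalancers.L7RuntimeInfo{
		Name:      namer.LoadBalancer("test"),
		AllowHTTP: true,
		UrlMap:    urlMap, // consumed by the L7 during edgeHop/UpdateUrlMap
	}
}

func main() {
	namer := utils.NewNamer("uid1", "fw1")
	// Stand-in for the ServicePort resolved by app.DefaultBackendServicePort
	// and passed to NewLoadBalancerController in cmd/glbc/main.go.
	defaultBackend := utils.ServicePort{NodePort: 30000, Protocol: annotations.ProtocolHTTP}
	_ = buildRuntimeInfo(namer, defaultBackend)
}

This is also the shape the updated loadbalancers tests rely on: because the pool no longer lazily ensures a default BackendService during Sync, every L7RuntimeInfo handed to pool.Sync must carry a UrlMap with DefaultBackend set, otherwise checkUrlMap returns the "cannot create urlmap without internal representation" error.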