diff --git a/cmd/glbc/app/init.go b/cmd/glbc/app/init.go
index f565b9337e..c27bd12b6e 100644
--- a/cmd/glbc/app/init.go
+++ b/cmd/glbc/app/init.go
@@ -30,11 +30,11 @@ import (
     "k8s.io/client-go/kubernetes"

     "k8s.io/ingress-gce/pkg/annotations"
-    "k8s.io/ingress-gce/pkg/backends"
     "k8s.io/ingress-gce/pkg/flags"
+    "k8s.io/ingress-gce/pkg/utils"
 )

-func DefaultBackendServicePort(kubeClient kubernetes.Interface) *backends.ServicePort {
+func DefaultBackendServicePort(kubeClient kubernetes.Interface) *utils.ServicePort {
     // TODO: make this not fatal
     if flags.F.DefaultSvc == "" {
         glog.Fatalf("Please specify --default-backend")
@@ -52,7 +52,7 @@ func DefaultBackendServicePort(kubeClient kubernetes.Interface) *backends.Servic
             flags.F.DefaultSvc, err)
     }

-    return &backends.ServicePort{
+    return &utils.ServicePort{
         NodePort: int64(nodePort),
         Protocol: annotations.ProtocolHTTP, // The default backend is HTTP.
         SvcName:  types.NamespacedName{Namespace: parts[0], Name: parts[1]},
diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go
index 2c48f6e5c4..a744861c5d 100644
--- a/pkg/backends/backends.go
+++ b/pkg/backends/backends.go
@@ -29,11 +29,8 @@ import (
     compute "google.golang.org/api/compute/v1"
     v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"

-    "k8s.io/ingress-gce/pkg/annotations"
     "k8s.io/ingress-gce/pkg/healthchecks"
     "k8s.io/ingress-gce/pkg/instances"
     "k8s.io/ingress-gce/pkg/storage"
@@ -128,7 +125,7 @@ func (be *BackendService) GetHealthCheckLink() string {
 }

 // ensureProtocol updates the BackendService Protocol with the expected value
-func (be *BackendService) ensureProtocol(p ServicePort) (needsUpdate bool) {
+func (be *BackendService) ensureProtocol(p utils.ServicePort) (needsUpdate bool) {
     existingProtocol := be.GetProtocol()
     if existingProtocol == string(p.Protocol) {
         return false
@@ -185,30 +182,6 @@ func portKey(port int64) string {
     return fmt.Sprintf("%d", port)
 }

-// ServicePort for tupling port and protocol
-type ServicePort struct {
-    SvcName       types.NamespacedName
-    SvcPort       intstr.IntOrString
-    NodePort      int64
-    Protocol      annotations.AppProtocol
-    SvcTargetPort string
-    NEGEnabled    bool
-}
-
-// Description returns a string describing the ServicePort.
-func (sp ServicePort) Description() string {
-    if sp.SvcName.String() == "" || sp.SvcPort.String() == "" {
-        return ""
-    }
-    return fmt.Sprintf(`{"kubernetes.io/service-name":"%s","kubernetes.io/service-port":"%s"}`, sp.SvcName.String(), sp.SvcPort.String())
-}
-
-// isAlpha returns true if the ServicePort is using ProtocolHTTP2 - which means
-// we need to use the Alpha API.
-func (sp ServicePort) isAlpha() bool {
-    return sp.Protocol == annotations.ProtocolHTTP2
-}
-
 // NewBackendPool returns a new backend pool.
 // - cloud: implements BackendServices and syncs backends with a cloud provider
 // - healthChecker: is capable of producing health checks for backends.
@@ -285,7 +258,7 @@ func (b *Backends) Get(port int64, isAlpha bool) (*BackendService, error) {
     return &BackendService{Ga: beGa, Alpha: beAlpha}, nil
 }

-func (b *Backends) ensureHealthCheck(sp ServicePort) (string, error) {
+func (b *Backends) ensureHealthCheck(sp utils.ServicePort) (string, error) {
     hc := b.healthChecker.New(sp.NodePort, sp.Protocol, sp.NEGEnabled)
     existingLegacyHC, err := b.healthChecker.GetLegacy(sp.NodePort)
     if err != nil && !utils.IsNotFoundError(err) {
@@ -309,8 +282,8 @@ func (b *Backends) ensureHealthCheck(sp ServicePort) (string, error) {
     return b.healthChecker.Sync(hc)
 }

-func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp ServicePort, name string) (*BackendService, error) {
-    isAlpha := sp.isAlpha()
+func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp utils.ServicePort, name string) (*BackendService, error) {
+    isAlpha := sp.IsAlpha()
     if isAlpha {
         bsAlpha := &computealpha.BackendService{
             Name: name,
@@ -344,7 +317,7 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic

 // Ensure will update or create Backends for the given ports.
 // Uses the given instance groups if non-nil, else creates instance groups.
-func (b *Backends) Ensure(svcPorts []ServicePort, igs []*compute.InstanceGroup) error {
+func (b *Backends) Ensure(svcPorts []utils.ServicePort, igs []*compute.InstanceGroup) error {
     glog.V(3).Infof("Sync: backends %v", svcPorts)
     // Ideally callers should pass the instance groups to prevent recomputing them here.
     // Igs can be nil in scenarios where we do not have instance groups such as
@@ -372,7 +345,7 @@ func (b *Backends) Ensure(svcPorts []ServicePort, igs []*compute.InstanceGroup)
 // ensureBackendService will update or create a Backend for the given port.
 // It assumes that the instance groups have been created and required named port has been added.
 // If not, then Ensure should be called instead.
-func (b *Backends) ensureBackendService(p ServicePort, igs []*compute.InstanceGroup) error {
+func (b *Backends) ensureBackendService(p utils.ServicePort, igs []*compute.InstanceGroup) error {
     // We must track the ports even if creating the backends failed, because
     // we might've created health-check for them.
     be := &BackendService{}
@@ -388,7 +361,7 @@ func (b *Backends) ensureBackendService(p ServicePort, igs []*compute.InstanceGr

     // Verify existence of a backend service for the proper port, but do not specify any backends/igs
     beName := b.namer.Backend(p.NodePort)
-    be, _ = b.Get(p.NodePort, p.isAlpha())
+    be, _ = b.Get(p.NodePort, p.IsAlpha())
     if be == nil {
         namedPort := &compute.NamedPort{
             Name: b.namer.NamedPort(p.NodePort),
@@ -623,7 +596,7 @@ func getInstanceGroupsToAdd(be *BackendService, igs []*compute.InstanceGroup) []
 }

 // GC garbage collects services corresponding to ports in the given list.
-func (b *Backends) GC(svcNodePorts []ServicePort) error {
+func (b *Backends) GC(svcNodePorts []utils.ServicePort) error {
     knownPorts := sets.NewString()
     for _, p := range svcNodePorts {
         knownPorts.Insert(portKey(p.NodePort))
@@ -649,7 +622,7 @@ func (b *Backends) GC(svcNodePorts []ServicePort) error {

 // Shutdown deletes all backends and the default backend.
 // This will fail if one of the backends is being used by another resource.
 func (b *Backends) Shutdown() error {
-    if err := b.GC([]ServicePort{}); err != nil {
+    if err := b.GC([]utils.ServicePort{}); err != nil {
         return err
     }
     return nil
@@ -672,7 +645,7 @@ func (b *Backends) Status(name string) string {
     return hs.HealthStatus[0].HealthState
 }

-func (b *Backends) Link(port ServicePort, zones []string) error {
+func (b *Backends) Link(port utils.ServicePort, zones []string) error {
     if !port.NEGEnabled {
         return nil
     }
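Reviewer note: the relocated type keeps its behavior; only the exported IsAlpha spelling (formerly isAlpha) is new. A minimal sketch of how backends code consumes it after this change (illustrative only, not part of the patch):

    // sp.IsAlpha() gates the alpha compute API, exactly as create() does above.
    sp := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP2}
    be, err := pool.Get(sp.NodePort, sp.IsAlpha()) // pool is a backends.BackendPool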
diff --git a/pkg/backends/backends_test.go b/pkg/backends/backends_test.go
index 9ac9c015ef..a80cb501af 100644
--- a/pkg/backends/backends_test.go
+++ b/pkg/backends/backends_test.go
@@ -65,7 +65,7 @@ func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithClo
     healthCheckProvider := healthchecks.NewFakeHealthCheckProvider()
     healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", defaultNamer)
     bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, syncWithCloud)
-    probes := map[ServicePort]*api_v1.Probe{{NodePort: 443, Protocol: annotations.ProtocolHTTPS}: existingProbe}
+    probes := map[utils.ServicePort]*api_v1.Probe{{NodePort: 443, Protocol: annotations.ProtocolHTTPS}: existingProbe}
     bp.Init(NewFakeProbeProvider(probes))

     return bp, healthCheckProvider
@@ -76,7 +76,7 @@ func TestBackendPoolAdd(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    testCases := []ServicePort{
+    testCases := []utils.ServicePort{
         {NodePort: 80, Protocol: annotations.ProtocolHTTP},
         {NodePort: 443, Protocol: annotations.ProtocolHTTPS},
         {NodePort: 3000, Protocol: annotations.ProtocolHTTP2},
@@ -88,7 +88,7 @@ func TestBackendPoolAdd(t *testing.T) {
             // Add a backend for a port, then re-add the same port and
             // make sure it corrects a broken link from the backend to
             // the instance group.
-            err := pool.Ensure([]ServicePort{sp}, nil)
+            err := pool.Ensure([]utils.ServicePort{sp}, nil)
             if err != nil {
                 t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", sp, err)
             }
@@ -138,9 +138,9 @@ func TestBackendPoolAddWithoutWhitelist(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    sp := ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP2}
+    sp := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP2}

-    err := pool.Ensure([]ServicePort{sp}, nil)
+    err := pool.Ensure([]utils.ServicePort{sp}, nil)
     if !utils.IsHTTPErrorCode(err, http.StatusForbidden) {
         t.Fatalf("Expected creating %+v through alpha API to be forbidden, got %v", sp, err)
     }
@@ -151,7 +151,7 @@ func TestHealthCheckMigration(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, hcp := newTestJig(f, fakeIGs, false)

-    p := ServicePort{NodePort: 7000, Protocol: annotations.ProtocolHTTP}
+    p := utils.ServicePort{NodePort: 7000, Protocol: annotations.ProtocolHTTP}

     // Create a legacy health check and insert it into the HC provider.
     legacyHC := &compute.HttpHealthCheck{
@@ -165,10 +165,10 @@ func TestHealthCheckMigration(t *testing.T) {
     hcp.CreateHttpHealthCheck(legacyHC)

     // Add the service port to the backend pool
-    pool.Ensure([]ServicePort{p}, nil)
+    pool.Ensure([]utils.ServicePort{p}, nil)

     // Assert the proper health check was created
-    hc, _ := pool.healthChecker.Get(p.NodePort, p.isAlpha())
+    hc, _ := pool.healthChecker.Get(p.NodePort, p.IsAlpha())
     if hc == nil || hc.Protocol() != p.Protocol {
         t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
     }
@@ -188,8 +188,8 @@ func TestBackendPoolUpdateHTTPS(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    p := ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
-    pool.Ensure([]ServicePort{p}, nil)
+    p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
+    pool.Ensure([]utils.ServicePort{p}, nil)

     beName := defaultNamer.Backend(p.NodePort)
     be, err := f.GetGlobalBackendService(beName)
@@ -202,14 +202,14 @@ func TestBackendPoolUpdateHTTPS(t *testing.T) {
     }

     // Assert the proper health check was created
-    hc, _ := pool.healthChecker.Get(p.NodePort, p.isAlpha())
+    hc, _ := pool.healthChecker.Get(p.NodePort, p.IsAlpha())
     if hc == nil || hc.Protocol() != p.Protocol {
         t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
     }

     // Update service port to encrypted
     p.Protocol = annotations.ProtocolHTTPS
-    pool.Ensure([]ServicePort{p}, nil)
+    pool.Ensure([]utils.ServicePort{p}, nil)

     be, err = f.GetGlobalBackendService(beName)
     if err != nil {
@@ -222,7 +222,7 @@ func TestBackendPoolUpdateHTTPS(t *testing.T) {
     }

     // Assert the proper health check was created
-    hc, _ = pool.healthChecker.Get(p.NodePort, p.isAlpha())
+    hc, _ = pool.healthChecker.Get(p.NodePort, p.IsAlpha())
     if hc == nil || hc.Protocol() != p.Protocol {
         t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
     }
@@ -233,8 +233,8 @@ func TestBackendPoolUpdateHTTP2(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    p := ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
-    pool.Ensure([]ServicePort{p}, nil)
+    p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
+    pool.Ensure([]utils.ServicePort{p}, nil)

     beName := defaultNamer.Backend(p.NodePort)
     be, err := f.GetGlobalBackendService(beName)
@@ -247,14 +247,14 @@ func TestBackendPoolUpdateHTTP2(t *testing.T) {
     }

     // Assert the proper health check was created
-    hc, _ := pool.healthChecker.Get(p.NodePort, p.isAlpha())
+    hc, _ := pool.healthChecker.Get(p.NodePort, p.IsAlpha())
     if hc == nil || hc.Protocol() != p.Protocol {
         t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
     }

     // Update service port to HTTP2
     p.Protocol = annotations.ProtocolHTTP2
-    pool.Ensure([]ServicePort{p}, nil)
+    pool.Ensure([]utils.ServicePort{p}, nil)

     beAlpha, err := f.GetAlphaGlobalBackendService(beName)
     if err != nil {
@@ -278,8 +278,8 @@ func TestBackendPoolUpdateHTTP2WithoutWhitelist(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    p := ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
-    pool.Ensure([]ServicePort{p}, nil)
+    p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
+    pool.Ensure([]utils.ServicePort{p}, nil)

     beName := defaultNamer.Backend(p.NodePort)
     be, err := f.GetGlobalBackendService(beName)
@@ -293,7 +293,7 @@ func TestBackendPoolUpdateHTTP2WithoutWhitelist(t *testing.T) {

     // Update service port to HTTP2
     p.Protocol = annotations.ProtocolHTTP2
-    err = pool.Ensure([]ServicePort{p}, nil)
+    err = pool.Ensure([]utils.ServicePort{p}, nil)

     if !utils.IsHTTPErrorCode(err, http.StatusForbidden) {
         t.Fatalf("Expected getting %+v through alpha API to be forbidden, got %v", p, err)
@@ -305,8 +305,8 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    sp := ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP}
-    pool.Ensure([]ServicePort{sp}, nil)
+    sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP}
+    pool.Ensure([]utils.ServicePort{sp}, nil)

     beName := defaultNamer.Backend(sp.NodePort)
     be, _ := f.GetGlobalBackendService(beName)
@@ -319,7 +319,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
     f.calls = []int{}
     f.UpdateGlobalBackendService(be)

-    pool.Ensure([]ServicePort{sp}, nil)
+    pool.Ensure([]utils.ServicePort{sp}, nil)
     for _, call := range f.calls {
         if call == utils.Create {
             t.Fatalf("Unexpected create for existing backend service")
@@ -348,12 +348,12 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
 func TestBackendPoolSync(t *testing.T) {
     // Call sync on a backend pool with a list of ports, make sure the pool
     // creates/deletes required ports.
-    svcNodePorts := []ServicePort{{NodePort: 81, Protocol: annotations.ProtocolHTTP}, {NodePort: 82, Protocol: annotations.ProtocolHTTPS}, {NodePort: 83, Protocol: annotations.ProtocolHTTP}}
+    svcNodePorts := []utils.ServicePort{{NodePort: 81, Protocol: annotations.ProtocolHTTP}, {NodePort: 82, Protocol: annotations.ProtocolHTTPS}, {NodePort: 83, Protocol: annotations.ProtocolHTTP}}
     f := NewFakeBackendServices(noOpErrFunc, false)
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, true)
-    pool.Ensure([]ServicePort{{NodePort: 81}}, nil)
-    pool.Ensure([]ServicePort{{NodePort: 90}}, nil)
+    pool.Ensure([]utils.ServicePort{{NodePort: 81}}, nil)
+    pool.Ensure([]utils.ServicePort{{NodePort: 90}}, nil)
     if err := pool.Ensure(svcNodePorts, nil); err != nil {
         t.Errorf("Expected backend pool to add node ports, err: %v", err)
     }
@@ -369,8 +369,8 @@ func TestBackendPoolSync(t *testing.T) {
         }
     }

-    svcNodePorts = []ServicePort{{NodePort: 81}}
-    deletedPorts := []ServicePort{{NodePort: 82}, {NodePort: 83}}
+    svcNodePorts = []utils.ServicePort{{NodePort: 81}}
+    deletedPorts := []utils.ServicePort{{NodePort: 82}, {NodePort: 83}}
     if err := pool.GC(svcNodePorts); err != nil {
         t.Fatalf("Expected backend pool to GC, err: %v", err)
     }
@@ -427,7 +427,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
     hcp := healthchecks.NewFakeHealthCheckProvider()
     healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer)
     bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, false)
-    probes := map[ServicePort]*api_v1.Probe{}
+    probes := map[utils.ServicePort]*api_v1.Probe{}
     bp.Init(NewFakeProbeProvider(probes))

     // Create a legacy HTTP health check
@@ -452,7 +452,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
     })

     // Have pool sync the above backend service
-    bp.Ensure([]ServicePort{{NodePort: 80, Protocol: annotations.ProtocolHTTPS}}, nil)
+    bp.Ensure([]utils.ServicePort{{NodePort: 80, Protocol: annotations.ProtocolHTTPS}}, nil)

     // Verify the legacy health check has been deleted
     _, err = hcp.GetHttpHealthCheck(beName)
@@ -478,7 +478,7 @@ func TestBackendPoolShutdown(t *testing.T) {
     pool, _ := newTestJig(f, fakeIGs, false)

     // Add a backend-service and verify that it doesn't exist after Shutdown()
-    pool.Ensure([]ServicePort{{NodePort: 80}}, nil)
+    pool.Ensure([]utils.ServicePort{{NodePort: 80}}, nil)
     pool.Shutdown()
     if _, err := f.GetGlobalBackendService(defaultNamer.Backend(80)); err == nil {
         t.Fatalf("%v", err)
@@ -491,7 +491,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
     pool, _ := newTestJig(f, fakeIGs, false)

     // This will add the instance group k8s-ig to the instance pool
-    pool.Ensure([]ServicePort{{NodePort: 80}}, nil)
+    pool.Ensure([]utils.ServicePort{{NodePort: 80}}, nil)

     be, err := f.GetGlobalBackendService(defaultNamer.Backend(80))
     if err != nil {
@@ -509,7 +509,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
     }

     // Make sure repeated adds don't clobber the inserted instance group
-    pool.Ensure([]ServicePort{{NodePort: 80}}, nil)
+    pool.Ensure([]utils.ServicePort{{NodePort: 80}}, nil)
     be, err = f.GetGlobalBackendService(defaultNamer.Backend(80))
     if err != nil {
         t.Fatalf("%v", err)
@@ -533,7 +533,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
     f := NewFakeBackendServices(noOpErrFunc, false)
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)
-    sp := ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP}
+    sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP}
     modes := []BalancingMode{Rate, Utilization}

     // block the creation of Backends with the given balancingMode
@@ -549,7 +549,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
             return nil
         }

-        pool.Ensure([]ServicePort{sp}, nil)
+        pool.Ensure([]utils.ServicePort{sp}, nil)
         be, err := f.GetGlobalBackendService(defaultNamer.Backend(sp.NodePort))
         if err != nil {
             t.Fatalf("%v", err)
@@ -564,7 +564,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
                 t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode)
             }
         }
-        pool.GC([]ServicePort{})
+        pool.GC([]utils.ServicePort{})
     }
 }

@@ -606,7 +606,7 @@ func TestLinkBackendServiceToNEG(t *testing.T) {
     healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer)
     bp := NewBackendPool(f, fakeNEG, healthChecks, nodePool, defaultNamer, []int64{}, false)

-    svcPort := ServicePort{
+    svcPort := utils.ServicePort{
         NodePort: 30001,
         Protocol: annotations.ProtocolHTTP,
         SvcName: types.NamespacedName{
@@ -617,7 +617,7 @@ func TestLinkBackendServiceToNEG(t *testing.T) {
         SvcTargetPort: port,
         NEGEnabled:    true,
     }
-    if err := bp.Ensure([]ServicePort{svcPort}, nil); err != nil {
+    if err := bp.Ensure([]utils.ServicePort{svcPort}, nil); err != nil {
         t.Fatalf("Failed to ensure backend service: %v", err)
     }

@@ -711,7 +711,7 @@ func TestEnsureBackendServiceProtocol(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    svcPorts := []ServicePort{
+    svcPorts := []utils.ServicePort{
         {NodePort: 80, Protocol: annotations.ProtocolHTTP, SvcPort: intstr.FromInt(1)},
         {NodePort: 443, Protocol: annotations.ProtocolHTTPS, SvcPort: intstr.FromInt(2)},
         {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, SvcPort: intstr.FromInt(3)},
@@ -722,8 +722,8 @@ func TestEnsureBackendServiceProtocol(t *testing.T) {
             t.Run(
                 fmt.Sprintf("Updating Port:%v Protocol:%v to Port:%v Protocol:%v", oldPort.NodePort, oldPort.Protocol, newPort.NodePort, newPort.Protocol),
                 func(t *testing.T) {
-                    pool.Ensure([]ServicePort{oldPort}, nil)
-                    be, err := pool.Get(oldPort.NodePort, newPort.isAlpha())
+                    pool.Ensure([]utils.ServicePort{oldPort}, nil)
+                    be, err := pool.Get(oldPort.NodePort, newPort.IsAlpha())
                     if err != nil {
                         t.Fatalf("%v", err)
                     }
@@ -760,7 +760,7 @@ func TestEnsureBackendServiceDescription(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    svcPorts := []ServicePort{
+    svcPorts := []utils.ServicePort{
         {NodePort: 80, Protocol: annotations.ProtocolHTTP, SvcPort: intstr.FromInt(1)},
         {NodePort: 443, Protocol: annotations.ProtocolHTTPS, SvcPort: intstr.FromInt(2)},
         {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, SvcPort: intstr.FromInt(3)},
@@ -771,8 +771,8 @@ func TestEnsureBackendServiceDescription(t *testing.T) {
             t.Run(
                 fmt.Sprintf("Updating Port:%v Protocol:%v to Port:%v Protocol:%v", oldPort.NodePort, oldPort.Protocol, newPort.NodePort, newPort.Protocol),
                 func(t *testing.T) {
-                    pool.Ensure([]ServicePort{oldPort}, nil)
-                    be, err := pool.Get(oldPort.NodePort, oldPort.isAlpha())
+                    pool.Ensure([]utils.ServicePort{oldPort}, nil)
+                    be, err := pool.Get(oldPort.NodePort, oldPort.IsAlpha())
                     if err != nil {
                         t.Fatalf("%v", err)
                     }
@@ -799,9 +799,9 @@ func TestEnsureBackendServiceHealthCheckLink(t *testing.T) {
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
     pool, _ := newTestJig(f, fakeIGs, false)

-    p := ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, SvcPort: intstr.FromInt(1)}
-    pool.Ensure([]ServicePort{p}, nil)
-    be, err := pool.Get(p.NodePort, p.isAlpha())
+    p := utils.ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, SvcPort: intstr.FromInt(1)}
+    pool.Ensure([]utils.ServicePort{p}, nil)
+    be, err := pool.Get(p.NodePort, p.IsAlpha())
     if err != nil {
         t.Fatalf("%v", err)
     }
diff --git a/pkg/backends/fakes.go b/pkg/backends/fakes.go
index ed4398b548..9ba2404e67 100644
--- a/pkg/backends/fakes.go
+++ b/pkg/backends/fakes.go
@@ -181,16 +181,16 @@ func (f *FakeBackendServices) GetGlobalBackendServiceHealth(name, instanceGroupL

 // FakeProbeProvider implements the probeProvider interface for tests.
 type FakeProbeProvider struct {
-    probes map[ServicePort]*api_v1.Probe
+    probes map[utils.ServicePort]*api_v1.Probe
 }

 // NewFakeProbeProvider returns a struct which satisfies probeProvider interface
-func NewFakeProbeProvider(probes map[ServicePort]*api_v1.Probe) *FakeProbeProvider {
+func NewFakeProbeProvider(probes map[utils.ServicePort]*api_v1.Probe) *FakeProbeProvider {
     return &FakeProbeProvider{probes}
 }

 // GetProbe returns the probe for a given nodePort
-func (pp *FakeProbeProvider) GetProbe(port ServicePort) (*api_v1.Probe, error) {
+func (pp *FakeProbeProvider) GetProbe(port utils.ServicePort) (*api_v1.Probe, error) {
     if probe, exists := pp.probes[port]; exists && probe.HTTPGet != nil {
         return probe, nil
     }
diff --git a/pkg/backends/interfaces.go b/pkg/backends/interfaces.go
index 8b525aeb68..42baa588e1 100644
--- a/pkg/backends/interfaces.go
+++ b/pkg/backends/interfaces.go
@@ -20,25 +20,26 @@ import (
     computealpha "google.golang.org/api/compute/v0.alpha"
     compute "google.golang.org/api/compute/v1"
     api_v1 "k8s.io/api/core/v1"
+    "k8s.io/ingress-gce/pkg/utils"
 )

 // ProbeProvider retrieves a probe struct given a nodePort
 type ProbeProvider interface {
-    GetProbe(sp ServicePort) (*api_v1.Probe, error)
+    GetProbe(sp utils.ServicePort) (*api_v1.Probe, error)
 }

 // BackendPool is an interface to manage a pool of kubernetes nodePort services
 // as gce backendServices, and sync them through the BackendServices interface.
 type BackendPool interface {
     Init(p ProbeProvider)
-    Ensure(ports []ServicePort, igs []*compute.InstanceGroup) error
+    Ensure(ports []utils.ServicePort, igs []*compute.InstanceGroup) error
     Get(port int64, isAlpha bool) (*BackendService, error)
     Delete(port int64) error
-    GC(ports []ServicePort) error
+    GC(ports []utils.ServicePort) error
     Shutdown() error
     Status(name string) string
     List() ([]interface{}, error)
-    Link(port ServicePort, zones []string) error
+    Link(port utils.ServicePort, zones []string) error
 }

 // BackendServices is an interface for managing gce backend services.
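For interface consumers, nothing changes shape besides the parameter type. A sketch of a caller that compiles against the updated BackendPool (the helper name is invented for illustration):

    // syncPorts ensures the given ports exist, then garbage-collects the rest.
    func syncPorts(pool backends.BackendPool, ports []utils.ServicePort) error {
        if err := pool.Ensure(ports, nil); err != nil { // nil igs: let the pool create instance groups
            return err
        }
        return pool.GC(ports)
    }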
diff --git a/pkg/controller/cluster_manager.go b/pkg/controller/cluster_manager.go
index 70b11370c9..9494069ded 100644
--- a/pkg/controller/cluster_manager.go
+++ b/pkg/controller/cluster_manager.go
@@ -41,7 +41,7 @@ const (
 // ClusterManager manages cluster resource pools.
 type ClusterManager struct {
     ClusterNamer           *utils.Namer
-    defaultBackendNodePort backends.ServicePort
+    defaultBackendNodePort utils.ServicePort
     instancePool           instances.NodePool
     backendPool            backends.BackendPool
     l7Pool                 loadbalancers.LoadBalancerPool
@@ -100,7 +100,7 @@ func (c *ClusterManager) shutdown() error {
 // - lbServicePorts are the ports for which we require Backend Services.
 // - instanceGroups are the groups to be referenced by the Backend Services..
 // If GCE runs out of quota, a googleapi 403 is returned.
-func (c *ClusterManager) EnsureLoadBalancer(lb *loadbalancers.L7RuntimeInfo, lbServicePorts []backends.ServicePort, instanceGroups []*compute.InstanceGroup) error {
+func (c *ClusterManager) EnsureLoadBalancer(lb *loadbalancers.L7RuntimeInfo, lbServicePorts []utils.ServicePort, instanceGroups []*compute.InstanceGroup) error {
     glog.V(4).Infof("EnsureLoadBalancer(%q lb, %v lbServicePorts, %v instanceGroups)", lb.String(), len(lbServicePorts), len(instanceGroups))
     if err := c.backendPool.Ensure(uniq(lbServicePorts), instanceGroups); err != nil {
         return err
@@ -109,7 +109,7 @@ func (c *ClusterManager) EnsureLoadBalancer(lb *loadbalancers.L7RuntimeInfo, lbS
     return c.l7Pool.Sync([]*loadbalancers.L7RuntimeInfo{lb})
 }

-func (c *ClusterManager) EnsureInstanceGroupsAndPorts(nodeNames []string, servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
+func (c *ClusterManager) EnsureInstanceGroupsAndPorts(nodeNames []string, servicePorts []utils.ServicePort) ([]*compute.InstanceGroup, error) {
     if len(servicePorts) != 0 {
         // Add the default backend node port to the list of named ports for instance groups.
         servicePorts = append(servicePorts, c.defaultBackendNodePort)
@@ -145,7 +145,7 @@ func (c *ClusterManager) EnsureFirewall(nodeNames []string, endpointPorts []stri
 // - nodePorts are the ports for which we want BackendServies. BackendServices
 //   for ports not in this list are deleted.
 // This method ignores googleapi 404 errors (StatusNotFound).
-func (c *ClusterManager) GC(lbNames []string, nodePorts []backends.ServicePort) error {
+func (c *ClusterManager) GC(lbNames []string, nodePorts []utils.ServicePort) error {
     // On GC:
     // * Loadbalancers need to get deleted before backends.
     // * Backends are refcounted in a shared pool.
@@ -187,7 +187,7 @@ func (c *ClusterManager) GC(lbNames []string, nodePorts []backends.ServicePort)
 func NewClusterManager(
     cloud *gce.GCECloud,
     namer *utils.Namer,
-    defaultBackendNodePort backends.ServicePort,
+    defaultBackendNodePort utils.ServicePort,
     defaultHealthCheckPath string) (*ClusterManager, error) {

     // Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index cfe8be3970..97e3a5801e 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -34,7 +34,6 @@ import (
     "k8s.io/client-go/tools/record"

     "k8s.io/ingress-gce/pkg/annotations"
-    "k8s.io/ingress-gce/pkg/backends"
     "k8s.io/ingress-gce/pkg/context"
     "k8s.io/ingress-gce/pkg/controller/translator"
     "k8s.io/ingress-gce/pkg/firewalls"
@@ -244,8 +243,8 @@ func (lbc *LoadBalancerController) sync(key string) (retErr error) {
         return err
     }

-    // gceNodePorts contains the ServicePorts used by only single-cluster ingress.
-    gceNodePorts := lbc.Translator.ToNodePorts(&gceIngresses)
+    // gceSvcPorts contains the ServicePorts used by only single-cluster ingress.
+    gceSvcPorts := lbc.ToSvcPorts(&gceIngresses)
     nodeNames, err := getReadyNodeNames(listers.NewNodeLister(lbc.nodeLister))
     if err != nil {
         return err
@@ -259,7 +258,7 @@ func (lbc *LoadBalancerController) sync(key string) (retErr error) {
     if !ingExists {
         glog.V(2).Infof("Ingress %q no longer exists, triggering GC", key)
         // GC will find GCE resources that were used for this ingress and delete them.
-        return lbc.CloudClusterManager.GC(lbNames, gceNodePorts)
+        return lbc.CloudClusterManager.GC(lbNames, gceSvcPorts)
     }

     // Get ingress and DeepCopy for assurance that we don't pollute other goroutines with changes.
@@ -269,7 +268,7 @@ func (lbc *LoadBalancerController) sync(key string) (retErr error) {
     }
     ing = ing.DeepCopy()

-    ensureErr := lbc.ensureIngress(key, ing, nodeNames, gceNodePorts)
+    ensureErr := lbc.ensureIngress(key, ing, nodeNames, gceSvcPorts)
     if ensureErr != nil {
         lbc.ctx.Recorder(ing.Namespace).Eventf(ing, apiv1.EventTypeWarning, "Sync", fmt.Sprintf("Error during sync: %v", ensureErr.Error()))
     }
@@ -277,16 +276,17 @@ func (lbc *LoadBalancerController) sync(key string) (retErr error) {
     // Garbage collection will occur regardless of an error occurring. If an error occurred,
     // it could have been caused by quota issues; therefore, garbage collecting now may
     // free up enough quota for the next sync to pass.
-    if gcErr := lbc.CloudClusterManager.GC(lbNames, gceNodePorts); gcErr != nil {
+    if gcErr := lbc.CloudClusterManager.GC(lbNames, gceSvcPorts); gcErr != nil {
         retErr = fmt.Errorf("error during sync %v, error during GC %v", retErr, gcErr)
     }

     return ensureErr
 }

-func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ingress, nodeNames []string, gceNodePorts []backends.ServicePort) error {
-    ingNodePorts := lbc.Translator.IngressToNodePorts(ing)
-    igs, err := lbc.CloudClusterManager.EnsureInstanceGroupsAndPorts(nodeNames, ingNodePorts)
+func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ingress, nodeNames []string, gceSvcPorts []utils.ServicePort) error {
+    urlMap := lbc.Translator.TranslateIngress(ing)
+    ingSvcPorts := urlMap.AllServicePorts()
+    igs, err := lbc.CloudClusterManager.EnsureInstanceGroupsAndPorts(nodeNames, ingSvcPorts)
     if err != nil {
         return err
     }
@@ -312,11 +312,11 @@ func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ing
     }

     // Create the backend services and higher-level LB resources.
-    if err = lbc.CloudClusterManager.EnsureLoadBalancer(lb, ingNodePorts, igs); err != nil {
+    if err = lbc.CloudClusterManager.EnsureLoadBalancer(lb, ingSvcPorts, igs); err != nil {
         return err
     }

-    negEndpointPorts := lbc.Translator.GatherEndpointPorts(gceNodePorts)
+    negEndpointPorts := lbc.Translator.GatherEndpointPorts(gceSvcPorts)
     // Ensure firewall rule for the cluster and pass any NEG endpoint ports.
     if err = lbc.CloudClusterManager.EnsureFirewall(nodeNames, negEndpointPorts); err != nil {
         if fwErr, ok := err.(*firewalls.FirewallXPNError); ok {
@@ -329,7 +329,7 @@ func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ing

     // If NEG enabled, link the backend services to the NEGs.
     if lbc.negEnabled {
-        for _, svcPort := range ingNodePorts {
+        for _, svcPort := range ingSvcPorts {
             if svcPort.NEGEnabled {
                 zones, err := lbc.Translator.ListZones()
                 if err != nil {
@@ -348,11 +348,6 @@ func (lbc *LoadBalancerController) ensureIngress(key string, ing *extensions.Ing
         return fmt.Errorf("unable to get loadbalancer: %v", err)
     }

-    urlMap, err := lbc.Translator.ToURLMap(ing)
-    if err != nil {
-        return fmt.Errorf("convert to URL Map error %v", err)
-    }
-
     if err := l7.UpdateUrlMap(urlMap); err != nil {
         return fmt.Errorf("update URL Map error: %v", err)
     }
@@ -444,3 +439,14 @@ func updateAnnotations(client kubernetes.Interface, name, namespace string, anno
     }
     return nil
 }
+
+// ToSvcPorts is a helper method over translator.TranslateIngress to process a list of ingresses.
+// Note: This method is used for GC.
+func (lbc *LoadBalancerController) ToSvcPorts(ings *extensions.IngressList) []utils.ServicePort {
+    var knownPorts []utils.ServicePort
+    for _, ing := range ings.Items {
+        urlMap := lbc.Translator.TranslateIngress(&ing)
+        knownPorts = append(knownPorts, urlMap.AllServicePorts()...)
+    }
+    return knownPorts
+}
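The controller now derives both the URL map and the flat port list from a single translation, so the two can no longer drift apart. Roughly, the new data flow in ensureIngress (restating the hunks above):

    urlMap := lbc.Translator.TranslateIngress(ing) // Ingress -> GCEURLMap with ServicePort backends
    ingSvcPorts := urlMap.AllServicePorts()        // the same ports feed IGs, backends, and firewall
    // ...later, the very same urlMap is handed to l7.UpdateUrlMap(urlMap).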
diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go
index 83794847ce..c4275277f2 100644
--- a/pkg/controller/controller_test.go
+++ b/pkg/controller/controller_test.go
@@ -142,6 +142,22 @@ func getKey(ing *extensions.Ingress, t *testing.T) string {
     return key
 }

+// gceURLMapFromPrimitive returns a GCEURLMap that is populated from a primitive representation.
+// It uses the passed in nodePortManager to construct a stubbed ServicePort based on service names.
+func gceURLMapFromPrimitive(primitiveMap utils.PrimitivePathMap, pm *nodePortManager) *utils.GCEURLMap {
+    urlMap := utils.NewGCEURLMap()
+    for hostname, rules := range primitiveMap {
+        pathRules := make([]utils.PathRule, 0)
+        for path, backend := range rules {
+            nodePort := pm.getNodePort(backend)
+            stubSvcPort := utils.ServicePort{NodePort: int64(nodePort)}
+            pathRules = append(pathRules, utils.PathRule{Path: path, Backend: stubSvcPort})
+        }
+        urlMap.PutPathRulesForHost(hostname, pathRules)
+    }
+    return urlMap
+}
+
 // nodePortManager is a helper to allocate ports to services and
 // remember the allocations.
 type nodePortManager struct {
@@ -160,20 +176,6 @@ func (p *nodePortManager) getNodePort(svcName string) int {
     return p.portMap[svcName]
 }

-// toNodePortSvcNames converts all service names in the given map to gce node
-// port names, eg foo -> k8-be-
-func (p *nodePortManager) toNodePortSvcNames(paths utils.PrimitivePathMap) utils.PrimitivePathMap {
-    result := utils.PrimitivePathMap{}
-    for host, rules := range paths {
-        ruleMap := map[string]string{}
-        for path, svc := range rules {
-            ruleMap[path] = p.namer.Backend(int64(p.portMap[svc]))
-        }
-        result[host] = ruleMap
-    }
-    return result
-}
-
 func newPortManager(st, end int, namer *utils.Namer) *nodePortManager {
     return &nodePortManager{map[string]int{}, st, end, namer}
 }
@@ -239,8 +241,7 @@ func TestLbCreateDelete(t *testing.T) {
     if err != nil {
         t.Fatalf("cm.l7Pool.Get(%q) = _, %v; want nil", ingStoreKey, err)
     }
-    expectedUrlMap := utils.GCEURLMapFromPrimitive(pm.toNodePortSvcNames(m))
-    expectedUrlMap.DefaultBackendName = cm.Namer.Backend(testDefaultBeNodePort.NodePort)
+    expectedUrlMap := gceURLMapFromPrimitive(m, pm)
     if err := cm.fakeLbs.CheckURLMap(l7, expectedUrlMap); err != nil {
         t.Fatalf("cm.fakeLbs.CheckURLMap(l7, expectedUrlMap) = %v, want nil", err)
     }
@@ -320,19 +321,18 @@ func TestLbFaultyUpdate(t *testing.T) {
     if err != nil {
         t.Fatalf("%v", err)
     }
-    expectedUrlMap := utils.GCEURLMapFromPrimitive(pm.toNodePortSvcNames(inputMap))
-    expectedUrlMap.DefaultBackendName = cm.Namer.Backend(testDefaultBeNodePort.NodePort)
+    expectedUrlMap := gceURLMapFromPrimitive(inputMap, pm)
     if err := cm.fakeLbs.CheckURLMap(l7, expectedUrlMap); err != nil {
         t.Fatalf("cm.fakeLbs.CheckURLMap(...) = %v, want nil", err)
     }

     // Change the urlmap directly through the lb pool, resync, and
     // make sure the controller corrects it.
-    forcedUpdate := utils.GCEURLMapFromPrimitive(utils.PrimitivePathMap{
+    forcedUpdate := gceURLMapFromPrimitive(utils.PrimitivePathMap{
         "foo.example.com": {
             "/foo1": "foo2svc",
         },
-    })
+    }, pm)
     l7.UpdateUrlMap(forcedUpdate)

     if err := lbc.sync(ingStoreKey); err != nil {
@@ -346,9 +346,10 @@ func TestLbFaultyUpdate(t *testing.T) {
 func TestLbDefaulting(t *testing.T) {
     cm := NewFakeClusterManager(flags.DefaultClusterUID, DefaultFirewallName)
     lbc := newLoadBalancerController(t, cm)
+    pm := newPortManager(1, 65536, cm.Namer)
     // Make sure the controller plugs in the default values accepted by GCE.
     ing := newIngress(utils.PrimitivePathMap{"": {"": "foo1svc"}})
-    pm := newPortManager(1, 65536, cm.Namer)
+
     addIngress(lbc, ing, pm)

     ingStoreKey := getKey(ing, t)
@@ -364,8 +365,7 @@ func TestLbDefaulting(t *testing.T) {
             loadbalancers.DefaultPath: "foo1svc",
         },
     }
-    expectedUrlMap := utils.GCEURLMapFromPrimitive(pm.toNodePortSvcNames(expected))
-    expectedUrlMap.DefaultBackendName = cm.Namer.Backend(testDefaultBeNodePort.NodePort)
+    expectedUrlMap := gceURLMapFromPrimitive(expected, pm)
     if err := cm.fakeLbs.CheckURLMap(l7, expectedUrlMap); err != nil {
         t.Fatalf("cm.fakeLbs.CheckURLMap(...) = %v, want nil", err)
     }
@@ -382,7 +382,6 @@ func TestLbNoService(t *testing.T) {
     }
     ing := newIngress(inputMap)
     ing.Namespace = "ns1"
-    ing.Spec.Backend.ServiceName = "foo1svc"
     ingStoreKey := getKey(ing, t)

     // Adds ingress to store, but doesn't create an associated service.
@@ -413,8 +412,7 @@ func TestLbNoService(t *testing.T) {
         t.Fatalf("lbc.sync() = err %v", err)
     }

-    expectedUrlMap := utils.GCEURLMapFromPrimitive(pm.toNodePortSvcNames(inputMap))
-    expectedUrlMap.DefaultBackendName = cm.Namer.Backend(int64(pm.getNodePort("foo1svc")))
+    expectedUrlMap := gceURLMapFromPrimitive(inputMap, pm)
     if err := cm.fakeLbs.CheckURLMap(l7, expectedUrlMap); err != nil {
         t.Fatalf("cm.fakeLbs.CheckURLMap(...) = %v, want nil", err)
     }
diff --git a/pkg/controller/fakes.go b/pkg/controller/fakes.go
index 29d9ba7fb0..69ee569e31 100644
--- a/pkg/controller/fakes.go
+++ b/pkg/controller/fakes.go
@@ -32,7 +32,7 @@ import (
 )

 var (
-    testDefaultBeNodePort = backends.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP}
+    testDefaultBeNodePort = utils.ServicePort{NodePort: 30000, Protocol: annotations.ProtocolHTTP}
     testBackendPort       = intstr.IntOrString{Type: intstr.Int, IntVal: 80}
     testSrcRanges         = []string{"1.1.1.1/20"}
     testNodePortRanges    = []string{"30000-32767"}
diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go
index 555d33fd90..4c58079c9c 100644
--- a/pkg/controller/translator/translator.go
+++ b/pkg/controller/translator/translator.go
@@ -36,7 +36,6 @@ import (
     "k8s.io/client-go/tools/record"

     "k8s.io/ingress-gce/pkg/annotations"
-    "k8s.io/ingress-gce/pkg/backends"
     "k8s.io/ingress-gce/pkg/controller/errors"
     "k8s.io/ingress-gce/pkg/loadbalancers"
     "k8s.io/ingress-gce/pkg/utils"
@@ -71,84 +70,9 @@ type GCE struct {
     negEnabled bool
 }

-// ToURLMap converts an ingress to our internal UrlMap representation.
-func (t *GCE) ToURLMap(ing *extensions.Ingress) (*utils.GCEURLMap, error) {
-    urlMap := utils.NewGCEURLMap()
-    for _, rule := range ing.Spec.Rules {
-        if rule.HTTP == nil {
-            continue
-        }
-        pathRules := []utils.PathRule{}
-        for _, p := range rule.HTTP.Paths {
-            backendName, err := t.toGCEBackendName(&p.Backend, ing.Namespace)
-            if err != nil {
-                // If a service doesn't have a nodeport we can still forward traffic
-                // to all other services under the assumption that the user will
-                // modify nodeport.
-                if _, ok := err.(errors.ErrNodePortNotFound); ok {
-                    t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeWarning, "Service", err.(errors.ErrNodePortNotFound).Error())
-                    continue
-                }
-
-                // If a service doesn't have a backend, there's nothing the user
-                // can do to correct this (the admin might've limited quota).
-                // So keep requeuing the l7 till all backends exist.
-                return utils.NewGCEURLMap(), err
-            }
-            // The Ingress spec defines empty path as catch-all, so if a user
-            // asks for a single host and multiple empty paths, all traffic is
-            // sent to one of the last backend in the rules list.
-            path := p.Path
-            if path == "" {
-                path = loadbalancers.DefaultPath
-            }
-
-            pathRules = append(pathRules, utils.PathRule{Path: path, BackendName: backendName})
-        }
-        host := rule.Host
-        if host == "" {
-            host = loadbalancers.DefaultHost
-        }
-        urlMap.PutPathRulesForHost(host, pathRules)
-    }
-    var defaultBackendName string
-    if ing.Spec.Backend != nil {
-        var err error
-        defaultBackendName, err = t.toGCEBackendName(ing.Spec.Backend, ing.Namespace)
-        if err != nil {
-            msg := fmt.Sprintf("%v", err)
-            if _, ok := err.(errors.ErrNodePortNotFound); ok {
-                msg = fmt.Sprintf("couldn't find nodeport for %v/%v", ing.Namespace, ing.Spec.Backend.ServiceName)
-            }
-            msg = fmt.Sprintf("failed to identify user specified default backend, %v, using system default", msg)
-            t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeWarning, "Service", msg)
-        } else if defaultBackendName != "" {
-            port, _ := t.namer.BackendPort(defaultBackendName)
-            msg := fmt.Sprintf("default backend set to %v:%v", ing.Spec.Backend.ServiceName, port)
-            t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeNormal, "Service", msg)
-        }
-    } else {
-        t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeNormal, "Service", "no user specified default backend, using system default")
-    }
-    urlMap.DefaultBackendName = defaultBackendName
-    return urlMap, nil
-}
-
-func (t *GCE) toGCEBackendName(be *extensions.IngressBackend, ns string) (string, error) {
-    if be == nil {
-        return "", nil
-    }
-    port, err := t.getServiceNodePort(*be, ns)
-    if err != nil {
-        return "", err
-    }
-    backendName := t.namer.Backend(port.NodePort)
-    return backendName, nil
-}
-
 // getServiceNodePort looks in the svc store for a matching service:port,
 // and returns the nodeport.
-func (t *GCE) getServiceNodePort(be extensions.IngressBackend, namespace string) (backends.ServicePort, error) {
+func (t *GCE) getServiceNodePort(be extensions.IngressBackend, namespace string) (utils.ServicePort, error) {
     obj, exists, err := t.svcLister.Get(
         &api_v1.Service{
             ObjectMeta: meta_v1.ObjectMeta{
@@ -157,18 +81,18 @@ func (t *GCE) getServiceNodePort(be extensions.IngressBackend, namespace string)
         },
     })
     if !exists {
-        return backends.ServicePort{}, errors.ErrNodePortNotFound{
+        return utils.ServicePort{}, errors.ErrNodePortNotFound{
             Backend: be,
             Err:     fmt.Errorf("service %v/%v not found in store", namespace, be.ServiceName),
         }
     }
     if err != nil {
-        return backends.ServicePort{}, errors.ErrNodePortNotFound{Backend: be, Err: err}
+        return utils.ServicePort{}, errors.ErrNodePortNotFound{Backend: be, Err: err}
     }
     svc := obj.(*api_v1.Service)
     appProtocols, err := annotations.FromService(svc).ApplicationProtocols()
     if err != nil {
-        return backends.ServicePort{}, errors.ErrSvcAppProtosParsing{Svc: svc, Err: err}
+        return utils.ServicePort{}, errors.ErrSvcAppProtosParsing{Svc: svc, Err: err}
     }

     var port *api_v1.ServicePort
@@ -190,7 +114,7 @@ PortLoop:
     }

     if port == nil {
-        return backends.ServicePort{}, errors.ErrNodePortNotFound{
+        return utils.ServicePort{}, errors.ErrNodePortNotFound{
             Backend: be,
             Err:     fmt.Errorf("could not find matching nodeport from service"),
         }
@@ -201,7 +125,7 @@ PortLoop:
         proto = annotations.AppProtocol(protoStr)
     }

-    p := backends.ServicePort{
+    p := utils.ServicePort{
         NodePort: int64(port.NodePort),
         Protocol: proto,
         SvcName:  types.NamespacedName{Namespace: namespace, Name: be.ServiceName},
@@ -212,42 +136,48 @@ PortLoop:
     return p, nil
 }

-// ToNodePorts is a helper method over IngressToNodePorts to process a list of ingresses.
-func (t *GCE) ToNodePorts(ings *extensions.IngressList) []backends.ServicePort {
-    var knownPorts []backends.ServicePort
-    for _, ing := range ings.Items {
-        knownPorts = append(knownPorts, t.IngressToNodePorts(&ing)...)
-    }
-    return knownPorts
-}
-
-// IngressToNodePorts converts a pathlist to a flat list of nodeports for the given ingress.
-func (t *GCE) IngressToNodePorts(ing *extensions.Ingress) []backends.ServicePort {
-    var knownPorts []backends.ServicePort
-    defaultBackend := ing.Spec.Backend
-    if defaultBackend != nil {
-        port, err := t.getServiceNodePort(*defaultBackend, ing.Namespace)
-        if err != nil {
-            glog.Infof("%v", err)
-        } else {
-            knownPorts = append(knownPorts, port)
-        }
-    }
+// TranslateIngress converts an Ingress into our internal UrlMap representation.
+func (t *GCE) TranslateIngress(ing *extensions.Ingress) *utils.GCEURLMap {
+    urlMap := utils.NewGCEURLMap()
     for _, rule := range ing.Spec.Rules {
         if rule.HTTP == nil {
-            glog.Errorf("ignoring non http Ingress rule")
+            glog.Errorf("Ignoring non http rule while translating %v", ing.Name)
             continue
         }
-        for _, path := range rule.HTTP.Paths {
-            port, err := t.getServiceNodePort(path.Backend, ing.Namespace)
+        pathRules := []utils.PathRule{}
+        for _, p := range rule.HTTP.Paths {
+            svcPort, err := t.getServiceNodePort(p.Backend, ing.Namespace)
             if err != nil {
                 glog.Infof("%v", err)
                 continue
             }
-            knownPorts = append(knownPorts, port)
+            // The Ingress spec defines empty path as catch-all, so if a user
+            // asks for a single host and multiple empty paths, all traffic is
+            // sent to one of the last backend in the rules list.
+            path := p.Path
+            if path == "" {
+                path = loadbalancers.DefaultPath
+            }
+            pathRules = append(pathRules, utils.PathRule{Path: path, Backend: svcPort})
+        }
+        host := rule.Host
+        if host == "" {
+            host = loadbalancers.DefaultHost
+        }
+        urlMap.PutPathRulesForHost(host, pathRules)
+    }
+    if ing.Spec.Backend != nil {
+        svcPort, err := t.getServiceNodePort(*ing.Spec.Backend, ing.Namespace)
+        if err != nil {
+            msg := fmt.Sprintf("%v", err)
+            msg = fmt.Sprintf("failed to identify user specified default backend, %v, using system default", msg)
+            t.recorders.Recorder(ing.Namespace).Eventf(ing, api_v1.EventTypeWarning, "Service", msg)
+            glog.Infof("%v", err)
+        } else {
+            urlMap.DefaultBackend = svcPort
         }
     }
-    return knownPorts
+    return urlMap
 }

 func getZone(n *api_v1.Node) string {
@@ -339,7 +269,7 @@ func (t *GCE) getHTTPProbe(svc api_v1.Service, targetPort intstr.IntOrString, pr
 }

 // GatherEndpointPorts returns all ports needed to open NEG endpoints.
-func (t *GCE) GatherEndpointPorts(svcPorts []backends.ServicePort) []string {
+func (t *GCE) GatherEndpointPorts(svcPorts []utils.ServicePort) []string {
     portMap := map[int64]bool{}
     for _, p := range svcPorts {
         if t.negEnabled && p.NEGEnabled {
@@ -369,7 +299,7 @@ func isSimpleHTTPProbe(probe *api_v1.Probe) bool {
 }

 // GetProbe returns a probe that's used for the given nodeport
-func (t *GCE) GetProbe(port backends.ServicePort) (*api_v1.Probe, error) {
+func (t *GCE) GetProbe(port utils.ServicePort) (*api_v1.Probe, error) {
     sl := t.svcLister.List()

     // Find the label and target port of the one service with the given nodePort
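To make the new return shape concrete: for a one-rule Ingress, TranslateIngress now yields resolved ServicePorts rather than precomputed GCE backend names. A hand-built approximation of its output (NodePort values are illustrative):

    um := utils.NewGCEURLMap()
    um.PutPathRulesForHost("foo.example.com", []utils.PathRule{
        {Path: "/web", Backend: utils.ServicePort{NodePort: 30100, Protocol: annotations.ProtocolHTTP}},
    })
    um.DefaultBackend = utils.ServicePort{NodePort: 30101}
    // An empty rule.Host or p.Path falls back to loadbalancers.DefaultHost /
    // loadbalancers.DefaultPath before insertion, per the hunk above.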
diff --git a/pkg/controller/translator/translator_test.go b/pkg/controller/translator/translator_test.go
index 73764b6307..817f1194c2 100644
--- a/pkg/controller/translator/translator_test.go
+++ b/pkg/controller/translator/translator_test.go
@@ -33,7 +33,6 @@ import (
     "k8s.io/client-go/tools/record"

     "k8s.io/ingress-gce/pkg/annotations"
-    "k8s.io/ingress-gce/pkg/backends"
     "k8s.io/ingress-gce/pkg/context"
     "k8s.io/ingress-gce/pkg/utils"
 )
@@ -69,7 +68,7 @@ func gceForTest(negEnabled bool) *GCE {

 func TestGetProbe(t *testing.T) {
     translator := gceForTest(false)
-    nodePortToHealthCheck := map[backends.ServicePort]string{
+    nodePortToHealthCheck := map[utils.ServicePort]string{
         {NodePort: 3001, Protocol: annotations.ProtocolHTTP}:  "/healthz",
         {NodePort: 3002, Protocol: annotations.ProtocolHTTPS}: "/foo",
     }
@@ -92,7 +91,7 @@ func TestGetProbeNamedPort(t *testing.T) {
     translator := gceForTest(false)
-    nodePortToHealthCheck := map[backends.ServicePort]string{
+    nodePortToHealthCheck := map[utils.ServicePort]string{
         {NodePort: 3001, Protocol: annotations.ProtocolHTTP}: "/healthz",
     }
     for _, svc := range makeServices(nodePortToHealthCheck, apiv1.NamespaceDefault) {
@@ -147,7 +146,7 @@ func TestGetProbeCrossNamespace(t *testing.T) {
         },
     }
     translator.podLister.Add(firstPod)
-    nodePortToHealthCheck := map[backends.ServicePort]string{
+    nodePortToHealthCheck := map[utils.ServicePort]string{
         {NodePort: 3001, Protocol: annotations.ProtocolHTTP}: "/healthz",
     }
     for _, svc := range makeServices(nodePortToHealthCheck, apiv1.NamespaceDefault) {
@@ -169,7 +168,7 @@ func TestGetProbeCrossNamespace(t *testing.T) {
     }
 }

-func makePods(nodePortToHealthCheck map[backends.ServicePort]string, ns string) []*apiv1.Pod {
+func makePods(nodePortToHealthCheck map[utils.ServicePort]string, ns string) []*apiv1.Pod {
     delay := 1 * time.Minute

     var pods []*apiv1.Pod
@@ -207,7 +206,7 @@ func makePods(nodePortToHealthCheck map[backends.ServicePort]string, ns string)
     return pods
 }

-func makeServices(nodePortToHealthCheck map[backends.ServicePort]string, ns string) []*apiv1.Service {
+func makeServices(nodePortToHealthCheck map[utils.ServicePort]string, ns string) []*apiv1.Service {
     var services []*apiv1.Service
     for np := range nodePortToHealthCheck {
         svc := &apiv1.Service{
@@ -243,7 +242,7 @@ func TestGatherEndpointPorts(t *testing.T) {
     ep1 := "ep1"
     ep2 := "ep2"

-    svcPorts := []backends.ServicePort{
+    svcPorts := []utils.ServicePort{
         {NodePort: int64(30001)},
         {NodePort: int64(30002)},
         {
diff --git a/pkg/controller/utils.go b/pkg/controller/utils.go
index ee4353379a..bcbe2720e6 100644
--- a/pkg/controller/utils.go
+++ b/pkg/controller/utils.go
@@ -32,7 +32,6 @@ import (
     "k8s.io/client-go/tools/cache"

     "k8s.io/ingress-gce/pkg/annotations"
-    "k8s.io/ingress-gce/pkg/backends"
     "k8s.io/ingress-gce/pkg/flags"
     "k8s.io/ingress-gce/pkg/utils"
 )
@@ -177,12 +176,12 @@ func setInstanceGroupsAnnotation(existing map[string]string, igs []*compute.Inst
 }

 // uniq returns an array of unique service ports from the given array.
-func uniq(nodePorts []backends.ServicePort) []backends.ServicePort {
-    portMap := map[int64]backends.ServicePort{}
+func uniq(nodePorts []utils.ServicePort) []utils.ServicePort {
+    portMap := map[int64]utils.ServicePort{}
     for _, p := range nodePorts {
         portMap[p.NodePort] = p
     }
-    nodePorts = make([]backends.ServicePort, 0, len(portMap))
+    nodePorts = make([]utils.ServicePort, 0, len(portMap))
     for _, sp := range portMap {
         nodePorts = append(nodePorts, sp)
     }
diff --git a/pkg/loadbalancers/fakes.go b/pkg/loadbalancers/fakes.go
index dd4fd3db2e..7fb389f538 100644
--- a/pkg/loadbalancers/fakes.go
+++ b/pkg/loadbalancers/fakes.go
@@ -366,8 +366,8 @@ func (f *FakeLoadBalancers) CheckURLMap(l7 *L7, expectedUrlMap *utils.GCEURLMap)
     if err != nil || um == nil {
         return fmt.Errorf("f.GetUrlMap(%q) = %v, %v; want _, nil", l7.UrlMap().Name, um, err)
     }
-    defaultBackendName := expectedUrlMap.DefaultBackendName
-    defaultBackendLink := utils.BackendServiceRelativeResourcePath(expectedUrlMap.DefaultBackendName)
+    defaultBackendName := f.namer.Backend(expectedUrlMap.DefaultBackend.NodePort)
+    defaultBackendLink := utils.BackendServiceRelativeResourcePath(defaultBackendName)
     // The urlmap should have a default backend, and each path matcher.
     if defaultBackendName != "" && l7.UrlMap().DefaultService != defaultBackendLink {
         return fmt.Errorf("default backend = %v, want %v", l7.UrlMap().DefaultService, defaultBackendLink)
@@ -398,7 +398,7 @@ func (f *FakeLoadBalancers) CheckURLMap(l7 *L7, expectedUrlMap *utils.GCEURLMap)
                 return fmt.Errorf("Expected path rules for host %v", hostname)
             } else if ok, svc := expectedUrlMap.PathExists(hostname, pathRule); !ok {
                 return fmt.Errorf("Expected rule %v for host %v", pathRule, hostname)
-            } else if utils.BackendServiceRelativeResourcePath(svc) != rule.Service {
+            } else if utils.BackendServiceRelativeResourcePath(f.namer.Backend(svc.NodePort)) != rule.Service {
                 return fmt.Errorf("Expected service %v found %v", svc, rule.Service)
             }
         }
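The fake mirrors the production change that follows in l7.go: backend links are derived on demand from the ServicePort's NodePort through the cluster namer, instead of being stored in the URL map. A sketch (the generated name format is shown only approximately):

    beName := namer.Backend(svcPort.NodePort)                  // e.g. a "k8s-be-30001--uid1" style name
    beLink := utils.BackendServiceRelativeResourcePath(beName) // relative GCE resource path for the urlmap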
- defaultBackendName := ingressRules.DefaultBackendName + defaultBackendName := l.namer.Backend(ingressRules.DefaultBackend.NodePort) if defaultBackendName != "" { l.um.DefaultService = utils.BackendServiceRelativeResourcePath(defaultBackendName) } else { @@ -745,7 +745,8 @@ func (l *L7) UpdateUrlMap(ingressRules *utils.GCEURLMap) error { // GCE ensures that matched rule with longest prefix wins. for _, rule := range rules { - beLink := utils.BackendServiceRelativeResourcePath(rule.BackendName) + beName := l.namer.Backend(rule.Backend.NodePort) + beLink := utils.BackendServiceRelativeResourcePath(beName) pathMatcher.PathRules = append( pathMatcher.PathRules, &compute.PathRule{Paths: []string{rule.Path}, Service: beLink}) } diff --git a/pkg/loadbalancers/l7s.go b/pkg/loadbalancers/l7s.go index a70e1ae6a5..c53540cd73 100644 --- a/pkg/loadbalancers/l7s.go +++ b/pkg/loadbalancers/l7s.go @@ -37,7 +37,7 @@ type L7s struct { // TODO: Remove this field and always ask the BackendPool using the NodePort. glbcDefaultBackend *compute.BackendService defaultBackendPool backends.BackendPool - defaultBackendNodePort backends.ServicePort + defaultBackendNodePort utils.ServicePort namer *utils.Namer } @@ -62,7 +62,7 @@ func (l *L7s) Namer() *utils.Namer { func NewLoadBalancerPool( cloud LoadBalancers, defaultBackendPool backends.BackendPool, - defaultBackendNodePort backends.ServicePort, namer *utils.Namer) LoadBalancerPool { + defaultBackendNodePort utils.ServicePort, namer *utils.Namer) LoadBalancerPool { return &L7s{cloud, storage.NewInMemoryPool(), nil, defaultBackendPool, defaultBackendNodePort, namer} } @@ -146,7 +146,7 @@ func (l *L7s) Sync(lbs []*L7RuntimeInfo) error { // Lazily create a default backend so we don't tax users who don't care // about Ingress by consuming 1 of their 3 GCE BackendServices. This // BackendService is GC'd when there are no more Ingresses. 
- if err := l.defaultBackendPool.Ensure([]backends.ServicePort{l.defaultBackendNodePort}, nil); err != nil { + if err := l.defaultBackendPool.Ensure([]utils.ServicePort{l.defaultBackendNodePort}, nil); err != nil { return err } defaultBackend, err := l.defaultBackendPool.Get(l.defaultBackendNodePort.NodePort, false) diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index e8d7448e61..6569733c97 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -39,7 +39,7 @@ const ( ) var ( - testDefaultBeNodePort = backends.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP} + testDefaultBeNodePort = utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP} ) func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T, namer *utils.Namer) LoadBalancerPool { @@ -637,21 +637,17 @@ func TestCreateBothLoadBalancers(t *testing.T) { } func TestUpdateUrlMap(t *testing.T) { - um1 := utils.GCEURLMapFromPrimitive(utils.PrimitivePathMap{ - "bar.example.com": { - "/bar2": "bar2svc", - }, - }) - um2 := utils.GCEURLMapFromPrimitive(utils.PrimitivePathMap{ - "foo.example.com": { - "/foo1": "foo1svc", - "/foo2": "foo2svc", - }, - "bar.example.com": { - "/bar1": "bar1svc", - }, + um1 := utils.NewGCEURLMap() + um2 := utils.NewGCEURLMap() + + um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar2", Backend: utils.ServicePort{NodePort: 30000}}}) + + um2.PutPathRulesForHost("foo.example.com", []utils.PathRule{ + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30001}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30002}}, }) - um2.DefaultBackendName = "default" + um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30003}}}) + um2.DefaultBackend = utils.ServicePort{NodePort: 30004} namer := utils.NewNamer("uid1", "fw1") lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true} @@ -674,26 +670,22 @@ func TestUpdateUrlMap(t *testing.T) { } func TestUpdateUrlMapNoChanges(t *testing.T) { - um1 := utils.GCEURLMapFromPrimitive(utils.PrimitivePathMap{ - "foo.example.com": { - "/foo1": "foo1svc", - "/foo2": "foo2svc", - }, - "bar.example.com": { - "/bar1": "bar1svc", - }, + um1 := utils.NewGCEURLMap() + um2 := utils.NewGCEURLMap() + + um1.PutPathRulesForHost("foo.example.com", []utils.PathRule{ + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001}}, }) - um1.DefaultBackendName = "default" - um2 := utils.GCEURLMapFromPrimitive(utils.PrimitivePathMap{ - "foo.example.com": { - "/foo1": "foo1svc", - "/foo2": "foo2svc", - }, - "bar.example.com": { - "/bar1": "bar1svc", - }, + um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002}}}) + um1.DefaultBackend = utils.ServicePort{NodePort: 30003} + + um2.PutPathRulesForHost("foo.example.com", []utils.PathRule{ + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001}}, }) - um2.DefaultBackendName = "default" + um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002}}}) + um2.DefaultBackend = utils.ServicePort{NodePort: 30003} namer := utils.NewNamer("uid1", "fw1") 
lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer("test"), AllowHTTP: true} diff --git a/pkg/utils/gceurlmap.go b/pkg/utils/gceurlmap.go index e96ddb20dd..75db609515 100644 --- a/pkg/utils/gceurlmap.go +++ b/pkg/utils/gceurlmap.go @@ -26,31 +26,15 @@ import ( // 2. All paths for a specific host are unique. // 3. Adding paths for a hostname replaces existing for that host. type GCEURLMap struct { - // DefaultBackendName is the k8s name given to the default backend. - DefaultBackendName string + DefaultBackend ServicePort // hostRules is a mapping from hostnames to path rules for that host. hostRules map[string][]PathRule } // PathRule encapsulates the information for a single path -> backend mapping. type PathRule struct { - Path string - // BackendName is the k8s name given to the backend. - BackendName string -} - -// GCEURLMapFromPrimitive returns a GCEURLMap that is populated from a primitive representation. -// Note: This is useful for tests. -func GCEURLMapFromPrimitive(rawUrlMap map[string]map[string]string) *GCEURLMap { - urlMap := NewGCEURLMap() - for hostname, rules := range rawUrlMap { - pathRules := make([]PathRule, 0) - for path, backend := range rules { - pathRules = append(pathRules, PathRule{Path: path, BackendName: backend}) - } - urlMap.PutPathRulesForHost(hostname, pathRules) - } - return urlMap + Path string + Backend ServicePort } // NewGCEURLMap returns an empty GCEURLMap @@ -68,7 +52,7 @@ func (g *GCEURLMap) PutPathRulesForHost(hostname string, pathRules []PathRule) { uniquePaths := make(map[string]PathRule) for _, pathRule := range pathRules { if _, ok := uniquePaths[pathRule.Path]; ok { - glog.V(4).Infof("Equal paths (%v) for host %v. Using backend %v", pathRule.Path, hostname, pathRule.BackendName) + glog.V(4).Infof("Equal paths (%v) for host %v. Using backend %+v", pathRule.Path, hostname, pathRule.Backend) } uniquePaths[pathRule.Path] = pathRule } @@ -90,6 +74,19 @@ func (g *GCEURLMap) AllRules() map[string][]PathRule { return retVal } +// AllServicePorts return a list of all ServicePorts contained in the GCEURLMap. +func (g *GCEURLMap) AllServicePorts() (svcPorts []ServicePort) { + for _, rules := range g.hostRules { + for _, rule := range rules { + svcPorts = append(svcPorts, rule.Backend) + } + } + if g.DefaultBackend != (ServicePort{}) { + svcPorts = append(svcPorts, g.DefaultBackend) + } + return +} + // HostExists returns true if the given hostname is specified in the GCEURLMap. func (g *GCEURLMap) HostExists(hostname string) bool { _, ok := g.hostRules[hostname] @@ -97,18 +94,18 @@ func (g *GCEURLMap) HostExists(hostname string) bool { } // PathExists returns true if the given path exists for the given hostname. -// It will also return the name of the backend associated with that path. -func (g *GCEURLMap) PathExists(hostname, path string) (bool, string) { +// It will also return the backend associated with that path. +func (g *GCEURLMap) PathExists(hostname, path string) (bool, ServicePort) { pathRules, ok := g.hostRules[hostname] if !ok { - return ok, "" + return ok, ServicePort{} } for _, pathRule := range pathRules { if pathRule.Path == path { - return true, pathRule.BackendName + return true, pathRule.Backend } } - return false, "" + return false, ServicePort{} } // String dumps a readable version of the GCEURLMap. 
@@ -118,13 +115,9 @@ func (g *GCEURLMap) String() string {
 		msg += fmt.Sprintf("%v\n", host)
 		for _, rule := range rules {
 			msg += fmt.Sprintf("\t%v: ", rule.Path)
-			if rule.BackendName == "" {
-				msg += fmt.Sprintf("No backend\n")
-			} else {
-				msg += fmt.Sprintf("%v\n", rule.BackendName)
-			}
+			msg += fmt.Sprintf("%+v\n", rule.Backend)
 		}
 	}
-	msg += fmt.Sprintf("Default Backend: %v", g.DefaultBackendName)
+	msg += fmt.Sprintf("Default Backend: %+v", g.DefaultBackend)
 	return msg
 }
diff --git a/pkg/utils/gceurlmap_test.go b/pkg/utils/gceurlmap_test.go
index b5c692d629..ed03884bca 100644
--- a/pkg/utils/gceurlmap_test.go
+++ b/pkg/utils/gceurlmap_test.go
@@ -25,8 +25,8 @@ func TestGCEURLMap(t *testing.T) {
 
 	// Add some path rules for a host.
 	rules := []PathRule{
-		PathRule{Path: "/test1", BackendName: "test1"},
-		PathRule{Path: "/test2", BackendName: "test2"},
+		PathRule{Path: "/test1", Backend: ServicePort{NodePort: 30000}},
+		PathRule{Path: "/test2", Backend: ServicePort{NodePort: 30001}},
 	}
 	urlMap.PutPathRulesForHost("example.com", rules)
 	if !urlMap.HostExists("example.com") {
@@ -41,7 +41,7 @@ func TestGCEURLMap(t *testing.T) {
 
 	// Add some path rules for the same host. Ensure this results in an overwrite.
 	rules = []PathRule{
-		PathRule{Path: "/test3", BackendName: "test3"},
+		PathRule{Path: "/test3", Backend: ServicePort{NodePort: 30002}},
 	}
 	urlMap.PutPathRulesForHost("example.com", rules)
 	if ok, _ := urlMap.PathExists("example.com", "/test1"); ok {
@@ -56,13 +56,13 @@ func TestGCEURLMap(t *testing.T) {
 
 	// Add some path rules with equal paths. Ensure the last one is taken.
 	rules = []PathRule{
-		PathRule{Path: "/test4", BackendName: "test4"},
-		PathRule{Path: "/test5", BackendName: "test5"},
-		PathRule{Path: "/test4", BackendName: "test4-a"},
+		PathRule{Path: "/test4", Backend: ServicePort{NodePort: 30003}},
+		PathRule{Path: "/test5", Backend: ServicePort{NodePort: 30004}},
+		PathRule{Path: "/test4", Backend: ServicePort{NodePort: 30005}},
 	}
 	urlMap.PutPathRulesForHost("example.com", rules)
 	_, backend := urlMap.PathExists("example.com", "/test4")
-	if backend != "test4-a" {
-		t.Errorf("Expected path /test4 for hostname example.com to point to backend test4-a in %+v", urlMap)
+	if backend.NodePort != 30005 {
+		t.Errorf("Expected path /test4 for hostname example.com to point to backend with NodePort 30005 in %+v", urlMap)
 	}
 }
diff --git a/pkg/utils/serviceport.go b/pkg/utils/serviceport.go
new file mode 100644
index 0000000000..4c23e54a07
--- /dev/null
+++ b/pkg/utils/serviceport.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/ingress-gce/pkg/annotations"
+)
+
+// ServicePort maintains configuration for a single backend.
+type ServicePort struct {
+	SvcName       types.NamespacedName
+	SvcPort       intstr.IntOrString
+	NodePort      int64
+	Protocol      annotations.AppProtocol
+	SvcTargetPort string
+	NEGEnabled    bool
+}
+
+// Description returns a string describing the ServicePort.
+func (sp ServicePort) Description() string {
+	if sp.SvcName.String() == "" || sp.SvcPort.String() == "" {
+		return ""
+	}
+	return fmt.Sprintf(`{"kubernetes.io/service-name":"%s","kubernetes.io/service-port":"%s"}`, sp.SvcName.String(), sp.SvcPort.String())
+}
+
+// IsAlpha returns true if the ServicePort is using ProtocolHTTP2 - which means
+// we need to use the Alpha API.
+func (sp ServicePort) IsAlpha() bool {
+	return sp.Protocol == annotations.ProtocolHTTP2
+}
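A minimal usage sketch of how the refactored types compose (not part of the change; the hostname and node ports are illustrative, and only the API introduced in this diff is assumed):

package main

import (
	"fmt"

	"k8s.io/ingress-gce/pkg/annotations"
	"k8s.io/ingress-gce/pkg/utils"
)

func main() {
	// Path rules now carry a full ServicePort instead of a k8s backend name.
	um := utils.NewGCEURLMap()
	um.PutPathRulesForHost("foo.example.com", []utils.PathRule{
		{Path: "/foo", Backend: utils.ServicePort{NodePort: 30000, Protocol: annotations.ProtocolHTTP}},
	})
	um.DefaultBackend = utils.ServicePort{NodePort: 30001, Protocol: annotations.ProtocolHTTP}

	// PathExists returns the backend itself, so no lookup by name is needed.
	if ok, backend := um.PathExists("foo.example.com", "/foo"); ok {
		fmt.Printf("backend for /foo: %+v\n", backend)
	}

	// AllServicePorts flattens every rule's backend plus a non-empty default
	// backend, ready to hand to the backend pool in a single Ensure call.
	for _, sp := range um.AllServicePorts() {
		fmt.Printf("node port %d, needs alpha API: %v\n", sp.NodePort, sp.IsAlpha())
	}
}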