Add ability to change prefix in the Namer
- This allows the multicluster code to share use of the Namer.
- Add more unit tests, increasing coverage of the namer code.
bowei committed Dec 5, 2017
1 parent 6dcd1bf commit c3178b4
Showing 13 changed files with 473 additions and 255 deletions.
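As the commit message notes, the Namer's hard-coded "k8s" resource-name prefix becomes configurable so the multicluster code can reuse the same naming logic with a different prefix. The pkg/utils/namer.go diff itself is not expanded on this page, so the sketch below is only an illustration of the intended shape, inferred from the call sites visible in the hunks that follow: utils.NewNamer(clusterUID, firewallName) replacing zero-value utils.Namer{}, and the "k8s-ig--uid1" instance-group name asserted in backends_test.go. The NewNamerWithPrefix name and the struct fields are assumptions, not taken from the commit.

```go
// Hypothetical sketch of a prefix-configurable Namer; the real namer.go diff
// is not shown on this page. Names below (NewNamerWithPrefix, field names)
// are illustrative assumptions based on the visible call sites.
package utils

import "fmt"

const defaultPrefix = "k8s" // assumed historical default

// Namer generates GCE resource names scoped to a cluster UID and firewall.
type Namer struct {
	prefix      string
	clusterName string
	firewall    string
}

// NewNamer keeps the default "k8s" prefix; this matches the
// utils.NewNamer("uid1", "fw1") calls in the tests below.
func NewNamer(clusterName, firewallName string) *Namer {
	return NewNamerWithPrefix(defaultPrefix, clusterName, firewallName)
}

// NewNamerWithPrefix (name assumed) lets callers such as the multicluster
// code substitute their own prefix.
func NewNamerWithPrefix(prefix, clusterName, firewallName string) *Namer {
	return &Namer{prefix: prefix, clusterName: clusterName, firewall: firewallName}
}

// InstanceGroup illustrates the "<prefix>-ig--<clusterUID>" shape asserted by
// TestBackendInstanceGroupClobbering ("k8s-ig--uid1").
func (n *Namer) InstanceGroup() string {
	if n.clusterName == "" {
		return fmt.Sprintf("%s-ig", n.prefix)
	}
	return fmt.Sprintf("%s-ig--%s", n.prefix, n.clusterName)
}
```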
2 changes: 1 addition & 1 deletion cmd/glbc/main.go
@@ -435,7 +435,7 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error)
if err != nil {
return "", err
}
namer := utils.Namer{}
namer := utils.NewNamer("", "")
for _, ing := range ings.Items {
if len(ing.Status.LoadBalancer.Ingress) != 0 {
c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
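In the getClusterUID hunk above, the controller now constructs a Namer with empty cluster and firewall names and uses ParseName to recover the cluster UID suffix from an existing forwarding-rule name. A minimal usage sketch; the import path, the example resource name, and the ClusterName field on ParseName's result are assumptions, since the namer source is not shown here:

```go
package main

import (
	"fmt"

	"k8s.io/ingress-gce/pkg/utils"
)

func main() {
	namer := utils.NewNamer("", "")
	// Example forwarding-rule name; the "k8s-fw" prefix comes from the constant
	// removed from cluster_manager.go later in this commit.
	parsed := namer.ParseName("k8s-fw-default-my-ingress--uid1")
	fmt.Println(parsed.ClusterName) // "uid1", assuming ParseName exposes the UID this way
}
```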
119 changes: 54 additions & 65 deletions pkg/backends/backends_test.go
@@ -39,29 +39,30 @@ import (

const defaultZone = "zone-a"

var noOpErrFunc = func(op int, be *compute.BackendService) error { return nil }

var existingProbe = &api_v1.Probe{
Handler: api_v1.Handler{
HTTPGet: &api_v1.HTTPGetAction{
Scheme: api_v1.URISchemeHTTPS,
Path: "/my-special-path",
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 443,
var (
defaultNamer = utils.NewNamer("uid1", "fw1")
existingProbe = &api_v1.Probe{
Handler: api_v1.Handler{
HTTPGet: &api_v1.HTTPGetAction{
Scheme: api_v1.URISchemeHTTPS,
Path: "/my-special-path",
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 443,
},
},
},
},
}
}
noOpErrFunc = func(op int, be *compute.BackendService) error { return nil }
)

func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) (*Backends, healthchecks.HealthCheckProvider) {
namer := &utils.Namer{}
negGetter := networkendpointgroup.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network")
nodePool := instances.NewNodePool(fakeIGs, namer)
nodePool := instances.NewNodePool(fakeIGs, defaultNamer)
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
healthCheckProvider := healthchecks.NewFakeHealthCheckProvider()
healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", namer)
bp := NewBackendPool(f, negGetter, healthChecks, nodePool, namer, []int64{}, syncWithCloud)
healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", defaultNamer)
bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, syncWithCloud)
probes := map[ServicePort]*api_v1.Probe{{Port: 443, Protocol: utils.ProtocolHTTPS}: existingProbe}
bp.Init(NewFakeProbeProvider(probes))

@@ -70,9 +71,8 @@ func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithClo

func TestBackendPoolAdd(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

testCases := []ServicePort{
{Port: 80, Protocol: utils.ProtocolHTTP},
@@ -89,7 +89,7 @@ func TestBackendPoolAdd(t *testing.T) {
if err != nil {
t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", nodePort, err)
}
beName := namer.Backend(nodePort.Port)
beName := defaultNamer.Backend(nodePort.Port)

// Check that the new backend has the right port
be, err := f.GetGlobalBackendService(beName)
@@ -101,7 +101,7 @@ func TestBackendPoolAdd(t *testing.T) {
}

// Check that the instance group has the new port.
ig, err := fakeIGs.GetInstanceGroup(namer.InstanceGroup(), defaultZone)
ig, err := fakeIGs.GetInstanceGroup(defaultNamer.InstanceGroup(), defaultZone)
var found bool
for _, port := range ig.NamedPorts {
if port.Port == nodePort.Port {
@@ -131,15 +131,14 @@ func TestBackendPoolAdd(t *testing.T) {

func TestHealthCheckMigration(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, hcp := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

p := ServicePort{Port: 7000, Protocol: utils.ProtocolHTTP}

// Create a legacy health check and insert it into the HC provider.
legacyHC := &compute.HttpHealthCheck{
Name: namer.Backend(p.Port),
Name: defaultNamer.Backend(p.Port),
RequestPath: "/my-healthz-path",
Host: "k8s.io",
Description: "My custom HC",
@@ -169,13 +168,12 @@ func TestHealthCheckMigration(t *testing.T) {

func TestBackendPoolUpdate(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
pool.Ensure([]ServicePort{p}, nil)
beName := namer.Backend(p.Port)
beName := defaultNamer.Backend(p.Port)

be, err := f.GetGlobalBackendService(beName)
if err != nil {
@@ -215,13 +213,12 @@ func TestBackendPoolUpdate(t *testing.T) {

func TestBackendPoolChaosMonkey(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP}
pool.Ensure([]ServicePort{nodePort}, nil)
beName := namer.Backend(nodePort.Port)
beName := defaultNamer.Backend(nodePort.Port)

be, _ := f.GetGlobalBackendService(beName)

@@ -243,9 +240,9 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
if err != nil {
t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
}
gotGroup, err := fakeIGs.GetInstanceGroup(namer.InstanceGroup(), defaultZone)
gotGroup, err := fakeIGs.GetInstanceGroup(defaultNamer.InstanceGroup(), defaultZone)
if err != nil {
t.Fatalf("Failed to find instance group %v", namer.InstanceGroup())
t.Fatalf("Failed to find instance group %v", defaultNamer.InstanceGroup())
}
backendLinks := sets.NewString()
for _, be := range gotBackend.Backends {
Expand All @@ -264,7 +261,7 @@ func TestBackendPoolSync(t *testing.T) {
// creates/deletes required ports.
svcNodePorts := []ServicePort{{Port: 81, Protocol: utils.ProtocolHTTP}, {Port: 82, Protocol: utils.ProtocolHTTPS}, {Port: 83, Protocol: utils.ProtocolHTTP}}
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, true)
pool.Ensure([]ServicePort{{Port: 81}}, nil)
pool.Ensure([]ServicePort{{Port: 90}}, nil)
@@ -305,9 +302,8 @@ func TestBackendPoolSync(t *testing.T) {
f.CreateGlobalBackendService(&compute.BackendService{Name: name})
}

namer := &utils.Namer{}
// This backend should get deleted again since it is managed by this cluster.
f.CreateGlobalBackendService(&compute.BackendService{Name: namer.Backend(deletedPorts[0].Port)})
f.CreateGlobalBackendService(&compute.BackendService{Name: defaultNamer.Backend(deletedPorts[0].Port)})

// TODO: Avoid casting.
// Repopulate the pool with a cloud list, which now includes the 82 port
@@ -323,7 +319,7 @@ func TestBackendPoolSync(t *testing.T) {
currSet.Insert(b.Name)
}
// Port 81 still exists because it's an in-use service NodePort.
knownBe := namer.Backend(81)
knownBe := defaultNamer.Backend(81)
if !currSet.Has(knownBe) {
t.Fatalf("Expected %v to exist in backend pool", knownBe)
}
@@ -334,20 +330,19 @@ func TestBackendPoolSync(t *testing.T) {
}

func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
namer := &utils.Namer{}
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
negGetter := networkendpointgroup.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network")
nodePool := instances.NewNodePool(fakeIGs, namer)
nodePool := instances.NewNodePool(fakeIGs, defaultNamer)
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
hcp := healthchecks.NewFakeHealthCheckProvider()
healthChecks := healthchecks.NewHealthChecker(hcp, "/", namer)
bp := NewBackendPool(f, negGetter, healthChecks, nodePool, namer, []int64{}, false)
healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer)
bp := NewBackendPool(f, negGetter, healthChecks, nodePool, defaultNamer, []int64{}, false)
probes := map[ServicePort]*api_v1.Probe{}
bp.Init(NewFakeProbeProvider(probes))

// Create a legacy HTTP health check
beName := namer.Backend(80)
beName := defaultNamer.Backend(80)
if err := hcp.CreateHttpHealthCheck(&compute.HttpHealthCheck{
Name: beName,
Port: 80,
@@ -390,30 +385,28 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {

func TestBackendPoolShutdown(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

// Add a backend-service and verify that it doesn't exist after Shutdown()
pool.Ensure([]ServicePort{{Port: 80}}, nil)
pool.Shutdown()
if _, err := f.GetGlobalBackendService(namer.Backend(80)); err == nil {
if _, err := f.GetGlobalBackendService(defaultNamer.Backend(80)); err == nil {
t.Fatalf("%v", err)
}
}

func TestBackendInstanceGroupClobbering(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}

// This will add the instance group k8s-ig to the instance pool
pool.Ensure([]ServicePort{{Port: 80}}, nil)

be, err := f.GetGlobalBackendService(namer.Backend(80))
be, err := f.GetGlobalBackendService(defaultNamer.Backend(80))
if err != nil {
t.Fatalf("%v", err)
t.Fatalf("f.GetGlobalBackendService(defaultNamer.Backend(80)) = _, %v, want _, nil", err)
}
// Simulate another controller updating the same backend service with
// a different instance group
Expand All @@ -428,7 +421,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {

// Make sure repeated adds don't clobber the inserted instance group
pool.Ensure([]ServicePort{{Port: 80}}, nil)
be, err = f.GetGlobalBackendService(namer.Backend(80))
be, err = f.GetGlobalBackendService(defaultNamer.Backend(80))
if err != nil {
t.Fatalf("%v", err)
}
@@ -438,7 +431,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
}

// seed expectedGroups with the first group native to this controller
expectedGroups := sets.NewString("k8s-ig")
expectedGroups := sets.NewString("k8s-ig--uid1")
for _, newGroup := range newGroups {
expectedGroups.Insert(newGroup.Group)
}
@@ -449,10 +442,8 @@ func TestBackendCreateBalancingMode(t *testing.T) {

func TestBackendCreateBalancingMode(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)

fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
pool, _ := newTestJig(f, fakeIGs, false)
namer := utils.Namer{}
nodePort := ServicePort{Port: 8080}
modes := []BalancingMode{Rate, Utilization}

@@ -470,7 +461,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
}

pool.Ensure([]ServicePort{nodePort}, nil)
be, err := f.GetGlobalBackendService(namer.Backend(nodePort.Port))
be, err := f.GetGlobalBackendService(defaultNamer.Backend(nodePort.Port))
if err != nil {
t.Fatalf("%v", err)
}
@@ -511,20 +502,16 @@ func TestApplyProbeSettingsToHC(t *testing.T) {
}

func TestLinkBackendServiceToNEG(t *testing.T) {
clusterId := "clusterid"
zones := []string{"zone1", "zone2"}
namespace := "ns"
name := "name"
port := "port"
namer := utils.NewNamer(clusterId, "")
namespace, name, port := "ns", "name", "port"
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer)
fakeNEG := networkendpointgroup.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network")
nodePool := instances.NewNodePool(fakeIGs, namer)
nodePool := instances.NewNodePool(fakeIGs, defaultNamer)
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
hcp := healthchecks.NewFakeHealthCheckProvider()
healthChecks := healthchecks.NewHealthChecker(hcp, "/", namer)
bp := NewBackendPool(f, fakeNEG, healthChecks, nodePool, namer, []int64{}, false)
healthChecks := healthchecks.NewHealthChecker(hcp, "/", defaultNamer)
bp := NewBackendPool(f, fakeNEG, healthChecks, nodePool, defaultNamer, []int64{}, false)

svcPort := ServicePort{
Port: 30001,
@@ -542,7 +529,9 @@ func TestLinkBackendServiceToNEG(t *testing.T) {
}

for _, zone := range zones {
err := fakeNEG.CreateNetworkEndpointGroup(&computealpha.NetworkEndpointGroup{Name: namer.NEG(namespace, name, port)}, zone)
err := fakeNEG.CreateNetworkEndpointGroup(&computealpha.NetworkEndpointGroup{
Name: defaultNamer.NEG(namespace, name, port),
}, zone)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
Expand All @@ -552,7 +541,7 @@ func TestLinkBackendServiceToNEG(t *testing.T) {
t.Fatalf("Failed to link backend service to NEG: %v", err)
}

bs, err := f.GetGlobalBackendService(namer.Backend(svcPort.Port))
bs, err := f.GetGlobalBackendService(defaultNamer.Backend(svcPort.Port))
if err != nil {
t.Fatalf("Failed to retrieve backend service: %v", err)
}
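The tests above replace the per-test zero-value utils.Namer{} with a single package-level defaultNamer = utils.NewNamer("uid1", "fw1"), so generated resource names now carry the cluster UID suffix. Only the instance-group format ("k8s-ig--uid1") is asserted directly in this diff; the backend-service format in the sketch below is an assumption:

```go
package backends_test

import (
	"fmt"

	"k8s.io/ingress-gce/pkg/utils"
)

// Example_defaultNamer is a usage sketch of the shared test namer; it is not
// part of the commit.
func Example_defaultNamer() {
	defaultNamer := utils.NewNamer("uid1", "fw1")
	fmt.Println(defaultNamer.InstanceGroup()) // k8s-ig--uid1 (asserted by TestBackendInstanceGroupClobbering)
	fmt.Println(defaultNamer.Backend(80))     // presumably k8s-be-80--uid1 (assumption)
}
```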
10 changes: 0 additions & 10 deletions pkg/controller/cluster_manager.go
@@ -36,16 +36,6 @@ const (
defaultPort = 80
defaultHealthCheckPath = "/"

// A backend is created per nodePort, tagged with the nodeport.
// This allows sharing of backends across loadbalancers.
backendPrefix = "k8s-be"

// A single target proxy/urlmap/forwarding rule is created per loadbalancer.
// Tagged with the namespace/name of the Ingress.
targetProxyPrefix = "k8s-tp"
forwardingRulePrefix = "k8s-fw"
urlMapPrefix = "k8s-um"

// Used in the test RunServer method to denote a delete request.
deleteType = "del"

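The constants deleted from cluster_manager.go above (the "k8s-be", "k8s-tp", "k8s-fw", and "k8s-um" prefixes) presumably now live behind the Namer's configurable prefix. A hypothetical multicluster caller, reusing the NewNamerWithPrefix name assumed in the earlier sketch; neither the constructor name nor the "mci" prefix appears in this commit's visible diff:

```go
package main

import "k8s.io/ingress-gce/pkg/utils"

func main() {
	// Hypothetical: a multicluster controller picking its own prefix.
	mciNamer := utils.NewNamerWithPrefix("mci", "cluster-uid", "firewall-name")
	_ = mciNamer.Backend(80) // would yield something like "mci-be-80--cluster-uid"
}
```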
(Diffs for the remaining 10 changed files are not expanded on this page.)
