Updating instances interface to accept all named ports at once
nikhiljindal committed Oct 6, 2017
1 parent abc8b9d commit 3dcdc26
Showing 11 changed files with 133 additions and 91 deletions.
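
At a glance, the interface change in this commit, condensed into a self-contained Go sketch. The method sets are copied from the diffs below; the types are local stand-ins for backends.ServicePort and the compute types, not the real definitions:

```go
package sketch

// Local stand-ins for backends.ServicePort, *compute.InstanceGroup and
// *compute.NamedPort; only the shape of the method sets matters here.
type ServicePort struct{ Port int64 }
type InstanceGroup struct{ Name string }
type NamedPort struct {
	Name string
	Port int64
}

// Before this commit: one named port per call, plus a separate batch Sync,
// so instance groups were recomputed once per service port.
type backendPoolBefore interface {
	Add(port ServicePort, igs []*InstanceGroup) error
	Sync(ports []ServicePort, igs []*InstanceGroup) error
}

type instancePoolBefore interface {
	AddInstanceGroup(name string, port int64) ([]*InstanceGroup, *NamedPort, error)
}

// After this commit: a single batched call in each package.
type backendPoolAfter interface {
	Ensure(ports []ServicePort, igs []*InstanceGroup) error
}

type instancePoolAfter interface {
	AddInstanceGroup(name string, ports []int64) ([]*InstanceGroup, []*NamedPort, error)
}
```

Checkpoint in cluster_manager.go and the backends tests switch from Add/Sync to Ensure accordingly.
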
50 changes: 28 additions & 22 deletions controllers/gce/backends/backends.go
Expand Up @@ -210,24 +210,43 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic
return b.Get(namedPort.Port)
}

// Add will get or create a Backend for the given port.
// Ensure will update or create Backends for the given ports.
// Uses the given instance groups if non-nil, else creates instance groups.
func (b *Backends) Add(p ServicePort, igs []*compute.InstanceGroup) error {
// We must track the port even if creating the backend failed, because
// we might've created a health-check for it.
be := &compute.BackendService{}
defer func() { b.snapshotter.Add(portKey(p.Port), be) }()

var err error
func (b *Backends) Ensure(svcPorts []ServicePort, igs []*compute.InstanceGroup) error {
glog.V(3).Infof("Sync: backends %v", svcPorts)
// Ideally callers should pass the instance groups to prevent recomputing them here.
// Igs can be nil in scenarios where we do not have instance groups such as
// while syncing default backend service.
if igs == nil {
igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, p.Port)
ports := []int64{}
for _, p := range svcPorts {
ports = append(ports, p.Port)
}
var err error
igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, ports)
if err != nil {
return err
}
}
// create backends for new ports, perform an edge hop for existing ports
for _, port := range svcPorts {
if err := b.ensureBackendService(port, igs); err != nil {
return err
}
}
return nil
}

// ensureBackendService will update or create a Backend for the given port.
// It assumes that the instance groups have been created and the required named port has been added.
// If not, then Ensure should be called instead.
func (b *Backends) ensureBackendService(p ServicePort, igs []*compute.InstanceGroup) error {
// We must track the ports even if creating the backends failed, because
// we might've created health checks for them.
be := &compute.BackendService{}
defer func() { b.snapshotter.Add(portKey(p.Port), be) }()

var err error

// Ensure health check for backend service exists
hcLink, err := b.ensureHealthCheck(p)
Expand Down Expand Up @@ -388,19 +407,6 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr
return fmt.Errorf("received errors when updating backend service: %v", strings.Join(errs, "\n"))
}

// Sync syncs backend services corresponding to ports in the given list.
func (b *Backends) Sync(svcNodePorts []ServicePort, igs []*compute.InstanceGroup) error {
glog.V(3).Infof("Sync: backends %v", svcNodePorts)

// create backends for new ports, perform an edge hop for existing ports
for _, port := range svcNodePorts {
if err := b.Add(port, igs); err != nil {
return err
}
}
return nil
}

// GC garbage collects services corresponding to ports in the given list.
func (b *Backends) GC(svcNodePorts []ServicePort) error {
knownPorts := sets.NewString()
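
Reading the backends.go hunks together: the per-port Add and the batch Sync are folded into a single Ensure that computes the instance groups (with all named ports) at most once and then ensures one backend service per port. A minimal, self-contained sketch of that control flow, with stub stand-ins for the real instances and backend-service calls (names and output here are illustrative only):

```go
package main

import "fmt"

type ServicePort struct{ Port int64 }
type InstanceGroup struct{ Name string }

// Stand-in for instances.EnsureInstanceGroupsAndPorts: with this commit it is
// called once with the full port list instead of once per port.
func ensureInstanceGroupsAndPorts(ports []int64) ([]*InstanceGroup, error) {
	fmt.Println("ensuring instance groups with named ports", ports)
	return []*InstanceGroup{{Name: "k8s-ig"}}, nil
}

// Stand-in for the unexported ensureBackendService: create the backend service
// for a new port or update (edge-hop) an existing one.
func ensureBackendService(p ServicePort, igs []*InstanceGroup) error {
	fmt.Printf("ensuring backend service for port %d on %d instance group(s)\n", p.Port, len(igs))
	return nil
}

// ensure mirrors the control flow of the new Backends.Ensure: if the caller
// did not pass instance groups (e.g. when syncing the default backend
// service), compute them once for all ports, then ensure each backend service.
func ensure(svcPorts []ServicePort, igs []*InstanceGroup) error {
	if igs == nil {
		ports := make([]int64, 0, len(svcPorts))
		for _, p := range svcPorts {
			ports = append(ports, p.Port)
		}
		var err error
		if igs, err = ensureInstanceGroupsAndPorts(ports); err != nil {
			return err
		}
	}
	for _, p := range svcPorts {
		if err := ensureBackendService(p, igs); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = ensure([]ServicePort{{Port: 80}, {Port: 443}}, nil)
}
```
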
37 changes: 19 additions & 18 deletions controllers/gce/backends/backends_test.go
Expand Up @@ -80,7 +80,7 @@ func TestBackendPoolAdd(t *testing.T) {
// Add a backend for a port, then re-add the same port and
// make sure it corrects a broken link from the backend to
// the instance group.
err := pool.Add(nodePort, nil)
err := pool.Ensure([]ServicePort{nodePort}, nil)
if err != nil {
t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", nodePort, err)
}
Expand All @@ -95,10 +95,11 @@ func TestBackendPoolAdd(t *testing.T) {
t.Fatalf("Backend %v has wrong port %v, expected %v", be.Name, be.Port, nodePort)
}

// Check that the instance group has the new port
// Check that the instance group has the new port.
ig, err := fakeIGs.GetInstanceGroup(namer.IGName(), defaultZone)
var found bool
for _, port := range fakeIGs.Ports {
if port == nodePort.Port {
for _, port := range ig.NamedPorts {
if port.Port == nodePort.Port {
found = true
}
}
Expand Down Expand Up @@ -143,7 +144,7 @@ func TestHealthCheckMigration(t *testing.T) {
hcp.CreateHttpHealthCheck(legacyHC)

// Add the service port to the backend pool
pool.Add(p, nil)
pool.Ensure([]ServicePort{p}, nil)

// Assert the proper health check was created
hc, _ := pool.healthChecker.Get(p.Port)
Expand All @@ -168,7 +169,7 @@ func TestBackendPoolUpdate(t *testing.T) {
namer := utils.Namer{}

p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
pool.Add(p, nil)
pool.Ensure([]ServicePort{p}, nil)
beName := namer.BeName(p.Port)

be, err := f.GetGlobalBackendService(beName)
Expand All @@ -188,7 +189,7 @@ func TestBackendPoolUpdate(t *testing.T) {

// Update service port to encrypted
p.Protocol = utils.ProtocolHTTPS
pool.Sync([]ServicePort{p}, nil)
pool.Ensure([]ServicePort{p}, nil)

be, err = f.GetGlobalBackendService(beName)
if err != nil {
Expand All @@ -214,7 +215,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
namer := utils.Namer{}

nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP}
pool.Add(nodePort, nil)
pool.Ensure([]ServicePort{nodePort}, nil)
beName := namer.BeName(nodePort.Port)

be, _ := f.GetGlobalBackendService(beName)
Expand All @@ -227,7 +228,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
f.calls = []int{}
f.UpdateGlobalBackendService(be)

pool.Add(nodePort, nil)
pool.Ensure([]ServicePort{nodePort}, nil)
for _, call := range f.calls {
if call == utils.Create {
t.Fatalf("Unexpected create for existing backend service")
Expand Down Expand Up @@ -260,10 +261,10 @@ func TestBackendPoolSync(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool, _ := newTestJig(f, fakeIGs, true)
pool.Add(ServicePort{Port: 81}, nil)
pool.Add(ServicePort{Port: 90}, nil)
if err := pool.Sync(svcNodePorts, nil); err != nil {
t.Errorf("Expected backend pool to sync, err: %v", err)
pool.Ensure([]ServicePort{ServicePort{Port: 81}}, nil)
pool.Ensure([]ServicePort{ServicePort{Port: 90}}, nil)
if err := pool.Ensure(svcNodePorts, nil); err != nil {
t.Errorf("Expected backend pool to add node ports, err: %v", err)
}
if err := pool.GC(svcNodePorts); err != nil {
t.Errorf("Expected backend pool to GC, err: %v", err)
Expand Down Expand Up @@ -361,7 +362,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
})

// Have pool sync the above backend service
bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}, nil)
bp.Ensure([]ServicePort{ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}}, nil)

// Verify the legacy health check has been deleted
_, err = hcp.GetHttpHealthCheck(beName)
Expand All @@ -388,7 +389,7 @@ func TestBackendPoolShutdown(t *testing.T) {
namer := utils.Namer{}

// Add a backend-service and verify that it doesn't exist after Shutdown()
pool.Add(ServicePort{Port: 80}, nil)
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)
pool.Shutdown()
if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
t.Fatalf("%v", err)
Expand All @@ -402,7 +403,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
namer := utils.Namer{}

// This will add the instance group k8s-ig to the instance pool
pool.Add(ServicePort{Port: 80}, nil)
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)

be, err := f.GetGlobalBackendService(namer.BeName(80))
if err != nil {
Expand All @@ -420,7 +421,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
}

// Make sure repeated adds don't clobber the inserted instance group
pool.Add(ServicePort{Port: 80}, nil)
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)
be, err = f.GetGlobalBackendService(namer.BeName(80))
if err != nil {
t.Fatalf("%v", err)
Expand Down Expand Up @@ -462,7 +463,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
return nil
}

pool.Add(nodePort, nil)
pool.Ensure([]ServicePort{nodePort}, nil)
be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
if err != nil {
t.Fatalf("%v", err)
3 changes: 1 addition & 2 deletions controllers/gce/backends/interfaces.go
Expand Up @@ -30,10 +30,9 @@ type probeProvider interface {
// as gce backendServices, and sync them through the BackendServices interface.
type BackendPool interface {
Init(p probeProvider)
Add(port ServicePort, igs []*compute.InstanceGroup) error
Ensure(ports []ServicePort, igs []*compute.InstanceGroup) error
Get(port int64) (*compute.BackendService, error)
Delete(port int64) error
Sync(ports []ServicePort, igs []*compute.InstanceGroup) error
GC(ports []ServicePort) error
Shutdown() error
Status(name string) string
18 changes: 5 additions & 13 deletions controllers/gce/controller/cluster_manager.go
Expand Up @@ -134,7 +134,7 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
if err != nil {
return igs, err
}
if err := c.backendPool.Sync(backendServicePorts, igs); err != nil {
if err := c.backendPool.Ensure(backendServicePorts, igs); err != nil {
return igs, err
}
if err := c.instancePool.Sync(nodeNames); err != nil {
Expand Down Expand Up @@ -167,20 +167,12 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
}

func (c *ClusterManager) EnsureInstanceGroupsAndPorts(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
var igs []*compute.InstanceGroup
var err error
ports := []int64{}
for _, p := range servicePorts {
// EnsureInstanceGroupsAndPorts always returns all the instance groups, so we can return
// the output of any call, no need to append the return from all calls.
// TODO: Ideally, we want to call CreateInstaceGroups only the first time and
// then call AddNamedPort multiple times. Need to update the interface to
// achieve this.
igs, _, err = instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, p.Port)
if err != nil {
return nil, err
}
ports = append(ports, p.Port)
}
return igs, nil
igs, _, err := instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, ports)
return igs, err
}

// GC garbage collects unused resources.
7 changes: 1 addition & 6 deletions controllers/gce/controller/utils_test.go
Expand Up @@ -71,17 +71,12 @@ func TestInstancesAddedToZones(t *testing.T) {

// Create 2 igs, one per zone.
testIG := "test-ig"
testPort := int64(3001)
lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, testPort)
lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, []int64{int64(3001)})

// node pool syncs kube-nodes, this will add them to both igs.
lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
gotZonesToNode := cm.fakeIGs.GetInstancesByZone()

if cm.fakeIGs.Ports[0] != testPort {
t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
}

for z, nodeNames := range zoneToNode {
if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
16 changes: 6 additions & 10 deletions controllers/gce/instances/fakes.go
Expand Up @@ -60,7 +60,6 @@ func (z *FakeZoneLister) GetZoneForNode(name string) (string, error) {
type FakeInstanceGroups struct {
instances sets.String
instanceGroups []*compute.InstanceGroup
Ports []int64
getResult *compute.InstanceGroup
listResult *compute.InstanceGroupsListInstances
calls []int
Expand Down Expand Up @@ -150,21 +149,18 @@ func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string,
}

func (f *FakeInstanceGroups) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
found := false
for _, ig := range f.instanceGroups {
if ig.Name == igName && ig.Zone == zone {
found = true
var ig *compute.InstanceGroup
for _, igp := range f.instanceGroups {
if igp.Name == igName && igp.Zone == zone {
ig = igp
break
}
}
if !found {
if ig == nil {
return fmt.Errorf("Failed to find instance group %q in zone %q", igName, zone)
}

f.Ports = f.Ports[:0]
for _, port := range namedPorts {
f.Ports = append(f.Ports, port.Port)
}
ig.NamedPorts = namedPorts
return nil
}

33 changes: 20 additions & 13 deletions controllers/gce/instances/instances.go
Expand Up @@ -59,15 +59,18 @@ func (i *Instances) Init(zl zoneLister) {
}

// AddInstanceGroup creates or gets an instance group if it doesn't exist
// and adds the given port to it. Returns a list of one instance group per zone,
// all of which have the exact same named port.
func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
// and adds the given ports to it. Returns a list of one instance group per zone,
// all of which have the exact same named ports.
func (i *Instances) AddInstanceGroup(name string, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
igs := []*compute.InstanceGroup{}
namedPort := utils.GetNamedPort(port)
namedPorts := []*compute.NamedPort{}
for _, port := range ports {
namedPorts = append(namedPorts, utils.GetNamedPort(port))
}

zones, err := i.ListZones()
if err != nil {
return igs, namedPort, err
return igs, namedPorts, err
}

defer i.snapshotter.Add(name, struct{}{})
Expand Down Expand Up @@ -99,23 +102,27 @@ func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.Instan
glog.V(3).Infof("Instance group %v already exists in zone %v", name, zone)
}

found := false
existingPorts := map[int64]bool{}
for _, np := range ig.NamedPorts {
if np.Port == port {
existingPorts[np.Port] = true
}
var newPorts []*compute.NamedPort
for _, np := range namedPorts {
if existingPorts[np.Port] {
glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
found = true
break
continue
}
newPorts = append(newPorts, np)
}
if !found {
glog.V(3).Infof("Instance group %v/%v does not have port %+v, adding it now.", zone, name, namedPort)
if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPort)); err != nil {
if len(newPorts) > 0 {
glog.V(5).Infof("Instance group %v/%v does not have ports %+v, adding them now.", zone, name, namedPorts)
if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPorts...)); err != nil {
return nil, nil, err
}
}
igs = append(igs, ig)
}
return igs, namedPort, nil
return igs, namedPorts, nil
}

// DeleteInstanceGroup deletes the given IG by name, from all zones.
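
The AddInstanceGroup hunks above now take the whole port list, look up which named ports each instance group already has, and call SetNamedPortsOfInstanceGroup at most once per group with only the missing ones. A self-contained sketch of that bookkeeping; NamedPort is a local stand-in for compute.NamedPort, and the "port<N>" naming is an assumption about utils.GetNamedPort, not something shown in this diff:

```go
package main

import "fmt"

// NamedPort is a local stand-in for compute.NamedPort.
type NamedPort struct {
	Name string
	Port int64
}

// missingNamedPorts returns named ports for every desired port that is not
// already present on the instance group, mirroring the existingPorts /
// newPorts logic in AddInstanceGroup.
func missingNamedPorts(existing []*NamedPort, desired []int64) []*NamedPort {
	have := map[int64]bool{}
	for _, np := range existing {
		have[np.Port] = true
	}
	var missing []*NamedPort
	for _, p := range desired {
		if have[p] {
			continue // already a named port on this group, nothing to add
		}
		missing = append(missing, &NamedPort{Name: fmt.Sprintf("port%d", p), Port: p})
	}
	return missing
}

func main() {
	existing := []*NamedPort{{Name: "port80", Port: 80}}
	for _, np := range missingNamedPorts(existing, []int64{80, 443, 8080}) {
		fmt.Printf("would add %s (%d)\n", np.Name, np.Port)
	}
}
```
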