Skip to content

Commit

Permalink
Unit tests to verify the new L4 mode.
Browse files Browse the repository at this point in the history
Added a controller test to ensure syncers are created.
Also added unit tests for newly added utils.
  • Loading branch information
prameshj committed Jan 16, 2020
1 parent 376e173 commit 112a1b1
Show file tree
Hide file tree
Showing 8 changed files with 369 additions and 42 deletions.
48 changes: 48 additions & 0 deletions pkg/neg/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,7 @@ func newTestController(kubeClient kubernetes.Interface) *Controller {
// TODO(freehan): enable readiness reflector for unit tests
false,
true,
false,
)
return controller
}
Expand Down Expand Up @@ -279,6 +280,31 @@ func TestEnableNEGServiceWithIngress(t *testing.T) {
validateServiceStateAnnotation(t, svc, svcPorts, controller.namer)
}

// TestEnableNEGServiceWithL4ILB tests an L4 ILB service with NEGs enabled,
// covering both the randomized (ExternalTrafficPolicy: Cluster) and the
// non-randomized (ExternalTrafficPolicy: Local) subsetting modes.
func TestEnableNEGServiceWithL4ILB(t *testing.T) {
	controller := newTestController(fake.NewSimpleClientset())
	controller.runL4 = true
	defer controller.stop()
	for _, randomize := range []bool{false, true} {
		// onlyLocal (== !randomize) selects ExternalTrafficPolicy: Local.
		if err := controller.serviceLister.Add(newTestILBService(controller, !randomize, 80)); err != nil {
			t.Fatalf("Failed to add test ILB service to lister: %v", err)
		}
		svcClient := controller.client.CoreV1().Services(testServiceNamespace)
		svcKey := utils.ServiceKeyFunc(testServiceNamespace, testServiceName)
		if err := controller.processService(svcKey); err != nil {
			t.Fatalf("Failed to process service: %v", err)
		}
		svc, err := svcClient.Get(testServiceName, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Service was not created successfully, err: %v", err)
		}
		// Exactly one syncer is expected for the primary-IP NEG.
		validateSyncers(t, controller, 1, false)
		expectedPortInfoMap := negtypes.NewPortInfoMapForPrimaryIPNEG(testServiceNamespace, testServiceName,
			controller.namer, randomize)
		validateSyncerManagerWithPortInfoMap(t, controller, testServiceNamespace, testServiceName, expectedPortInfoMap)
		validateServiceAnnotationWithPortInfoMap(t, svc, expectedPortInfoMap)
	}
}

// TestEnableNEGServiceWithILBIngress tests ILB service with NEG enabled
func TestEnableNEGServiceWithILBIngress(t *testing.T) {
// Not running in parallel since enabling global flag
Expand Down Expand Up @@ -1070,6 +1096,28 @@ func getTestSvcPortTuple(svcPort int32) negtypes.SvcPortTuple {
return negtypes.SvcPortTuple{}
}

// newTestILBService builds a LoadBalancer-type service annotated as a GCE
// internal load balancer, registers it with the fake client and returns it.
// When onlyLocal is true the service uses ExternalTrafficPolicy: Local.
func newTestILBService(c *Controller, onlyLocal bool, port int) *apiv1.Service {
	spec := apiv1.ServiceSpec{
		Type:  apiv1.ServiceTypeLoadBalancer,
		Ports: []apiv1.ServicePort{{Name: "testport", Port: int32(port)}},
	}
	if onlyLocal {
		spec.ExternalTrafficPolicy = apiv1.ServiceExternalTrafficPolicyTypeLocal
	}
	svc := &apiv1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        testServiceName,
			Namespace:   testServiceNamespace,
			Annotations: map[string]string{gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal)},
		},
		Spec: spec,
	}
	c.client.CoreV1().Services(testServiceNamespace).Create(svc)
	return svc
}

func newTestService(c *Controller, negIngress bool, negSvcPorts []int32) *apiv1.Service {
svcAnnotations := map[string]string{}
if negIngress || len(negSvcPorts) > 0 {
Expand Down
26 changes: 22 additions & 4 deletions pkg/neg/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,20 @@ func TestEnsureAndStopSyncer(t *testing.T) {
},
expectEnsureError: false,
},
{
desc: "add a new l4 ilb port for ns2/n1 service",
namespace: svcNamespace1,
name: svcName,
stop: false,
portInfoMap: negtypes.NewPortInfoMap(svcNamespace1, svcName, types.NewSvcPortTupleSet(negtypes.SvcPortTuple{Name: portName2, Port: 3000, TargetPort: "80"}, negtypes.SvcPortTuple{Name: portName0, Port: 4000, TargetPort: "bar"}, negtypes.SvcPortTuple{Name: string(negtypes.VmPrimaryIpEndpointType), Port: 0}), namer, true),
expectInternals: map[negtypes.NegSyncerKey]bool{
getSyncerKey(svcNamespace2, svcName, negtypes.PortInfoMapKey{ServicePort: 3000, Subset: ""}, negtypes.PortInfo{PortTuple: negtypes.SvcPortTuple{Port: 3000, TargetPort: "80"}}): false,
getSyncerKey(svcNamespace1, svcName, negtypes.PortInfoMapKey{ServicePort: 3000, Subset: ""}, negtypes.PortInfo{PortTuple: negtypes.SvcPortTuple{Name: portName2, Port: 3000, TargetPort: "80"}}): true,
getSyncerKey(svcNamespace1, svcName, negtypes.PortInfoMapKey{ServicePort: 4000, Subset: ""}, negtypes.PortInfo{PortTuple: negtypes.SvcPortTuple{Name: portName0, Port: 4000, TargetPort: "bar"}}): true,
getSyncerKey(svcNamespace1, svcName, negtypes.PortInfoMapKey{ServicePort: 0, Subset: ""}, negtypes.PortInfo{PortTuple: negtypes.SvcPortTuple{Name: string(negtypes.VmPrimaryIpEndpointType), Port: 0}}): true,
},
expectEnsureError: false,
},
}

for _, tc := range testCases {
Expand Down Expand Up @@ -350,15 +364,19 @@ func TestGarbageCollectionNEG(t *testing.T) {
t.Fatalf("Failed to ensure syncer: %v", err)
}

for _, networkEndpointType := range []negtypes.NetworkEndpointType{negtypes.VmIpPortEndpointType, negtypes.NonGCPPrivateEndpointType} {
version := meta.VersionGA
for _, networkEndpointType := range []negtypes.NetworkEndpointType{negtypes.VmIpPortEndpointType, negtypes.NonGCPPrivateEndpointType, negtypes.VmPrimaryIpEndpointType} {
if networkEndpointType == negtypes.VmPrimaryIpEndpointType {
version = meta.VersionAlpha
}
negName := manager.namer.NEG("test", "test", 80)
manager.cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{
Version: meta.VersionGA,
Version: version,
Name: negName,
NetworkEndpointType: string(networkEndpointType),
}, negtypes.TestZone1)
manager.cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{
Version: meta.VersionGA,
Version: version,
Name: negName,
NetworkEndpointType: string(networkEndpointType),
}, negtypes.TestZone2)
Expand All @@ -367,7 +385,7 @@ func TestGarbageCollectionNEG(t *testing.T) {
t.Fatalf("Failed to GC: %v", err)
}
for _, zone := range []string{negtypes.TestZone1, negtypes.TestZone2} {
negs, _ := manager.cloud.ListNetworkEndpointGroup(zone, meta.VersionGA)
negs, _ := manager.cloud.ListNetworkEndpointGroup(zone, version)
for _, neg := range negs {
if neg.Name == negName {
t.Errorf("Expect NEG %q in zone %q to be GCed.", negName, zone)
Expand Down
209 changes: 209 additions & 0 deletions pkg/neg/syncers/endpoints_calculator_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package syncers

import (
"fmt"
"reflect"
"testing"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
listers "k8s.io/client-go/listers/core/v1"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/legacy-cloud-providers/gce"
)

// TestLocalGetEndpointSet verifies the CalculateEndpoints method implemented by the
// LocalL4ILBEndpointsCalculator (used for ExternalTrafficPolicy: Local services).
// The L7 implementation is tested in TestToZoneNetworkEndpointMapUtil.
func TestLocalGetEndpointSet(t *testing.T) {
	t.Parallel()
	_, transactionSyncer := newL4ILBTestTransactionSyncer(negtypes.NewAdapter(gce.NewFakeGCECloud(gce.DefaultTestClusterValues())), false)
	nodeNames := []string{testInstance1, testInstance2, testInstance3, testInstance4, testInstance5, testInstance6}
	for i := 0; i < len(nodeNames); i++ {
		// Each node gets a unique internal IP and is marked Ready so the
		// calculator treats it as a candidate for subsetting.
		err := transactionSyncer.nodeLister.Add(&v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: nodeNames[i],
			},
			Status: v1.NodeStatus{
				Addresses: []v1.NodeAddress{
					{
						Type:    v1.NodeInternalIP,
						Address: fmt.Sprintf("1.2.3.%d", i+1),
					},
				},
				Conditions: []v1.NodeCondition{
					{
						Type:   v1.NodeReady,
						Status: v1.ConditionTrue,
					},
				},
			},
		})
		if err != nil {
			t.Errorf("Failed to add node %s to syncer's nodeLister, err %v", nodeNames[i], err)
		}
	}
	zoneGetter := negtypes.NewFakeZoneGetter()
	nodeLister := listers.NewNodeLister(transactionSyncer.nodeLister)

	testCases := []struct {
		desc                string
		endpoints           *v1.Endpoints
		endpointSets        map[string]negtypes.NetworkEndpointSet
		networkEndpointType negtypes.NetworkEndpointType
	}{
		{
			desc:      "default endpoints",
			endpoints: getDefaultEndpoint(),
			// only 4 out of 6 nodes are picked since there are > 4 endpoints, but they are found only on 4 nodes.
			endpointSets: map[string]negtypes.NetworkEndpointSet{
				negtypes.TestZone1: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.1", Node: testInstance1}, negtypes.NetworkEndpoint{IP: "1.2.3.2", Node: testInstance2}),
				negtypes.TestZone2: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.3", Node: testInstance3}, negtypes.NetworkEndpoint{IP: "1.2.3.4", Node: testInstance4}),
			},
			networkEndpointType: negtypes.VmPrimaryIpEndpointType,
		},
		{
			desc:      "no endpoints",
			endpoints: &v1.Endpoints{},
			// No nodes are picked as there are no service endpoints.
			endpointSets:        nil,
			networkEndpointType: negtypes.VmPrimaryIpEndpointType,
		},
	}
	// Service keys are conventionally "<namespace>/<name>".
	svcKey := fmt.Sprintf("%s/%s", testServiceNamespace, testServiceName)
	ec := NewLocalL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey)
	for _, tc := range testCases {
		retSet, _, err := ec.CalculateEndpoints(tc.endpoints, nil)
		if err != nil {
			t.Errorf("For case %q, expect nil error, but got %v.", tc.desc, err)
		}
		if !reflect.DeepEqual(retSet, tc.endpointSets) {
			t.Errorf("For case %q, expecting endpoint set %v, but got %v.", tc.desc, tc.endpointSets, retSet)
		}
	}
}

// TestClusterGetEndpointSet verifies the CalculateEndpoints method implemented by the
// ClusterL4ILBEndpointsCalculator (used for ExternalTrafficPolicy: Cluster services).
func TestClusterGetEndpointSet(t *testing.T) {
	t.Parallel()
	_, transactionSyncer := newL4ILBTestTransactionSyncer(negtypes.NewAdapter(gce.NewFakeGCECloud(gce.DefaultTestClusterValues())), true)
	nodeNames := []string{testInstance1, testInstance2, testInstance3, testInstance4, testInstance5, testInstance6}
	for i := 0; i < len(nodeNames); i++ {
		// Each node gets a unique internal IP and is marked Ready so the
		// calculator treats it as a candidate for subsetting.
		err := transactionSyncer.nodeLister.Add(&v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: nodeNames[i],
			},
			Status: v1.NodeStatus{
				Addresses: []v1.NodeAddress{
					{
						Type:    v1.NodeInternalIP,
						Address: fmt.Sprintf("1.2.3.%d", i+1),
					},
				},
				Conditions: []v1.NodeCondition{
					{
						Type:   v1.NodeReady,
						Status: v1.ConditionTrue,
					},
				},
			},
		})
		if err != nil {
			t.Errorf("Failed to add node %s to syncer's nodeLister, err %v", nodeNames[i], err)
		}
	}
	zoneGetter := negtypes.NewFakeZoneGetter()
	nodeLister := listers.NewNodeLister(transactionSyncer.nodeLister)
	testCases := []struct {
		desc                string
		endpoints           *v1.Endpoints
		endpointSets        map[string]negtypes.NetworkEndpointSet
		networkEndpointType negtypes.NetworkEndpointType
	}{
		{
			desc:      "default endpoints",
			endpoints: getDefaultEndpoint(),
			// all nodes are picked since, in this mode, endpoints do not need to run on the selected nodes.
			endpointSets: map[string]negtypes.NetworkEndpointSet{
				negtypes.TestZone1: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.1", Node: testInstance1}, negtypes.NetworkEndpoint{IP: "1.2.3.2", Node: testInstance2}),
				negtypes.TestZone2: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.3", Node: testInstance3}, negtypes.NetworkEndpoint{IP: "1.2.3.4", Node: testInstance4},
					negtypes.NetworkEndpoint{IP: "1.2.3.5", Node: testInstance5}, negtypes.NetworkEndpoint{IP: "1.2.3.6", Node: testInstance6}),
			},
			networkEndpointType: negtypes.VmPrimaryIpEndpointType,
		},
		{
			desc: "no endpoints",
			// all nodes are picked since, in this mode, endpoints do not need to run on the selected nodes.
			// Even when there are no service endpoints, nodes are selected at random.
			endpoints: &v1.Endpoints{},
			endpointSets: map[string]negtypes.NetworkEndpointSet{
				negtypes.TestZone1: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.1", Node: testInstance1}, negtypes.NetworkEndpoint{IP: "1.2.3.2", Node: testInstance2}),
				negtypes.TestZone2: negtypes.NewNetworkEndpointSet(negtypes.NetworkEndpoint{IP: "1.2.3.3", Node: testInstance3}, negtypes.NetworkEndpoint{IP: "1.2.3.4", Node: testInstance4},
					negtypes.NetworkEndpoint{IP: "1.2.3.5", Node: testInstance5}, negtypes.NetworkEndpoint{IP: "1.2.3.6", Node: testInstance6}),
			},
			networkEndpointType: negtypes.VmPrimaryIpEndpointType,
		},
	}
	// Service keys are conventionally "<namespace>/<name>".
	svcKey := fmt.Sprintf("%s/%s", testServiceNamespace, testServiceName)
	ec := NewClusterL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey)
	for _, tc := range testCases {
		retSet, _, err := ec.CalculateEndpoints(tc.endpoints, nil)
		if err != nil {
			t.Errorf("For case %q, expect nil error, but got %v.", tc.desc, err)
		}
		if !reflect.DeepEqual(retSet, tc.endpointSets) {
			t.Errorf("For case %q, expecting endpoint set %v, but got %v.", tc.desc, tc.endpointSets, retSet)
		}
	}
}

// TestGetPerZoneSubsetCount verifies the getPerZoneSubsetCount method for both
// the Cluster (randomize == true) and Local (randomize == false) calculators.
func TestGetPerZoneSubsetCount(t *testing.T) {
	t.Parallel()
	zoneCount := 3
	tcs := []struct {
		desc          string
		randomize     bool
		startCount    int
		endCount      int
		expectedCount int
	}{
		{desc: "start with endpoints, drop to none, ExternalTrafficPolicy:Local", startCount: 5, endCount: 0, expectedCount: 0},
		{desc: "no endpoints, ExternalTrafficPolicy:Local", startCount: 0, endCount: 0, expectedCount: 0},
		{desc: "valid endpoints increase, ExternalTrafficPolicy:Local", startCount: 5, endCount: 10, expectedCount: 10 / zoneCount},
		// If total number of nodes is less than the number of zones, per zone count will be 1.
		{desc: "valid endpoints decrease, ExternalTrafficPolicy:Local", startCount: 5, endCount: 2, expectedCount: 1},
		{desc: "valid endpoints > limit, ExternalTrafficPolicy:Local", startCount: 5, endCount: 258, expectedCount: maxSubsetSizeLocal / zoneCount},
		{desc: "start with endpoints, drop to none, ExternalTrafficPolicy:Cluster", randomize: true, startCount: 5, endCount: 0, expectedCount: maxSubsetSizeDefault / zoneCount},
		{desc: "no endpoints, random true, ExternalTrafficPolicy:Cluster", randomize: true, startCount: 0, endCount: 0, expectedCount: maxSubsetSizeDefault / zoneCount},
		{desc: "valid endpoints increase, ExternalTrafficPolicy:Cluster", randomize: true, startCount: 5, endCount: 10, expectedCount: maxSubsetSizeDefault / zoneCount},
		{desc: "valid endpoints decrease, ExternalTrafficPolicy:Cluster", randomize: true, startCount: 5, endCount: 2, expectedCount: maxSubsetSizeDefault / zoneCount},
	}
	for _, tc := range tcs {
		// Pick the calculator flavor that matches the traffic policy under test.
		var got int
		if tc.randomize {
			got = NewClusterL4ILBEndpointsCalculator(nil, nil, "test").getPerZoneSubsetCount(zoneCount, tc.endCount)
		} else {
			got = NewLocalL4ILBEndpointsCalculator(nil, nil, "test").getPerZoneSubsetCount(zoneCount, tc.endCount)
		}
		if got != tc.expectedCount {
			t.Errorf("For test case '%s', expected subsetCount of %d, but got %d", tc.desc, tc.expectedCount, got)
		}
	}
}
Loading

0 comments on commit 112a1b1

Please sign in to comment.