diff --git a/cmd/e2e-test/asm_test.go b/cmd/e2e-test/asm_test.go
new file mode 100644
index 0000000000..0771ad543e
--- /dev/null
+++ b/cmd/e2e-test/asm_test.go
@@ -0,0 +1,278 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	istioV1alpha3 "istio.io/api/networking/v1alpha3"
+	apiappsv1 "k8s.io/api/apps/v1"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/ingress-gce/pkg/e2e"
+	"k8s.io/klog"
+)
+
+const (
+	asmConfigNamespace          = "kube-system"
+	asmConfigName               = "ingress-controller-asm-cm-config"
+	negControllerRestartTimeout = 5 * time.Minute
+)
+
+// TestASMConfig tests enabling and disabling ASM mode; it cannot run in
+// parallel with other tests.
+func TestASMConfig(t *testing.T) {
+	Framework.RunWithSandbox("TestASMConfig", t, func(t *testing.T, s *e2e.Sandbox) {
+		for _, tc := range []struct {
+			desc                string
+			configMap           map[string]string
+			wantConfigMapEvents []string
+		}{
+			{
+				desc:                "An invalid ConfigMap value is equivalent to disable",
+				configMap:           map[string]string{"enable-asm": "INVALID"},
+				wantConfigMapEvents: []string{"The map provided a unvalid value for field: enable-asm, value: INVALID"},
+			},
+			{
+				desc:                "An unknown ConfigMap field is equivalent to disable",
+				configMap:           map[string]string{"enable-unknow-feild": "INVALID1"},
+				wantConfigMapEvents: []string{"The map contains a unknown key-value pair: enable-unknow-feild:INVALID1"},
+			},
+			{
+				desc:                "Setting enable-asm to true should restart the controller",
+				configMap:           map[string]string{"enable-asm": "true"},
+				wantConfigMapEvents: []string{"ConfigMapConfigController: Get a update on the ConfigMapConfig, Restarting Ingress controller"},
+			},
+			// TODO(koonwah): The case below is not fully tested; the NEG controller
+			// should be updated to emit an enable-asm event on the target ConfigMap.
+			{
+				desc:      "Resetting enable-asm to an invalid value should disable ASM and restart the controller",
+				configMap: map[string]string{"enable-asm": "INVALID2"},
+				wantConfigMapEvents: []string{"The map provided a unvalid value for field: enable-asm, value: INVALID2",
+					"ConfigMapConfigController: Get a update on the ConfigMapConfig, Restarting Ingress controller"},
+			},
+		} {
+			if err := e2e.UpdateCreateConfigMap(s, asmConfigNamespace, asmConfigName, tc.configMap); err != nil {
+				t.Error(err)
+			}
+			if err := e2e.WaitConfigMapEvents(s, asmConfigNamespace, asmConfigName, tc.wantConfigMapEvents, negControllerRestartTimeout); err != nil {
+				t.Fatalf("Failed to get events: %v; error: %v", strings.Join(tc.wantConfigMapEvents, ";"), err)
+			}
+		}
+	})
+}
+
+func TestASMServiceAndDestinationRule(t *testing.T) {
+	// This test case needs two namespaces: one of them is added to
+	// asm-skip-namespaces, the other hosts the workloads under test.
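+	// The outer sandbox (sSkip) supplies the namespace that goes into
+	// asm-skip-namespaces, while the inner sandbox (s) hosts most workloads;
+	// a few objects are also created in sSkip to cover the skip-namespace and
+	// cross-namespace cases.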
+	Framework.RunWithSandbox("TestASMServiceAndDestinationRule", t, func(t *testing.T, sSkip *e2e.Sandbox) {
+		Framework.RunWithSandbox("TestASMServiceAndDestinationRule", t, func(t *testing.T, s *e2e.Sandbox) {
+			// Enable ASM mode.
+			ctx := context.Background()
+
+			asmConfig := map[string]string{"enable-asm": "true",
+				"asm-skip-namespaces": fmt.Sprintf("kube-system,istio-system,%s", sSkip.Namespace)}
+			if err := e2e.UpdateCreateConfigMap(s, asmConfigNamespace, asmConfigName,
+				asmConfig); err != nil {
+				t.Error(err)
+			}
+
+			porterPort := int32(80)
+			svcName := "service"
+			svcSkipName := "service-skip"
+
+			for _, deployment := range []*apiappsv1.Deployment{
+				createPorterDeployment(s.Namespace, "deployment-v1", 1, map[string]string{"app": "porter", "version": "v1"}, porterPort),
+				createPorterDeployment(s.Namespace, "deployment-v2", 2, map[string]string{"app": "porter", "version": "v2"}, porterPort),
+				createPorterDeployment(s.Namespace, "deployment-v3", 3, map[string]string{"app": "porter", "version": "v3"}, porterPort),
+			} {
+				if err := e2e.CreateDeployment(s, deployment); err != nil {
+					t.Errorf("Failed to create Deployment, error: %v", err)
+				}
+			}
+
+			service := createService(s.Namespace, svcName, map[string]string{"app": "porter"}, porterPort)
+			serviceSkip := createService(sSkip.Namespace, svcSkipName, map[string]string{"app": "porter"}, porterPort)
+
+			for _, tc := range []struct {
+				desc            string
+				svc             *apiv1.Service
+				inSkipNamespace bool
+			}{
+				{desc: "NEG controller should create NEGs for all ports of a Service by default", svc: service, inSkipNamespace: false},
+				{desc: "NEG controller shouldn't create NEGs for a Service in a skip namespace", svc: serviceSkip, inSkipNamespace: true},
+			} {
+				sandbox := s
+				timeout := 5 * time.Minute
+				negCount := 1 // This equals the Service port count.
+				if tc.inSkipNamespace {
+					negCount = 0
+					sandbox = sSkip
+					timeout = 1 * time.Minute
+				}
+				if err := e2e.CreateService(sandbox, tc.svc); err != nil {
+					t.Errorf("Failed to create Service, error: %v", err)
+				}
+
+				// Test the Service annotations.
+				negStatus, err := e2e.WaitServiceNEGAnnotation(sandbox, tc.svc.Namespace, tc.svc.Name, negCount, timeout)
+				if err != nil {
+					if !(tc.inSkipNamespace && err == wait.ErrWaitTimeout) {
+						t.Fatalf("Failed to wait for the Service NEG annotation, error: %v", err)
+					}
+				}
+				if tc.inSkipNamespace {
+					if negStatus != nil {
+						t.Errorf("Service: %s/%s is in the ASM skip namespace and shouldn't have a NEG status. ASM Config: %v, NEGStatus got: %v",
+							tc.svc.Namespace, tc.svc.Name, asmConfig, negStatus)
+					}
+				} else {
+					if negName, ok := negStatus.NetworkEndpointGroups[strconv.Itoa(int(porterPort))]; ok {
+						// No backend pod exists yet, so the NEG has 0 endpoints.
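+						// WaitForNegs polls the Compute API until the NEG named in
+						// the annotation reports the expected endpoint count across
+						// the given zones.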
+						if err := e2e.WaitForNegs(ctx, Framework.Cloud, negName, negStatus.Zones, false, 0); err != nil {
+							t.Errorf("Failed to wait for NEGs, error: %v", err)
+						}
+					} else {
+						t.Fatalf("Service annotation doesn't contain the desired NEG status, want: %d, have: %v", porterPort, negStatus.NetworkEndpointGroups)
+					}
+				}
+			}
+
+			for _, tc := range []struct {
+				desc                   string
+				destinationRuleName    string
+				subsetEndpointCountMap map[string]int
+				crossNamespace         bool
+			}{
+				{desc: "NEG controller should create NEGs for a DestinationRule", destinationRuleName: "porter-destinationrule", subsetEndpointCountMap: map[string]int{"v1": 1, "v2": 2, "v3": 3}, crossNamespace: false},
+				{desc: "NEG controller should update NEGs for a DestinationRule", destinationRuleName: "porter-destinationrule", subsetEndpointCountMap: map[string]int{"v1": 1, "v2": 2}, crossNamespace: false},
+				{desc: "NEG controller should create NEGs for a cross-namespace DestinationRule", destinationRuleName: "porter-destinationrule-1", subsetEndpointCountMap: map[string]int{"v1": 1}, crossNamespace: true},
+			} {
+				sandbox := s
+				namespace := s.Namespace
+				destinationRule := istioV1alpha3.DestinationRule{}
+				subsets := []*istioV1alpha3.Subset{}
+				for v := range tc.subsetEndpointCountMap {
+					subsets = append(subsets, &istioV1alpha3.Subset{Name: v, Labels: map[string]string{"version": v}})
+				}
+				destinationRule.Subsets = subsets
+
+				// crossNamespace tests a DestinationRule that references a Service
+				// located in a different namespace.
+				if tc.crossNamespace {
+					sandbox = sSkip
+					namespace = sSkip.Namespace
+					destinationRule.Host = fmt.Sprintf("%s.%s.svc.cluster.local", svcName, s.Namespace)
+				} else {
+					destinationRule.Host = svcName
+				}
+
+				if err := e2e.UpdateCreateDestinationRule(sandbox, namespace, tc.destinationRuleName, &destinationRule); err != nil {
+					t.Fatalf("Failed to create DestinationRule, error: %v", err)
+				}
+
+				// One DestinationRule should have count(NEGs) = count(subsets) * count(ports).
+				dsNEGStatus, err := e2e.WaitDestinationRuleAnnotation(sandbox, namespace, tc.destinationRuleName, len(subsets)*1, 5*time.Minute)
+				if err != nil {
+					t.Fatalf("Failed to validate the NEG count, error: %v", err)
Error: %s", err) + } + + zones := dsNEGStatus.Zones + for subsetVersion, endpointCount := range tc.subsetEndpointCountMap { + negNames, ok := dsNEGStatus.NetworkEndpointGroups[subsetVersion] + if !ok { + t.Fatalf("DestinationRule annotation doesn't contain the desired NEG status, want: %s, have: %v", subsetVersion, dsNEGStatus.NetworkEndpointGroups) + } + negName, ok := negNames[strconv.Itoa(int(porterPort))] + if !ok { + t.Fatalf("DestinationRule annotation doesn't contain the desired NEG status, want: %d, have: %v", porterPort, negNames) + } + if err := e2e.WaitForNegs(ctx, Framework.Cloud, negName, zones, false, endpointCount); err != nil { + t.Errorf("Failed to wait Negs, error: %s", err) + } + + } + + } + + }) + }) +} + +func TestNoIstioASM(t *testing.T) { + + Framework.RunWithSandbox("TestASMConfigOnNoIstioCluster", t, func(t *testing.T, s *e2e.Sandbox) { + + cm := map[string]string{"enable-asm": "true"} + wantConfigMapEvents := []string{"ConfigMapConfigController: Get a update on the ConfigMapConfig, Restarting Ingress controller", + "Cannot find DestinationRule CRD, disabling ASM Mode, please check Istio setup."} + + if err := e2e.UpdateCreateConfigMap(s, asmConfigNamespace, asmConfigName, cm); err != nil { + t.Error(err) + } + defer e2e.DeleteConfigMap(s, asmConfigNamespace, asmConfigName) + if err := e2e.WaitConfigMapEvents(s, asmConfigNamespace, asmConfigName, wantConfigMapEvents, negControllerRestartTimeout); err != nil { + t.Fatalf("Failed to get events: %v; Error %e", wantConfigMapEvents, err) + } + + if err := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { + cmData, err := e2e.GetConfigMap(s, asmConfigNamespace, asmConfigName) + if err != nil { + return false, err + } + if val, ok := cmData["enable-asm"]; ok && val == "false" { + return true, nil + } + return false, fmt.Errorf("ConfigMap: %s/%s, nable-asm is not false. Value: %v", asmConfigNamespace, asmConfigName, cmData) + + }); err != nil { + t.Fatalf("Failed to validate enable-asm = false. 
Error: %s", err) + } + + }) +} + +func createPorterDeployment(namespace, name string, replics int32, lables map[string]string, port int32) *apiappsv1.Deployment { + env := fmt.Sprintf("SERVE_PORT_%d", port) + deployment := apiappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, + Spec: apiappsv1.DeploymentSpec{ + Replicas: &replics, + Selector: &metav1.LabelSelector{MatchLabels: lables}, + Template: apiv1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: lables}, + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: "hostname", + Image: "gcr.io/kubernetes-e2e-test-images/porter-alpine:1.0", + Env: []apiv1.EnvVar{{Name: env, Value: env}}, + Ports: []apiv1.ContainerPort{{Name: "server", ContainerPort: port}}, + }, + }, + }, + }, + }, + } + return &deployment +} + +func createService(namespace, name string, selector map[string]string, port int32) *apiv1.Service { + svc := apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, + Spec: apiv1.ServiceSpec{ + Selector: selector, + Ports: []apiv1.ServicePort{ + { + Port: port, + Name: "http", + }, + }, + }, + } + return &svc +} diff --git a/cmd/e2e-test/main_test.go b/cmd/e2e-test/main_test.go index 92e217c102..08f1e2477f 100644 --- a/cmd/e2e-test/main_test.go +++ b/cmd/e2e-test/main_test.go @@ -49,6 +49,7 @@ var ( handleSIGINT bool gceEndpointOverride string createILBSubnet bool + enableIstio bool } Framework *e2e.Framework @@ -71,6 +72,7 @@ func init() { flag.BoolVar(&flags.handleSIGINT, "handleSIGINT", true, "catch SIGINT to perform clean") flag.StringVar(&flags.gceEndpointOverride, "gce-endpoint-override", "", "If set, talks to a different GCE API Endpoint. By default it talks to https://www.googleapis.com/compute/v1/") flag.BoolVar(&flags.createILBSubnet, "createILBSubnet", false, "If set, creates a proxy subnet for the L7 ILB") + flag.BoolVar(&flags.enableIstio, "enable-istio", false, "set to true if Istio is enabled.") } // TestMain is the entrypoint for the end-to-end test suite. 
@@ -126,6 +128,7 @@ func TestMain(m *testing.M) {
 		DestroySandboxes:    flags.destroySandboxes,
 		GceEndpointOverride: flags.gceEndpointOverride,
 		CreateILBSubnet:     flags.createILBSubnet,
+		EnableIstio:         flags.enableIstio,
 	})
 	if flags.handleSIGINT {
 		Framework.CatchSIGINT()
diff --git a/cmd/e2e-test/neg_test.go b/cmd/e2e-test/neg_test.go
index 4ee4f75b6f..ebc46b1b61 100644
--- a/cmd/e2e-test/neg_test.go
+++ b/cmd/e2e-test/neg_test.go
@@ -21,7 +21,7 @@ import (
 	"testing"
 
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
diff --git a/pkg/e2e/fixtures.go b/pkg/e2e/fixtures.go
index b49cbf2654..6d65e241e8 100644
--- a/pkg/e2e/fixtures.go
+++ b/pkg/e2e/fixtures.go
@@ -21,17 +21,27 @@ package e2e
 import (
 	"context"
 	"fmt"
-	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
-	"k8s.io/ingress-gce/pkg/utils"
 	"math/rand"
 	"net/http"
 	"reflect"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
+	istioV1alpha3 "istio.io/api/networking/v1alpha3"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/ingress-gce/pkg/annotations"
+	"k8s.io/ingress-gce/pkg/scheme"
+	"k8s.io/ingress-gce/pkg/utils"
 
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
 	computebeta "google.golang.org/api/compute/v0.beta"
 	"google.golang.org/api/compute/v1"
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -320,3 +330,177 @@ func DeleteILBSubnet(s *Sandbox, name string) error {
 	klog.V(2).Infof("Deleting ILB Subnet %q", name)
 	return s.f.Cloud.BetaSubnetworks().Delete(context.Background(), meta.RegionalKey(name, s.f.Region))
 }
+
+// CreateService creates a Service in the sandbox cluster.
+func CreateService(s *Sandbox, svc *v1.Service) error {
+	_, err := s.f.Clientset.CoreV1().Services(svc.Namespace).Create(svc)
+	return err
+}
+
+// CreateDeployment creates a Deployment in the sandbox cluster.
+func CreateDeployment(s *Sandbox, deployment *apps.Deployment) error {
+	_, err := s.f.Clientset.AppsV1().Deployments(deployment.Namespace).Create(deployment)
+	return err
+}
+
+// CreateConfigMap creates a ConfigMap with the given data as its Data field.
+func CreateConfigMap(s *Sandbox, namespace, name string, data map[string]string) error {
+	cm := v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Data: data}
+	_, err := s.f.Clientset.CoreV1().ConfigMaps(namespace).Create(&cm)
+	return err
+}
+
+// GetConfigMap gets the namespace:name ConfigMap and returns its Data field.
+func GetConfigMap(s *Sandbox, namespace, name string) (map[string]string, error) {
+	cm, err := s.f.Clientset.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return cm.Data, nil
+}
+
+// UpdateCreateConfigMap updates the Data field of the namespace:name
+// ConfigMap, creating the ConfigMap if it does not exist.
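+// Note: it tries Update first and falls back to Create on a NotFound error,
+// so it is not safe against concurrent writers.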
+func UpdateCreateConfigMap(s *Sandbox, namespace, name string, data map[string]string) error {
+	cm := v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Data: data}
+	_, err := s.f.Clientset.CoreV1().ConfigMaps(namespace).Update(&cm)
+	if err != nil && errors.IsNotFound(err) {
+		_, err = s.f.Clientset.CoreV1().ConfigMaps(namespace).Create(&cm)
+	}
+	return err
+}
+
+// DeleteConfigMap deletes the namespace:name ConfigMap.
+func DeleteConfigMap(s *Sandbox, namespace, name string) error {
+	return s.f.Clientset.CoreV1().ConfigMaps(namespace).Delete(name, &metav1.DeleteOptions{})
+}
+
+// CreateDestinationRule creates a DestinationRule.
+func CreateDestinationRule(s *Sandbox, namespace, name string, dr *istioV1alpha3.DestinationRule) error {
+	usDr, err := CastDestinationRuleToK8sObj(namespace, name, dr)
+	if err != nil {
+		return err
+	}
+	if _, err := s.f.DestinationRuleClient.Namespace(namespace).Create(usDr, metav1.CreateOptions{}); err != nil {
+		return fmt.Errorf("Failed to create DestinationRule: %v", err)
+	}
+	return nil
+}
+
+// UpdateCreateDestinationRule updates the namespace:name DestinationRule,
+// creating it if it does not exist.
+func UpdateCreateDestinationRule(s *Sandbox, namespace, name string, dr *istioV1alpha3.DestinationRule) error {
+	usDr, err := s.f.DestinationRuleClient.Namespace(namespace).Get(name, metav1.GetOptions{})
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		usDr, err = CastDestinationRuleToK8sObj(namespace, name, dr)
+		if err != nil {
+			return err
+		}
+		_, err = s.f.DestinationRuleClient.Namespace(namespace).Create(usDr, metav1.CreateOptions{})
+		return err
+	}
+	spec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dr)
+	if err != nil {
+		return fmt.Errorf("Failed to convert DestinationRule to Unstructured: %v", err)
+	}
+	usDr.Object["spec"] = spec
+	_, err = s.f.DestinationRuleClient.Namespace(namespace).Update(usDr, metav1.UpdateOptions{})
+	return err
+}
+
+// DeleteDestinationRule deletes the namespace:name DestinationRule.
+func DeleteDestinationRule(s *Sandbox, namespace, name string) error {
+	return s.f.DestinationRuleClient.Namespace(namespace).Delete(name, &metav1.DeleteOptions{})
+}
+
+// WaitConfigMapEvents waits until all msgs are present in the events of the
+// namespace:name ConfigMap, or until timeout.
+func WaitConfigMapEvents(s *Sandbox, namespace, name string, msgs []string, timeout time.Duration) error {
+	cm, err := s.f.Clientset.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if cm == nil {
+		return fmt.Errorf("Cannot find ConfigMap: %s/%s", namespace, name)
+	}
+	return wait.Poll(5*time.Second, timeout, func() (bool, error) {
+		eventList, err := s.f.Clientset.CoreV1().Events(namespace).Search(scheme.Scheme, cm)
+		if err != nil {
+			return false, err
+		}
+		if len(eventList.Items) < len(msgs) {
+			return false, nil
+		}
+		// Concatenate the most recent len(msgs) event messages, then check
+		// that every expected message appears in that window.
+		allMsg := ""
+		events := eventList.Items
+		sort.Slice(events, func(i, j int) bool {
+			return events[i].LastTimestamp.Before(&events[j].LastTimestamp)
+		})
+		for _, event := range events[len(events)-len(msgs):] {
+			allMsg += event.Message
+		}
+		klog.Infof("WaitConfigMapEvents, allMsg: %s, want: %v", allMsg, msgs)
+
+		for _, msg := range msgs {
+			if !strings.Contains(allMsg, msg) {
+				return false, nil
+			}
+		}
+		return true, nil
+	})
+}
+
+// WaitDestinationRuleAnnotation waits until the DestinationRule NEG
+// annotation reports negCount NEGs, or until timeout.
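+// It returns the parsed DestinationRuleNEGStatus once the count matches.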
+func WaitDestinationRuleAnnotation(s *Sandbox, namespace, name string, negCount int, timeout time.Duration) (*annotations.DestinationRuleNEGStatus, error) {
+	var rsl annotations.DestinationRuleNEGStatus
+	if err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+		unsDr, err := s.f.DestinationRuleClient.Namespace(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		ann := unsDr.GetAnnotations()
+		klog.Infof("Waiting for the DestinationRule NEG annotation, want count: %d, got annotation: %v", negCount, ann)
+		if ann != nil {
+			if val, ok := ann[annotations.NEGStatusKey]; ok {
+				rsl, err = annotations.ParseDestinationRuleNEGStatus(val)
+				if err != nil {
+					return false, err
+				}
+				if len(rsl.NetworkEndpointGroups) == negCount {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	}); err != nil {
+		return nil, err
+	}
+	return &rsl, nil
+}
+
+// WaitServiceNEGAnnotation waits until the Service NEG annotation reports
+// negCount NEGs, or until timeout.
+func WaitServiceNEGAnnotation(s *Sandbox, namespace, name string, negCount int, timeout time.Duration) (*annotations.NegStatus, error) {
+	var rsl annotations.NegStatus
+	if err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+		svc, err := s.f.Clientset.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		klog.Infof("Waiting for the Service NEG annotation, want count: %d, got annotation: %v", negCount, svc.Annotations)
+		if svc.Annotations != nil {
+			if val, ok := svc.Annotations[annotations.NEGStatusKey]; ok {
+				rsl, err = annotations.ParseNegStatus(val)
+				if err != nil {
+					return false, err
+				}
+				if len(rsl.NetworkEndpointGroups) == negCount {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return &rsl, nil
+}
diff --git a/pkg/e2e/framework.go b/pkg/e2e/framework.go
index d9caa47845..417bbed332 100644
--- a/pkg/e2e/framework.go
+++ b/pkg/e2e/framework.go
@@ -33,6 +33,8 @@ import (
 	computebeta "google.golang.org/api/compute/v0.beta"
 	compute "google.golang.org/api/compute/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	backendconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned"
@@ -48,8 +50,17 @@ type Options struct {
 	DestroySandboxes    bool
 	GceEndpointOverride string
 	CreateILBSubnet     bool
+	EnableIstio         bool
 }
 
+const (
+	destinationRuleGroup      = "networking.istio.io"
+	destinationRuleAPIVersion = "v1alpha3"
+	destinationRulePlural     = "destinationrules"
+	// This must match the fields above, and be in the form: <plural>.<group>.
+	destinationRuleCRDName = "destinationrules.networking.istio.io"
+)
+
 // NewFramework returns a new test framework to run.
 func NewFramework(config *rest.Config, options Options) *Framework {
 	theCloud, err := NewCloud(options.Project, options.GceEndpointOverride)
@@ -60,6 +71,7 @@ func NewFramework(config *rest.Config, options Options) *Framework {
 	if err != nil {
 		klog.Fatalf("Failed to create BackendConfig client: %v", err)
 	}
+
 	f := &Framework{
 		RestConfig: config,
 		Clientset:  kubernetes.NewForConfigOrDie(config),
@@ -73,13 +85,24 @@ func NewFramework(config *rest.Config, options Options) *Framework {
 		CreateILBSubnet:     options.CreateILBSubnet,
 	}
 	f.statusManager = NewStatusManager(f)
+	if options.EnableIstio {
+		dynamicClient, err := dynamic.NewForConfig(config)
+		if err != nil {
+			klog.Fatalf("Failed to create Dynamic client: %v", err)
+		}
+		destinationRuleGVR := schema.GroupVersionResource{Group: destinationRuleGroup, Version: destinationRuleAPIVersion, Resource: destinationRulePlural}
+		f.DestinationRuleClient = dynamicClient.Resource(destinationRuleGVR)
+	}
+
 	return f
 }
 
 // Framework is the end-to-end test framework.
 type Framework struct {
-	RestConfig *rest.Config
-	Clientset  *kubernetes.Clientset
+	RestConfig            *rest.Config
+	Clientset             *kubernetes.Clientset
+	DestinationRuleClient dynamic.NamespaceableResourceInterface
+
 	BackendConfigClient *backendconfigclient.Clientset
 	Project             string
 	Region              string
diff --git a/pkg/e2e/helpers.go b/pkg/e2e/helpers.go
index 4a5e57d239..abf93e1d92 100644
--- a/pkg/e2e/helpers.go
+++ b/pkg/e2e/helpers.go
@@ -28,10 +28,13 @@ import (
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
+	istioV1alpha3 "istio.io/api/networking/v1alpha3"
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -316,7 +319,8 @@ func WaitForNegs(ctx context.Context, c cloud.Cloud, negName string, zones []str
 	return wait.Poll(negPollInterval, negPollTimeout, func() (bool, error) {
 		negs, err := fuzz.NetworkEndpointsInNegs(ctx, c, negName, zones)
 		if err != nil {
-			return false, fmt.Errorf("failed to retrieve NEG %v from zones %v: %v", negName, zones, err)
+			// Treat retrieval errors as retryable; the NEG may not exist yet.
+			klog.Infof("WaitForNegs(%q, %v, %v, %v) failed to retrieve NEGs: %v", negName, zones, expectHealthy, expectCount, err)
+			return false, nil
 		}
 
 		if err := CheckNegs(negs, expectHealthy, expectCount); err != nil {
@@ -514,3 +518,18 @@ func CheckV2Finalizer(ing *v1beta1.Ingress) error {
 	}
 	return nil
 }
+
+// CastDestinationRuleToK8sObj casts an *istioV1alpha3.DestinationRule into an
+// *unstructured.Unstructured object.
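+// The returned object can then be created or updated through the framework's
+// dynamic DestinationRuleClient.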
+func CastDestinationRuleToK8sObj(namespace, name string, dr *istioV1alpha3.DestinationRule) (*unstructured.Unstructured, error) {
+	usDr := unstructured.Unstructured{}
+	usDr.SetName(name)
+	usDr.SetNamespace(namespace)
+	usDr.SetKind("DestinationRule")
+	usDr.SetAPIVersion("networking.istio.io/v1alpha3")
+	spec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dr)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to convert DestinationRule to Unstructured: %v", err)
+	}
+	usDr.Object["spec"] = spec
+	return &usDr, nil
+}
diff --git a/pkg/scheme/scheme.go b/pkg/scheme/scheme.go
new file mode 100644
index 0000000000..c548d00900
--- /dev/null
+++ b/pkg/scheme/scheme.go
@@ -0,0 +1,23 @@
+package scheme
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned/scheme"
+)
+
+// Scheme is the default instance of runtime.Scheme to which types in the
+// Kubernetes API are already registered.
+var Scheme = runtime.NewScheme()
+
+func init() {
+	// Register external types for Scheme.
+	utilruntime.Must(corev1.AddToScheme(Scheme))
+	metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme))
+	utilruntime.Must(metav1.AddMetaToScheme(Scheme))
+	utilruntime.Must(scheme.AddToScheme(Scheme))
+}
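+
+// Note: this Scheme is consumed by e2e.WaitConfigMapEvents, which needs the
+// core types registered so that the Events search can build an object
+// reference for the ConfigMap it is watching.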