[e2e] abstract common methods
fanhaouu committed Sep 12, 2024
1 parent 8b0744c commit b495eb9
Showing 7 changed files with 149 additions and 191 deletions.
3 changes: 1 addition & 2 deletions test/e2e/e2e_failedpods_test.go
@@ -23,7 +23,6 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/test"
)

var oneHourPodLifetimeSeconds uint = 3600
@@ -134,7 +133,7 @@ func TestFailedPods(t *testing.T) {
}

func initFailedJob(name, namespace string) *batchv1.Job {
podSpec := test.MakePodSpec("", nil)
podSpec := makePodSpec("", nil)
podSpec.Containers[0].Command = []string{"/bin/false"}
podSpec.RestartPolicy = v1.RestartPolicyNever
labelsSet := labels.Set{"test": name, "name": name}
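
For illustration only (not part of this commit): the hunk above swaps the package-level test.MakePodSpec for the local makePodSpec helper added in e2e_test.go below. A minimal sketch of the same pattern, with a hypothetical function name and placeholder job name/namespace:

package e2e

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// buildAlwaysFailingJob is a hypothetical helper: it builds a Job whose single
// container always exits non-zero, so its pods reach the Failed phase and can
// be targeted by the RemoveFailedPods plugin under test.
func buildAlwaysFailingJob(name, namespace string) *batchv1.Job {
	podSpec := makePodSpec("", nil) // shared helper defined in e2e_test.go
	podSpec.Containers[0].Command = []string{"/bin/false"}
	podSpec.RestartPolicy = v1.RestartPolicyNever
	labelsSet := labels.Set{"test": name, "name": name}
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: labelsSet},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
				Spec:       podSpec,
			},
		},
	}
}
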
4 changes: 2 additions & 2 deletions test/e2e/e2e_leaderelection_test.go
@@ -141,9 +141,9 @@ func TestLeaderElection(t *testing.T) {
time.Sleep(7 * time.Second)

// validate only pods from e2e-testleaderelection-a namespace are evicted.
podListA := getPodNameList(ctx, clientSet, ns1, t)
podListA := getCurrentPodNames(ctx, clientSet, ns1, t)

podListB := getPodNameList(ctx, clientSet, ns2, t)
podListB := getCurrentPodNames(ctx, clientSet, ns2, t)

left := reflect.DeepEqual(podListAOrg, podListA)
right := reflect.DeepEqual(podListBOrg, podListB)
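
For illustration only (not part of this commit): the hunk above only renames the lookup helper; the surrounding pattern is to snapshot pod names per namespace before the descheduler runs and compare them afterwards. A sketch of that check with hypothetical names, assuming getCurrentPodNames from e2e_test.go:

package e2e

import (
	"context"
	"reflect"
	"testing"

	clientset "k8s.io/client-go/kubernetes"
)

// assertOnlyFirstNamespaceEvicted is hypothetical: it compares pod-name
// snapshots taken before the descheduler ran (beforeA/beforeB) against the
// current state and fails if the wrong namespace was touched.
func assertOnlyFirstNamespaceEvicted(ctx context.Context, t *testing.T, clientSet clientset.Interface, nsA, nsB string, beforeA, beforeB []string) {
	afterA := getCurrentPodNames(ctx, clientSet, nsA, t)
	afterB := getCurrentPodNames(ctx, clientSet, nsB, t)

	if reflect.DeepEqual(beforeA, afterA) {
		t.Errorf("expected evictions in %q, but its pod list is unchanged", nsA)
	}
	if !reflect.DeepEqual(beforeB, afterB) {
		t.Errorf("expected %q to be left alone, but its pod list changed", nsB)
	}
}
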
202 changes: 126 additions & 76 deletions test/e2e/e2e_test.go
@@ -27,8 +27,6 @@ import (
"testing"
"time"

"sigs.k8s.io/yaml"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
@@ -44,6 +42,7 @@ import (
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/yaml"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
@@ -63,7 +62,6 @@ import (
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)

func isClientRateLimiterError(err error) bool {
@@ -195,67 +193,6 @@ func printPodLogs(ctx context.Context, t *testing.T, kubeClient clientset.Interf
}
}

func waitForDeschedulerPodRunning(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) string {
deschedulerPodName := ""
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

runningPods := []*v1.Pod{}
for _, item := range podList.Items {
if item.Status.Phase != v1.PodRunning {
continue
}
pod := item
runningPods = append(runningPods, &pod)
}

if len(runningPods) != 1 {
t.Logf("Expected a single running pod, got %v instead", len(runningPods))
return false, nil
}

deschedulerPodName = runningPods[0].Name
t.Logf("Found a descheduler pod running: %v", deschedulerPodName)
return true, nil
}); err != nil {
t.Fatalf("Error waiting for a running descheduler: %v", err)
}
return deschedulerPodName
}

func waitForDeschedulerPodAbsent(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) {
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

if len(podList.Items) > 0 {
t.Logf("Found a descheduler pod. Waiting until it gets deleted")
return false, nil
}

return true, nil
}); err != nil {
t.Fatalf("Error waiting for a descheduler to disapear: %v", err)
}
}

func TestMain(m *testing.M) {
if os.Getenv("DESCHEDULER_IMAGE") == "" {
klog.Errorf("DESCHEDULER_IMAGE env is not set")
@@ -297,7 +234,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: test.MakePodSpec(priorityClassName, gracePeriod),
Spec: makePodSpec(priorityClassName, gracePeriod),
},
},
}
@@ -329,10 +266,81 @@ func DsByNameContainer(name, namespace string, labels map[string]string, gracePe
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: test.MakePodSpec("", gracePeriod),
Spec: makePodSpec("", gracePeriod),
},
},
}
}

func buildTestDeployment(name, namespace string, replicas int32, testLabel map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
deployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: testLabel,
},
Spec: appsv1.DeploymentSpec{
Replicas: utilptr.To[int32](replicas),
Selector: &metav1.LabelSelector{
MatchLabels: testLabel,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabel,
},
Spec: makePodSpec("", utilptr.To[int64](0)),
},
},
}

if apply != nil {
apply(deployment)
}

return deployment
}

func makePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
return v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
RunAsNonRoot: utilptr.To(true),
RunAsUser: utilptr.To[int64](1000),
RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "IfNotPresent",
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",
},
},
},
}},
PriorityClassName: priorityClassName,
TerminationGracePeriodSeconds: gracePeriod,
}
}

func initializeClient(ctx context.Context, t *testing.T) (clientset.Interface, informers.SharedInformerFactory, listersv1.NodeLister, podutil.GetPodsAssignedToNodeFunc) {
@@ -1705,6 +1713,10 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
podItem, err := clientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

@@ -1719,27 +1731,65 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
}
}

func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) string {
runningPodName := ""
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))

runningPods := []*v1.Pod{}
for _, item := range podList.Items {
if item.Status.Phase != v1.PodRunning {
continue
}
pod := item
runningPods = append(runningPods, &pod)
}

if len(runningPods) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(runningPods))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)

if desireRunningPodNum == 1 {
runningPodName = runningPods[0].Name
}

return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
return runningPodName
}

func waitForPodsToDisappear(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, namespace string) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

if len(podList.Items) > 0 {
t.Logf("Found a existing pod. Waiting until it gets deleted")
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
t.Fatalf("Error waiting for pods to disappear: %v", err)
}
}

@@ -1756,8 +1806,8 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
return allNodes, workerNodes
}

func getCurrentPodNames(t *testing.T, ctx context.Context, kubeClient clientset.Interface, namespace string) []string {
podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
func getCurrentPodNames(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Logf("Unable to list pods: %v", err)
return nil
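
For illustration only (not part of this commit): the new helpers above are meant to be composed into a create/wait/cleanup lifecycle, the same pattern the topology-spread test below adopts. A minimal sketch with hypothetical names; the deferred closure mirrors the cleanup blocks in e2e_topologyspreadconstraint_test.go:

package e2e

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// runWorkloadLifecycle is hypothetical: create a test deployment, wait until
// its pods run, and guarantee cleanup before the test returns.
func runWorkloadLifecycle(ctx context.Context, t *testing.T, clientSet clientset.Interface, namespace string) {
	labelSet := map[string]string{"app": "lifecycle-demo"}
	deployment := buildTestDeployment("lifecycle-demo", namespace, 3, labelSet, nil)

	if _, err := clientSet.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Error creating deployment: %v", err)
	}
	defer func() {
		// Best-effort delete, then block until every labeled pod is gone so the
		// next test starts from a clean namespace.
		clientSet.AppsV1().Deployments(namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
		waitForPodsToDisappear(ctx, t, clientSet, deployment.Labels, namespace)
	}()

	// Blocks until exactly 3 labeled pods report Running, or fails the test.
	waitForPodsRunning(ctx, t, clientSet, deployment.Labels, 3, namespace)
}
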
8 changes: 4 additions & 4 deletions test/e2e/e2e_toomanyrestarts_test.go
@@ -190,7 +190,7 @@ func TestTooManyRestarts(t *testing.T) {
rs.Client = clientSet
rs.EventClient = clientSet

preRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
preRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
// Deploy the descheduler with the configured policy
deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(tc.policy)
if err != nil {
@@ -228,15 +228,15 @@ func TestTooManyRestarts(t *testing.T) {
if err != nil {
t.Fatalf("Unable to delete %q deployment: %v", deschedulerDeploymentObj.Name, err)
}
waitForDeschedulerPodAbsent(t, ctx, clientSet, testNamespace.Name)
waitForPodsToDisappear(ctx, t, clientSet, deschedulerDeploymentObj.Labels, deschedulerDeploymentObj.Namespace)
}()

t.Logf("Waiting for the descheduler pod running")
deschedulerPodName = waitForDeschedulerPodRunning(t, ctx, clientSet, testNamespace.Name)
deschedulerPodName = waitForPodsRunning(ctx, t, clientSet, deschedulerDeploymentObj.Labels, 1, deschedulerDeploymentObj.Namespace)

// Run RemovePodsHavingTooManyRestarts strategy
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 20*time.Second, true, func(ctx context.Context) (bool, error) {
currentRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
actualEvictedPod := preRunNames.Difference(currentRunNames)
actualEvictedPodCount := uint(actualEvictedPod.Len())
t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames.List(), actualEvictedPodCount)
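
For illustration only (not part of this commit): the eviction check above works by taking a set difference of pod-name snapshots. A sketch of that check factored into a hypothetical helper, assuming getCurrentPodNames from e2e_test.go:

package e2e

import (
	"context"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForEvictionCount is hypothetical: it polls until the number of pod names
// that have disappeared since the preRun snapshot matches the expected count.
func waitForEvictionCount(ctx context.Context, t *testing.T, clientSet clientset.Interface, namespace string, preRunNames sets.String, want uint) {
	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 20*time.Second, true, func(ctx context.Context) (bool, error) {
		currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, namespace, t)...)
		evicted := preRunNames.Difference(currentRunNames) // names gone since the snapshot
		t.Logf("evicted so far: %v", evicted.List())
		return uint(evicted.Len()) == want, nil
	}); err != nil {
		t.Fatalf("expected %d evicted pods, condition never met: %v", want, err)
	}
}
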
23 changes: 15 additions & 8 deletions test/e2e/e2e_topologyspreadconstraint_test.go
@@ -20,7 +20,6 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/test"
)

const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -126,26 +125,34 @@ func TestTopologySpreadConstraint(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
deployment := buildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
})
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating Deployment %s %v", name, err)
}
defer test.DeleteDeployment(ctx, t, clientSet, deployment)
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
defer func() {
clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
waitForPodsToDisappear(ctx, t, clientSet, deployment.Labels, deployment.Namespace)
}()
waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)

// Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
violatorDeploymentName := name + "-violator"
violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
violatorDeployLabels := tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels
violatorDeployLabels["name"] = violatorDeploymentName
violatorDeployment := buildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, violatorDeployLabels, func(d *appsv1.Deployment) {
d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
})
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
}
defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
defer func() {
clientSet.AppsV1().Deployments(violatorDeployment.Namespace).Delete(ctx, violatorDeployment.Name, metav1.DeleteOptions{})
waitForPodsToDisappear(ctx, t, clientSet, violatorDeployment.Labels, violatorDeployment.Namespace)
}()
waitForPodsRunning(ctx, t, clientSet, violatorDeployment.Labels, int(violatorCount), violatorDeployment.Namespace)

evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
Expand Down Expand Up @@ -195,7 +202,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
}

// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)

listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
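
For illustration only (not part of this commit): the apply callback on buildTestDeployment is how tests attach per-case spec tweaks, as the hunks above do for TopologySpreadConstraints and NodeSelector. A minimal sketch with hypothetical names, reusing the zoneTopologyKey constant defined in this test file:

package e2e

import (
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildZoneSpreadDeployment is hypothetical: it builds a test deployment whose
// pods must spread evenly (maxSkew=1) across zones.
func buildZoneSpreadDeployment(namespace string) *appsv1.Deployment {
	labelSet := map[string]string{"app": "spread-demo"}
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       zoneTopologyKey, // "topology.kubernetes.io/zone"
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: labelSet},
	}
	return buildTestDeployment("spread-demo", namespace, 4, labelSet, func(d *appsv1.Deployment) {
		d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{constraint}
	})
}
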
(Diffs for the remaining 2 changed files are not shown.)
