[TestTooManyRestarts] e2e: build a descheduler image and run the descheduler as a pod #1474

Merged
1 change: 1 addition & 0 deletions go.mod
@@ -4,6 +4,7 @@ go 1.22.4

require (
github.com/client9/misspell v0.3.4
github.com/ghodss/yaml v1.0.0
github.com/google/go-cmp v0.6.0
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
2 changes: 2 additions & 0 deletions go.sum
@@ -45,6 +45,8 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
206 changes: 206 additions & 0 deletions test/e2e/e2e_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package e2e

import (
"bufio"
"context"
"fmt"
"math"
@@ -26,23 +27,29 @@ import (
"testing"
"time"

"github.com/ghodss/yaml"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/events"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
deschedulerapiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -60,6 +67,205 @@ import (
"sigs.k8s.io/descheduler/test"
)

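// isClientRateLimiterError reports whether err came from the client-side rate limiter, in which case polling callers retry instead of failing.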
func isClientRateLimiterError(err error) bool {
return strings.Contains(err.Error(), "client rate limiter")
}

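// deschedulerPolicyConfigMap marshals the given policy to YAML and wraps it in the "descheduler-policy-configmap" ConfigMap in kube-system under the "policy.yaml" key.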
func deschedulerPolicyConfigMap(policy *deschedulerapiv1alpha2.DeschedulerPolicy) (*v1.ConfigMap, error) {
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "descheduler-policy-configmap",
Namespace: "kube-system",
},
}
policy.APIVersion = "descheduler/v1alpha2"
policy.Kind = "DeschedulerPolicy"
policyBytes, err := yaml.Marshal(policy)
if err != nil {
return nil, err
}
cm.Data = map[string]string{"policy.yaml": string(policyBytes)}
return cm, nil
}

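// deschedulerDeployment returns a single-replica kube-system Deployment that runs the image named by DESCHEDULER_IMAGE with the policy ConfigMap mounted at /policy-dir.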
func deschedulerDeployment(testName string) *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "descheduler",
Namespace: "kube-system",
Labels: map[string]string{"app": "descheduler", "test": testName},
},
Spec: appsv1.DeploymentSpec{
Replicas: utilptr.To[int32](1),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "descheduler", "test": testName},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "descheduler", "test": testName},
},
Spec: v1.PodSpec{
PriorityClassName: "system-cluster-critical",
ServiceAccountName: "descheduler-sa",
SecurityContext: &v1.PodSecurityContext{
RunAsNonRoot: utilptr.To(true),
RunAsUser: utilptr.To[int64](1000),
RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []v1.Container{
{
Name: "descheduler",
Image: os.Getenv("DESCHEDULER_IMAGE"),
ImagePullPolicy: "IfNotPresent",
Command: []string{"/bin/descheduler"},
Args: []string{"--policy-config-file", "/policy-dir/policy.yaml", "--descheduling-interval", "100m", "--v", "4"},
Ports: []v1.ContainerPort{{ContainerPort: 10258, Protocol: "TCP"}},
LivenessProbe: &v1.Probe{
FailureThreshold: 3,
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(10258),
Scheme: v1.URISchemeHTTPS,
},
},
InitialDelaySeconds: 3,
PeriodSeconds: 10,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("500m"),
v1.ResourceMemory: resource.MustParse("256Mi"),
},
},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",
},
},
Privileged: utilptr.To[bool](false),
ReadOnlyRootFilesystem: utilptr.To[bool](true),
RunAsNonRoot: utilptr.To[bool](true),
},
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/policy-dir",
Name: "policy-volume",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "policy-volume",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "descheduler-policy-configmap",
},
},
},
},
},
},
},
},
}
}

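// printPodLogs streams the logs of the named kube-system pod to stdout; failures are logged but do not abort the test.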
func printPodLogs(ctx context.Context, t *testing.T, kubeClient clientset.Interface, podName string) {
request := kubeClient.CoreV1().Pods("kube-system").GetLogs(podName, &v1.PodLogOptions{})
	readCloser, err := request.Stream(ctx)
if err != nil {
t.Logf("Unable to request stream: %v\n", err)
return
}

defer readCloser.Close()
scanner := bufio.NewScanner(readCloser)
for scanner.Scan() {
		fmt.Println(scanner.Text())
}
if err := scanner.Err(); err != nil {
t.Logf("Unable to scan bytes: %v\n", err)
}
}

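// waitForDeschedulerPodRunning polls for up to a minute until exactly one descheduler pod labeled for this test is Running and returns its name, failing the test on timeout.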
func waitForDeschedulerPodRunning(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) string {
deschedulerPodName := ""
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

runningPods := []*v1.Pod{}
for _, item := range podList.Items {
if item.Status.Phase != v1.PodRunning {
continue
}
pod := item
runningPods = append(runningPods, &pod)
}

if len(runningPods) != 1 {
t.Logf("Expected a single running pod, got %v instead", len(runningPods))
return false, nil
}

deschedulerPodName = runningPods[0].Name
t.Logf("Found a descheduler pod running: %v", deschedulerPodName)
return true, nil
}); err != nil {
t.Fatalf("Error waiting for a running descheduler: %v", err)
}
return deschedulerPodName
}

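// waitForDeschedulerPodAbsent polls for up to a minute until no descheduler pods labeled for this test remain, failing the test on timeout.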
func waitForDeschedulerPodAbsent(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) {
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
})
if err != nil {
t.Logf("Unable to list pods: %v", err)
if isClientRateLimiterError(err) {
return false, nil
}
return false, err
}

if len(podList.Items) > 0 {
t.Logf("Found a descheduler pod. Waiting until it gets deleted")
return false, nil
}

return true, nil
}); err != nil {
t.Fatalf("Error waiting for a descheduler to disapear: %v", err)
}
}

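// TestMain refuses to run the e2e suite unless DESCHEDULER_IMAGE names the descheduler image to deploy.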
func TestMain(m *testing.M) {
if os.Getenv("DESCHEDULER_IMAGE") == "" {
klog.Errorf("DESCHEDULER_IMAGE env is not set")
os.Exit(1)
}

os.Exit(m.Run())
}

func initPluginRegistry() {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
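
The remainder of the file, including the TestTooManyRestarts changes that actually consume these helpers, is truncated in this diff view. Purely as an illustration of how the new pieces fit together, here is a minimal sketch; the helper name runDeschedulerAsPod and the assumption that the caller supplies a fully built policy are illustrative, not the PR's code:

// runDeschedulerAsPod is a hypothetical helper showing how the helpers above
// could be combined: publish the policy, deploy the descheduler, wait for its
// pod, dump its logs, and clean up afterwards.
func runDeschedulerAsPod(ctx context.Context, t *testing.T, kubeClient clientset.Interface, testName string, policy *deschedulerapiv1alpha2.DeschedulerPolicy) {
	cm, err := deschedulerPolicyConfigMap(policy)
	if err != nil {
		t.Fatalf("Error building the policy ConfigMap: %v", err)
	}
	if _, err := kubeClient.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Error creating %q ConfigMap: %v", cm.Name, err)
	}
	defer kubeClient.CoreV1().ConfigMaps(cm.Namespace).Delete(ctx, cm.Name, metav1.DeleteOptions{})

	deployment := deschedulerDeployment(testName)
	if _, err := kubeClient.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Error creating the descheduler deployment: %v", err)
	}
	defer func() {
		// Deleting the Deployment triggers pod removal; wait until it is gone.
		kubeClient.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
		waitForDeschedulerPodAbsent(t, ctx, kubeClient, testName)
	}()

	podName := waitForDeschedulerPodRunning(t, ctx, kubeClient, testName)
	printPodLogs(ctx, t, kubeClient, podName)
}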