Commit b31ae2d

[e2e] abstract common methods

1 parent 8b0744c · commit b31ae2d

7 files changed: +149 -191 lines changed

test/e2e/e2e_failedpods_test.go

Lines changed: 1 addition & 2 deletions

@@ -23,7 +23,6 @@ import (
     "sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
     frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
     frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
-    "sigs.k8s.io/descheduler/test"
 )
 
 var oneHourPodLifetimeSeconds uint = 3600
@@ -134,7 +133,7 @@ func TestFailedPods(t *testing.T) {
 }
 
 func initFailedJob(name, namespace string) *batchv1.Job {
-    podSpec := test.MakePodSpec("", nil)
+    podSpec := makePodSpec("", nil)
     podSpec.Containers[0].Command = []string{"/bin/false"}
     podSpec.RestartPolicy = v1.RestartPolicyNever
     labelsSet := labels.Set{"test": name, "name": name}

test/e2e/e2e_leaderelection_test.go

Lines changed: 2 additions & 2 deletions

@@ -141,9 +141,9 @@ func TestLeaderElection(t *testing.T) {
     time.Sleep(7 * time.Second)
 
     // validate only pods from e2e-testleaderelection-a namespace are evicted.
-    podListA := getPodNameList(ctx, clientSet, ns1, t)
+    podListA := getCurrentPodNames(ctx, clientSet, ns1, t)
 
-    podListB := getPodNameList(ctx, clientSet, ns2, t)
+    podListB := getCurrentPodNames(ctx, clientSet, ns2, t)
 
     left := reflect.DeepEqual(podListAOrg, podListA)
     right := reflect.DeepEqual(podListBOrg, podListB)

test/e2e/e2e_test.go

Lines changed: 126 additions & 76 deletions

@@ -27,8 +27,6 @@ import (
     "testing"
     "time"
 
-    "sigs.k8s.io/yaml"
-
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     schedulingv1 "k8s.io/api/scheduling/v1"
@@ -44,6 +42,7 @@ import (
     componentbaseconfig "k8s.io/component-base/config"
     "k8s.io/klog/v2"
     utilptr "k8s.io/utils/ptr"
+    "sigs.k8s.io/yaml"
 
     "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
@@ -63,7 +62,6 @@ import (
     frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
     frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
     "sigs.k8s.io/descheduler/pkg/utils"
-    "sigs.k8s.io/descheduler/test"
 )
 
 func isClientRateLimiterError(err error) bool {
@@ -195,67 +193,6 @@ func printPodLogs(ctx context.Context, t *testing.T, kubeClient clientset.Interf
     }
 }
 
-func waitForDeschedulerPodRunning(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) string {
-    deschedulerPodName := ""
-    if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
-        podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
-            LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
-        })
-        if err != nil {
-            t.Logf("Unable to list pods: %v", err)
-            if isClientRateLimiterError(err) {
-                return false, nil
-            }
-            return false, err
-        }
-
-        runningPods := []*v1.Pod{}
-        for _, item := range podList.Items {
-            if item.Status.Phase != v1.PodRunning {
-                continue
-            }
-            pod := item
-            runningPods = append(runningPods, &pod)
-        }
-
-        if len(runningPods) != 1 {
-            t.Logf("Expected a single running pod, got %v instead", len(runningPods))
-            return false, nil
-        }
-
-        deschedulerPodName = runningPods[0].Name
-        t.Logf("Found a descheduler pod running: %v", deschedulerPodName)
-        return true, nil
-    }); err != nil {
-        t.Fatalf("Error waiting for a running descheduler: %v", err)
-    }
-    return deschedulerPodName
-}
-
-func waitForDeschedulerPodAbsent(t *testing.T, ctx context.Context, kubeClient clientset.Interface, testName string) {
-    if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
-        podList, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
-            LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "descheduler", "test": testName})).String(),
-        })
-        if err != nil {
-            t.Logf("Unable to list pods: %v", err)
-            if isClientRateLimiterError(err) {
-                return false, nil
-            }
-            return false, err
-        }
-
-        if len(podList.Items) > 0 {
-            t.Logf("Found a descheduler pod. Waiting until it gets deleted")
-            return false, nil
-        }
-
-        return true, nil
-    }); err != nil {
-        t.Fatalf("Error waiting for a descheduler to disapear: %v", err)
-    }
-}
-
 func TestMain(m *testing.M) {
     if os.Getenv("DESCHEDULER_IMAGE") == "" {
         klog.Errorf("DESCHEDULER_IMAGE env is not set")
@@ -297,7 +234,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
             ObjectMeta: metav1.ObjectMeta{
                 Labels: labels,
             },
-            Spec: test.MakePodSpec(priorityClassName, gracePeriod),
+            Spec: makePodSpec(priorityClassName, gracePeriod),
         },
     },
 }
@@ -329,10 +266,81 @@ func DsByNameContainer(name, namespace string, labels map[string]string, gracePe
             ObjectMeta: metav1.ObjectMeta{
                 Labels: labels,
             },
-            Spec: test.MakePodSpec("", gracePeriod),
+            Spec: makePodSpec("", gracePeriod),
+        },
+    },
+    }
+}
+
+func buildTestDeployment(name, namespace string, replicas int32, testLabel map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
+    deployment := &appsv1.Deployment{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Deployment",
+            APIVersion: "apps/v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+            Labels:    testLabel,
+        },
+        Spec: appsv1.DeploymentSpec{
+            Replicas: utilptr.To[int32](replicas),
+            Selector: &metav1.LabelSelector{
+                MatchLabels: testLabel,
+            },
+            Template: v1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: testLabel,
+                },
+                Spec: makePodSpec("", utilptr.To[int64](0)),
             },
         },
     }
+
+    if apply != nil {
+        apply(deployment)
+    }
+
+    return deployment
+}
+
+func makePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
+    return v1.PodSpec{
+        SecurityContext: &v1.PodSecurityContext{
+            RunAsNonRoot: utilptr.To(true),
+            RunAsUser:    utilptr.To[int64](1000),
+            RunAsGroup:   utilptr.To[int64](1000),
+            SeccompProfile: &v1.SeccompProfile{
+                Type: v1.SeccompProfileTypeRuntimeDefault,
+            },
+        },
+        Containers: []v1.Container{{
+            Name:            "pause",
+            ImagePullPolicy: "IfNotPresent",
+            Image:           "registry.k8s.io/pause",
+            Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+            Resources: v1.ResourceRequirements{
+                Limits: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("100m"),
+                    v1.ResourceMemory: resource.MustParse("200Mi"),
+                },
+                Requests: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("100m"),
+                    v1.ResourceMemory: resource.MustParse("100Mi"),
+                },
+            },
+            SecurityContext: &v1.SecurityContext{
+                AllowPrivilegeEscalation: utilptr.To(false),
+                Capabilities: &v1.Capabilities{
+                    Drop: []v1.Capability{
+                        "ALL",
+                    },
+                },
+            },
+        }},
+        PriorityClassName:             priorityClassName,
+        TerminationGracePeriodSeconds: gracePeriod,
+    }
 }
 
 func initializeClient(ctx context.Context, t *testing.T) (clientset.Interface, informers.SharedInformerFactory, listersv1.NodeLister, podutil.GetPodsAssignedToNodeFunc) {
@@ -1705,6 +1713,10 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
     if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
         podItem, err := clientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
         if err != nil {
+            t.Logf("Unable to list pods: %v", err)
+            if isClientRateLimiterError(err) {
+                return false, nil
+            }
             return false, err
         }
 
@@ -1719,27 +1731,65 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
     }
 }
 
-func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
-    if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
+func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) string {
+    runningPodName := ""
+    if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
         podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
             LabelSelector: labels.SelectorFromSet(labelMap).String(),
         })
         if err != nil {
+            t.Logf("Unable to list pods: %v", err)
+            if isClientRateLimiterError(err) {
+                return false, nil
+            }
             return false, err
         }
-        if len(podList.Items) != desireRunningPodNum {
-            t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
+
+        runningPods := []*v1.Pod{}
+        for _, item := range podList.Items {
+            if item.Status.Phase != v1.PodRunning {
+                continue
+            }
+            pod := item
+            runningPods = append(runningPods, &pod)
+        }
+
+        if len(runningPods) != desireRunningPodNum {
+            t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(runningPods))
             return false, nil
         }
-        for _, pod := range podList.Items {
-            if pod.Status.Phase != v1.PodRunning {
-                t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
+
+        if desireRunningPodNum == 1 {
+            runningPodName = runningPods[0].Name
+        }
+
+        return true, nil
+    }); err != nil {
+        t.Fatalf("Error waiting for pods running: %v", err)
+    }
+    return runningPodName
+}
+
+func waitForPodsToDisappear(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, namespace string) {
+    if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
+        podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+            LabelSelector: labels.SelectorFromSet(labelMap).String(),
+        })
+        if err != nil {
+            t.Logf("Unable to list pods: %v", err)
+            if isClientRateLimiterError(err) {
                 return false, nil
             }
+            return false, err
+        }
+
+        if len(podList.Items) > 0 {
+            t.Logf("Found a existing pod. Waiting until it gets deleted")
+            return false, nil
         }
         return true, nil
     }); err != nil {
-        t.Fatalf("Error waiting for pods running: %v", err)
+        t.Fatalf("Error waiting for pods to disappear: %v", err)
     }
 }
 
@@ -1756,8 +1806,8 @@ func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
     return allNodes, workerNodes
 }
 
-func getCurrentPodNames(t *testing.T, ctx context.Context, kubeClient clientset.Interface, namespace string) []string {
-    podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+func getCurrentPodNames(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
+    podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
     if err != nil {
         t.Logf("Unable to list pods: %v", err)
         return nil
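
For orientation, here is a minimal usage sketch of the helpers this commit consolidates into e2e_test.go. It is assumed to live in the same test/e2e package (so it can reuse that package's imports and unexported helpers); the deployment name, namespace, and label values are hypothetical and not part of the commit.

func exampleSharedHelperUsage(ctx context.Context, t *testing.T, clientSet clientset.Interface) {
    testLabel := map[string]string{"app": "e2e-example"} // hypothetical label set

    // buildTestDeployment wires makePodSpec into a Deployment; the optional
    // apply callback can tweak any field before the object is created.
    deployment := buildTestDeployment("example", "e2e-example-ns", 2, testLabel, func(d *appsv1.Deployment) {
        d.Spec.Template.Spec.TerminationGracePeriodSeconds = utilptr.To[int64](0)
    })
    if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
        t.Fatalf("Error creating deployment: %v", err)
    }
    defer func() {
        // Cleanup mirrors the call sites below: delete the deployment, then
        // wait for its labeled pods to disappear.
        clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
        waitForPodsToDisappear(ctx, t, clientSet, deployment.Labels, deployment.Namespace)
    }()

    // Block until exactly two labeled pods are Running. When waiting for a
    // single pod, the returned string is that pod's name (empty otherwise).
    _ = waitForPodsRunning(ctx, t, clientSet, deployment.Labels, 2, deployment.Namespace)

    // Snapshot the pod names in a namespace, e.g. to diff before/after an eviction run.
    t.Logf("pods: %v", getCurrentPodNames(ctx, clientSet, deployment.Namespace, t))
}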

test/e2e/e2e_toomanyrestarts_test.go

Lines changed: 4 additions & 4 deletions

@@ -190,7 +190,7 @@ func TestTooManyRestarts(t *testing.T) {
     rs.Client = clientSet
     rs.EventClient = clientSet
 
-    preRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
+    preRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
     // Deploy the descheduler with the configured policy
     deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(tc.policy)
     if err != nil {
@@ -228,15 +228,15 @@ func TestTooManyRestarts(t *testing.T) {
         if err != nil {
             t.Fatalf("Unable to delete %q deployment: %v", deschedulerDeploymentObj.Name, err)
         }
-        waitForDeschedulerPodAbsent(t, ctx, clientSet, testNamespace.Name)
+        waitForPodsToDisappear(ctx, t, clientSet, deschedulerDeploymentObj.Labels, deschedulerDeploymentObj.Namespace)
     }()
 
     t.Logf("Waiting for the descheduler pod running")
-    deschedulerPodName = waitForDeschedulerPodRunning(t, ctx, clientSet, testNamespace.Name)
+    deschedulerPodName = waitForPodsRunning(ctx, t, clientSet, deschedulerDeploymentObj.Labels, 1, deschedulerDeploymentObj.Namespace)
 
     // Run RemovePodsHavingTooManyRestarts strategy
     if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 20*time.Second, true, func(ctx context.Context) (bool, error) {
-        currentRunNames := sets.NewString(getCurrentPodNames(t, ctx, clientSet, testNamespace.Name)...)
+        currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
         actualEvictedPod := preRunNames.Difference(currentRunNames)
         actualEvictedPodCount := uint(actualEvictedPod.Len())
         t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames.List(), actualEvictedPodCount)

test/e2e/e2e_topologyspreadconstraint_test.go

Lines changed: 15 additions & 8 deletions

@@ -20,7 +20,6 @@ import (
     "sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
     frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
     frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
-    "sigs.k8s.io/descheduler/test"
 )
 
 const zoneTopologyKey string = "topology.kubernetes.io/zone"
@@ -126,26 +125,34 @@ func TestTopologySpreadConstraint(t *testing.T) {
     for name, tc := range testCases {
         t.Run(name, func(t *testing.T) {
             t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
-            deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+            deployment := buildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
                d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
             })
             if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
                 t.Fatalf("Error creating Deployment %s %v", name, err)
             }
-            defer test.DeleteDeployment(ctx, t, clientSet, deployment)
-            test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
+            defer func() {
+                clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
+                waitForPodsToDisappear(ctx, t, clientSet, deployment.Labels, deployment.Namespace)
+            }()
+            waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)
 
             // Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
             violatorDeploymentName := name + "-violator"
             violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
-            violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
+            violatorDeployLabels := tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels
+            violatorDeployLabels["name"] = violatorDeploymentName
+            violatorDeployment := buildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, violatorDeployLabels, func(d *appsv1.Deployment) {
                 d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
             })
             if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
                 t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
             }
-            defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
-            test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
+            defer func() {
+                clientSet.AppsV1().Deployments(violatorDeployment.Namespace).Delete(ctx, violatorDeployment.Name, metav1.DeleteOptions{})
+                waitForPodsToDisappear(ctx, t, clientSet, violatorDeployment.Labels, violatorDeployment.Namespace)
+            }()
+            waitForPodsRunning(ctx, t, clientSet, violatorDeployment.Labels, int(violatorCount), violatorDeployment.Namespace)
 
             evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
             if err != nil || len(evictionPolicyGroupVersion) == 0 {
@@ -195,7 +202,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
             }
 
             // Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
-            test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
+            waitForPodsRunning(ctx, t, clientSet, deployment.Labels, tc.replicaCount, deployment.Namespace)
 
             listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
             pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
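
One detail worth noting in this change: the violator deployment now carries an extra "name" label. The two deployments deliberately share the topology-spread selector labels, so a wait keyed only on the shared labels would also match the other deployment's pods; the extra label keeps the new waitForPodsRunning/waitForPodsToDisappear calls scoped to the violator. A minimal sketch (same test/e2e package assumed; label values hypothetical):

func waitOnViolatorOnly(ctx context.Context, t *testing.T, clientSet clientset.Interface, ns string, violatorCount int32) {
    // Labels shared with the main deployment through the topology spread
    // constraint's selector; a wait keyed on these alone would count pods of
    // both deployments.
    sharedLabels := map[string]string{"test": "spread"}

    // Copy the shared labels and add the violator-only "name" entry, as the
    // hunk above does, so the selector matches the violator's pods exclusively.
    violatorLabels := map[string]string{}
    for k, v := range sharedLabels {
        violatorLabels[k] = v
    }
    violatorLabels["name"] = "spread-violator"

    // This wait can now be satisfied only by the violator's own replicas.
    waitForPodsRunning(ctx, t, clientSet, violatorLabels, int(violatorCount), ns)
}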
