Skip to content

Commit 0c5ef29

Browse files
committed
UPSTREAM: 131742: [sig-scheduling] SchedulerPreemption [Serial] validates various priority Pods preempt expectedly with the async preemption: replace finalizers with preStop hook and TerminationGracePeriodSeconds
1 parent ff5de42 commit 0c5ef29

File tree

2 files changed

+37
-16
lines changed

2 files changed

+37
-16
lines changed

test/e2e/scheduling/predicates.go

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@ type pausePodConfig struct {
7575
DeletionGracePeriodSeconds *int64
7676
TopologySpreadConstraints []v1.TopologySpreadConstraint
7777
SchedulingGates []v1.PodSchedulingGate
78+
TerminationGracePeriodSeconds *int64
79+
PreStopHookSleepSeconds *int64
7880
}
7981

8082
var _ = SIGDescribe("SchedulerPredicates", framework.WithSerial(), func() {
@@ -1009,6 +1011,18 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
10091011
if conf.DeletionGracePeriodSeconds != nil {
10101012
pod.ObjectMeta.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
10111013
}
1014+
if conf.TerminationGracePeriodSeconds != nil {
1015+
pod.Spec.TerminationGracePeriodSeconds = conf.TerminationGracePeriodSeconds
1016+
}
1017+
if conf.PreStopHookSleepSeconds != nil {
1018+
pod.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
1019+
PreStop: &v1.LifecycleHandler{
1020+
Sleep: &v1.SleepAction{
1021+
Seconds: *conf.PreStopHookSleepSeconds,
1022+
},
1023+
},
1024+
}
1025+
}
10121026
return pod
10131027
}
10141028

test/e2e/scheduling/preemption.go

Lines changed: 23 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ import (
5050
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
5151
e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
5252
admissionapi "k8s.io/pod-security-admission/api"
53+
"k8s.io/utils/ptr"
5354
)
5455

5556
type priorityPair struct {
@@ -317,7 +318,8 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
317318
var podRes v1.ResourceList
318319
// Create 10 pods per node that will eat up all the node's resources.
319320
ginkgo.By("Create 10 low-priority pods on each node.")
320-
lowPriorityPods := make([]*v1.Pod, 0, 10*len(nodeList.Items))
321+
nodeListLen := len(nodeList.Items)
322+
lowPriorityPods := make([]*v1.Pod, 0, 10*nodeListLen)
321323
// Create pods in the cluster.
322324
for i, node := range nodeList.Items {
323325
// Update each node to advertise 3 available extended resources
@@ -331,12 +333,6 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
331333
pausePod := createPausePod(ctx, f, pausePodConfig{
332334
Name: fmt.Sprintf("pod%d-%d-%v", i, j, lowPriorityClassName),
333335
PriorityClassName: lowPriorityClassName,
334-
// This victim pod will be preempted by the high priority pod.
335-
// But, the deletion will be blocked by the finalizer.
336-
//
337-
// The finalizer is needed to prevent the medium Pods from being scheduled instead of the high Pods,
338-
// depending on when the scheduler notices the existence of all the high Pods we create.
339-
Finalizers: []string{testFinalizer},
340336
Resources: &v1.ResourceRequirements{
341337
Requests: podRes,
342338
Limits: podRes,
@@ -354,6 +350,15 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
354350
},
355351
},
356352
},
353+
// This victim pod will be preempted by the high priority pod.
354+
// But, the deletion will be blocked by the preStop hook with
355+
// TerminationGracePeriodSeconds set.
356+
//
357+
// The preStop hook + TerminationGracePeriodSeconds are needed to prevent the medium Pods
358+
// from being scheduled instead of the high Pods,
359+
// depending on when the scheduler notices the existence of all the high Pods we create.
360+
TerminationGracePeriodSeconds: ptr.To[int64](80),
361+
PreStopHookSleepSeconds: ptr.To[int64](79),
357362
})
358363
lowPriorityPods = append(lowPriorityPods, pausePod)
359364
framework.Logf("Created pod: %v", pausePod.Name)
@@ -365,8 +370,8 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
365370
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod))
366371
}
367372

368-
highPriorityPods := make([]*v1.Pod, 0, 5*len(nodeList.Items))
369-
mediumPriorityPods := make([]*v1.Pod, 0, 10*len(nodeList.Items))
373+
highPriorityPods := make([]*v1.Pod, 0, 5*nodeListLen)
374+
mediumPriorityPods := make([]*v1.Pod, 0, 10*nodeListLen)
370375

371376
ginkgo.By("Run high/medium priority pods that have same requirements as that of lower priority pod")
372377
for i := range nodeList.Items {
@@ -426,18 +431,20 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
426431
}))
427432
}
428433

429-
ginkgo.By("Remove the finalizer from all low priority pods to proceed the preemption.")
434+
ginkgo.By("Delete all low priority pods to proceed the preemption faster.")
430435
for _, pod := range lowPriorityPods {
431-
// Remove the finalizer so that the pod can be deleted by GC
432-
e2epod.NewPodClient(f).RemoveFinalizer(ctx, pod.Name, testFinalizer)
436+
err := cs.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)})
437+
if err != nil && !apierrors.IsNotFound(err) {
438+
framework.Logf("Deleting %v pod failed: %v", pod.Name, err)
439+
}
433440
}
434441

435442
ginkgo.By("Wait for high priority pods to be scheduled.")
436443
for _, pod := range highPriorityPods {
437444
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod))
438445
}
439446

440-
ginkgo.By("Wait for 5 medium priority pods to be scheduled.")
447+
ginkgo.By(fmt.Sprintf("Wait for %v medium priority pods to be scheduled.", 5*nodeListLen))
441448
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Second, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
442449
scheduled := 0
443450
for _, pod := range mediumPriorityPods {
@@ -450,11 +457,11 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
450457
scheduled++
451458
}
452459
}
453-
if scheduled > 5 {
454-
return false, fmt.Errorf("expected 5 medium priority pods to be scheduled, but got %d", scheduled)
460+
if scheduled > 5*nodeListLen {
461+
return false, fmt.Errorf("expected %v medium priority pods to be scheduled, but got %d", 5*nodeListLen, scheduled)
455462
}
456463

457-
return scheduled == 5, nil
464+
return scheduled == 5*nodeListLen, nil
458465
}))
459466
})
460467

0 commit comments

Comments (0)