Commit 7b38bff

Move pod level hugepage test from e2e to e2e node
1 parent 98e362c commit 7b38bff

File tree

4 files changed: +671 -453 lines changed

test/e2e/common/node/pod_level_resources.go

Lines changed: 37 additions & 212 deletions
@@ -29,17 +29,12 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-
-	utils "k8s.io/kubernetes/test/utils"
-
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -48,14 +43,9 @@ const (
 	cgroupv2CPUWeight string = "cpu.weight"
 	cgroupv2CPULimit string = "cpu.max"
 	cgroupv2MemLimit string = "memory.max"
-
-	cgroupv2HugeTLBPrefix string = "hugetlb"
-	cgroupv2HugeTLBRsvd string = "rsvd"
-
-	cgroupFsPath string = "/sys/fs/cgroup"
-	mountPath string = "/sysfscgroup"
-
-	CPUPeriod string = "100000"
+	cgroupFsPath string = "/sys/fs/cgroup"
+	CPUPeriod string = "100000"
+	mountPath string = "/sysfscgroup"
 )

 var (
@@ -79,7 +69,6 @@ var _ = SIGDescribe("Pod Level Resources", framework.WithSerial(), feature.PodLe
 			e2eskipper.Skipf("not supported on cgroupv1 -- skipping")
 		}
 	})
-
 	podLevelResourcesTests(f)
 })

@@ -115,7 +104,7 @@ func isCgroupv2Node(f *framework.Framework, ctx context.Context) bool {

 func makeObjectMetadata(name, namespace string) metav1.ObjectMeta {
 	return metav1.ObjectMeta{
-		Name: name, Namespace: namespace,
+		Name: "testpod", Namespace: namespace,
 		Labels: map[string]string{"time": strconv.Itoa(time.Now().Nanosecond())},
 	}
 }
@@ -124,16 +113,11 @@ type containerInfo struct {
 	Name string
 	Resources *resourceInfo
 }
-
 type resourceInfo struct {
-	CPUReq string
-	CPULim string
-	MemReq string
-	MemLim string
-	HugePagesReq2Mi string
-	HugePagesLim2Mi string
-	HugePagesReq1Gi string
-	HugePagesLim1Gi string
+	CPUReq string
+	CPULim string
+	MemReq string
+	MemLim string
 }

 func makeContainer(info containerInfo) v1.Container {
@@ -156,7 +140,7 @@ func makeContainer(info containerInfo) v1.Container {
 func getResourceRequirements(info *resourceInfo) v1.ResourceRequirements {
 	var res v1.ResourceRequirements
 	if info != nil {
-		if info.CPUReq != "" || info.MemReq != "" || info.HugePagesReq2Mi != "" || info.HugePagesReq1Gi != "" {
+		if info.CPUReq != "" || info.MemReq != "" {
 			res.Requests = make(v1.ResourceList)
 		}
 		if info.CPUReq != "" {
@@ -165,14 +149,8 @@ func getResourceRequirements(info *resourceInfo) v1.ResourceRequirements {
 		if info.MemReq != "" {
 			res.Requests[v1.ResourceMemory] = resource.MustParse(info.MemReq)
 		}
-		if info.HugePagesReq2Mi != "" {
-			res.Requests[v1.ResourceHugePagesPrefix+"2Mi"] = resource.MustParse(info.HugePagesReq2Mi)
-		}
-		if info.HugePagesReq1Gi != "" {
-			res.Requests[v1.ResourceHugePagesPrefix+"1Gi"] = resource.MustParse(info.HugePagesReq1Gi)
-		}

-		if info.CPULim != "" || info.MemLim != "" || info.HugePagesLim2Mi != "" || info.HugePagesLim1Gi != "" {
+		if info.CPULim != "" || info.MemLim != "" {
 			res.Limits = make(v1.ResourceList)
 		}
 		if info.CPULim != "" {
@@ -181,12 +159,6 @@ func getResourceRequirements(info *resourceInfo) v1.ResourceRequirements {
 		if info.MemLim != "" {
 			res.Limits[v1.ResourceMemory] = resource.MustParse(info.MemLim)
 		}
-		if info.HugePagesLim2Mi != "" {
-			res.Limits[v1.ResourceHugePagesPrefix+"2Mi"] = resource.MustParse(info.HugePagesLim2Mi)
-		}
-		if info.HugePagesLim1Gi != "" {
-			res.Limits[v1.ResourceHugePagesPrefix+"1Gi"] = resource.MustParse(info.HugePagesLim1Gi)
-		}
 	}
 	return res
 }
@@ -239,7 +211,7 @@ func verifyQoS(gotPod v1.Pod, expectedQoS v1.PodQOSClass) {
 }

 // TODO(ndixita): dedup the conversion logic in pod resize test and move to helpers/utils.
-func verifyPodCgroups(f *framework.Framework, pod *v1.Pod, info *resourceInfo) error {
+func verifyPodCgroups(ctx context.Context, f *framework.Framework, pod *v1.Pod, info *resourceInfo) error {
 	ginkgo.GinkgoHelper()
 	cmd := fmt.Sprintf("find %s -name '*%s*'", mountPath, strings.ReplaceAll(string(pod.UID), "-", "_"))
 	framework.Logf("Namespace %s Pod %s - looking for Pod cgroup directory path: %q", f.Namespace, pod.Name, cmd)
@@ -275,70 +247,6 @@ func verifyPodCgroups(f *framework.Framework, pod *v1.Pod, info *resourceInfo) e
 	if err != nil {
 		errs = append(errs, fmt.Errorf("failed to verify memory limit cgroup value: %w", err))
 	}
-
-	// Verify cgroup limits for all the hugepage sizes in the pod
-	for resourceName, resourceAmount := range expectedResources.Limits {
-		if !v1resource.IsHugePageResourceName(resourceName) {
-			continue
-		}
-
-		pageSize, err := v1helper.HugePageSizeFromResourceName(resourceName)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("encountered error while obtaining hugepage size: %w", err))
-		}
-
-		sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize.Value())
-		if err != nil {
-			errs = append(errs, fmt.Errorf("encountered error while obtaining hugepage unit size: %w", err))
-		}
-
-		hugepageCgroupv2Limits := []string{
-			fmt.Sprintf("%s.%s.max", cgroupv2HugeTLBPrefix, sizeString),
-			fmt.Sprintf("%s.%s.%s.max", cgroupv2HugeTLBPrefix, sizeString, cgroupv2HugeTLBRsvd),
-		}
-		expectedHugepageLim := strconv.FormatInt(resourceAmount.Value(), 10)
-
-		for _, hugepageCgroupv2Limit := range hugepageCgroupv2Limits {
-			hugepageLimCgPath := fmt.Sprintf("%s/%s", podCgPath, hugepageCgroupv2Limit)
-			err = e2epod.VerifyCgroupValue(f, pod, pod.Spec.Containers[0].Name, hugepageLimCgPath, expectedHugepageLim)
-			if err != nil {
-				errs = append(errs, fmt.Errorf("failed to verify hugepage limit cgroup value: %w, path: %s", err, hugepageLimCgPath))
-			}
-		}
-	}
-
-	return utilerrors.NewAggregate(errs)
-}
-
-func verifyContainersCgroupLimits(f *framework.Framework, pod *v1.Pod) error {
-	var errs []error
-	for _, container := range pod.Spec.Containers {
-		if pod.Spec.Resources == nil {
-			continue
-		}
-
-		if pod.Spec.Resources.Limits.Memory() != nil && container.Resources.Limits.Memory() == nil {
-			expectedCgroupMemLimit := strconv.FormatInt(pod.Spec.Resources.Limits.Memory().Value(), 10)
-			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2MemLimit), expectedCgroupMemLimit)
-			if err != nil {
-				errs = append(errs, fmt.Errorf("failed to verify memory limit cgroup value: %w", err))
-			}
-		}
-
-		if pod.Spec.Resources.Limits.Cpu() != nil && container.Resources.Limits.Cpu() == nil {
-			cpuQuota := kubecm.MilliCPUToQuota(pod.Spec.Resources.Limits.Cpu().MilliValue(), kubecm.QuotaPeriod)
-			expectedCPULimit := strconv.FormatInt(cpuQuota, 10)
-			expectedCPULimit = fmt.Sprintf("%s %s", expectedCPULimit, CPUPeriod)
-			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2CPULimit), expectedCPULimit)
-			if err != nil {
-				errs = append(errs, fmt.Errorf("failed to verify cpu limit cgroup value: %w", err))
-			}
-		}
-
-		// TODO(KevinTMtz) - Check for all hugepages for the pod, for this is
-		// required to enabled the Containerd Cgroup value, because if not, HugeTLB
-		// cgroup values will be just set to max
-	}
 	return utilerrors.NewAggregate(errs)
 }

@@ -349,7 +257,7 @@ func podLevelResourcesTests(f *framework.Framework) {
 		// and limits for the pod. If pod-level resource specifications
 		// are specified, totalPodResources is equal to pod-level resources.
 		// Otherwise, it is calculated by aggregating resource requests and
-		// limits from all containers within the pod.
+		// limits from all containers within the pod..
 		totalPodResources *resourceInfo
 	}

@@ -358,7 +266,6 @@ func podLevelResourcesTests(f *framework.Framework) {
 		podResources *resourceInfo
 		containers []containerInfo
 		expected expectedPodConfig
-		hugepages map[string]int
 	}

 	tests := []testCase{
@@ -442,108 +349,10 @@ func podLevelResourcesTests(f *framework.Framework) {
 				totalPodResources: &resourceInfo{CPUReq: "50m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi"},
 			},
 		},
-		{
-			name: "Guaranteed QoS pod hugepages, no container resources, single page size",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi", HugePagesLim2Mi: "10Mi"},
-			containers: []containerInfo{{Name: "c1"}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSGuaranteed,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi", HugePagesReq2Mi: "10Mi", HugePagesLim2Mi: "10Mi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 5,
-			},
-		},
-		{
-			name: "Burstable QoS pod hugepages, container resources, single page size",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesLim2Mi: "10Mi"},
-			containers: []containerInfo{{Name: "c1", Resources: &resourceInfo{CPUReq: "20m", CPULim: "50m", HugePagesLim2Mi: "4Mi"}}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSBurstable,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesReq2Mi: "10Mi", HugePagesLim2Mi: "10Mi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 5,
-			},
-		},
-		{
-			name: "Burstable QoS pod hugepages, container resources, single page size, pod level does not specify hugepages",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi"},
-			containers: []containerInfo{{Name: "c1", Resources: &resourceInfo{CPUReq: "20m", CPULim: "50m", HugePagesLim2Mi: "4Mi"}}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSBurstable,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesReq2Mi: "4Mi", HugePagesLim2Mi: "4Mi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 2,
-			},
-		},
-		{
-			name: "Guaranteed QoS pod hugepages, no container resources, multiple page size",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi", HugePagesLim2Mi: "10Mi", HugePagesLim1Gi: "1Gi"},
-			containers: []containerInfo{{Name: "c1"}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSGuaranteed,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi", HugePagesReq2Mi: "10Mi", HugePagesLim2Mi: "10Mi", HugePagesReq1Gi: "1Gi", HugePagesLim1Gi: "1Gi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 5,
-				v1.ResourceHugePagesPrefix + "1Gi": 1,
-			},
-		},
-		{
-			name: "Burstable QoS pod hugepages, container resources, multiple page size",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesLim2Mi: "10Mi", HugePagesLim1Gi: "1Gi"},
-			containers: []containerInfo{{Name: "c1", Resources: &resourceInfo{CPUReq: "20m", CPULim: "50m", HugePagesLim2Mi: "4Mi", HugePagesLim1Gi: "1Gi"}}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSBurstable,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesReq2Mi: "10Mi", HugePagesLim2Mi: "10Mi", HugePagesReq1Gi: "1Gi", HugePagesLim1Gi: "1Gi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 5,
-				v1.ResourceHugePagesPrefix + "1Gi": 1,
-			},
-		},
-		{
-			name: "Burstable QoS pod hugepages, container resources, multiple page size, pod level does not specify hugepages",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi"},
-			containers: []containerInfo{{Name: "c1", Resources: &resourceInfo{CPUReq: "20m", CPULim: "50m", HugePagesLim2Mi: "4Mi", HugePagesLim1Gi: "1Gi"}}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSBurstable,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesReq2Mi: "4Mi", HugePagesLim2Mi: "4Mi", HugePagesReq1Gi: "1Gi", HugePagesLim1Gi: "1Gi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 2,
-				v1.ResourceHugePagesPrefix + "1Gi": 1,
-			},
-		},
-		{
-			name: "Burstable QoS pod hugepages, container resources, different page size between pod and container level",
-			podResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesLim2Mi: "10Mi"},
-			containers: []containerInfo{{Name: "c1", Resources: &resourceInfo{CPUReq: "20m", CPULim: "50m", HugePagesLim1Gi: "1Gi"}}, {Name: "c2"}},
-			expected: expectedPodConfig{
-				qos: v1.PodQOSBurstable,
-				totalPodResources: &resourceInfo{CPUReq: "100m", CPULim: "100m", MemReq: "50Mi", MemLim: "100Mi", HugePagesReq2Mi: "10Mi", HugePagesLim2Mi: "10Mi", HugePagesReq1Gi: "1Gi", HugePagesLim1Gi: "1Gi"},
-			},
-			hugepages: map[string]int{
-				v1.ResourceHugePagesPrefix + "2Mi": 5,
-				v1.ResourceHugePagesPrefix + "1Gi": 1,
-			},
-		},
 	}

 	for _, tc := range tests {
 		ginkgo.It(tc.name, func(ctx context.Context) {
-			// Pre-allocate hugepages in the node
-			if tc.hugepages != nil {
-				utils.SetHugepages(ctx, tc.hugepages)
-
-				ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
-				utils.RestartKubelet(ctx, false)
-
-				utils.WaitForHugepages(ctx, f, tc.hugepages)
-			}
-
 			podMetadata := makeObjectMetadata("testpod", f.Namespace.Name)
 			testPod := makePod(&podMetadata, tc.podResources, tc.containers)

@@ -558,7 +367,7 @@ func podLevelResourcesTests(f *framework.Framework) {
 			verifyQoS(*pod, tc.expected.qos)

 			ginkgo.By("verifying pod cgroup values")
-			err := verifyPodCgroups(f, pod, tc.expected.totalPodResources)
+			err := verifyPodCgroups(ctx, f, pod, tc.expected.totalPodResources)
 			framework.ExpectNoError(err, "failed to verify pod's cgroup values: %v", err)

 			ginkgo.By("verifying containers cgroup limits are same as pod container's cgroup limits")
@@ -568,16 +377,32 @@ func podLevelResourcesTests(f *framework.Framework) {
 			ginkgo.By("deleting pods")
 			delErr := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
 			framework.ExpectNoError(delErr, "failed to delete pod %s", delErr)
+		})
+	}
+}

-			// Release pre-allocated hugepages
-			if tc.hugepages != nil {
-				utils.ReleaseHugepages(ctx, tc.hugepages)
-
-				ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
-				utils.RestartKubelet(ctx, true)
+func verifyContainersCgroupLimits(f *framework.Framework, pod *v1.Pod) error {
+	var errs []error
+	for _, container := range pod.Spec.Containers {
+		if pod.Spec.Resources != nil && pod.Spec.Resources.Limits.Memory() != nil &&
+			container.Resources.Limits.Memory() == nil {
+			expectedCgroupMemLimit := strconv.FormatInt(pod.Spec.Resources.Limits.Memory().Value(), 10)
+			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2MemLimit), expectedCgroupMemLimit)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("failed to verify memory limit cgroup value: %w", err))
+			}
+		}

-			utils.WaitForHugepages(ctx, f, tc.hugepages)
+		if pod.Spec.Resources != nil && pod.Spec.Resources.Limits.Cpu() != nil &&
+			container.Resources.Limits.Cpu() == nil {
+			cpuQuota := kubecm.MilliCPUToQuota(pod.Spec.Resources.Limits.Cpu().MilliValue(), kubecm.QuotaPeriod)
+			expectedCPULimit := strconv.FormatInt(cpuQuota, 10)
+			expectedCPULimit = fmt.Sprintf("%s %s", expectedCPULimit, CPUPeriod)
+			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2CPULimit), expectedCPULimit)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("failed to verify cpu limit cgroup value: %w", err))
 			}
-		})
+		}
 	}
+	return utilerrors.NewAggregate(errs)
 }
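
Note (not part of the commit): the cgroup checks in this file compare cgroup v2 files against values derived from the pod spec. The hugepage verification that moves to e2e_node looks at `hugetlb.<size>.max` and `hugetlb.<size>.rsvd.max` inside the pod cgroup directory, and the CPU limit check expects `cpu.max` to contain a `"<quota> <period>"` string. A minimal standalone sketch of that derivation follows; `hugepageSizeString` and `milliCPUToQuota` are simplified stand-ins for `v1helper.HugePageUnitSizeFromByteSize` and `kubecm.MilliCPUToQuota`, not the test's actual helpers.

```go
package main

import "fmt"

// hugepageSizeString renders a page size in bytes the way the hugetlb cgroup
// files name it, e.g. 2Mi -> "2MB", 1Gi -> "1GB" (simplified stand-in).
func hugepageSizeString(bytes int64) string {
	switch {
	case bytes >= 1<<30:
		return fmt.Sprintf("%dGB", bytes>>30)
	case bytes >= 1<<20:
		return fmt.Sprintf("%dMB", bytes>>20)
	default:
		return fmt.Sprintf("%dKB", bytes>>10)
	}
}

// milliCPUToQuota is a simplified version of the kubelet's MilliCPUToQuota:
// the CFS quota in microseconds for a given CPU limit and period.
func milliCPUToQuota(milliCPU, period int64) int64 {
	return milliCPU * period / 1000
}

func main() {
	// A pod-level 2Mi hugepage limit of 10Mi is checked against both
	// hugetlb.2MB.max and hugetlb.2MB.rsvd.max, expecting the byte value.
	size := hugepageSizeString(2 << 20)
	fmt.Printf("hugetlb.%s.max = %d\n", size, int64(10<<20))      // hugetlb.2MB.max = 10485760
	fmt.Printf("hugetlb.%s.rsvd.max = %d\n", size, int64(10<<20)) // hugetlb.2MB.rsvd.max = 10485760

	// A pod-level CPU limit of 100m with the 100000us period (CPUPeriod above)
	// means cpu.max is expected to read "10000 100000".
	quota := milliCPUToQuota(100, 100000)
	fmt.Printf("cpu.max = %d %d\n", quota, 100000)
}
```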
