Skip to content

Commit 23aff5d

Browse files
committed
Split out the pieces of the TrafficDistribution test
1 parent 10cd54b commit 23aff5d

File tree

1 file changed

+49
-25
lines changed

test/e2e/network/traffic_distribution.go

Lines changed: 49 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -105,11 +105,9 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
105105
pod *v1.Pod
106106
}
107107

108-
////////////////////////////////////////////////////////////////////////////
109-
// Main test specifications.
110-
////////////////////////////////////////////////////////////////////////////
111-
112-
ginkgo.It("should route traffic to an endpoint in the same zone when using PreferClose", func(ctx context.Context) {
108+
// allocateClientsAndServers figures out where to put clients and servers for
109+
// a simple "same-zone" traffic distribution test.
110+
allocateClientsAndServers := func(ctx context.Context) ([]*clientPod, []*serverPod) {
113111
ginkgo.By("finding 3 zones with schedulable nodes")
114112
nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
115113
framework.ExpectNoError(err)
@@ -149,41 +147,52 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
149147
clientPods[1].endpoints = []*serverPod{serverPods[1]}
150148
clientPods[2].endpoints = serverPods
151149

150+
return clientPods, serverPods
151+
}
152+
153+
// createService creates the service for a traffic distribution test
154+
createService := func(ctx context.Context, trafficDist string) *v1.Service {
155+
serviceName := "traffic-dist-test-service"
156+
ginkgo.By(fmt.Sprintf("creating a service %q with trafficDistribution %q", serviceName, trafficDist))
157+
return createServiceReportErr(ctx, c, f.Namespace.Name, &v1.Service{
158+
ObjectMeta: metav1.ObjectMeta{
159+
Name: serviceName,
160+
},
161+
Spec: v1.ServiceSpec{
162+
Selector: map[string]string{
163+
"app": f.UniqueName,
164+
},
165+
TrafficDistribution: &trafficDist,
166+
Ports: []v1.ServicePort{{
167+
Port: 80,
168+
TargetPort: intstr.FromInt32(9376),
169+
Protocol: v1.ProtocolTCP,
170+
}},
171+
},
172+
})
173+
}
174+
175+
// createPods creates endpoint pods for svc as described by serverPods, waits for
176+
// the EndpointSlices to be updated, and creates clientPods as described by
177+
// clientPods.
178+
createPods := func(ctx context.Context, svc *v1.Service, clientPods []*clientPod, serverPods []*serverPod) {
152179
var podsToCreate []*v1.Pod
153-
servingPodLabels := map[string]string{"app": f.UniqueName}
154180
for i, sp := range serverPods {
155181
node := sp.node.Name
156182
zone := sp.node.Labels[v1.LabelTopologyZone]
157183
pod := e2epod.NewAgnhostPod(f.Namespace.Name, fmt.Sprintf("server-%d-%s", i, node), nil, nil, nil, "serve-hostname")
158184
ginkgo.By(fmt.Sprintf("creating a server pod %q on node %q in zone %q", pod.Name, node, zone))
159185
nodeSelection := e2epod.NodeSelection{Name: node}
160186
e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
161-
pod.Labels = servingPodLabels
187+
pod.Labels = svc.Spec.Selector
162188

163189
sp.pod = pod
164190
podsToCreate = append(podsToCreate, pod)
165191
}
166192
e2epod.NewPodClient(f).CreateBatch(ctx, podsToCreate)
167193

168-
trafficDist := v1.ServiceTrafficDistributionPreferClose
169-
svc := createServiceReportErr(ctx, c, f.Namespace.Name, &v1.Service{
170-
ObjectMeta: metav1.ObjectMeta{
171-
Name: "traffic-dist-test-service",
172-
},
173-
Spec: v1.ServiceSpec{
174-
Selector: servingPodLabels,
175-
TrafficDistribution: &trafficDist,
176-
Ports: []v1.ServicePort{{
177-
Port: 80,
178-
TargetPort: intstr.FromInt32(9376),
179-
Protocol: v1.ProtocolTCP,
180-
}},
181-
},
182-
})
183-
ginkgo.By(fmt.Sprintf("creating a service=%q with trafficDistribution=%v", svc.GetName(), *svc.Spec.TrafficDistribution))
184-
185194
ginkgo.By("waiting for EndpointSlices to be created")
186-
err = framework.WaitForServiceEndpointsNum(ctx, c, svc.Namespace, svc.Name, len(serverPods), 1*time.Second, e2eservice.ServiceEndpointsTimeout)
195+
err := framework.WaitForServiceEndpointsNum(ctx, c, svc.Namespace, svc.Name, len(serverPods), 1*time.Second, e2eservice.ServiceEndpointsTimeout)
187196
framework.ExpectNoError(err)
188197
slices := endpointSlicesForService(svc.Name)
189198
framework.Logf("got slices:\n%v", format.Object(slices, 1))
@@ -204,7 +213,11 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
204213
podsToCreate = append(podsToCreate, pod)
205214
}
206215
e2epod.NewPodClient(f).CreateBatch(ctx, podsToCreate)
216+
}
207217

218+
// checkTrafficDistribution checks that traffic from clientPods is distributed in
219+
// the expected way.
220+
checkTrafficDistribution := func(ctx context.Context, clientPods []*clientPod) {
208221
for _, cp := range clientPods {
209222
wantedEndpoints := sets.New[string]()
210223
for _, sp := range cp.endpoints {
@@ -241,5 +254,16 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
241254

242255
gomega.Eventually(ctx, requestsFromClient(cp.pod)).WithPolling(5 * time.Second).WithTimeout(e2eservice.KubeProxyLagTimeout).Should(requestsSucceed)
243256
}
257+
}
258+
259+
////////////////////////////////////////////////////////////////////////////
260+
// Main test specifications.
261+
////////////////////////////////////////////////////////////////////////////
262+
263+
ginkgo.It("should route traffic to an endpoint in the same zone when using PreferClose", func(ctx context.Context) {
264+
clientPods, serverPods := allocateClientsAndServers(ctx)
265+
svc := createService(ctx, v1.ServiceTrafficDistributionPreferClose)
266+
createPods(ctx, svc, clientPods, serverPods)
267+
checkTrafficDistribution(ctx, clientPods)
244268
})
245269
})

0 commit comments

Comments (0)