
Commit 0ee6b0d

Add "multi-node" TrafficDistribution test
The existing TrafficDistribution test didn't really distinguish "same zone" from "same node": in a cluster with only one schedulable node per zone, the two preferences are indistinguishable. Add another test that ensures there are at least 2 nodes in each of 3 zones, so it can tell them apart. (Keep the original test as well, to avoid losing coverage on CI systems whose clusters have only a single schedulable node per zone.)
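(For context: trafficDistribution is a field on the Service spec, and the tests below set it via the v1.ServiceTrafficDistribution* constants passed to createService. A minimal sketch of such a Service, assuming current k8s.io/api and k8s.io/utils packages; the package, function name, service name, selector, and ports are illustrative, not code from this commit:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"
)

// newPreferCloseService sketches a Service whose trafficDistribution is set
// to PreferClose; all names and values here are illustrative.
func newPreferCloseService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "traffic-dist-test-service"},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"app": "traffic-dist"},
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt32(9376),
			}},
			// "PreferClose" asks the dataplane to prefer topologically
			// closer (e.g. same-zone) endpoints over distant ones.
			TrafficDistribution: ptr.To(v1.ServiceTrafficDistributionPreferClose),
		},
	}
}
)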
1 parent: 7956b37

1 file changed

test/e2e/network/traffic_distribution.go

Lines changed: 98 additions & 0 deletions
@@ -94,6 +94,36 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
 		}
 	}
 
+	// getNodesForMultiNode returns a set of nodes for a test case with 3 zones with 2
+	// nodes each. If there are not suitable nodes/zones, the test is skipped.
+	getNodesForMultiNode := func(ctx context.Context) ([]*v1.Node, []*v1.Node, []*v1.Node) {
+		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
+		framework.ExpectNoError(err)
+		nodesForZone := make(map[string][]*v1.Node)
+		for _, node := range nodeList.Items {
+			zone := node.Labels[v1.LabelTopologyZone]
+			nodesForZone[zone] = append(nodesForZone[zone], &node)
+		}
+		if len(nodesForZone) < 3 {
+			e2eskipper.Skipf("need at least 3 zones, with at least 2 schedulable nodes each")
+		}
+
+		var multiNodeZones [][]*v1.Node
+		for _, nodes := range nodesForZone {
+			if len(nodes) > 1 {
+				multiNodeZones = append(multiNodeZones, nodes)
+			}
+			if len(multiNodeZones) == 3 {
+				break
+			}
+		}
+		if len(multiNodeZones) < 3 {
+			e2eskipper.Skipf("need at least 3 zones, with at least 2 schedulable nodes each")
+		}
+
+		return multiNodeZones[0], multiNodeZones[1], multiNodeZones[2]
+	}
+
 	// Data structures for tracking server and client pods
 	type serverPod struct {
 		node *v1.Node
@@ -151,6 +181,59 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
 		return clientPods, serverPods
 	}
 
+	// allocateMultiNodeClientsAndServers figures out where to put clients and servers
+	// for a "same-zone" traffic distribution test with multiple nodes in each zone.
+	allocateMultiNodeClientsAndServers := func(ctx context.Context) ([]*clientPod, []*serverPod) {
+		ginkgo.By("finding a set of zones and nodes for the test")
+		zone1Nodes, zone2Nodes, zone3Nodes := getNodesForMultiNode(ctx)
+
+		var clientPods []*clientPod
+		var serverPods []*serverPod
+
+		// First zone: a client and an endpoint on each node, and both clients
+		// should talk to both endpoints.
+		endpointsForZone := []*serverPod{
+			{node: zone1Nodes[0]},
+			{node: zone1Nodes[1]},
+		}
+
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone1Nodes[0],
+				endpoints: endpointsForZone,
+			},
+			&clientPod{
+				node:      zone1Nodes[1],
+				endpoints: endpointsForZone,
+			},
+		)
+		serverPods = append(serverPods, endpointsForZone...)
+
+		// Second zone: a client on one node and a server on the other.
+		endpointsForZone = []*serverPod{
+			{node: zone2Nodes[1]},
+		}
+
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone2Nodes[0],
+				endpoints: endpointsForZone,
+			},
+		)
+		serverPods = append(serverPods, endpointsForZone...)
+
+		// Third zone: just a client, which should connect to the servers in the
+		// other two zones.
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone3Nodes[0],
+				endpoints: serverPods,
+			},
+		)
+
+		return clientPods, serverPods
+	}
+
 	// createService creates the service for a traffic distribution test
 	createService := func(ctx context.Context, trafficDist string) *v1.Service {
 		serviceName := "traffic-dist-test-service"
@@ -268,10 +351,25 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
 		checkTrafficDistribution(ctx, clientPods)
 	})
 
+	framework.It("should route traffic correctly between pods on multiple nodes when using PreferClose", func(ctx context.Context) {
+		clientPods, serverPods := allocateMultiNodeClientsAndServers(ctx)
+		svc := createService(ctx, v1.ServiceTrafficDistributionPreferClose)
+		createPods(ctx, svc, clientPods, serverPods)
+		checkTrafficDistribution(ctx, clientPods)
+	})
+
 	framework.It("should route traffic to an endpoint in the same zone when using PreferSameZone", framework.WithFeatureGate(features.PreferSameTrafficDistribution), func(ctx context.Context) {
 		clientPods, serverPods := allocateClientsAndServers(ctx)
 		svc := createService(ctx, v1.ServiceTrafficDistributionPreferSameZone)
 		createPods(ctx, svc, clientPods, serverPods)
 		checkTrafficDistribution(ctx, clientPods)
 	})
+
+	framework.It("should route traffic correctly between pods on multiple nodes when using PreferSameZone", framework.WithFeatureGate(features.PreferSameTrafficDistribution), func(ctx context.Context) {
+		clientPods, serverPods := allocateMultiNodeClientsAndServers(ctx)
+		svc := createService(ctx, v1.ServiceTrafficDistributionPreferSameZone)
+		createPods(ctx, svc, clientPods, serverPods)
+		checkTrafficDistribution(ctx, clientPods)
+	})
+
 })
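(Background on the mechanism these tests observe: when trafficDistribution is set, the EndpointSlice controller publishes topology hints on each endpoint, which the dataplane, e.g. kube-proxy, uses to favor same-zone endpoints. A rough sketch of what a zone-hinted endpoint looks like in the discovery/v1 API, assuming current k8s.io/api; the address and zone values are illustrative:

package example

import (
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/utils/ptr"
)

// hintedEndpoint sketches a zone-hinted EndpointSlice endpoint; the address
// and zone names are illustrative.
func hintedEndpoint() discoveryv1.Endpoint {
	return discoveryv1.Endpoint{
		Addresses: []string{"10.244.1.5"},
		Zone:      ptr.To("zone-a"),
		// With zone hints present, a client in zone-a should be routed to
		// this endpoint rather than to endpoints hinted for other zones.
		Hints: &discoveryv1.EndpointHints{
			ForZones: []discoveryv1.ForZone{{Name: "zone-a"}},
		},
	}
}
)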
