
Commit df87d3e

feature: use contextual logging for plugins
1 parent 9f91837 commit df87d3e

File tree

15 files changed: +148 -77 lines

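Every file below applies the same pattern: the plugin constructor derives a logger from the context it receives via klog.FromContext, tags it with the plugin name, stores it on the plugin struct, and each extension point adds its own "ExtensionPoint" key before logging. A minimal, self-contained sketch of that pattern (the MyPlugin type and its PluginName are illustrative, not part of the descheduler code):

// Sketch only: MyPlugin and PluginName are hypothetical, not descheduler types.
package main

import (
	"context"

	"k8s.io/klog/v2"
)

const PluginName = "MyPlugin"

type MyPlugin struct {
	logger klog.Logger
}

// New derives the plugin-scoped logger once, at construction time.
func New(ctx context.Context) *MyPlugin {
	logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
	return &MyPlugin{logger: logger}
}

// Filter has no context parameter, so it reuses the stored logger and only
// adds the extension point as an extra key/value pair.
func (p *MyPlugin) Filter(podName string) bool {
	logger := p.logger.WithValues("ExtensionPoint", "Filter")
	logger.V(4).Info("evaluating pod", "pod", podName)
	return true
}

func main() {
	// The caller seeds the context with a logger; klog.Background() is the
	// process-wide default logger.
	ctx := klog.NewContext(context.Background(), klog.Background())
	New(ctx).Filter("example-pod")
}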

pkg/framework/plugins/defaultevictor/defaultevictor.go

Lines changed: 8 additions & 3 deletions

@@ -45,6 +45,7 @@ type constraint func(pod *v1.Pod) error
 // This plugin is only meant to customize other actions (extension points) of the evictor,
 // like filtering, sorting, and other ones that might be relevant in the future
 type DefaultEvictor struct {
+	logger      klog.Logger
 	args        *DefaultEvictorArgs
 	constraints []constraint
 	handle      frameworktypes.Handle
@@ -68,8 +69,10 @@ func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
 	}
+	logger := klog.FromContext(ctx).WithValues("plugin", PluginName)

 	ev := &DefaultEvictor{
+		logger: logger,
 		handle: handle,
 		args:   defaultEvictorArgs,
 	}
@@ -113,14 +116,15 @@ func (d *DefaultEvictor) Name() string {
 }

 func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
+	logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
 	if d.args.NodeFit {
 		nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
 		if err != nil {
-			klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
+			logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
 			return false
 		}
 		if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
-			klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
+			logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
 			return false
 		}
 		return true
@@ -129,6 +133,7 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
 }

 func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
+	logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
 	checkErrs := []error{}

 	if HaveEvictAnnotation(pod) {
@@ -154,7 +159,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
 	}

 	if len(checkErrs) > 0 {
-		klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
+		logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
 		return false
 	}

pkg/framework/plugins/example/example.go

Lines changed: 4 additions & 1 deletion

@@ -46,6 +46,7 @@ var _ fwtypes.DeschedulePlugin = &Example{}
 // Example is our plugin (implementing the DeschedulePlugin interface). This
 // plugin will evict pods that match a regex and are older than a certain age.
 type Example struct {
+	logger    klog.Logger
 	handle    fwtypes.Handle
 	args      *ExampleArgs
 	podFilter podutil.FilterFunc
@@ -61,6 +62,7 @@ func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtyp
 	if !ok {
 		return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
 	}
+	logger := klog.FromContext(ctx).WithValues("plugin", PluginName)

 	// we can use the included and excluded namespaces to filter the pods we want
 	// to evict.
@@ -90,6 +92,7 @@ func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtyp
 	}

 	return &Example{
+		logger:    logger,
 		handle:    handle,
 		podFilter: podFilter,
 		args:      exampleArgs,
@@ -107,7 +110,7 @@ func (d *Example) Name() string {
 // of nodes we need to process.
 func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
 	var podsToEvict []*v1.Pod
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
 	logger.Info("Example plugin starting descheduling")

 	re, err := regexp.Compile(d.args.Regex)
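The Deschedule hunk above swaps the plain klog.FromContext(ctx) for klog.FromContext(klog.NewContext(ctx, d.logger)): the stored plugin logger is attached to the context and read back, so the method logs with the plugin name even if the incoming context carries no logger of its own, and the same enriched context can be handed to downstream helpers. A small sketch of that round trip (evictCandidates is a hypothetical helper, not descheduler code):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// evictCandidates is a hypothetical downstream helper: it only receives a
// context and recovers whatever logger the caller attached to it.
func evictCandidates(ctx context.Context) {
	klog.FromContext(ctx).Info("selecting eviction candidates")
}

func main() {
	base := klog.NewContext(context.Background(), klog.Background())

	// What the plugin stores at construction time.
	pluginLogger := klog.FromContext(base).WithValues("plugin", "Example")

	// What Deschedule does: re-attach the stored logger to the context and tag
	// the extension point; the enriched context can then go down the call chain.
	ctx := klog.NewContext(base, pluginLogger.WithValues("ExtensionPoint", "Deschedule"))
	evictCandidates(ctx) // this entry carries both the plugin and ExtensionPoint keys
}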

pkg/framework/plugins/nodeutilization/highnodeutilization.go

Lines changed: 14 additions & 8 deletions

@@ -44,6 +44,7 @@ var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
 // can schedule according to its plugin. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
 type HighNodeUtilization struct {
+	logger    klog.Logger
 	handle    frameworktypes.Handle
 	args      *HighNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
@@ -64,6 +65,7 @@ func NewHighNodeUtilization(
 			genericArgs,
 		)
 	}
+	logger := klog.FromContext(ctx).WithValues("plugin", HighNodeUtilizationPluginName)

 	// this plugins worries only about thresholds but the nodeplugins
 	// package was made to take two thresholds into account, one for low
@@ -113,13 +115,15 @@ func NewHighNodeUtilization(
 	)

 	return &HighNodeUtilization{
+		logger:         logger,
 		handle:         handle,
 		args:           args,
 		resourceNames:  resourceNames,
 		highThresholds: highThresholds,
 		criteria:       thresholdsToKeysAndValues(args.Thresholds),
 		podFilter:      podFilter,
 		usageClient: newRequestedUsageClient(
+			logger.WithValues("client", "RequestedUsageClient"),
 			resourceNames,
 			handle.GetPodsAssignedToNodeFunc(),
 		),
@@ -135,6 +139,8 @@ func (h *HighNodeUtilization) Name() string {
 // utilized nodes. The goal here is to concentrate pods in fewer nodes so that
 // less nodes are used.
 func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
+	logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
+
 	if err := h.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
@@ -165,7 +171,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 		// schedulable nodes.
 		func(nodeName string, usage, threshold api.ResourceThresholds) bool {
 			if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
-				klog.V(2).InfoS(
+				logger.V(2).Info(
 					"Node is unschedulable",
 					"node", klog.KObj(nodesMap[nodeName]),
 				)
@@ -184,7 +190,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	category := []string{"underutilized", "overutilized"}
 	for i := range nodeGroups {
 		for nodeName := range nodeGroups[i] {
-			klog.InfoS(
+			logger.Info(
 				"Node has been classified",
 				"category", category[i],
 				"node", klog.KObj(nodesMap[nodeName]),
@@ -208,18 +214,18 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr

 	lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]

-	klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
-	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
+	logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
+	logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))

 	if len(lowNodes) == 0 {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"No node is underutilized, nothing to do here, you might tune your thresholds further",
 		)
 		return nil
 	}

 	if len(lowNodes) <= h.args.NumberOfNodes {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
 			"underutilizedNodes", len(lowNodes),
 			"numberOfNodes", h.args.NumberOfNodes,
@@ -228,12 +234,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}

 	if len(lowNodes) == len(nodes) {
-		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
+		logger.V(1).Info("All nodes are underutilized, nothing to do here")
 		return nil
 	}

 	if len(schedulableNodes) == 0 {
-		klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
+		logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
 		return nil
 	}
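Besides tagging its own messages, the plugin now hands a child logger to its usage client at construction time (logger.WithValues("client", "RequestedUsageClient")), so entries emitted inside the client carry both the plugin name and the client name. A sketch of that constructor-injection style, with a hypothetical client type standing in for the descheduler's usage clients:

package main

import "k8s.io/klog/v2"

// usageClient is a hypothetical stand-in for the descheduler's usage clients;
// it keeps the logger it was constructed with and uses it for all its entries.
type usageClient struct {
	logger klog.Logger
}

func newUsageClient(logger klog.Logger) *usageClient {
	return &usageClient{logger: logger}
}

func (c *usageClient) sync() {
	// Carries whatever key/value pairs were baked into the injected logger.
	c.logger.Info("syncing node usage")
}

func main() {
	pluginLogger := klog.Background().WithValues("plugin", "HighNodeUtilization")
	client := newUsageClient(pluginLogger.WithValues("client", "RequestedUsageClient"))
	client.sync()
}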

pkg/framework/plugins/nodeutilization/lownodeutilization.go

Lines changed: 21 additions & 12 deletions

@@ -43,6 +43,7 @@ var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
 // nodes. Note that CPU/Memory requests are used to calculate nodes'
 // utilization and not the actual resource usage.
 type LowNodeUtilization struct {
+	logger    klog.Logger
 	handle    frameworktypes.Handle
 	args      *LowNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
@@ -66,6 +67,7 @@ func NewLowNodeUtilization(
 			genericArgs,
 		)
 	}
+	logger := klog.FromContext(ctx).WithValues("plugin", LowNodeUtilizationPluginName)

 	// resourceNames holds a list of resources for which the user has
 	// provided thresholds for. extendedResourceNames holds those as well
@@ -105,16 +107,18 @@ func NewLowNodeUtilization(
 	// have the correct one or an error is triggered. XXX MetricsServer is
 	// deprecated, removed once dropped.
 	var usageClient usageClient = newRequestedUsageClient(
+		logger.WithValues("client", "RequestedUsageClient"),
 		extendedResourceNames, handle.GetPodsAssignedToNodeFunc(),
 	)
 	if metrics != nil {
-		usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
+		usageClient, err = usageClientForMetrics(logger, args, handle, extendedResourceNames)
 		if err != nil {
 			return nil, err
 		}
 	}

 	return &LowNodeUtilization{
+		logger:        logger,
 		handle:        handle,
 		args:          args,
 		underCriteria: thresholdsToKeysAndValues(args.Thresholds),
@@ -135,6 +139,8 @@ func (l *LowNodeUtilization) Name() string {
 // utilized nodes to under utilized nodes. The goal here is to evenly
 // distribute pods across nodes.
 func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
+	logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
+
 	if err := l.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
@@ -182,7 +188,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 		// underutilized but aren't schedulable are ignored.
 		func(nodeName string, usage, threshold api.ResourceThresholds) bool {
 			if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
-				klog.V(2).InfoS(
+				logger.V(2).Info(
 					"Node is unschedulable, thus not considered as underutilized",
 					"node", klog.KObj(nodesMap[nodeName]),
 				)
@@ -207,7 +213,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 		for nodeName := range nodeGroups[i] {
 			classifiedNodes[nodeName] = true

-			klog.InfoS(
+			logger.Info(
 				"Node has been classified",
 				"category", categories[i],
 				"node", klog.KObj(nodesMap[nodeName]),
@@ -233,7 +239,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	// log nodes that are appropriately utilized.
 	for nodeName := range nodesMap {
 		if !classifiedNodes[nodeName] {
-			klog.InfoS(
+			logger.Info(
 				"Node is appropriately utilized",
 				"node", klog.KObj(nodesMap[nodeName]),
 				"usage", nodesUsageMap[nodeName],
@@ -245,20 +251,20 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	lowNodes, highNodes := nodeInfos[0], nodeInfos[1]

 	// log messages for nodes with low and high utilization
-	klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
-	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
-	klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
-	klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes))
+	logger.V(1).Info("Criteria for a node under utilization", l.underCriteria...)
+	logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
+	logger.V(1).Info("Criteria for a node above target utilization", l.overCriteria...)
+	logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))

 	if len(lowNodes) == 0 {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"No node is underutilized, nothing to do here, you might tune your thresholds further",
 		)
 		return nil
 	}

 	if len(lowNodes) <= l.args.NumberOfNodes {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
 			"underutilizedNodes", len(lowNodes),
 			"numberOfNodes", l.args.NumberOfNodes,
@@ -267,12 +273,12 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	}

 	if len(lowNodes) == len(nodes) {
-		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
+		logger.V(1).Info("All nodes are underutilized, nothing to do here")
 		return nil
 	}

 	if len(highNodes) == 0 {
-		klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
+		logger.V(1).Info("All nodes are under target utilization, nothing to do here")
 		return nil
 	}

@@ -349,6 +355,7 @@ func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error {
 // usageClientForMetrics returns the correct usage client based on the
 // metrics source. XXX MetricsServer is deprecated, removed once dropped.
 func usageClientForMetrics(
+	logger klog.Logger,
 	args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
 ) (usageClient, error) {
 	metrics := args.MetricsUtilization
@@ -358,6 +365,7 @@
 			return nil, fmt.Errorf("metrics client not initialized")
 		}
 		return newActualUsageClient(
+			logger.WithValues("client", "ActualUsageClient"),
 			resources,
 			handle.GetPodsAssignedToNodeFunc(),
 			handle.MetricsCollector(),
@@ -368,6 +376,7 @@
 			return nil, fmt.Errorf("prometheus client not initialized")
 		}
 		return newPrometheusUsageClient(
+			logger.WithValues("client", "PrometheusUsageClient"),
 			handle.GetPodsAssignedToNodeFunc(),
 			handle.PrometheusClient(),
 			metrics.Prometheus.Query,
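usageClientForMetrics now takes the logger as an explicit first parameter and tags it per metrics source before handing it to the chosen client. A compact sketch of that factory shape, with hypothetical constructors standing in for newActualUsageClient and newPrometheusUsageClient:

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

// syncer and the two client types below are hypothetical stand-ins for the
// descheduler's usageClient implementations.
type syncer interface{ sync() }

type actualClient struct{ logger klog.Logger }

func (c *actualClient) sync() { c.logger.Info("syncing from metrics server") }

type promClient struct{ logger klog.Logger }

func (c *promClient) sync() { c.logger.Info("syncing from prometheus") }

// clientForSource mirrors the shape of usageClientForMetrics: the logger comes
// in as an explicit parameter and is tagged per client flavor.
func clientForSource(logger klog.Logger, source string) (syncer, error) {
	switch source {
	case "MetricsServer":
		return &actualClient{logger: logger.WithValues("client", "ActualUsageClient")}, nil
	case "Prometheus":
		return &promClient{logger: logger.WithValues("client", "PrometheusUsageClient")}, nil
	default:
		return nil, fmt.Errorf("unsupported metrics source %q", source)
	}
}

func main() {
	logger := klog.Background().WithValues("plugin", "LowNodeUtilization")
	if client, err := clientForSource(logger, "Prometheus"); err == nil {
		client.sync()
	}
}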
