kubevirt / kubevirt · build ac29a22a-5e08-4090-9245-9f6e04fbdb07

24 Nov 2025 10:46PM UTC · coverage: 70.499% (+0.01%) from 70.489%

Triggered by a push event, run on prow, committed via web-flow:
Merge pull request #16157 from fossedihelm/centralize-node-informer

handler: Avoid node GET requests

35 of 49 new or added lines in 4 files covered (71.43%).
2 existing lines in 1 file are now uncovered.
70369 of 99815 relevant lines covered (70.5%).
511.1 hits per line.
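(These figures are straight ratios: 70369 / 99815 ≈ 0.70499, i.e. 70.5% overall, and 35 / 49 ≈ 0.7143, i.e. 71.43% of the new or added lines.)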

Source File: /pkg/virt-handler/node-labeller/node_labeller.go (76.23% covered)
/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright The KubeVirt Authors.
 *
 */

package nodelabeller

import (
        "context"
        "fmt"
        "os/exec"
        "runtime"
        "strings"
        "time"

        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/record"
        "libvirt.org/go/libvirtxml"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/equality"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/apimachinery/pkg/util/wait"
        k8scli "k8s.io/client-go/kubernetes/typed/core/v1"
        "k8s.io/client-go/util/workqueue"

        kubevirtv1 "kubevirt.io/api/core/v1"
        "kubevirt.io/client-go/log"

        "kubevirt.io/kubevirt/pkg/apimachinery/patch"
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)

var nodeLabellerLabels = []string{
        kubevirtv1.CPUFeatureLabel,
        kubevirtv1.CPUModelLabel,
        kubevirtv1.SupportedHostModelMigrationCPU,
        kubevirtv1.CPUTimerLabel,
        kubevirtv1.HypervLabel,
        kubevirtv1.RealtimeLabel,
        kubevirtv1.SEVLabel,
        kubevirtv1.SEVESLabel,
        kubevirtv1.SEVSNPLabel,
        kubevirtv1.TDXLabel,
        kubevirtv1.HostModelCPULabel,
        kubevirtv1.HostModelRequiredFeaturesLabel,
        kubevirtv1.NodeHostModelIsObsoleteLabel,
        kubevirtv1.SupportedMachineTypeLabel,
}

// NodeLabeller struct holds information needed to run node-labeller
type NodeLabeller struct {
        recorder                record.EventRecorder
        nodeClient              k8scli.NodeInterface
        nodeStore               cache.Store
        host                    string
        logger                  *log.FilteredLogger
        clusterConfig           *virtconfig.ClusterConfig
        hypervFeatures          supportedFeatures
        hostCapabilities        supportedModels
        queue                   workqueue.TypedRateLimitingInterface[string]
        supportedFeatures       []string
        cpuModelVendor          string
        volumePath              string
        domCapabilitiesFileName string
        cpuCounter              *libvirtxml.CapsHostCPUCounter
        supportedMachines       []libvirtxml.CapsGuestMachine
        hostCPUModel            hostCPUModel
        SEV                     SEVConfiguration
        SecureExecution         SecureExecutionConfiguration
        TDX                     TDXConfiguration
        arch                    archLabeller
}

func NewNodeLabeller(clusterConfig *virtconfig.ClusterConfig, nodeClient k8scli.NodeInterface, nodeStore cache.Store, host string, recorder record.EventRecorder, cpuCounter *libvirtxml.CapsHostCPUCounter, supportedMachines []libvirtxml.CapsGuestMachine) (*NodeLabeller, error) {
        return newNodeLabeller(clusterConfig, nodeClient, nodeStore, host, NodeLabellerVolumePath, recorder, cpuCounter, supportedMachines)
}

func newNodeLabeller(clusterConfig *virtconfig.ClusterConfig, nodeClient k8scli.NodeInterface, nodeStore cache.Store, host, volumePath string, recorder record.EventRecorder, cpuCounter *libvirtxml.CapsHostCPUCounter, supportedMachines []libvirtxml.CapsGuestMachine) (*NodeLabeller, error) {
        n := &NodeLabeller{
                recorder:      recorder,
                nodeClient:    nodeClient,
                nodeStore:     nodeStore,
                host:          host,
                logger:        log.DefaultLogger(),
                clusterConfig: clusterConfig,
                queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
                        workqueue.DefaultTypedControllerRateLimiter[string](),
                        workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-node-labeller"},
                ),
                volumePath:              volumePath,
                domCapabilitiesFileName: "virsh_domcapabilities.xml",
                cpuCounter:              cpuCounter,
                supportedMachines:       supportedMachines,
                hostCPUModel:            hostCPUModel{requiredFeatures: make(map[string]bool)},
                arch:                    newArchLabeller(runtime.GOARCH),
        }

        err := n.loadAll()
        if err != nil {
                return n, err
        }
        return n, nil
}

// Run runs node-labeller
func (n *NodeLabeller) Run(stop chan struct{}) {
        defer n.queue.ShutDown()

        n.logger.Infof("node-labeller is running")

        if !n.hasTSCCounter() {
                n.logger.Error("failed to get tsc cpu frequency, will continue without the tsc frequency label")
        }

        n.clusterConfig.SetConfigModifiedCallback(func() {
                n.queue.Add(n.host)
        })

        interval := 3 * time.Minute
        go wait.JitterUntil(func() { n.queue.Add(n.host) }, interval, 1.2, true, stop)
        go wait.Until(n.runWorker, time.Second, stop)

        <-stop
}

func (n *NodeLabeller) runWorker() {
        for n.execute() {
        }
}

func (n *NodeLabeller) execute() bool {
        key, quit := n.queue.Get()
        if quit {
                return false
        }
        defer n.queue.Done(key)

        err := n.run()

        if err != nil {
                n.logger.Errorf("node-labeller sync error encountered: %v", err)
                n.queue.AddRateLimited(key)
        } else {
                n.queue.Forget(key)
        }
        return true
}

func (n *NodeLabeller) loadAll() error {
        // Host supported features are only available on AMD64 and S390X nodes,
        // because the hypervisor-cpu-baseline virsh command doesn't work for the ARM64 architecture.
        if n.arch.hasHostSupportedFeatures() {
                err := n.loadHostSupportedFeatures()
                if err != nil {
                        n.logger.Errorf("node-labeller could not load supported features: %s", err.Error())
                        return err
                }
        }

        err := n.loadDomCapabilities()
        if err != nil {
                n.logger.Errorf("node-labeller could not load host dom capabilities: %s", err.Error())
                return err
        }

        n.loadHypervFeatures()

        return nil
}

func (n *NodeLabeller) run() error {
        originalNode, err := n.getNode()
        if err != nil {
                return err
        }

        if skipNodeLabelling(originalNode) {
                return nil
        }

        node := originalNode.DeepCopy()
        // prepare new labels
        newLabels := n.prepareLabels(node)
        // remove old labeller labels
        n.removeLabellerLabels(node)
        // add new labels
        n.addLabellerLabels(node, newLabels)
        return n.patchNode(originalNode, node)
}

func skipNodeLabelling(node *v1.Node) bool {
        _, exists := node.Annotations[kubevirtv1.LabellerSkipNodeAnnotation]
        return exists
}

func (n *NodeLabeller) patchNode(originalNode, node *v1.Node) error {
        if equality.Semantic.DeepEqual(originalNode.Labels, node.Labels) {
                return nil
        }

        patchBytes, err := patch.New(
                patch.WithTest("/metadata/labels", originalNode.Labels),
                patch.WithReplace("/metadata/labels", node.Labels),
        ).GeneratePayload()

        if err != nil {
                return err
        }

        _, err = n.nodeClient.Patch(context.Background(), node.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
        return err
}

func (n *NodeLabeller) loadHypervFeatures() {
        n.hypervFeatures.items = getCapLabels()
}

// prepareLabels converts cpu models, features, hyperv features to map[string]string format
// e.g. "cpu-feature.node.kubevirt.io/Penryn": "true"
func (n *NodeLabeller) prepareLabels(node *v1.Node) map[string]string {
        obsoleteCPUsx86 := n.clusterConfig.GetObsoleteCPUModels()
        hostCpuModel := n.GetHostCpuModel()
        newLabels := make(map[string]string)

        if n.arch.hasHostSupportedFeatures() {
                for key := range n.getSupportedCpuFeatures() {
                        newLabels[kubevirtv1.CPUFeatureLabel+key] = "true"
                }
        }

        if n.arch.supportsNamedModels() {
                for _, value := range n.getSupportedCpuModels(obsoleteCPUsx86) {
                        newLabels[kubevirtv1.CPUModelLabel+value] = "true"
                }
                for _, value := range n.getKnownCpuModels(obsoleteCPUsx86) {
                        newLabels[kubevirtv1.SupportedHostModelMigrationCPU+value] = "true"
                }
        }

        for _, machine := range n.supportedMachines {
                labelKey := kubevirtv1.SupportedMachineTypeLabel + machine.Name
                newLabels[labelKey] = "true"
        }

        for _, key := range n.hypervFeatures.items {
                newLabels[kubevirtv1.HypervLabel+key] = "true"
        }

        if n.hasTSCCounter() {
                newLabels[kubevirtv1.CPUTimerLabel+"tsc-frequency"] = fmt.Sprintf("%d", n.cpuCounter.Frequency)
                newLabels[kubevirtv1.CPUTimerLabel+"tsc-scalable"] = fmt.Sprintf("%t", n.cpuCounter.Scaling == "yes")
        }

        if n.arch.supportsHostModel() {
                if _, hostModelObsolete := obsoleteCPUsx86[hostCpuModel.Name]; hostModelObsolete {
                        newLabels[kubevirtv1.NodeHostModelIsObsoleteLabel] = "true"
                        err := n.alertIfHostModelIsObsolete(node, hostCpuModel.Name, obsoleteCPUsx86)
                        if err != nil {
                                n.logger.Reason(err).Error(err.Error())
                        }
                }

                for feature := range hostCpuModel.requiredFeatures {
                        newLabels[kubevirtv1.HostModelRequiredFeaturesLabel+feature] = "true"
                }

                newLabels[kubevirtv1.CPUModelVendorLabel+n.cpuModelVendor] = "true"
                newLabels[kubevirtv1.HostModelCPULabel+hostCpuModel.Name] = "true"
        }

        capable, err := isNodeRealtimeCapable()
        if err != nil {
                n.logger.Reason(err).Error("failed to identify if a node is capable of running realtime workloads")
        }
        if capable {
                newLabels[kubevirtv1.RealtimeLabel] = "true"
        }

        if n.SEV.Supported == "yes" {
                newLabels[kubevirtv1.SEVLabel] = "true"
        }

        if n.SEV.SupportedES == "yes" {
                newLabels[kubevirtv1.SEVESLabel] = "true"
        }

        if n.SEV.SupportedSNP == "yes" {
                newLabels[kubevirtv1.SEVSNPLabel] = "true"
        }

        if n.SecureExecution.Supported == "yes" {
                newLabels[kubevirtv1.SecureExecutionLabel] = "true"
        }

        if n.TDX.Supported == "yes" {
                newLabels[kubevirtv1.TDXLabel] = "true"
        }

        return newLabels
}

func (n *NodeLabeller) getNode() (*v1.Node, error) {
        nodeObj, exists, err := n.nodeStore.GetByKey(n.host)
        if err != nil {
                return nil, err
        }
        if !exists {
                return nil, fmt.Errorf("node %s does not exist", n.host)
        }

        node, ok := nodeObj.(*v1.Node)
        if !ok {
                return nil, fmt.Errorf("unknown object type found in node informer")
        }

        return node, nil
}

// addLabellerLabels adds labels to the node.
func (n *NodeLabeller) addLabellerLabels(node *v1.Node, labels map[string]string) {
        for key, value := range labels {
                node.Labels[key] = value
        }
}

// removeLabellerLabels removes labels from node
func (n *NodeLabeller) removeLabellerLabels(node *v1.Node) {
        for label := range node.Labels {
                if isNodeLabellerLabel(label) {
                        delete(node.Labels, label)
                }
        }
}

const kernelSchedRealtimeRuntimeInMicrosecods = "kernel.sched_rt_runtime_us"

// isNodeRealtimeCapable checks whether a node is capable of running realtime workloads, currently by validating that the
// kernel setting `kernel.sched_rt_runtime_us` allows realtime scheduling with unlimited runtime (== -1).
// TODO: This should be improved to validate against the key attributes that best determine whether a host can run realtime
// workloads at peak performance.

358
func isNodeRealtimeCapable() (bool, error) {
25✔
359
        ret, err := exec.Command("sysctl", kernelSchedRealtimeRuntimeInMicrosecods).CombinedOutput()
25✔
360
        if err != nil {
50✔
361
                return false, err
25✔
362
        }
25✔
363
        st := strings.Trim(string(ret), "\n")
×
364
        return fmt.Sprintf("%s = -1", kernelSchedRealtimeRuntimeInMicrosecods) == st, nil
×
365
}
366

367
func isNodeLabellerLabel(label string) bool {
168✔
368
        for _, prefix := range nodeLabellerLabels {
1,198✔
369
                if strings.HasPrefix(label, prefix) {
1,161✔
370
                        return true
131✔
371
                }
131✔
372
        }
373

374
        return false
37✔
375
}
376

377
func (n *NodeLabeller) alertIfHostModelIsObsolete(originalNode *v1.Node, hostModel string, ObsoleteCPUModels map[string]bool) error {
1✔
378
        warningMsg := fmt.Sprintf("This node has %v host-model cpu that is included in ObsoleteCPUModels: %v", hostModel, ObsoleteCPUModels)
1✔
379
        n.recorder.Eventf(originalNode, v1.EventTypeWarning, "HostModelIsObsolete", warningMsg)
1✔
380
        return nil
1✔
381
}
1✔
382

383
func (n *NodeLabeller) hasTSCCounter() bool {
25✔
384
        return n.cpuCounter != nil && n.cpuCounter.Name == "tsc"
25✔
385
}
25✔
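
The merge commit this build covers ("handler: Avoid node GET requests") centralizes the node informer: getNode reads the node out of the injected cache.Store instead of issuing a GET request to the API server on every sync. Below is a minimal, self-contained sketch of how such a store could be built and handed to NewNodeLabeller. The in-cluster config, the one-minute resync, and the node name "node01" are illustrative assumptions, not the actual virt-handler wiring.

package main

import (
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/fields"
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/cache"
)

func main() {
        host := "node01" // hypothetical node name; virt-handler would use its own node

        cfg, err := rest.InClusterConfig() // assumption: running inside a pod
        if err != nil {
                panic(err)
        }
        clientset, err := kubernetes.NewForConfig(cfg)
        if err != nil {
                panic(err)
        }

        // Watch only this one node so the informer cache stays small.
        factory := informers.NewSharedInformerFactoryWithOptions(
                clientset, 1*time.Minute,
                informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
                        opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", host).String()
                }),
        )
        nodeInformer := factory.Core().V1().Nodes().Informer()

        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        cache.WaitForCacheSync(stop, nodeInformer.HasSynced)

        // nodeInformer.GetStore() is a cache.Store like the one NewNodeLabeller
        // receives; getNode's GetByKey(host) hits this local cache, not the API server.
        store := nodeInformer.GetStore()
        if _, exists, _ := store.GetByKey(host); exists {
                fmt.Println("node found in informer cache")
        }
}

Writes still go through nodeClient: patchNode issues a JSON patch whose leading test operation on /metadata/labels makes the update fail, and execute requeue the key, if the node's labels changed after they were read from the cache.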