• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

kubevirt / kubevirt / c015ed89-b0d1-4be1-a03d-68cd30a69577

10 Oct 2025 12:57AM UTC coverage: 70.299% (-0.002%) from 70.301%
c015ed89-b0d1-4be1-a03d-68cd30a69577

push

prow

web-flow
Merge pull request #15784 from brianmcarey/golang-v1.24.7

Build KubeVirt with go v1.24.7

17 of 42 new or added lines in 26 files covered. (40.48%)

7 existing lines in 3 files now uncovered.

68912 of 98027 relevant lines covered (70.3%)

620.32 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.67
/pkg/virt-handler/node-labeller/node_labeller.go
1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright The KubeVirt Authors.
17
 *
18
 */
19

20
package nodelabeller
21

22
import (
23
        "context"
24
        "fmt"
25
        "os/exec"
26
        "runtime"
27
        "strings"
28
        "time"
29

30
        "k8s.io/client-go/tools/record"
31
        "libvirt.org/go/libvirtxml"
32

33
        v1 "k8s.io/api/core/v1"
34
        "k8s.io/apimachinery/pkg/api/equality"
35
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
36
        "k8s.io/apimachinery/pkg/types"
37
        "k8s.io/apimachinery/pkg/util/wait"
38
        k8scli "k8s.io/client-go/kubernetes/typed/core/v1"
39
        "k8s.io/client-go/util/workqueue"
40

41
        kubevirtv1 "kubevirt.io/api/core/v1"
42
        "kubevirt.io/client-go/log"
43

44
        "kubevirt.io/kubevirt/pkg/apimachinery/patch"
45
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
46
)
47

48
// nodeLabellerLabels lists the label prefixes owned by the node-labeller.
// removeLabellerLabels matches node labels against these prefixes to strip
// previously applied labels before a freshly computed set is re-added.
var nodeLabellerLabels = []string{
	kubevirtv1.CPUFeatureLabel,
	kubevirtv1.CPUModelLabel,
	kubevirtv1.SupportedHostModelMigrationCPU,
	kubevirtv1.CPUTimerLabel,
	kubevirtv1.HypervLabel,
	kubevirtv1.RealtimeLabel,
	kubevirtv1.SEVLabel,
	kubevirtv1.SEVESLabel,
	kubevirtv1.HostModelCPULabel,
	kubevirtv1.HostModelRequiredFeaturesLabel,
	kubevirtv1.NodeHostModelIsObsoleteLabel,
	kubevirtv1.SupportedMachineTypeLabel,
}
62

63
// NodeLabeller struct holds information needed to run node-labeller
type NodeLabeller struct {
	recorder record.EventRecorder // emits node events (e.g. the obsolete host-model warning)
	// nodeClient is used to fetch and patch this handler's node object.
	nodeClient k8scli.NodeInterface
	// host is the name of the node this labeller manages; it is also the
	// only key ever placed on the work queue.
	host             string
	logger           *log.FilteredLogger
	clusterConfig    *virtconfig.ClusterConfig
	hypervFeatures   supportedFeatures
	hostCapabilities supportedFeatures
	// queue triggers re-syncs; items are added periodically, on config
	// change, and on rate-limited retry after a sync error.
	queue             workqueue.TypedRateLimitingInterface[string]
	supportedFeatures []string
	cpuModelVendor    string
	// volumePath is the directory the capability XML files are read from.
	volumePath              string
	domCapabilitiesFileName string
	// cpuCounter holds the libvirt host CPU counter (TSC) info; may be nil.
	cpuCounter        *libvirtxml.CapsHostCPUCounter
	supportedMachines []libvirtxml.CapsGuestMachine
	hostCPUModel      hostCPUModel
	SEV               SEVConfiguration
	SecureExecution   SecureExecutionConfiguration
	// arch provides per-architecture behavior switches (see newArchLabeller).
	arch archLabeller
}
84

85
func NewNodeLabeller(clusterConfig *virtconfig.ClusterConfig, nodeClient k8scli.NodeInterface, host string, recorder record.EventRecorder, cpuCounter *libvirtxml.CapsHostCPUCounter, supportedMachines []libvirtxml.CapsGuestMachine) (*NodeLabeller, error) {
×
86
        return newNodeLabeller(clusterConfig, nodeClient, host, NodeLabellerVolumePath, recorder, cpuCounter, supportedMachines)
×
87

×
88
}
×
89
func newNodeLabeller(clusterConfig *virtconfig.ClusterConfig, nodeClient k8scli.NodeInterface, host, volumePath string, recorder record.EventRecorder, cpuCounter *libvirtxml.CapsHostCPUCounter, supportedMachines []libvirtxml.CapsGuestMachine) (*NodeLabeller, error) {
23✔
90
        n := &NodeLabeller{
23✔
91
                recorder:      recorder,
23✔
92
                nodeClient:    nodeClient,
23✔
93
                host:          host,
23✔
94
                logger:        log.DefaultLogger(),
23✔
95
                clusterConfig: clusterConfig,
23✔
96
                queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
23✔
97
                        workqueue.DefaultTypedControllerRateLimiter[string](),
23✔
98
                        workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-node-labeller"},
23✔
99
                ),
23✔
100
                volumePath:              volumePath,
23✔
101
                domCapabilitiesFileName: "virsh_domcapabilities.xml",
23✔
102
                cpuCounter:              cpuCounter,
23✔
103
                supportedMachines:       supportedMachines,
23✔
104
                hostCPUModel:            hostCPUModel{requiredFeatures: make(map[string]bool)},
23✔
105
                arch:                    newArchLabeller(runtime.GOARCH),
23✔
106
        }
23✔
107

23✔
108
        err := n.loadAll()
23✔
109
        if err != nil {
23✔
110
                return n, err
×
111
        }
×
112
        return n, nil
23✔
113
}
114

115
// Run runs node-labeller
116
func (n *NodeLabeller) Run(threadiness int, stop chan struct{}) {
×
117
        defer n.queue.ShutDown()
×
118

×
119
        n.logger.Infof("node-labeller is running")
×
120

×
121
        if !n.hasTSCCounter() {
×
122
                n.logger.Error("failed to get tsc cpu frequency, will continue without the tsc frequency label")
×
123
        }
×
124

125
        n.clusterConfig.SetConfigModifiedCallback(func() {
×
126
                n.queue.Add(n.host)
×
127
        })
×
128

129
        interval := 3 * time.Minute
×
130
        go wait.JitterUntil(func() { n.queue.Add(n.host) }, interval, 1.2, true, stop)
×
131

132
        for i := 0; i < threadiness; i++ {
×
133
                go wait.Until(n.runWorker, time.Second, stop)
×
134
        }
×
135
        <-stop
×
136
}
137

138
func (n *NodeLabeller) runWorker() {
×
139
        for n.execute() {
×
140
        }
×
141
}
142

143
func (n *NodeLabeller) execute() bool {
21✔
144
        key, quit := n.queue.Get()
21✔
145
        if quit {
21✔
146
                return false
×
147
        }
×
148
        defer n.queue.Done(key)
21✔
149

21✔
150
        err := n.run()
21✔
151

21✔
152
        if err != nil {
22✔
153
                n.logger.Errorf("node-labeller sync error encountered: %v", err)
1✔
154
                n.queue.AddRateLimited(key)
1✔
155
        } else {
21✔
156
                n.queue.Forget(key)
20✔
157
        }
20✔
158
        return true
21✔
159
}
160

161
func (n *NodeLabeller) loadAll() error {
25✔
162
        // host supported features is only available on AMD64 and S390X nodes.
25✔
163
        // This is because hypervisor-cpu-baseline virsh command doesnt work for ARM64 architecture.
25✔
164
        if n.arch.hasHostSupportedFeatures() {
50✔
165
                err := n.loadHostSupportedFeatures()
25✔
166
                if err != nil {
25✔
NEW
167
                        n.logger.Errorf("node-labeller could not load supported features: %s", err.Error())
×
168
                        return err
×
169
                }
×
170
        }
171

172
        err := n.loadDomCapabilities()
25✔
173
        if err != nil {
25✔
NEW
174
                n.logger.Errorf("node-labeller could not load host dom capabilities: %s", err.Error())
×
175
                return err
×
176
        }
×
177

178
        n.loadHypervFeatures()
25✔
179

25✔
180
        return nil
25✔
181
}
182

183
func (n *NodeLabeller) run() error {
21✔
184
        originalNode, err := n.nodeClient.Get(context.Background(), n.host, metav1.GetOptions{})
21✔
185
        if err != nil {
21✔
186
                return err
×
187
        }
×
188

189
        node := originalNode.DeepCopy()
21✔
190

21✔
191
        if !skipNodeLabelling(node) {
41✔
192
                //prepare new labels
20✔
193
                newLabels := n.prepareLabels(node)
20✔
194
                //remove old labeller labels
20✔
195
                n.removeLabellerLabels(node)
20✔
196
                //add new labels
20✔
197
                n.addLabellerLabels(node, newLabels)
20✔
198
        }
20✔
199

200
        err = n.patchNode(originalNode, node)
21✔
201

21✔
202
        return err
21✔
203
}
204

205
func skipNodeLabelling(node *v1.Node) bool {
21✔
206
        _, exists := node.Annotations[kubevirtv1.LabellerSkipNodeAnnotation]
21✔
207
        return exists
21✔
208
}
21✔
209

210
func (n *NodeLabeller) patchNode(originalNode, node *v1.Node) error {
21✔
211
        if equality.Semantic.DeepEqual(originalNode.Labels, node.Labels) {
22✔
212
                return nil
1✔
213
        }
1✔
214

215
        patchBytes, err := patch.New(
20✔
216
                patch.WithTest("/metadata/labels", originalNode.Labels),
20✔
217
                patch.WithReplace("/metadata/labels", node.Labels),
20✔
218
        ).GeneratePayload()
20✔
219

20✔
220
        if err != nil {
20✔
221
                return err
×
222
        }
×
223

224
        _, err = n.nodeClient.Patch(context.Background(), node.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
20✔
225
        return err
20✔
226
}
227

228
func (n *NodeLabeller) loadHypervFeatures() {
25✔
229
        n.hypervFeatures.items = getCapLabels()
25✔
230
}
25✔
231

232
// prepareLabels converts cpu models, features, hyperv features to map[string]string format
233
// e.g. "cpu-feature.node.kubevirt.io/Penryn": "true"
234
func (n *NodeLabeller) prepareLabels(node *v1.Node) map[string]string {
20✔
235
        obsoleteCPUsx86 := n.clusterConfig.GetObsoleteCPUModels()
20✔
236
        hostCpuModel := n.GetHostCpuModel()
20✔
237
        newLabels := make(map[string]string)
20✔
238

20✔
239
        if n.arch.hasHostSupportedFeatures() {
39✔
240
                for key := range n.getSupportedCpuFeatures() {
91✔
241
                        newLabels[kubevirtv1.CPUFeatureLabel+key] = "true"
72✔
242
                }
72✔
243
        }
244

245
        if n.arch.supportsNamedModels() {
39✔
246
                for _, value := range n.getSupportedCpuModels(obsoleteCPUsx86) {
122✔
247
                        newLabels[kubevirtv1.CPUModelLabel+value] = "true"
103✔
248
                        newLabels[kubevirtv1.SupportedHostModelMigrationCPU+value] = "true"
103✔
249
                }
103✔
250
        }
251

252
        for _, machine := range n.supportedMachines {
42✔
253
                labelKey := kubevirtv1.SupportedMachineTypeLabel + machine.Name
22✔
254
                newLabels[labelKey] = "true"
22✔
255
        }
22✔
256

257
        for _, key := range n.hypervFeatures.items {
20✔
258
                newLabels[kubevirtv1.HypervLabel+key] = "true"
×
259
        }
×
260

261
        if n.hasTSCCounter() {
38✔
262
                newLabels[kubevirtv1.CPUTimerLabel+"tsc-frequency"] = fmt.Sprintf("%d", n.cpuCounter.Frequency)
18✔
263
                newLabels[kubevirtv1.CPUTimerLabel+"tsc-scalable"] = fmt.Sprintf("%t", n.cpuCounter.Scaling == "yes")
18✔
264
        }
18✔
265

266
        if n.arch.supportsHostModel() {
39✔
267
                if _, hostModelObsolete := obsoleteCPUsx86[hostCpuModel.Name]; !hostModelObsolete {
37✔
268
                        newLabels[kubevirtv1.SupportedHostModelMigrationCPU+hostCpuModel.Name] = "true"
18✔
269
                } else {
19✔
270
                        newLabels[kubevirtv1.NodeHostModelIsObsoleteLabel] = "true"
1✔
271
                        err := n.alertIfHostModelIsObsolete(node, hostCpuModel.Name, obsoleteCPUsx86)
1✔
272
                        if err != nil {
1✔
273
                                n.logger.Reason(err).Error(err.Error())
×
274
                        }
×
275
                }
276

277
                for feature := range hostCpuModel.requiredFeatures {
131✔
278
                        newLabels[kubevirtv1.HostModelRequiredFeaturesLabel+feature] = "true"
112✔
279
                }
112✔
280

281
                newLabels[kubevirtv1.CPUModelVendorLabel+n.cpuModelVendor] = "true"
19✔
282
                newLabels[kubevirtv1.HostModelCPULabel+hostCpuModel.Name] = "true"
19✔
283
        }
284

285
        capable, err := isNodeRealtimeCapable()
20✔
286
        if err != nil {
40✔
287
                n.logger.Reason(err).Error("failed to identify if a node is capable of running realtime workloads")
20✔
288
        }
20✔
289
        if capable {
20✔
290
                newLabels[kubevirtv1.RealtimeLabel] = "true"
×
291
        }
×
292

293
        if n.SEV.Supported == "yes" {
38✔
294
                newLabels[kubevirtv1.SEVLabel] = "true"
18✔
295
        }
18✔
296

297
        if n.SEV.SupportedES == "yes" {
38✔
298
                newLabels[kubevirtv1.SEVESLabel] = "true"
18✔
299
        }
18✔
300
        if n.SecureExecution.Supported == "yes" {
21✔
301
                newLabels[kubevirtv1.SecureExecutionLabel] = "true"
1✔
302
        }
1✔
303

304
        return newLabels
20✔
305
}
306

307
// addNodeLabels adds labels to node.
308
func (n *NodeLabeller) addLabellerLabels(node *v1.Node, labels map[string]string) {
20✔
309
        for key, value := range labels {
545✔
310
                node.Labels[key] = value
525✔
311
        }
525✔
312
}
313

314
// removeLabellerLabels removes labels from node
315
func (n *NodeLabeller) removeLabellerLabels(node *v1.Node) {
21✔
316
        for label := range node.Labels {
184✔
317
                if isNodeLabellerLabel(label) {
294✔
318
                        delete(node.Labels, label)
131✔
319
                }
131✔
320
        }
321
}
322

323
// kernelSchedRealtimeRuntimeInMicroseconds is the sysctl knob that limits
// how much CPU time realtime tasks may consume; -1 means unlimited.
const kernelSchedRealtimeRuntimeInMicroseconds = "kernel.sched_rt_runtime_us"

// isNodeRealtimeCapable Checks if a node is capable of running realtime workloads. Currently by validating if the kernel system setting value
// for `kernel.sched_rt_runtime_us` is set to allow running realtime scheduling with unlimited time (==-1)
// TODO: This part should be improved to validate against key attributes that determine best if a host is able to run realtime
// workloads at peak performance.
func isNodeRealtimeCapable() (bool, error) {
	ret, err := exec.Command("sysctl", kernelSchedRealtimeRuntimeInMicroseconds).CombinedOutput()
	if err != nil {
		return false, err
	}
	st := strings.Trim(string(ret), "\n")
	return fmt.Sprintf("%s = -1", kernelSchedRealtimeRuntimeInMicroseconds) == st, nil
}
338

339
func isNodeLabellerLabel(label string) bool {
163✔
340
        for _, prefix := range nodeLabellerLabels {
1,009✔
341
                if strings.HasPrefix(label, prefix) {
977✔
342
                        return true
131✔
343
                }
131✔
344
        }
345

346
        return false
32✔
347
}
348

349
func (n *NodeLabeller) alertIfHostModelIsObsolete(originalNode *v1.Node, hostModel string, ObsoleteCPUModels map[string]bool) error {
1✔
350
        warningMsg := fmt.Sprintf("This node has %v host-model cpu that is included in ObsoleteCPUModels: %v", hostModel, ObsoleteCPUModels)
1✔
351
        n.recorder.Eventf(originalNode, v1.EventTypeWarning, "HostModelIsObsolete", warningMsg)
1✔
352
        return nil
1✔
353
}
1✔
354

355
func (n *NodeLabeller) hasTSCCounter() bool {
20✔
356
        return n.cpuCounter != nil && n.cpuCounter.Name == "tsc"
20✔
357
}
20✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc