• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

kubevirt / kubevirt / b876b80b-592e-43f5-8fb6-f169e2d754aa

04 Nov 2025 03:51PM UTC coverage: 70.283% (-0.005%) from 70.288%
b876b80b-592e-43f5-8fb6-f169e2d754aa

push

prow

web-flow
Merge pull request #16018 from dhiller/q-test-id-4135

chore(quarantine): flaky vmi ns label test

69535 of 98936 relevant lines covered (70.28%)

431.17 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.44
/pkg/virt-controller/services/renderresources.go
1
package services
2

3
import (
4
        "fmt"
5
        "strconv"
6
        "strings"
7

8
        "k8s.io/client-go/tools/cache"
9

10
        "kubevirt.io/client-go/log"
11

12
        k8sv1 "k8s.io/api/core/v1"
13
        "k8s.io/apimachinery/pkg/api/resource"
14
        v1 "kubevirt.io/api/core/v1"
15

16
        "kubevirt.io/kubevirt/pkg/downwardmetrics"
17
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
18
        "kubevirt.io/kubevirt/pkg/tpm"
19
        "kubevirt.io/kubevirt/pkg/util"
20
        "kubevirt.io/kubevirt/pkg/util/hardware"
21
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
22
)
23

// ResourceRendererOption mutates a ResourceRenderer; options are applied in
// order by NewResourceRenderer.
type ResourceRendererOption func(renderer *ResourceRenderer)

// ResourceRenderer accumulates the compute resource requirements of the pod
// that will host a VMI: the limits/requests copied from the VM spec, the
// values derived by renderer options, and any DRA resource claims.
type ResourceRenderer struct {
	// vmLimits and vmRequests hold values originating from the VM spec;
	// they take precedence over calculated values when merged (see Limits/Requests).
	vmLimits   k8sv1.ResourceList
	vmRequests k8sv1.ResourceList
	// calculatedLimits and calculatedRequests hold values derived by
	// ResourceRendererOption functions (hugepages, overheads, devices, ...).
	calculatedLimits   k8sv1.ResourceList
	calculatedRequests k8sv1.ResourceList
	// resourceClaims collects DRA claims added by options such as
	// WithGPUsDRA and WithHostDevicesDRA.
	resourceClaims []k8sv1.ResourceClaim
}

// resourcePredicate reports whether a resource rule applies to the given VMI.
type resourcePredicate func(*v1.VirtualMachineInstance) bool

// VMIResourcePredicates couples a VMI with the ordered set of rules that are
// evaluated against it.
type VMIResourcePredicates struct {
	resourceRules []VMIResourceRule
	vmi           *v1.VirtualMachineInstance
}

// VMIResourceRule pairs a predicate with the renderer option to apply when
// the predicate matches.
type VMIResourceRule struct {
	predicate resourcePredicate
	option    ResourceRendererOption
}
45

46
func not(p resourcePredicate) resourcePredicate {
600✔
47
        return func(vmi *v1.VirtualMachineInstance) bool {
1,200✔
48
                return !p(vmi)
600✔
49
        }
600✔
50
}
51
func NewVMIResourceRule(p resourcePredicate, option ResourceRendererOption) VMIResourceRule {
3,900✔
52
        return VMIResourceRule{predicate: p, option: option}
3,900✔
53
}
3,900✔
54

// doesVMIRequireDedicatedCPU reports whether the VMI requests dedicated
// (pinned) CPUs, delegating to the VMI's IsCPUDedicated helper.
func doesVMIRequireDedicatedCPU(vmi *v1.VirtualMachineInstance) bool {
	return vmi.IsCPUDedicated()
}
58

59
func NewResourceRenderer(vmLimits k8sv1.ResourceList, vmRequests k8sv1.ResourceList, options ...ResourceRendererOption) *ResourceRenderer {
329✔
60
        limits := map[k8sv1.ResourceName]resource.Quantity{}
329✔
61
        requests := map[k8sv1.ResourceName]resource.Quantity{}
329✔
62
        copyResources(vmLimits, limits)
329✔
63
        copyResources(vmRequests, requests)
329✔
64

329✔
65
        resourceRenderer := &ResourceRenderer{
329✔
66
                vmLimits:           limits,
329✔
67
                vmRequests:         requests,
329✔
68
                calculatedLimits:   map[k8sv1.ResourceName]resource.Quantity{},
329✔
69
                calculatedRequests: map[k8sv1.ResourceName]resource.Quantity{},
329✔
70
                resourceClaims:     []k8sv1.ResourceClaim{},
329✔
71
        }
329✔
72

329✔
73
        for _, opt := range options {
1,727✔
74
                opt(resourceRenderer)
1,398✔
75
        }
1,398✔
76
        return resourceRenderer
329✔
77
}
78

79
func (rr *ResourceRenderer) Limits() k8sv1.ResourceList {
358✔
80
        podLimits := map[k8sv1.ResourceName]resource.Quantity{}
358✔
81
        copyResources(rr.calculatedLimits, podLimits)
358✔
82
        copyResources(rr.vmLimits, podLimits)
358✔
83
        return podLimits
358✔
84
}
358✔
85

86
func (rr *ResourceRenderer) Requests() k8sv1.ResourceList {
354✔
87
        podRequests := map[k8sv1.ResourceName]resource.Quantity{}
354✔
88
        copyResources(rr.calculatedRequests, podRequests)
354✔
89
        copyResources(rr.vmRequests, podRequests)
354✔
90
        return podRequests
354✔
91
}
354✔
92

// Claims returns the pod resource claims accumulated by the renderer
// (e.g. DRA-provisioned GPUs and host devices).
func (rr *ResourceRenderer) Claims() []k8sv1.ResourceClaim {
	return rr.resourceClaims
}
96

97
func (rr *ResourceRenderer) ResourceRequirements() k8sv1.ResourceRequirements {
326✔
98
        return k8sv1.ResourceRequirements{
326✔
99
                Limits:   rr.Limits(),
326✔
100
                Requests: rr.Requests(),
326✔
101
                Claims:   rr.Claims(),
326✔
102
        }
326✔
103
}
326✔
104

105
func WithEphemeralStorageRequest() ResourceRendererOption {
301✔
106
        return func(renderer *ResourceRenderer) {
602✔
107
                // Add ephemeral storage request to container to be used by Kubevirt. This amount of ephemeral storage
301✔
108
                // should be added to the user's request.
301✔
109
                ephemeralStorageOverhead := resource.MustParse(ephemeralStorageOverheadSize)
301✔
110
                ephemeralStorageRequested := renderer.vmRequests[k8sv1.ResourceEphemeralStorage]
301✔
111
                ephemeralStorageRequested.Add(ephemeralStorageOverhead)
301✔
112
                renderer.vmRequests[k8sv1.ResourceEphemeralStorage] = ephemeralStorageRequested
301✔
113

301✔
114
                if ephemeralStorageLimit, ephemeralStorageLimitDefined := renderer.vmLimits[k8sv1.ResourceEphemeralStorage]; ephemeralStorageLimitDefined {
303✔
115
                        ephemeralStorageLimit.Add(ephemeralStorageOverhead)
2✔
116
                        renderer.vmLimits[k8sv1.ResourceEphemeralStorage] = ephemeralStorageLimit
2✔
117
                }
2✔
118
        }
119
}
120

121
// Helper function to extract IO thread CPU count from VMI
122
func getIOThreadsCount(vmi *v1.VirtualMachineInstance) int64 {
310✔
123
        if vmi == nil || vmi.Spec.Domain.IOThreads == nil ||
310✔
124
                vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount == nil {
614✔
125
                return 0
304✔
126
        }
304✔
127
        return int64(*vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount)
6✔
128
}
129

// WithoutDedicatedCPU renders the CPU request (and optionally limit) for a
// VMI without dedicated CPU placement. The vCPU count (topology plus
// supplemental-pool IO threads) is divided by cpuAllocationRatio to derive
// the request; values below one full CPU are expressed in millicores.
// When totalCPUs is 0 or the ratio is not positive, nothing is rendered.
func WithoutDedicatedCPU(vmi *v1.VirtualMachineInstance, cpuAllocationRatio int, withCPULimits bool) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		cpu := vmi.Spec.Domain.CPU
		vcpus := calcVCPUs(cpu)
		ioThreadCPUs := getIOThreadsCount(vmi) // Get IO thread count
		totalCPUs := vcpus + ioThreadCPUs      // Include IO threads
		if totalCPUs != 0 && cpuAllocationRatio > 0 {
			val := float64(totalCPUs) / float64(cpuAllocationRatio)
			vcpusStr := fmt.Sprintf("%g", val)
			if val < 1 {
				// Sub-CPU amounts are rendered in millicores, e.g. 0.25 -> "250m".
				val *= 1000
				vcpusStr = fmt.Sprintf("%gm", val)
			}
			renderer.calculatedRequests[k8sv1.ResourceCPU] = resource.MustParse(vcpusStr)
			if withCPULimits {
				// The limit is the full (undivided) CPU count.
				renderer.calculatedLimits[k8sv1.ResourceCPU] = resource.MustParse(strconv.FormatInt(totalCPUs, 10))
			}
		}
	}
}
150

151
func WithGPUsDevicePlugins(gpus []v1.GPU) ResourceRendererOption {
303✔
152
        return func(r *ResourceRenderer) {
307✔
153
                res := r.ResourceRequirements()
4✔
154
                for _, g := range gpus {
8✔
155
                        if g.DeviceName != "" && g.ClaimRequest == nil {
7✔
156
                                requestResource(&res, g.DeviceName)
3✔
157
                        }
3✔
158
                }
159
                copyResources(res.Limits, r.calculatedLimits)
4✔
160
                copyResources(res.Requests, r.calculatedRequests)
4✔
161
        }
162
}
163

164
func WithGPUsDRA(gpus []v1.GPU) ResourceRendererOption {
303✔
165
        return func(r *ResourceRenderer) {
306✔
166
                res := r.ResourceRequirements()
3✔
167
                for _, g := range gpus {
7✔
168
                        if g.DeviceName == "" && g.ClaimRequest != nil {
7✔
169
                                requestResourceClaims(&res, &k8sv1.ResourceClaim{
3✔
170
                                        Name:    *g.ClaimRequest.ClaimName,
3✔
171
                                        Request: *g.ClaimRequest.RequestName,
3✔
172
                                })
3✔
173
                        }
3✔
174
                }
175
                copyResources(res.Limits, r.calculatedLimits)
3✔
176
                copyResources(res.Requests, r.calculatedRequests)
3✔
177
                copyResourceClaims(&res, &r.resourceClaims)
3✔
178
        }
179
}
180

181
// WithHostDevicesDevicePlugins adds resource requests/limits only for HostDevices managed by device plugins.
182
func WithHostDevicesDevicePlugins(hostDevices []v1.HostDevice) ResourceRendererOption {
303✔
183
        return func(r *ResourceRenderer) {
307✔
184
                resources := r.ResourceRequirements()
4✔
185
                for _, hd := range hostDevices {
8✔
186
                        if hd.DeviceName != "" && hd.ClaimRequest == nil {
7✔
187
                                requestResource(&resources, hd.DeviceName)
3✔
188
                        }
3✔
189
                }
190
                copyResources(resources.Limits, r.calculatedLimits)
4✔
191
                copyResources(resources.Requests, r.calculatedRequests)
4✔
192
        }
193
}
194

195
// WithHostDevicesDRA adds ResourceClaims for HostDevices provisioned via DRA.
196
func WithHostDevicesDRA(hostDevices []v1.HostDevice) ResourceRendererOption {
302✔
197
        return func(r *ResourceRenderer) {
304✔
198
                resources := r.ResourceRequirements()
2✔
199
                for _, hd := range hostDevices {
5✔
200
                        if hd.DeviceName == "" && hd.ClaimRequest != nil && hd.ClaimRequest.ClaimName != nil && hd.ClaimRequest.RequestName != nil {
5✔
201
                                requestResourceClaims(&resources, &k8sv1.ResourceClaim{
2✔
202
                                        Name:    *hd.ClaimRequest.ClaimName,
2✔
203
                                        Request: *hd.ClaimRequest.RequestName,
2✔
204
                                })
2✔
205
                        }
2✔
206
                }
207
                copyResources(resources.Limits, r.calculatedLimits)
2✔
208
                copyResources(resources.Requests, r.calculatedRequests)
2✔
209
                copyResourceClaims(&resources, &r.resourceClaims)
2✔
210
        }
211
}
212

// WithHugePages renders hugepage resources for the VMI: it mirrors the
// memory request (capped at the guest memory, when set) into the matching
// hugepages-<size> request and limit, and then replaces the plain memory
// request/limit with the non-hugepage remainder plus the infra overhead.
//
// NOTE(review): the `vmMemory != nil` checks below are dead code —
// vmMemory.Hugepages.PageSize is dereferenced unconditionally on the first
// line, so callers must already guarantee vmMemory and vmMemory.Hugepages
// are non-nil. Confirm with callers before relying on nil tolerance.
func WithHugePages(vmMemory *v1.Memory, memoryOverhead resource.Quantity) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		hugepageType := k8sv1.ResourceName(k8sv1.ResourceHugePagesPrefix + vmMemory.Hugepages.PageSize)
		hugepagesMemReq := renderer.vmRequests.Memory()

		// If requested, use the guest memory to allocate hugepages
		if vmMemory != nil && vmMemory.Guest != nil {
			requests := hugepagesMemReq.Value()
			guest := vmMemory.Guest.Value()
			if requests > guest {
				// Cap hugepages at guest memory; the remainder is handled below.
				hugepagesMemReq = vmMemory.Guest
			}
		}
		renderer.calculatedRequests[hugepageType] = *hugepagesMemReq
		renderer.calculatedLimits[hugepageType] = *hugepagesMemReq

		reqMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
		limMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
		// In case the guest memory and the requested memory are different, add the difference
		// to the overhead
		if vmMemory != nil && vmMemory.Guest != nil {
			requests := renderer.vmRequests.Memory().Value()
			limits := renderer.vmLimits.Memory().Value()
			guest := vmMemory.Guest.Value()
			if requests > guest {
				reqMemDiff.Add(*renderer.vmRequests.Memory())
				reqMemDiff.Sub(*vmMemory.Guest)
			}
			if limits > guest {
				limMemDiff.Add(*renderer.vmLimits.Memory())
				limMemDiff.Sub(*vmMemory.Guest)
			}
		}
		// Set requested memory equals to overhead memory
		reqMemDiff.Add(memoryOverhead)
		renderer.vmRequests[k8sv1.ResourceMemory] = *reqMemDiff
		// The plain memory limit is only adjusted when one was already set.
		if _, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
			limMemDiff.Add(memoryOverhead)
			renderer.vmLimits[k8sv1.ResourceMemory] = *limMemDiff
		}
	}
}
255

// WithMemoryRequests ensures a memory request is present on the renderer:
// it first falls back to the memory limit (when a non-zero limit exists but
// the request is absent or zero), and only if a request is still missing
// derives one from the guest memory or, failing that, the hugepage size,
// scaled down by the memory overcommit percentage.
//
// NOTE(review): overcommit == 0 would divide by zero below; presumably the
// value is validated upstream (webhooks/cluster config) — confirm.
func WithMemoryRequests(vmiSpecMemory *v1.Memory, overcommit int) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		limit, hasLimit := renderer.vmLimits[k8sv1.ResourceMemory]
		request, hasRequest := renderer.vmRequests[k8sv1.ResourceMemory]
		if hasLimit && !limit.IsZero() && (!hasRequest || request.IsZero()) {
			// Mirror the limit into the request when the request is missing/zero.
			renderer.vmRequests[k8sv1.ResourceMemory] = limit
		}

		// A request now exists (user-specified or mirrored) — nothing to derive.
		if _, exists := renderer.vmRequests[k8sv1.ResourceMemory]; exists {
			return
		}

		var memory *resource.Quantity
		if vmiSpecMemory != nil && vmiSpecMemory.Guest != nil {
			memory = vmiSpecMemory.Guest
		} else if vmiSpecMemory != nil && vmiSpecMemory.Hugepages != nil {
			// Fall back to a single hugepage's size when only hugepages are set.
			if hugepagesSize, err := resource.ParseQuantity(vmiSpecMemory.Hugepages.PageSize); err == nil {
				memory = &hugepagesSize
			}
		}

		if memory != nil && memory.Value() > 0 {
			if overcommit == 100 {
				// No overcommit: request exactly the guest memory.
				renderer.vmRequests[k8sv1.ResourceMemory] = *memory
			} else {
				// Scale the request by 100/overcommit (e.g. 200% overcommit halves it).
				value := (memory.Value() * int64(100)) / int64(overcommit)
				renderer.vmRequests[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memory.Format)
			}
		}
	}
}
287

288
func WithMemoryOverhead(guestResourceSpec v1.ResourceRequirements, memoryOverhead resource.Quantity) ResourceRendererOption {
302✔
289
        return func(renderer *ResourceRenderer) {
582✔
290
                memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
280✔
291
                if !guestResourceSpec.OvercommitGuestOverhead {
557✔
292
                        memoryRequest.Add(memoryOverhead)
277✔
293
                }
277✔
294
                renderer.vmRequests[k8sv1.ResourceMemory] = memoryRequest
280✔
295

280✔
296
                if memoryLimit, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
297✔
297
                        memoryLimit.Add(memoryOverhead)
17✔
298
                        renderer.vmLimits[k8sv1.ResourceMemory] = memoryLimit
17✔
299
                }
17✔
300
        }
301
}
302

303
func WithAutoMemoryLimits(namespace string, namespaceStore cache.Store) ResourceRendererOption {
303✔
304
        return func(renderer *ResourceRenderer) {
310✔
305
                requestRatio := getMemoryLimitsRatio(namespace, namespaceStore)
7✔
306
                memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
7✔
307
                value := int64(float64(memoryRequest.Value()) * requestRatio)
7✔
308
                renderer.calculatedLimits[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memoryRequest.Format)
7✔
309
        }
7✔
310
}
311

// WithCPUPinning renders CPU resources for a VMI with dedicated (pinned)
// CPUs. When a vCPU topology is present, requests and limits are both set to
// vCPUs + IO threads (equal requests/limits are required for pinning). When
// no topology is set, the IO thread count is added onto any existing CPU
// request/limit instead. An isolated emulator thread adds one extra CPU (or
// two, to keep the total even, when the even-parity annotation is present).
// Finally the memory limit is aligned with the request when a request exists.
func WithCPUPinning(vmi *v1.VirtualMachineInstance, annotations map[string]string, additionalCPUs uint32) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		cpu := vmi.Spec.Domain.CPU
		vcpus := hardware.GetNumberOfVCPUs(cpu)
		ioThreadCPUs := getIOThreadsCount(vmi)
		if vcpus != 0 {
			totalCPUs := vcpus + ioThreadCPUs
			renderer.vmLimits[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI)
			renderer.vmRequests[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI) // Ensure requests match limits for dedicated CPUs
		} else {
			// No topology: bump whatever CPU values already exist by the IO thread count.
			ioThreadsCount := resource.NewQuantity(ioThreadCPUs, resource.BinarySI)
			if cpuLimit, ok := renderer.vmLimits[k8sv1.ResourceCPU]; ok {
				cpuLimit.Add(*ioThreadsCount)
				renderer.vmLimits[k8sv1.ResourceCPU] = cpuLimit
			}
			if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
				cpuRequest.Add(*ioThreadsCount)
				renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
			}
		}

		if cpu.IsolateEmulatorThread {
			emulatorThreadCPUs := resource.NewQuantity(1, resource.BinarySI)
			limits := renderer.vmLimits[k8sv1.ResourceCPU]
			_, emulatorThreadCompleteToEvenParityAnnotationExists := annotations[v1.EmulatorThreadCompleteToEvenParity]
			// With the even-parity annotation, allocate 2 CPUs for the emulator
			// thread whenever the current total (limits + additionalCPUs) is even,
			// so the final count stays even.
			if emulatorThreadCompleteToEvenParityAnnotationExists &&
				(limits.Value()+int64(additionalCPUs))%2 == 0 {
				emulatorThreadCPUs = resource.NewQuantity(2, resource.BinarySI)
			}
			limits.Add(*emulatorThreadCPUs)
			renderer.vmLimits[k8sv1.ResourceCPU] = limits
			if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
				cpuRequest.Add(*emulatorThreadCPUs)
				renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
			}
		}

		// Align memory limits with requests for consistency
		if memRequest, ok := renderer.vmRequests[k8sv1.ResourceMemory]; ok {
			renderer.vmLimits[k8sv1.ResourceMemory] = memRequest
		}
	}
}
355

356
func WithNetworkResources(networkToResourceMap map[string]string) ResourceRendererOption {
302✔
357
        return func(renderer *ResourceRenderer) {
308✔
358
                resources := renderer.ResourceRequirements()
6✔
359
                for _, resourceName := range networkToResourceMap {
13✔
360
                        if resourceName != "" {
9✔
361
                                requestResource(&resources, resourceName)
2✔
362
                        }
2✔
363
                }
364
                copyResources(resources.Limits, renderer.calculatedLimits)
6✔
365
                copyResources(resources.Requests, renderer.calculatedRequests)
6✔
366
        }
367
}
368

369
func WithSEV() ResourceRendererOption {
301✔
370
        return func(renderer *ResourceRenderer) {
308✔
371
                resources := renderer.ResourceRequirements()
7✔
372
                requestResource(&resources, SevDevice)
7✔
373
                copyResources(resources.Limits, renderer.calculatedLimits)
7✔
374
                copyResources(resources.Requests, renderer.calculatedRequests)
7✔
375
        }
7✔
376
}
377

378
func WithPersistentReservation() ResourceRendererOption {
300✔
379
        return func(renderer *ResourceRenderer) {
300✔
380
                resources := renderer.ResourceRequirements()
×
381
                requestResource(&resources, PrDevice)
×
382
                copyResources(resources.Limits, renderer.calculatedLimits)
×
383
                copyResources(resources.Requests, renderer.calculatedRequests)
×
384
        }
×
385
}
386

387
func copyResources(srcResources, dstResources k8sv1.ResourceList) {
2,434✔
388
        for key, value := range srcResources {
5,221✔
389
                dstResources[key] = value
2,787✔
390
        }
2,787✔
391
}
392

393
func requestResourceClaims(resources *k8sv1.ResourceRequirements, claim *k8sv1.ResourceClaim) {
5✔
394
        if resources.Claims == nil {
5✔
395
                resources.Claims = []k8sv1.ResourceClaim{*claim}
×
396
                return
×
397
        }
×
398
        resources.Claims = append(resources.Claims, *claim)
5✔
399
}
400

401
func copyResourceClaims(resources *k8sv1.ResourceRequirements, claims *[]k8sv1.ResourceClaim) {
5✔
402
        existing := make(map[string]struct{})
5✔
403
        for _, c := range *claims {
6✔
404
                existing[c.Name] = struct{}{}
1✔
405
        }
1✔
406

407
        for _, value := range resources.Claims {
11✔
408
                if _, found := existing[value.Name]; found {
7✔
409
                        continue // skip duplicates by Name
1✔
410
                }
411
                *claims = append(*claims, value)
5✔
412
                existing[value.Name] = struct{}{}
5✔
413
        }
414
}
415

416
// GetMemoryOverhead computes the estimation of total
417
// memory needed for the domain to operate properly.
418
// This includes the memory needed for the guest and memory
419
// for Qemu and OS overhead.
420
// The return value is overhead memory quantity
421
//
422
// Note: This is the best estimation we were able to come up with
423
//
424
//        and is still not 100% accurate
425
func GetMemoryOverhead(vmi *v1.VirtualMachineInstance, cpuArch string, additionalOverheadRatio *string) resource.Quantity {
338✔
426
        domain := vmi.Spec.Domain
338✔
427
        vmiMemoryReq := domain.Resources.Requests.Memory()
338✔
428

338✔
429
        overhead := *resource.NewScaledQuantity(0, resource.Kilo)
338✔
430

338✔
431
        // Add the memory needed for pagetables (one bit for every 512b of RAM size)
338✔
432
        pagetableMemory := resource.NewScaledQuantity(vmiMemoryReq.ScaledValue(resource.Kilo), resource.Kilo)
338✔
433
        pagetableMemory.Set(pagetableMemory.Value() / 512)
338✔
434
        overhead.Add(*pagetableMemory)
338✔
435

338✔
436
        // Add fixed overhead for KubeVirt components, as seen in a random run, rounded up to the nearest MiB
338✔
437
        // Note: shared libraries are included in the size, so every library is counted (wrongly) as many times as there are
338✔
438
        //   processes using it. However, the extra memory is only in the order of 10MiB and makes for a nice safety margin.
338✔
439
        overhead.Add(resource.MustParse(VirtLauncherMonitorOverhead))
338✔
440
        overhead.Add(resource.MustParse(VirtLauncherOverhead))
338✔
441
        overhead.Add(resource.MustParse(VirtlogdOverhead))
338✔
442
        overhead.Add(resource.MustParse(VirtqemudOverhead))
338✔
443
        overhead.Add(resource.MustParse(QemuOverhead))
338✔
444

338✔
445
        // Add CPU table overhead (8 MiB per vCPU and 8 MiB per IO thread)
338✔
446
        // overhead per vcpu in MiB
338✔
447
        coresMemory := resource.MustParse("8Mi")
338✔
448
        var vcpus int64
338✔
449
        if domain.CPU != nil {
384✔
450
                vcpus = hardware.GetNumberOfVCPUs(domain.CPU)
46✔
451
        } else {
338✔
452
                // Currently, a default guest CPU topology is set by the API webhook mutator, if not set by a user.
292✔
453
                // However, this wasn't always the case.
292✔
454
                // In case when the guest topology isn't set, take value from resources request or limits.
292✔
455
                resources := vmi.Spec.Domain.Resources
292✔
456
                if cpuLimit, ok := resources.Limits[k8sv1.ResourceCPU]; ok {
295✔
457
                        vcpus = cpuLimit.Value()
3✔
458
                } else if cpuRequests, ok := resources.Requests[k8sv1.ResourceCPU]; ok {
300✔
459
                        vcpus = cpuRequests.Value()
8✔
460
                }
8✔
461
        }
462

463
        // if neither CPU topology nor request or limits provided, set vcpus to 1
464
        if vcpus < 1 {
634✔
465
                vcpus = 1
296✔
466
        }
296✔
467
        value := coresMemory.Value() * vcpus
338✔
468
        coresMemory = *resource.NewQuantity(value, coresMemory.Format)
338✔
469
        overhead.Add(coresMemory)
338✔
470

338✔
471
        // static overhead for IOThread
338✔
472
        overhead.Add(resource.MustParse("8Mi"))
338✔
473

338✔
474
        // Add video RAM overhead
338✔
475
        if domain.Devices.AutoattachGraphicsDevice == nil || *domain.Devices.AutoattachGraphicsDevice == true {
673✔
476
                overhead.Add(resource.MustParse("32Mi"))
335✔
477
        }
335✔
478

479
        // When use uefi boot on aarch64 with edk2 package, qemu will create 2 pflash(64Mi each, 128Mi in total)
480
        // it should be considered for memory overhead
481
        // Additional information can be found here: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L120
482
        if cpuArch == "arm64" {
351✔
483
                overhead.Add(resource.MustParse("128Mi"))
13✔
484
        }
13✔
485

486
        // Additional overhead of 1G for VFIO devices. VFIO requires all guest RAM to be locked
487
        // in addition to MMIO memory space to allow DMA. 1G is often the size of reserved MMIO space on x86 systems.
488
        // Additial information can be found here: https://www.redhat.com/archives/libvir-list/2015-November/msg00329.html
489
        if util.IsVFIOVMI(vmi) {
351✔
490
                overhead.Add(resource.MustParse("1Gi"))
13✔
491
        }
13✔
492

493
        // DownardMetrics volumes are using emptyDirs backed by memory.
494
        // the max. disk size is only 256Ki.
495
        if downwardmetrics.HasDownwardMetricDisk(vmi) {
341✔
496
                overhead.Add(resource.MustParse("1Mi"))
3✔
497
        }
3✔
498

499
        addProbeOverheads(vmi, &overhead)
338✔
500

338✔
501
        // Consider memory overhead for SEV guests.
338✔
502
        // Additional information can be found here: https://libvirt.org/kbase/launch_security_sev.html#memory
338✔
503
        if util.IsSEVVMI(vmi) || util.IsSEVSNPVMI(vmi) || util.IsSEVESVMI(vmi) {
345✔
504
                overhead.Add(resource.MustParse("256Mi"))
7✔
505
        }
7✔
506

507
        // Having a TPM device will spawn a swtpm process
508
        // In `ps`, swtpm has VSZ of 53808 and RSS of 3496, so 53Mi should do
509
        if tpm.HasDevice(&vmi.Spec) {
341✔
510
                overhead.Add(resource.MustParse("53Mi"))
3✔
511
        }
3✔
512

513
        if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
349✔
514
                overhead.Add(resource.MustParse("100Mi"))
11✔
515
        }
11✔
516

517
        // Multiplying the ratio is expected to be the last calculation before returning overhead
518
        if additionalOverheadRatio != nil && *additionalOverheadRatio != "" {
343✔
519
                ratio, err := strconv.ParseFloat(*additionalOverheadRatio, 64)
5✔
520
                if err != nil {
6✔
521
                        // This error should never happen as it's already validated by webhooks
1✔
522
                        log.Log.Warningf("cannot add additional overhead to virt infra overhead calculation: %v", err)
1✔
523
                        return overhead
1✔
524
                }
1✔
525

526
                overhead = multiplyMemory(overhead, ratio)
4✔
527
        }
528

529
        return overhead
337✔
530
}
531

// Request a resource by name. This function bumps the number of resources,
// both its limits and requests attributes.
//
// If we were operating with a regular resource (CPU, memory, network
// bandwidth), we would need to take care of QoS. For example,
// https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
// explains that when Limits are set but Requests are not then scheduler
// assumes that Requests are the same as Limits for a particular resource.
//
// But this function is not called for this standard resources but for
// resources managed by device plugins. The device plugin design document says
// the following on the matter:
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/resource-management/device-plugin.md#end-user-story
//
// ```
// Devices can be selected using the same process as for OIRs in the pod spec.
// Devices have no impact on QOS. However, for the alpha, we expect the request
// to have limits == requests.
// ```
//
// Which suggests that, for resources managed by device plugins, 1) limits
// should be equal to requests; and 2) QoS rules do not apply.
//
// Hence we don't copy Limits value to Requests if the latter is missing.
func requestResource(resources *k8sv1.ResourceRequirements, resourceName string) {
	name := k8sv1.ResourceName(resourceName)
	bumpResources(resources.Limits, name)
	bumpResources(resources.Requests, name)
}
560

561
func bumpResources(resources k8sv1.ResourceList, name k8sv1.ResourceName) {
40✔
562
        unitQuantity := *resource.NewQuantity(1, resource.DecimalSI)
40✔
563

40✔
564
        val, ok := resources[name]
40✔
565
        if ok {
48✔
566
                val.Add(unitQuantity)
8✔
567
                resources[name] = val
8✔
568
        } else {
40✔
569
                resources[name] = unitQuantity
32✔
570
        }
32✔
571
}
572

573
func calcVCPUs(cpu *v1.CPU) int64 {
297✔
574
        if cpu != nil {
333✔
575
                return hardware.GetNumberOfVCPUs(cpu)
36✔
576
        }
36✔
577
        return int64(1)
261✔
578
}
579

580
func getRequiredResources(vmi *v1.VirtualMachineInstance, allowEmulation bool) k8sv1.ResourceList {
300✔
581
        res := k8sv1.ResourceList{}
300✔
582
        if netvmispec.RequiresTunDevice(vmi) {
599✔
583
                res[TunDevice] = resource.MustParse("1")
299✔
584
        }
299✔
585
        if netvmispec.RequiresVirtioNetDevice(vmi, allowEmulation) {
315✔
586
                // Note that about network interface, allowEmulation does not make
15✔
587
                // any difference on eventual Domain xml, but uniformly making
15✔
588
                // /dev/vhost-net unavailable and libvirt implicitly fallback
15✔
589
                // to use QEMU userland NIC emulation.
15✔
590
                res[VhostNetDevice] = resource.MustParse("1")
15✔
591
        }
15✔
592
        if !allowEmulation {
599✔
593
                res[KvmDevice] = resource.MustParse("1")
299✔
594
        }
299✔
595
        if util.IsAutoAttachVSOCK(vmi) {
301✔
596
                res[VhostVsockDevice] = resource.MustParse("1")
1✔
597
        }
1✔
598
        return res
300✔
599
}
600

601
func WithVirtualizationResources(virtResources k8sv1.ResourceList) ResourceRendererOption {
300✔
602
        return func(renderer *ResourceRenderer) {
600✔
603
                copyResources(virtResources, renderer.vmLimits)
300✔
604
        }
300✔
605
}
606

607
func validatePermittedHostDevices(spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) error {
300✔
608
        errors := make([]string, 0)
300✔
609

300✔
610
        if hostDevs := config.GetPermittedHostDevices(); hostDevs != nil {
300✔
611
                // build a map of all permitted host devices
×
612
                supportedHostDevicesMap := make(map[string]bool)
×
613
                for _, dev := range hostDevs.PciHostDevices {
×
614
                        supportedHostDevicesMap[dev.ResourceName] = true
×
615
                }
×
616
                for _, dev := range hostDevs.MediatedDevices {
×
617
                        supportedHostDevicesMap[dev.ResourceName] = true
×
618
                }
×
619
                for _, dev := range hostDevs.USB {
×
620
                        supportedHostDevicesMap[dev.ResourceName] = true
×
621
                }
×
622
                //TODO @alayp: add proper validation for DRA GPUs in beta
623
                if !config.GPUsWithDRAGateEnabled() {
×
624
                        for _, hostDev := range spec.Domain.Devices.GPUs {
×
625
                                if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
×
626
                                        errors = append(errors, fmt.Sprintf("GPU %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
×
627
                                }
×
628
                        }
629
                }
630
                for _, hostDev := range spec.Domain.Devices.HostDevices {
×
631
                        if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
×
632
                                errors = append(errors, fmt.Sprintf("HostDevice %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
×
633
                        }
×
634
                }
635
        }
636

637
        if len(errors) != 0 {
300✔
638
                return fmt.Errorf("%s", strings.Join(errors, " "))
×
639
        }
×
640

641
        return nil
300✔
642
}
643

644
func sidecarResources(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
21✔
645
        resources := k8sv1.ResourceRequirements{
21✔
646
                Requests: k8sv1.ResourceList{},
21✔
647
                Limits:   k8sv1.ResourceList{},
21✔
648
        }
21✔
649
        if reqCpu := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceCPU); reqCpu != nil {
23✔
650
                resources.Requests[k8sv1.ResourceCPU] = *reqCpu
2✔
651
        }
2✔
652
        if reqMem := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceMemory); reqMem != nil {
23✔
653
                resources.Requests[k8sv1.ResourceMemory] = *reqMem
2✔
654
        }
2✔
655

656
        // add default cpu and memory limits to enable cpu pinning if requested
657
        // TODO(vladikr): make the hookSidecar express resources
658
        if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
25✔
659
                resources.Limits[k8sv1.ResourceCPU] = resource.MustParse("200m")
4✔
660
                if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
5✔
661
                        resources.Limits[k8sv1.ResourceCPU] = *limCpu
1✔
662
                }
1✔
663
                resources.Limits[k8sv1.ResourceMemory] = resource.MustParse("64M")
4✔
664
                if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
5✔
665
                        resources.Limits[k8sv1.ResourceMemory] = *limMem
1✔
666
                }
1✔
667
                resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
4✔
668
                resources.Requests[k8sv1.ResourceMemory] = resources.Limits[k8sv1.ResourceMemory]
4✔
669
        } else {
17✔
670
                if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
18✔
671
                        resources.Limits[k8sv1.ResourceCPU] = *limCpu
1✔
672
                }
1✔
673
                if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
18✔
674
                        resources.Limits[k8sv1.ResourceMemory] = *limMem
1✔
675
                }
1✔
676
        }
677
        return resources
21✔
678
}
679

680
func initContainerResourceRequirementsForVMI(vmi *v1.VirtualMachineInstance, containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
15✔
681
        if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
16✔
682
                return k8sv1.ResourceRequirements{
1✔
683
                        Limits:   initContainerDedicatedCPURequiredResources(containerType, config),
1✔
684
                        Requests: initContainerDedicatedCPURequiredResources(containerType, config),
1✔
685
                }
1✔
686
        } else {
15✔
687
                return k8sv1.ResourceRequirements{
14✔
688
                        Limits:   initContainerMinimalLimits(containerType, config),
14✔
689
                        Requests: initContainerMinimalRequests(containerType, config),
14✔
690
                }
14✔
691
        }
14✔
692
}
693

694
func initContainerDedicatedCPURequiredResources(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
2✔
695
        res := k8sv1.ResourceList{
2✔
696
                k8sv1.ResourceCPU:    resource.MustParse("10m"),
2✔
697
                k8sv1.ResourceMemory: resource.MustParse("40M"),
2✔
698
        }
2✔
699
        if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
2✔
700
                res[k8sv1.ResourceCPU] = *cpuLim
×
701
        }
×
702
        if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
2✔
703
                res[k8sv1.ResourceMemory] = *memLim
×
704
        }
×
705
        return res
2✔
706
}
707

708
func initContainerMinimalLimits(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
14✔
709
        res := k8sv1.ResourceList{
14✔
710
                k8sv1.ResourceCPU:    resource.MustParse("100m"),
14✔
711
                k8sv1.ResourceMemory: resource.MustParse("40M"),
14✔
712
        }
14✔
713
        if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
14✔
714
                res[k8sv1.ResourceCPU] = *cpuLim
×
715
        }
×
716
        if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
14✔
717
                res[k8sv1.ResourceMemory] = *memLim
×
718
        }
×
719
        return res
14✔
720
}
721

722
func initContainerMinimalRequests(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
14✔
723
        res := k8sv1.ResourceList{
14✔
724
                k8sv1.ResourceCPU:    resource.MustParse("10m"),
14✔
725
                k8sv1.ResourceMemory: resource.MustParse("1M"),
14✔
726
        }
14✔
727
        if cpuReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceCPU); cpuReq != nil {
14✔
728
                res[k8sv1.ResourceCPU] = *cpuReq
×
729
        }
×
730
        if memReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceMemory); memReq != nil {
14✔
731
                res[k8sv1.ResourceMemory] = *memReq
×
732
        }
×
733
        return res
14✔
734
}
735

736
func hotplugContainerResourceRequirementsForVMI(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
21✔
737
        return k8sv1.ResourceRequirements{
21✔
738
                Limits:   hotplugContainerLimits(config),
21✔
739
                Requests: hotplugContainerRequests(config),
21✔
740
        }
21✔
741
}
21✔
742

743
func hotplugContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
21✔
744
        cpuQuantity := resource.MustParse("100m")
21✔
745
        if cpu := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
26✔
746
                cpuQuantity = *cpu
5✔
747
        }
5✔
748
        memQuantity := resource.MustParse("80M")
21✔
749
        if mem := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
26✔
750
                memQuantity = *mem
5✔
751
        }
5✔
752
        return k8sv1.ResourceList{
21✔
753
                k8sv1.ResourceCPU:    cpuQuantity,
21✔
754
                k8sv1.ResourceMemory: memQuantity,
21✔
755
        }
21✔
756
}
757

758
func hotplugContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
21✔
759
        cpuQuantity := resource.MustParse("10m")
21✔
760
        if cpu := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
26✔
761
                cpuQuantity = *cpu
5✔
762
        }
5✔
763
        memQuantity := resource.MustParse("2M")
21✔
764
        if mem := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
26✔
765
                memQuantity = *mem
5✔
766
        }
5✔
767
        return k8sv1.ResourceList{
21✔
768
                k8sv1.ResourceCPU:    cpuQuantity,
21✔
769
                k8sv1.ResourceMemory: memQuantity,
21✔
770
        }
21✔
771
}
772

773
func vmExportContainerResourceRequirements(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
20✔
774
        return k8sv1.ResourceRequirements{
20✔
775
                Limits:   vmExportContainerLimits(config),
20✔
776
                Requests: vmExportContainerRequests(config),
20✔
777
        }
20✔
778
}
20✔
779

780
func vmExportContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
20✔
781
        cpuQuantity := resource.MustParse("1")
20✔
782
        if cpu := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
20✔
783
                cpuQuantity = *cpu
×
784
        }
×
785
        memQuantity := resource.MustParse("1024Mi")
20✔
786
        if mem := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
20✔
787
                memQuantity = *mem
×
788
        }
×
789
        return k8sv1.ResourceList{
20✔
790
                k8sv1.ResourceCPU:    cpuQuantity,
20✔
791
                k8sv1.ResourceMemory: memQuantity,
20✔
792
        }
20✔
793
}
794

795
func vmExportContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
20✔
796
        cpuQuantity := resource.MustParse("100m")
20✔
797
        if cpu := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
20✔
798
                cpuQuantity = *cpu
×
799
        }
×
800
        memQuantity := resource.MustParse("200Mi")
20✔
801
        if mem := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
20✔
802
                memQuantity = *mem
×
803
        }
×
804
        return k8sv1.ResourceList{
20✔
805
                k8sv1.ResourceCPU:    cpuQuantity,
20✔
806
                k8sv1.ResourceMemory: memQuantity,
20✔
807
        }
20✔
808
}
809

810
func multiplyMemory(mem resource.Quantity, multiplication float64) resource.Quantity {
8✔
811
        overheadAddition := float64(mem.ScaledValue(resource.Kilo)) * (multiplication - 1.0)
8✔
812
        additionalOverhead := resource.NewScaledQuantity(int64(overheadAddition), resource.Kilo)
8✔
813

8✔
814
        mem.Add(*additionalOverhead)
8✔
815
        return mem
8✔
816
}
8✔
817

818
func getMemoryLimitsRatio(namespace string, namespaceStore cache.Store) float64 {
7✔
819
        if namespaceStore == nil {
7✔
820
                return DefaultMemoryLimitOverheadRatio
×
821
        }
×
822

823
        obj, exists, err := namespaceStore.GetByKey(namespace)
7✔
824
        if err != nil {
7✔
825
                log.Log.Warningf("Error retrieving namespace from informer. Using the default memory limits ratio. %s", err.Error())
×
826
                return DefaultMemoryLimitOverheadRatio
×
827
        } else if !exists {
10✔
828
                log.Log.Warningf("namespace %s does not exist. Using the default memory limits ratio.", namespace)
3✔
829
                return DefaultMemoryLimitOverheadRatio
3✔
830
        }
3✔
831

832
        ns, ok := obj.(*k8sv1.Namespace)
4✔
833
        if !ok {
4✔
834
                log.Log.Errorf("couldn't cast object to Namespace: %+v", obj)
×
835
                return DefaultMemoryLimitOverheadRatio
×
836
        }
×
837

838
        value, ok := ns.GetLabels()[v1.AutoMemoryLimitsRatioLabel]
4✔
839
        if !ok {
4✔
840
                return DefaultMemoryLimitOverheadRatio
×
841
        }
×
842

843
        limitRatioValue, err := strconv.ParseFloat(value, 64)
4✔
844
        if err != nil || limitRatioValue < 1.0 {
5✔
845
                log.Log.Warningf("%s is an invalid value for %s label in namespace %s. Using the default one: %f", value, v1.AutoMemoryLimitsRatioLabel, namespace, DefaultMemoryLimitOverheadRatio)
1✔
846
                return DefaultMemoryLimitOverheadRatio
1✔
847
        }
1✔
848

849
        return limitRatioValue
3✔
850
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc