• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

kubevirt / kubevirt / 85cf11f1-0f3d-4ae3-b5ee-a2888c90054f

28 Feb 2026 03:28AM UTC coverage: 71.368% (+0.1%) from 71.243%
85cf11f1-0f3d-4ae3-b5ee-a2888c90054f

push

prow

web-flow
Merge pull request #16776 from 0xFelix/virt-template-deployment-tls

feat(virt-operator): Allow setting TLS options for virt-template

26 of 30 new or added lines in 2 files covered. (86.67%)

1992 existing lines in 28 files now uncovered.

75450 of 105719 relevant lines covered (71.37%)

536.01 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

89.56
/pkg/virt-controller/services/renderresources.go
1
package services
2

3
import (
4
        "fmt"
5
        "strconv"
6
        "strings"
7

8
        "k8s.io/client-go/tools/cache"
9

10
        "kubevirt.io/client-go/log"
11

12
        k8sv1 "k8s.io/api/core/v1"
13
        "k8s.io/apimachinery/pkg/api/resource"
14
        v1 "kubevirt.io/api/core/v1"
15

16
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
17
        "kubevirt.io/kubevirt/pkg/util"
18
        "kubevirt.io/kubevirt/pkg/util/hardware"
19
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
20
)
21

22
// ResourceRendererOption mutates a ResourceRenderer during construction,
// letting callers compose independent resource-calculation behaviors.
type ResourceRendererOption func(renderer *ResourceRenderer)

// ResourceRenderer accumulates the compute resources (requests, limits and
// DRA resource claims) that should be rendered onto a VMI's launcher pod.
type ResourceRenderer struct {
	vmLimits           k8sv1.ResourceList    // limits carried over from the VMI spec
	vmRequests         k8sv1.ResourceList    // requests carried over from the VMI spec
	calculatedLimits   k8sv1.ResourceList    // limits derived by renderer options
	calculatedRequests k8sv1.ResourceList    // requests derived by renderer options
	resourceClaims     []k8sv1.ResourceClaim // DRA resource claims referenced by the pod
}

// resourcePredicate reports whether a resource rule applies to a given VMI.
type resourcePredicate func(*v1.VirtualMachineInstance) bool

// VMIResourcePredicates pairs a VMI with the resource rules that may apply
// to it.
type VMIResourcePredicates struct {
	resourceRules []VMIResourceRule
	vmi           *v1.VirtualMachineInstance
}

// VMIResourceRule couples a predicate with the renderer option to apply when
// the predicate holds for the VMI.
type VMIResourceRule struct {
	predicate resourcePredicate
	option    ResourceRendererOption
}
43

44
func not(p resourcePredicate) resourcePredicate {
620✔
45
        return func(vmi *v1.VirtualMachineInstance) bool {
1,240✔
46
                return !p(vmi)
620✔
47
        }
620✔
48
}
49
func NewVMIResourceRule(p resourcePredicate, option ResourceRendererOption) VMIResourceRule {
4,340✔
50
        return VMIResourceRule{predicate: p, option: option}
4,340✔
51
}
4,340✔
52

53
// doesVMIRequireDedicatedCPU reports whether the VMI requests dedicated
// (pinned) CPUs; it simply adapts IsCPUDedicated to the resourcePredicate shape.
func doesVMIRequireDedicatedCPU(vmi *v1.VirtualMachineInstance) bool {
	return vmi.IsCPUDedicated()
}
56

57
func NewResourceRenderer(vmLimits k8sv1.ResourceList, vmRequests k8sv1.ResourceList, options ...ResourceRendererOption) *ResourceRenderer {
340✔
58
        limits := map[k8sv1.ResourceName]resource.Quantity{}
340✔
59
        requests := map[k8sv1.ResourceName]resource.Quantity{}
340✔
60
        copyResources(vmLimits, limits)
340✔
61
        copyResources(vmRequests, requests)
340✔
62

340✔
63
        resourceRenderer := &ResourceRenderer{
340✔
64
                vmLimits:           limits,
340✔
65
                vmRequests:         requests,
340✔
66
                calculatedLimits:   map[k8sv1.ResourceName]resource.Quantity{},
340✔
67
                calculatedRequests: map[k8sv1.ResourceName]resource.Quantity{},
340✔
68
                resourceClaims:     []k8sv1.ResourceClaim{},
340✔
69
        }
340✔
70

340✔
71
        for _, opt := range options {
1,782✔
72
                opt(resourceRenderer)
1,442✔
73
        }
1,442✔
74
        return resourceRenderer
340✔
75
}
76

77
func (rr *ResourceRenderer) Limits() k8sv1.ResourceList {
372✔
78
        podLimits := map[k8sv1.ResourceName]resource.Quantity{}
372✔
79
        copyResources(rr.calculatedLimits, podLimits)
372✔
80
        copyResources(rr.vmLimits, podLimits)
372✔
81
        return podLimits
372✔
82
}
372✔
83

84
func (rr *ResourceRenderer) Requests() k8sv1.ResourceList {
368✔
85
        podRequests := map[k8sv1.ResourceName]resource.Quantity{}
368✔
86
        copyResources(rr.calculatedRequests, podRequests)
368✔
87
        copyResources(rr.vmRequests, podRequests)
368✔
88
        return podRequests
368✔
89
}
368✔
90

91
// Claims returns the accumulated DRA resource claims for the pod.
func (rr *ResourceRenderer) Claims() []k8sv1.ResourceClaim {
	return rr.resourceClaims
}
94

95
func (rr *ResourceRenderer) ResourceRequirements() k8sv1.ResourceRequirements {
339✔
96
        return k8sv1.ResourceRequirements{
339✔
97
                Limits:   rr.Limits(),
339✔
98
                Requests: rr.Requests(),
339✔
99
                Claims:   rr.Claims(),
339✔
100
        }
339✔
101
}
339✔
102

103
func WithEphemeralStorageRequest() ResourceRendererOption {
311✔
104
        return func(renderer *ResourceRenderer) {
622✔
105
                // Add ephemeral storage request to container to be used by Kubevirt. This amount of ephemeral storage
311✔
106
                // should be added to the user's request.
311✔
107
                ephemeralStorageOverhead := resource.MustParse(ephemeralStorageOverheadSize)
311✔
108
                ephemeralStorageRequested := renderer.vmRequests[k8sv1.ResourceEphemeralStorage]
311✔
109
                ephemeralStorageRequested.Add(ephemeralStorageOverhead)
311✔
110
                renderer.vmRequests[k8sv1.ResourceEphemeralStorage] = ephemeralStorageRequested
311✔
111

311✔
112
                if ephemeralStorageLimit, ephemeralStorageLimitDefined := renderer.vmLimits[k8sv1.ResourceEphemeralStorage]; ephemeralStorageLimitDefined {
313✔
113
                        ephemeralStorageLimit.Add(ephemeralStorageOverhead)
2✔
114
                        renderer.vmLimits[k8sv1.ResourceEphemeralStorage] = ephemeralStorageLimit
2✔
115
                }
2✔
116
        }
117
}
118

119
// Helper function to extract IO thread CPU count from VMI
120
func getIOThreadsCount(vmi *v1.VirtualMachineInstance) int64 {
320✔
121
        if vmi == nil || vmi.Spec.Domain.IOThreads == nil ||
320✔
122
                vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount == nil {
634✔
123
                return 0
314✔
124
        }
314✔
125
        return int64(*vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount)
6✔
126
}
127

128
func WithoutDedicatedCPU(vmi *v1.VirtualMachineInstance, cpuAllocationRatio int, withCPULimits bool) ResourceRendererOption {
314✔
129
        return func(renderer *ResourceRenderer) {
621✔
130
                cpu := vmi.Spec.Domain.CPU
307✔
131
                vcpus := calcVCPUs(cpu)
307✔
132
                ioThreadCPUs := getIOThreadsCount(vmi) // Get IO thread count
307✔
133
                totalCPUs := vcpus + ioThreadCPUs      // Include IO threads
307✔
134
                if totalCPUs != 0 && cpuAllocationRatio > 0 {
602✔
135
                        val := float64(totalCPUs) / float64(cpuAllocationRatio)
295✔
136
                        vcpusStr := fmt.Sprintf("%g", val)
295✔
137
                        if val < 1 {
587✔
138
                                val *= 1000
292✔
139
                                vcpusStr = fmt.Sprintf("%gm", val)
292✔
140
                        }
292✔
141
                        renderer.calculatedRequests[k8sv1.ResourceCPU] = resource.MustParse(vcpusStr)
295✔
142
                        if withCPULimits {
299✔
143
                                renderer.calculatedLimits[k8sv1.ResourceCPU] = resource.MustParse(strconv.FormatInt(totalCPUs, 10))
4✔
144
                        }
4✔
145
                }
146
        }
147
}
148

149
func WithGPUsDevicePlugins(gpus []v1.GPU) ResourceRendererOption {
313✔
150
        return func(r *ResourceRenderer) {
317✔
151
                res := r.ResourceRequirements()
4✔
152
                for _, g := range gpus {
8✔
153
                        if g.DeviceName != "" && g.ClaimRequest == nil {
7✔
154
                                requestResource(&res, g.DeviceName)
3✔
155
                        }
3✔
156
                }
157
                copyResources(res.Limits, r.calculatedLimits)
4✔
158
                copyResources(res.Requests, r.calculatedRequests)
4✔
159
        }
160
}
161

162
func WithGPUsDRA(gpus []v1.GPU) ResourceRendererOption {
313✔
163
        return func(r *ResourceRenderer) {
316✔
164
                res := r.ResourceRequirements()
3✔
165
                for _, g := range gpus {
7✔
166
                        if g.DeviceName == "" && g.ClaimRequest != nil {
7✔
167
                                requestResourceClaims(&res, &k8sv1.ResourceClaim{
3✔
168
                                        Name:    *g.ClaimRequest.ClaimName,
3✔
169
                                        Request: *g.ClaimRequest.RequestName,
3✔
170
                                })
3✔
171
                        }
3✔
172
                }
173
                copyResources(res.Limits, r.calculatedLimits)
3✔
174
                copyResources(res.Requests, r.calculatedRequests)
3✔
175
                copyResourceClaims(&res, &r.resourceClaims)
3✔
176
        }
177
}
178

179
// WithHostDevicesDevicePlugins adds resource requests/limits only for HostDevices managed by device plugins.
180
func WithHostDevicesDevicePlugins(hostDevices []v1.HostDevice) ResourceRendererOption {
313✔
181
        return func(r *ResourceRenderer) {
317✔
182
                resources := r.ResourceRequirements()
4✔
183
                for _, hd := range hostDevices {
8✔
184
                        if hd.DeviceName != "" && hd.ClaimRequest == nil {
7✔
185
                                requestResource(&resources, hd.DeviceName)
3✔
186
                        }
3✔
187
                }
188
                copyResources(resources.Limits, r.calculatedLimits)
4✔
189
                copyResources(resources.Requests, r.calculatedRequests)
4✔
190
        }
191
}
192

193
// WithHostDevicesDRA adds ResourceClaims for HostDevices provisioned via DRA.
194
func WithHostDevicesDRA(hostDevices []v1.HostDevice) ResourceRendererOption {
312✔
195
        return func(r *ResourceRenderer) {
314✔
196
                resources := r.ResourceRequirements()
2✔
197
                for _, hd := range hostDevices {
5✔
198
                        if hd.DeviceName == "" && hd.ClaimRequest != nil && hd.ClaimRequest.ClaimName != nil && hd.ClaimRequest.RequestName != nil {
5✔
199
                                requestResourceClaims(&resources, &k8sv1.ResourceClaim{
2✔
200
                                        Name:    *hd.ClaimRequest.ClaimName,
2✔
201
                                        Request: *hd.ClaimRequest.RequestName,
2✔
202
                                })
2✔
203
                        }
2✔
204
                }
205
                copyResources(resources.Limits, r.calculatedLimits)
2✔
206
                copyResources(resources.Requests, r.calculatedRequests)
2✔
207
                copyResourceClaims(&resources, &r.resourceClaims)
2✔
208
        }
209
}
210

211
// WithHugePages renders hugepage requests/limits for the VMI and rewrites the
// plain memory request/limit to cover only the non-hugepage overhead.
//
// NOTE(review): vmMemory.Hugepages.PageSize is dereferenced on the first line,
// so the later `vmMemory != nil` checks only guard `vmMemory.Guest` — callers
// presumably never pass a nil vmMemory here; confirm before relying on it.
func WithHugePages(vmMemory *v1.Memory, memoryOverhead resource.Quantity) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		hugepageType := k8sv1.ResourceName(k8sv1.ResourceHugePagesPrefix + vmMemory.Hugepages.PageSize)
		hugepagesMemReq := renderer.vmRequests.Memory()

		// If requested, use the guest memory to allocate hugepages
		if vmMemory != nil && vmMemory.Guest != nil {
			requests := hugepagesMemReq.Value()
			guest := vmMemory.Guest.Value()
			if requests > guest {
				// Cap the hugepage allocation at the guest-visible memory.
				hugepagesMemReq = vmMemory.Guest
			}
		}
		renderer.calculatedRequests[hugepageType] = *hugepagesMemReq
		renderer.calculatedLimits[hugepageType] = *hugepagesMemReq

		reqMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
		limMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
		// In case the guest memory and the requested memory are different, add the difference
		// to the overhead
		if vmMemory != nil && vmMemory.Guest != nil {
			requests := renderer.vmRequests.Memory().Value()
			limits := renderer.vmLimits.Memory().Value()
			guest := vmMemory.Guest.Value()
			if requests > guest {
				reqMemDiff.Add(*renderer.vmRequests.Memory())
				reqMemDiff.Sub(*vmMemory.Guest)
			}
			if limits > guest {
				limMemDiff.Add(*renderer.vmLimits.Memory())
				limMemDiff.Sub(*vmMemory.Guest)
			}
		}
		// Set requested memory equals to overhead memory: guest RAM lives in
		// hugepages, so the plain memory request only needs to cover overhead
		// (plus any request/guest difference accumulated above).
		reqMemDiff.Add(memoryOverhead)
		renderer.vmRequests[k8sv1.ResourceMemory] = *reqMemDiff
		if _, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
			limMemDiff.Add(memoryOverhead)
			renderer.vmLimits[k8sv1.ResourceMemory] = *limMemDiff
		}
	}
}
253

254
func WithMemoryRequests(vmiSpecMemory *v1.Memory, overcommit int) ResourceRendererOption {
310✔
255
        return func(renderer *ResourceRenderer) {
463✔
256
                limit, hasLimit := renderer.vmLimits[k8sv1.ResourceMemory]
153✔
257
                request, hasRequest := renderer.vmRequests[k8sv1.ResourceMemory]
153✔
258
                if hasLimit && !limit.IsZero() && (!hasRequest || request.IsZero()) {
161✔
259
                        renderer.vmRequests[k8sv1.ResourceMemory] = limit
8✔
260
                }
8✔
261

262
                if _, exists := renderer.vmRequests[k8sv1.ResourceMemory]; exists {
161✔
263
                        return
8✔
264
                }
8✔
265

266
                var memory *resource.Quantity
145✔
267
                if vmiSpecMemory != nil && vmiSpecMemory.Guest != nil {
148✔
268
                        memory = vmiSpecMemory.Guest
3✔
269
                } else if vmiSpecMemory != nil && vmiSpecMemory.Hugepages != nil {
149✔
270
                        if hugepagesSize, err := resource.ParseQuantity(vmiSpecMemory.Hugepages.PageSize); err == nil {
8✔
271
                                memory = &hugepagesSize
4✔
272
                        }
4✔
273
                }
274

275
                if memory != nil && memory.Value() > 0 {
152✔
276
                        if overcommit == 100 {
11✔
277
                                renderer.vmRequests[k8sv1.ResourceMemory] = *memory
4✔
278
                        } else {
7✔
279
                                value := (memory.Value() * int64(100)) / int64(overcommit)
3✔
280
                                renderer.vmRequests[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memory.Format)
3✔
281
                        }
3✔
282
                }
283
        }
284
}
285

286
func WithMemoryOverhead(guestResourceSpec v1.ResourceRequirements, memoryOverhead resource.Quantity) ResourceRendererOption {
312✔
287
        return func(renderer *ResourceRenderer) {
602✔
288
                memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
290✔
289
                if !guestResourceSpec.OvercommitGuestOverhead {
577✔
290
                        memoryRequest.Add(memoryOverhead)
287✔
291
                }
287✔
292
                renderer.vmRequests[k8sv1.ResourceMemory] = memoryRequest
290✔
293

290✔
294
                if memoryLimit, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
307✔
295
                        memoryLimit.Add(memoryOverhead)
17✔
296
                        renderer.vmLimits[k8sv1.ResourceMemory] = memoryLimit
17✔
297
                }
17✔
298
        }
299
}
300

301
func WithAutoMemoryLimits(namespace string, namespaceStore cache.Store) ResourceRendererOption {
313✔
302
        return func(renderer *ResourceRenderer) {
320✔
303
                requestRatio := getMemoryLimitsRatio(namespace, namespaceStore)
7✔
304
                memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
7✔
305
                value := int64(float64(memoryRequest.Value()) * requestRatio)
7✔
306
                renderer.calculatedLimits[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memoryRequest.Format)
7✔
307
        }
7✔
308
}
309

310
// WithCPUPinning renders CPU and memory resources for a VMI with dedicated
// (pinned) CPUs: CPU limit and request are pinned to the exact vCPU count
// plus supplemental IO threads, optionally extended for an isolated emulator
// thread, and the memory limit is aligned with the request.
func WithCPUPinning(vmi *v1.VirtualMachineInstance, annotations map[string]string, additionalCPUs uint32) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		cpu := vmi.Spec.Domain.CPU
		vcpus := hardware.GetNumberOfVCPUs(cpu)
		ioThreadCPUs := getIOThreadsCount(vmi)
		if vcpus != 0 {
			totalCPUs := vcpus + ioThreadCPUs
			renderer.vmLimits[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI)
			renderer.vmRequests[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI) // Ensure requests match limits for dedicated CPUs
		} else {
			// No topology-derived vCPU count: only bump the CPU limit/request
			// entries that already exist by the IO thread count.
			ioThreadsCount := resource.NewQuantity(ioThreadCPUs, resource.BinarySI)
			if cpuLimit, ok := renderer.vmLimits[k8sv1.ResourceCPU]; ok {
				cpuLimit.Add(*ioThreadsCount)
				renderer.vmLimits[k8sv1.ResourceCPU] = cpuLimit
			}
			if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
				cpuRequest.Add(*ioThreadsCount)
				renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
			}
		}

		if cpu.IsolateEmulatorThread {
			// One extra CPU for the emulator thread; two when the even-parity
			// annotation is present and (limit + additionalCPUs) is even.
			emulatorThreadCPUs := resource.NewQuantity(1, resource.BinarySI)
			limits := renderer.vmLimits[k8sv1.ResourceCPU]
			_, emulatorThreadCompleteToEvenParityAnnotationExists := annotations[v1.EmulatorThreadCompleteToEvenParity]
			if emulatorThreadCompleteToEvenParityAnnotationExists &&
				(limits.Value()+int64(additionalCPUs))%2 == 0 {
				emulatorThreadCPUs = resource.NewQuantity(2, resource.BinarySI)
			}
			limits.Add(*emulatorThreadCPUs)
			renderer.vmLimits[k8sv1.ResourceCPU] = limits
			if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
				cpuRequest.Add(*emulatorThreadCPUs)
				renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
			}
		}

		// Align memory limits with requests for consistency
		if memRequest, ok := renderer.vmRequests[k8sv1.ResourceMemory]; ok {
			renderer.vmLimits[k8sv1.ResourceMemory] = memRequest
		}
	}
}
353

354
func WithNetworkResources(networkToResourceMap map[string]string) ResourceRendererOption {
312✔
355
        return func(renderer *ResourceRenderer) {
318✔
356
                resources := renderer.ResourceRequirements()
6✔
357
                for _, resourceName := range networkToResourceMap {
13✔
358
                        if resourceName != "" {
9✔
359
                                requestResource(&resources, resourceName)
2✔
360
                        }
2✔
361
                }
362
                copyResources(resources.Limits, renderer.calculatedLimits)
6✔
363
                copyResources(resources.Requests, renderer.calculatedRequests)
6✔
364
        }
365
}
366

367
func WithSEV() ResourceRendererOption {
311✔
368
        return func(renderer *ResourceRenderer) {
318✔
369
                resources := renderer.ResourceRequirements()
7✔
370
                requestResource(&resources, SevDevice)
7✔
371
                copyResources(resources.Limits, renderer.calculatedLimits)
7✔
372
                copyResources(resources.Requests, renderer.calculatedRequests)
7✔
373
        }
7✔
374
}
375

376
func WithTDX() ResourceRendererOption {
311✔
377
        return func(renderer *ResourceRenderer) {
314✔
378
                resources := renderer.ResourceRequirements()
3✔
379
                requestResource(&resources, TdxDevice)
3✔
380
                copyResources(resources.Limits, renderer.calculatedLimits)
3✔
381
                copyResources(resources.Requests, renderer.calculatedRequests)
3✔
382
        }
3✔
383
}
384

385
func WithPersistentReservation() ResourceRendererOption {
310✔
386
        return func(renderer *ResourceRenderer) {
310✔
UNCOV
387
                resources := renderer.ResourceRequirements()
×
UNCOV
388
                requestResource(&resources, PrDevice)
×
UNCOV
389
                copyResources(resources.Limits, renderer.calculatedLimits)
×
UNCOV
390
                copyResources(resources.Requests, renderer.calculatedRequests)
×
UNCOV
391
        }
×
392
}
393

394
func copyResources(srcResources, dstResources k8sv1.ResourceList) {
2,528✔
395
        for key, value := range srcResources {
5,430✔
396
                dstResources[key] = value
2,902✔
397
        }
2,902✔
398
}
399

400
func requestResourceClaims(resources *k8sv1.ResourceRequirements, claim *k8sv1.ResourceClaim) {
5✔
401
        if resources.Claims == nil {
5✔
UNCOV
402
                resources.Claims = []k8sv1.ResourceClaim{*claim}
×
UNCOV
403
                return
×
UNCOV
404
        }
×
405
        resources.Claims = append(resources.Claims, *claim)
5✔
406
}
407

408
func copyResourceClaims(resources *k8sv1.ResourceRequirements, claims *[]k8sv1.ResourceClaim) {
5✔
409
        existing := make(map[string]struct{})
5✔
410
        for _, c := range *claims {
6✔
411
                existing[c.Name] = struct{}{}
1✔
412
        }
1✔
413

414
        for _, value := range resources.Claims {
11✔
415
                if _, found := existing[value.Name]; found {
7✔
416
                        continue // skip duplicates by Name
1✔
417
                }
418
                *claims = append(*claims, value)
5✔
419
                existing[value.Name] = struct{}{}
5✔
420
        }
421
}
422

423
// Request a resource by name. This function bumps the number of resources,
// both its limits and requests attributes.
//
// If we were operating with a regular resource (CPU, memory, network
// bandwidth), we would need to take care of QoS. For example,
// https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
// explains that when Limits are set but Requests are not then scheduler
// assumes that Requests are the same as Limits for a particular resource.
//
// But this function is not called for this standard resources but for
// resources managed by device plugins. The device plugin design document says
// the following on the matter:
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/resource-management/device-plugin.md#end-user-story
//
// ```
// Devices can be selected using the same process as for OIRs in the pod spec.
// Devices have no impact on QOS. However, for the alpha, we expect the request
// to have limits == requests.
// ```
//
// Which suggests that, for resources managed by device plugins, 1) limits
// should be equal to requests; and 2) QoS rules do not apply.
//
// Hence we don't copy Limits value to Requests if the latter is missing.
func requestResource(resources *k8sv1.ResourceRequirements, resourceName string) {
	name := k8sv1.ResourceName(resourceName)
	bumpResources(resources.Limits, name)
	bumpResources(resources.Requests, name)
}
451

452
func bumpResources(resources k8sv1.ResourceList, name k8sv1.ResourceName) {
46✔
453
        unitQuantity := *resource.NewQuantity(1, resource.DecimalSI)
46✔
454

46✔
455
        val, ok := resources[name]
46✔
456
        if ok {
54✔
457
                val.Add(unitQuantity)
8✔
458
                resources[name] = val
8✔
459
        } else {
46✔
460
                resources[name] = unitQuantity
38✔
461
        }
38✔
462
}
463

464
func calcVCPUs(cpu *v1.CPU) int64 {
307✔
465
        if cpu != nil {
343✔
466
                return hardware.GetNumberOfVCPUs(cpu)
36✔
467
        }
36✔
468
        return int64(1)
271✔
469
}
470

471
// getRequiredResources collects the device resources the launcher pod needs:
// the tun and vhost-net devices for networking, the hypervisor device when
// emulation is not allowed, and the vsock device when auto-attached.
func getRequiredResources(vmi *v1.VirtualMachineInstance, hypervisorResource k8sv1.ResourceName, allowEmulation bool) k8sv1.ResourceList {
	res := k8sv1.ResourceList{}
	if netvmispec.RequiresTunDevice(vmi) {
		res[TunDevice] = resource.MustParse("1")
	}
	if netvmispec.RequiresVirtioNetDevice(vmi, allowEmulation) {
		// Note that about network interface, allowEmulation does not make
		// any difference on eventual Domain xml, but uniformly making
		// /dev/vhost-net unavailable and libvirt implicitly fallback
		// to use QEMU userland NIC emulation.
		res[VhostNetDevice] = resource.MustParse("1")
	}
	if !allowEmulation {
		// Without emulation the pod must be granted the hypervisor device
		// (e.g. /dev/kvm) via its device-plugin resource.
		res[hypervisorResource] = resource.MustParse("1")
	}
	if util.IsAutoAttachVSOCK(vmi) {
		res[VhostVsockDevice] = resource.MustParse("1")
	}
	return res
}
491

492
// WithVirtualizationResources copies the virtualization device resources
// (e.g. from getRequiredResources) into the pod's limits.
func WithVirtualizationResources(virtResources k8sv1.ResourceList) ResourceRendererOption {
	return func(renderer *ResourceRenderer) {
		copyResources(virtResources, renderer.vmLimits)
	}
}
497

498
func validatePermittedHostDevices(spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) error {
310✔
499
        errors := make([]string, 0)
310✔
500

310✔
501
        if hostDevs := config.GetPermittedHostDevices(); hostDevs != nil {
310✔
502
                // build a map of all permitted host devices
×
503
                supportedHostDevicesMap := make(map[string]bool)
×
UNCOV
504
                for _, dev := range hostDevs.PciHostDevices {
×
505
                        supportedHostDevicesMap[dev.ResourceName] = true
×
506
                }
×
507
                for _, dev := range hostDevs.MediatedDevices {
×
508
                        supportedHostDevicesMap[dev.ResourceName] = true
×
509
                }
×
UNCOV
510
                for _, dev := range hostDevs.USB {
×
UNCOV
511
                        supportedHostDevicesMap[dev.ResourceName] = true
×
512
                }
×
513
                //TODO @alayp: add proper validation for DRA GPUs in beta
514
                if !config.GPUsWithDRAGateEnabled() {
×
515
                        for _, hostDev := range spec.Domain.Devices.GPUs {
×
UNCOV
516
                                if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
×
UNCOV
517
                                        errors = append(errors, fmt.Sprintf("GPU %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
×
UNCOV
518
                                }
×
519
                        }
520
                }
521
                for _, hostDev := range spec.Domain.Devices.HostDevices {
×
UNCOV
522
                        if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
×
UNCOV
523
                                errors = append(errors, fmt.Sprintf("HostDevice %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
×
UNCOV
524
                        }
×
525
                }
526
        }
527

528
        if len(errors) != 0 {
310✔
UNCOV
529
                return fmt.Errorf("%s", strings.Join(errors, " "))
×
UNCOV
530
        }
×
531

532
        return nil
310✔
533
}
534

535
func sidecarResources(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
21✔
536
        resources := k8sv1.ResourceRequirements{
21✔
537
                Requests: k8sv1.ResourceList{},
21✔
538
                Limits:   k8sv1.ResourceList{},
21✔
539
        }
21✔
540
        if reqCpu := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceCPU); reqCpu != nil {
23✔
541
                resources.Requests[k8sv1.ResourceCPU] = *reqCpu
2✔
542
        }
2✔
543
        if reqMem := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceMemory); reqMem != nil {
23✔
544
                resources.Requests[k8sv1.ResourceMemory] = *reqMem
2✔
545
        }
2✔
546

547
        // add default cpu and memory limits to enable cpu pinning if requested
548
        // TODO(vladikr): make the hookSidecar express resources
549
        if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
25✔
550
                resources.Limits[k8sv1.ResourceCPU] = resource.MustParse("200m")
4✔
551
                if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
5✔
552
                        resources.Limits[k8sv1.ResourceCPU] = *limCpu
1✔
553
                }
1✔
554
                resources.Limits[k8sv1.ResourceMemory] = resource.MustParse("64M")
4✔
555
                if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
5✔
556
                        resources.Limits[k8sv1.ResourceMemory] = *limMem
1✔
557
                }
1✔
558
                resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
4✔
559
                resources.Requests[k8sv1.ResourceMemory] = resources.Limits[k8sv1.ResourceMemory]
4✔
560
        } else {
17✔
561
                if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
18✔
562
                        resources.Limits[k8sv1.ResourceCPU] = *limCpu
1✔
563
                }
1✔
564
                if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
18✔
565
                        resources.Limits[k8sv1.ResourceMemory] = *limMem
1✔
566
                }
1✔
567
        }
568
        return resources
21✔
569
}
570

571
func initContainerResourceRequirementsForVMI(vmi *v1.VirtualMachineInstance, containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
15✔
572
        if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
16✔
573
                return k8sv1.ResourceRequirements{
1✔
574
                        Limits:   initContainerDedicatedCPURequiredResources(containerType, config),
1✔
575
                        Requests: initContainerDedicatedCPURequiredResources(containerType, config),
1✔
576
                }
1✔
577
        } else {
15✔
578
                return k8sv1.ResourceRequirements{
14✔
579
                        Limits:   initContainerMinimalLimits(containerType, config),
14✔
580
                        Requests: initContainerMinimalRequests(containerType, config),
14✔
581
                }
14✔
582
        }
14✔
583
}
584

585
func initContainerDedicatedCPURequiredResources(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
2✔
586
        res := k8sv1.ResourceList{
2✔
587
                k8sv1.ResourceCPU:    resource.MustParse("10m"),
2✔
588
                k8sv1.ResourceMemory: resource.MustParse("40M"),
2✔
589
        }
2✔
590
        if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
2✔
UNCOV
591
                res[k8sv1.ResourceCPU] = *cpuLim
×
UNCOV
592
        }
×
593
        if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
2✔
UNCOV
594
                res[k8sv1.ResourceMemory] = *memLim
×
UNCOV
595
        }
×
596
        return res
2✔
597
}
598

599
func initContainerMinimalLimits(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
14✔
600
        res := k8sv1.ResourceList{
14✔
601
                k8sv1.ResourceCPU:    resource.MustParse("100m"),
14✔
602
                k8sv1.ResourceMemory: resource.MustParse("40M"),
14✔
603
        }
14✔
604
        if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
14✔
UNCOV
605
                res[k8sv1.ResourceCPU] = *cpuLim
×
UNCOV
606
        }
×
607
        if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
14✔
UNCOV
608
                res[k8sv1.ResourceMemory] = *memLim
×
UNCOV
609
        }
×
610
        return res
14✔
611
}
612

613
func initContainerMinimalRequests(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
14✔
614
        res := k8sv1.ResourceList{
14✔
615
                k8sv1.ResourceCPU:    resource.MustParse("10m"),
14✔
616
                k8sv1.ResourceMemory: resource.MustParse("1M"),
14✔
617
        }
14✔
618
        if cpuReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceCPU); cpuReq != nil {
14✔
UNCOV
619
                res[k8sv1.ResourceCPU] = *cpuReq
×
UNCOV
620
        }
×
621
        if memReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceMemory); memReq != nil {
14✔
UNCOV
622
                res[k8sv1.ResourceMemory] = *memReq
×
UNCOV
623
        }
×
624
        return res
14✔
625
}
626

627
func hotplugContainerResourceRequirementsForVMI(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
21✔
628
        return k8sv1.ResourceRequirements{
21✔
629
                Limits:   hotplugContainerLimits(config),
21✔
630
                Requests: hotplugContainerRequests(config),
21✔
631
        }
21✔
632
}
21✔
633

634
func hotplugContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
21✔
635
        cpuQuantity := resource.MustParse("100m")
21✔
636
        if cpu := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
26✔
637
                cpuQuantity = *cpu
5✔
638
        }
5✔
639
        memQuantity := resource.MustParse("80M")
21✔
640
        if mem := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
26✔
641
                memQuantity = *mem
5✔
642
        }
5✔
643
        return k8sv1.ResourceList{
21✔
644
                k8sv1.ResourceCPU:    cpuQuantity,
21✔
645
                k8sv1.ResourceMemory: memQuantity,
21✔
646
        }
21✔
647
}
648

649
func hotplugContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
21✔
650
        cpuQuantity := resource.MustParse("10m")
21✔
651
        if cpu := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
26✔
652
                cpuQuantity = *cpu
5✔
653
        }
5✔
654
        memQuantity := resource.MustParse("2M")
21✔
655
        if mem := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
26✔
656
                memQuantity = *mem
5✔
657
        }
5✔
658
        return k8sv1.ResourceList{
21✔
659
                k8sv1.ResourceCPU:    cpuQuantity,
21✔
660
                k8sv1.ResourceMemory: memQuantity,
21✔
661
        }
21✔
662
}
663

664
func hotplugPodTolerations() []k8sv1.Toleration {
9✔
665
        return []k8sv1.Toleration{
9✔
666
                {
9✔
667
                        Key:      k8sv1.TaintNodeUnschedulable,
9✔
668
                        Operator: k8sv1.TolerationOpExists,
9✔
669
                        Effect:   k8sv1.TaintEffectNoSchedule,
9✔
670
                },
9✔
671
                {
9✔
672
                        Key:      k8sv1.TaintNodeNetworkUnavailable,
9✔
673
                        Operator: k8sv1.TolerationOpExists,
9✔
674
                        Effect:   k8sv1.TaintEffectNoSchedule,
9✔
675
                },
9✔
676
                {
9✔
677
                        Key:      k8sv1.TaintNodeDiskPressure,
9✔
678
                        Operator: k8sv1.TolerationOpExists,
9✔
679
                        Effect:   k8sv1.TaintEffectNoSchedule,
9✔
680
                },
9✔
681
                {
9✔
682
                        Key:      k8sv1.TaintNodeMemoryPressure,
9✔
683
                        Operator: k8sv1.TolerationOpExists,
9✔
684
                        Effect:   k8sv1.TaintEffectNoSchedule,
9✔
685
                },
9✔
686
                {
9✔
687
                        Key:      k8sv1.TaintNodePIDPressure,
9✔
688
                        Operator: k8sv1.TolerationOpExists,
9✔
689
                        Effect:   k8sv1.TaintEffectNoSchedule,
9✔
690
                },
9✔
691
        }
9✔
692
}
9✔
693

694
func vmExportContainerResourceRequirements(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
20✔
695
        return k8sv1.ResourceRequirements{
20✔
696
                Limits:   vmExportContainerLimits(config),
20✔
697
                Requests: vmExportContainerRequests(config),
20✔
698
        }
20✔
699
}
20✔
700

701
func vmExportContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
20✔
702
        cpuQuantity := resource.MustParse("1")
20✔
703
        if cpu := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
20✔
UNCOV
704
                cpuQuantity = *cpu
×
UNCOV
705
        }
×
706
        memQuantity := resource.MustParse("1024Mi")
20✔
707
        if mem := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
20✔
UNCOV
708
                memQuantity = *mem
×
UNCOV
709
        }
×
710
        return k8sv1.ResourceList{
20✔
711
                k8sv1.ResourceCPU:    cpuQuantity,
20✔
712
                k8sv1.ResourceMemory: memQuantity,
20✔
713
        }
20✔
714
}
715

716
func vmExportContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
20✔
717
        cpuQuantity := resource.MustParse("100m")
20✔
718
        if cpu := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
20✔
UNCOV
719
                cpuQuantity = *cpu
×
UNCOV
720
        }
×
721
        memQuantity := resource.MustParse("200Mi")
20✔
722
        if mem := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
20✔
UNCOV
723
                memQuantity = *mem
×
724
        }
×
725
        return k8sv1.ResourceList{
20✔
726
                k8sv1.ResourceCPU:    cpuQuantity,
20✔
727
                k8sv1.ResourceMemory: memQuantity,
20✔
728
        }
20✔
729
}
730

731
func getMemoryLimitsRatio(namespace string, namespaceStore cache.Store) float64 {
7✔
732
        if namespaceStore == nil {
7✔
UNCOV
733
                return DefaultMemoryLimitOverheadRatio
×
UNCOV
734
        }
×
735

736
        obj, exists, err := namespaceStore.GetByKey(namespace)
7✔
737
        if err != nil {
7✔
738
                log.Log.Warningf("Error retrieving namespace from informer. Using the default memory limits ratio. %s", err.Error())
×
739
                return DefaultMemoryLimitOverheadRatio
×
740
        } else if !exists {
10✔
741
                log.Log.Warningf("namespace %s does not exist. Using the default memory limits ratio.", namespace)
3✔
742
                return DefaultMemoryLimitOverheadRatio
3✔
743
        }
3✔
744

745
        ns, ok := obj.(*k8sv1.Namespace)
4✔
746
        if !ok {
4✔
UNCOV
747
                log.Log.Errorf("couldn't cast object to Namespace: %+v", obj)
×
UNCOV
748
                return DefaultMemoryLimitOverheadRatio
×
UNCOV
749
        }
×
750

751
        value, ok := ns.GetLabels()[v1.AutoMemoryLimitsRatioLabel]
4✔
752
        if !ok {
4✔
UNCOV
753
                return DefaultMemoryLimitOverheadRatio
×
UNCOV
754
        }
×
755

756
        limitRatioValue, err := strconv.ParseFloat(value, 64)
4✔
757
        if err != nil || limitRatioValue < 1.0 {
5✔
758
                log.Log.Warningf("%s is an invalid value for %s label in namespace %s. Using the default one: %f", value, v1.AutoMemoryLimitsRatioLabel, namespace, DefaultMemoryLimitOverheadRatio)
1✔
759
                return DefaultMemoryLimitOverheadRatio
1✔
760
        }
1✔
761

762
        return limitRatioValue
3✔
763
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc