kubevirt / kubevirt · build d81f3799-2d0d-4f91-b833-b0d10a101bab

16 Nov 2025 03:02PM UTC · coverage: 70.444% (+0.05%) from 70.392%
push · prow · web-flow

Merge pull request #15922 from ShellyKa13/utility-volumes
VEP 90: Add new Utility volumes type in VMI spec

384 of 440 new or added lines in 14 files covered (87.27%).
23 existing lines in 5 files now uncovered.
70115 of 99533 relevant lines covered (70.44%).
434.66 hits per line.
Source File: /pkg/controller/controller.go · 75.14% covered

/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright The KubeVirt Authors.
 *
 */

package controller

import (
	"context"
	"fmt"
	"runtime/debug"
	"strings"
	"time"

	k8sv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"

	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/client-go/kubecli"
	"kubevirt.io/client-go/log"
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"

	"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)

const (
	// BurstReplicas is the maximum number of consecutive requests for CRUD operations on resources by controllers,
	// to avoid unintentional DoS
	BurstReplicas uint = 250
)

// Reasons for vmi events
const (
	// FailedCreatePodReason is added in an event and in a vmi controller condition
	// when a pod for a vmi controller failed to be created.
	FailedCreatePodReason = "FailedCreate"
	// SuccessfulCreatePodReason is added in an event when a pod for a vmi controller
	// is successfully created.
	SuccessfulCreatePodReason = "SuccessfulCreate"
	// FailedDeletePodReason is added in an event and in a vmi controller condition
	// when a pod for a vmi controller failed to be deleted.
	FailedDeletePodReason = "FailedDelete"
	// SuccessfulDeletePodReason is added in an event when a pod for a vmi controller
	// is successfully deleted.
	SuccessfulDeletePodReason = "SuccessfulDelete"
	// FailedHandOverPodReason is added in an event and in a vmi controller condition
	// when transferring the pod ownership from the controller to virt-handler fails.
	FailedHandOverPodReason = "FailedHandOver"
	// FailedBackendStorageCreateReason is added when the creation of the backend storage PVC fails.
	FailedBackendStorageCreateReason = "FailedBackendStorageCreate"
	// FailedBackendStorageProbeReason is added when probing the backend storage PVC fails.
	FailedBackendStorageProbeReason = "FailedBackendStorageProbe"
	// BackendStorageNotReadyReason is added when the backend storage PVC is pending.
	BackendStorageNotReadyReason = "BackendStorageNotReady"
	// SuccessfulHandOverPodReason is added in an event
	// when the pod ownership transfer from the controller to virt-handler succeeds.
	SuccessfulHandOverPodReason = "SuccessfulHandOver"
	// FailedDataVolumeImportReason is added in an event when a dynamically generated
	// dataVolume reaches the failed status phase.
	FailedDataVolumeImportReason = "FailedDataVolumeImport"
	// FailedGuaranteePodResourcesReason is added in an event and in a vmi controller condition
	// when a pod has been created without Guaranteed resources.
	FailedGuaranteePodResourcesReason = "FailedGuaranteeResources"
	// FailedGatherhingClusterTopologyHints is added if the cluster topology hints can't be collected for a VMI by virt-controller
	FailedGatherhingClusterTopologyHints = "FailedGatherhingClusterTopologyHints"
	// FailedPvcNotFoundReason is added in an event
	// when a PVC for a volume was not found.
	FailedPvcNotFoundReason = "FailedPvcNotFound"
	// SuccessfulMigrationReason is added when a migration attempt completes successfully
	SuccessfulMigrationReason = "SuccessfulMigration"
	// FailedMigrationReason is added when a migration attempt fails
	FailedMigrationReason = "FailedMigration"
	// SuccessfulAbortMigrationReason is added when an attempt to abort migration completes successfully
	SuccessfulAbortMigrationReason = "SuccessfulAbortMigration"
	// MigrationTargetPodUnschedulable is added when a migration target pod enters the Unschedulable phase
	MigrationTargetPodUnschedulable = "migrationTargetPodUnschedulable"
	// FailedAbortMigrationReason is added when an attempt to abort migration fails
	FailedAbortMigrationReason = "FailedAbortMigration"
	// UtilityVolumeMigrationPendingReason is added when a migration is pending due to utility volumes
	UtilityVolumeMigrationPendingReason = "UtilityVolumeMigrationPending"
	// MissingAttachmentPodReason is set when we have a hotplugged volume, but the attachment pod is missing
	MissingAttachmentPodReason = "MissingAttachmentPod"
	// PVCNotReadyReason is set when the PVC is not ready to be hot plugged.
	PVCNotReadyReason = "PVCNotReady"
	// FailedHotplugSyncReason is set when a hotplug specific failure occurs during sync
	FailedHotplugSyncReason = "FailedHotplugSync"
	// ErrImagePullReason is set when an error has occurred while pulling an image for a containerDisk VM volume.
	ErrImagePullReason = "ErrImagePull"
	// ImagePullBackOffReason is set when an error has occurred while pulling an image for a containerDisk VM volume,
	// and the kubelet is backing off before retrying.
	ImagePullBackOffReason = "ImagePullBackOff"
	// NoSuitableNodesForHostModelMigration is set when a VMI with host-model CPU mode tries to migrate but no node
	// is suitable for migration (since CPU model / required features are not supported)
	NoSuitableNodesForHostModelMigration = "NoSuitableNodesForHostModelMigration"
	// FailedPodPatchReason is set when a pod patch error occurs during sync
	FailedPodPatchReason = "FailedPodPatch"
	// MigrationBackoffReason is set when an error has occurred while migrating
	// and virt-controller is backing off before retrying.
	MigrationBackoffReason = "MigrationBackoff"
)

// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, field selector and label selector.
func NewListWatchFromClient(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, labelSelector labels.Selector) *cache.ListWatch {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		options.FieldSelector = fieldSelector.String()
		options.LabelSelector = labelSelector.String()
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Do(context.Background()).
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		options.FieldSelector = fieldSelector.String()
		options.LabelSelector = labelSelector.String()
		options.Watch = true
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Watch(context.Background())
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

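// Illustrative sketch (not part of this file): a ListWatch built this way can back a
// shared informer. The RestClient() accessor, resource name and selectors below are
// assumptions for the example, not values taken from this package.
//
//	lw := NewListWatchFromClient(
//		virtClient.RestClient(), // assumed: a REST client for the kubevirt.io group
//		"virtualmachineinstances",
//		k8sv1.NamespaceAll,
//		fields.Everything(),
//		labels.Everything(),
//	)
//	informer := cache.NewSharedIndexInformer(lw, &v1.VirtualMachineInstance{}, 0, cache.Indexers{})
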
func HandlePanic() {
	if r := recover(); r != nil {
		// Ignoring error - there is nothing to do if logging fails
		_ = log.Log.Level(log.FATAL).Log("stacktrace", debug.Stack(), "msg", r)
	}
}

×
156
        return cache.ResourceEventHandlerFuncs{
×
157
                AddFunc: func(obj interface{}) {
×
158
                        key, err := KeyFunc(obj)
×
159
                        if err == nil {
×
160
                                queue.Add(key)
×
161
                        }
×
162
                },
163
                UpdateFunc: func(old interface{}, new interface{}) {
×
164
                        key, err := KeyFunc(new)
×
165
                        if err == nil {
×
166
                                queue.Add(key)
×
167
                        }
×
168
                },
169
                DeleteFunc: func(obj interface{}) {
×
170
                        key, err := KeyFunc(obj)
×
171
                        if err == nil {
×
172
                                queue.Add(key)
×
173
                        }
×
174
                },
175
        }
176
}
177

178
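// Illustrative sketch (not part of this file): wiring the handler funcs to a shared
// informer (such as the one sketched after NewListWatchFromClient above) so that
// every add/update/delete enqueues the object key; queue and informer are assumed
// example variables.
//
//	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
//	informer.AddEventHandler(NewResourceEventHandlerFuncsForWorkqueue(queue))
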
func MigrationKey(migration *v1.VirtualMachineInstanceMigration) string {
	return fmt.Sprintf("%v/%v", migration.ObjectMeta.Namespace, migration.ObjectMeta.Name)
}

func VirtualMachineInstanceKey(vmi *v1.VirtualMachineInstance) string {
	return fmt.Sprintf("%v/%v", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name)
}

func VirtualMachineKey(vm *v1.VirtualMachine) string {
	return fmt.Sprintf("%v/%v", vm.ObjectMeta.Namespace, vm.ObjectMeta.Name)
}

func PodKey(pod *k8sv1.Pod) string {
	return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
}

func DataVolumeKey(dataVolume *cdiv1.DataVolume) string {
	return fmt.Sprintf("%v/%v", dataVolume.Namespace, dataVolume.Name)
}

func VirtualMachineInstanceKeys(vmis []*v1.VirtualMachineInstance) []string {
	keys := []string{}
	for _, vmi := range vmis {
		keys = append(keys, VirtualMachineInstanceKey(vmi))
	}
	return keys
}

func VirtualMachineKeys(vms []*v1.VirtualMachine) []string {
	keys := []string{}
	for _, vm := range vms {
		keys = append(keys, VirtualMachineKey(vm))
	}
	return keys
}

func HasFinalizer(object metav1.Object, finalizer string) bool {
	for _, f := range object.GetFinalizers() {
		if f == finalizer {
			return true
		}
	}
	return false
}

func RemoveFinalizer(object metav1.Object, finalizer string) {
	filtered := []string{}
	for _, f := range object.GetFinalizers() {
		if f != finalizer {
			filtered = append(filtered, f)
		}
	}
	object.SetFinalizers(filtered)
}

func AddFinalizer(object metav1.Object, finalizer string) {
	if HasFinalizer(object, finalizer) {
		return
	}
	object.SetFinalizers(append(object.GetFinalizers(), finalizer))
}

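// Illustrative sketch (not part of this file): the finalizer helpers are idempotent,
// so a reconcile loop can call them unconditionally before persisting the object.
// The finalizer string and cleanupDone flag are hypothetical example values.
//
//	const myFinalizer = "example.kubevirt.io/cleanup" // assumed name
//	AddFinalizer(vmi, myFinalizer)                    // no-op if already present
//	if cleanupDone {
//		RemoveFinalizer(vmi, myFinalizer)
//	}
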
func ObservedLatestApiVersionAnnotation(object metav1.Object) bool {
	annotations := object.GetAnnotations()
	if annotations == nil {
		return false
	}

	version, ok := annotations[v1.ControllerAPILatestVersionObservedAnnotation]
	if !ok || version != v1.ApiLatestVersion {
		return false
	}
	return true
}

func SetLatestApiVersionAnnotation(object metav1.Object) {
	annotations := object.GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}

	annotations[v1.ControllerAPILatestVersionObservedAnnotation] = v1.ApiLatestVersion
	annotations[v1.ControllerAPIStorageVersionObservedAnnotation] = v1.ApiStorageVersion
	object.SetAnnotations(annotations)
}

func ApplyVolumeRequestOnVMISpec(vmiSpec *v1.VirtualMachineInstanceSpec, request *v1.VirtualMachineVolumeRequest) *v1.VirtualMachineInstanceSpec {
65✔
265
        if request.AddVolumeOptions != nil {
108✔
266
                alreadyAdded := false
43✔
267
                for _, volume := range vmiSpec.Volumes {
150✔
268
                        if volume.Name == request.AddVolumeOptions.Name {
114✔
269
                                alreadyAdded = true
7✔
270
                                break
7✔
271
                        }
272
                }
273

274
                if !alreadyAdded {
79✔
275
                        newVolume := v1.Volume{
36✔
276
                                Name: request.AddVolumeOptions.Name,
36✔
277
                        }
36✔
278

36✔
279
                        if request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim != nil {
53✔
280
                                pvcSource := request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim.DeepCopy()
17✔
281
                                pvcSource.Hotpluggable = true
17✔
282
                                newVolume.VolumeSource.PersistentVolumeClaim = pvcSource
17✔
283
                        } else if request.AddVolumeOptions.VolumeSource.DataVolume != nil {
37✔
284
                                dvSource := request.AddVolumeOptions.VolumeSource.DataVolume.DeepCopy()
1✔
285
                                dvSource.Hotpluggable = true
1✔
286
                                newVolume.VolumeSource.DataVolume = dvSource
1✔
287
                        }
1✔
288

289
                        vmiSpec.Volumes = append(vmiSpec.Volumes, newVolume)
36✔
290

36✔
291
                        if request.AddVolumeOptions.Disk != nil {
72✔
292
                                newDisk := request.AddVolumeOptions.Disk.DeepCopy()
36✔
293
                                newDisk.Name = request.AddVolumeOptions.Name
36✔
294

36✔
295
                                vmiSpec.Domain.Devices.Disks = append(vmiSpec.Domain.Devices.Disks, *newDisk)
36✔
296
                        }
36✔
297
                }
298

299
        } else if request.RemoveVolumeOptions != nil {
44✔
300

22✔
301
                newVolumesList := []v1.Volume{}
22✔
302
                newDisksList := []v1.Disk{}
22✔
303

22✔
304
                for _, volume := range vmiSpec.Volumes {
52✔
305
                        if volume.Name != request.RemoveVolumeOptions.Name {
40✔
306
                                newVolumesList = append(newVolumesList, volume)
10✔
307
                        }
10✔
308
                }
309

310
                for _, disk := range vmiSpec.Domain.Devices.Disks {
52✔
311
                        if disk.Name != request.RemoveVolumeOptions.Name {
40✔
312
                                newDisksList = append(newDisksList, disk)
10✔
313
                        }
10✔
314
                }
315

316
                vmiSpec.Volumes = newVolumesList
22✔
317
                vmiSpec.Domain.Devices.Disks = newDisksList
22✔
318
        }
319

320
        return vmiSpec
65✔
321
}
322

323
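// Illustrative sketch (not part of this file): a hotplug add request as the VM
// controller might pass it in; the volume/disk name and DataVolume name are assumed
// example values, and the field layout is only sketched from the code above.
//
//	req := &v1.VirtualMachineVolumeRequest{
//		AddVolumeOptions: &v1.AddVolumeOptions{
//			Name: "hotplug-data",
//			Disk: &v1.Disk{Name: "hotplug-data"},
//			VolumeSource: &v1.HotplugVolumeSource{
//				DataVolume: &v1.DataVolumeSource{Name: "hotplug-data-dv"},
//			},
//		},
//	}
//	vmiSpec = ApplyVolumeRequestOnVMISpec(vmiSpec, req)
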
func CurrentVMIPod(vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) (*k8sv1.Pod, error) {

	// current pod is the most recent pod created on the current VMI node
	// OR the most recent pod created if no VMI node is set.

	// Get all pods from the namespace
	objs, err := podIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
	if err != nil {
		return nil, err
	}
	pods := []*k8sv1.Pod{}
	for _, obj := range objs {
		pod := obj.(*k8sv1.Pod)
		pods = append(pods, pod)
	}

	var curPod *k8sv1.Pod = nil
	for _, pod := range pods {
		if !metav1.IsControlledBy(pod, vmi) {
			continue
		}

		if vmi.Status.NodeName != "" &&
			vmi.Status.NodeName != pod.Spec.NodeName {
			// This pod isn't scheduled to the current node.
			// This can occur during the initial migration phases when
			// a new target node is being prepared for the VMI.
			continue
		}

		if curPod == nil || curPod.CreationTimestamp.Before(&pod.CreationTimestamp) {
			curPod = pod
		}
	}

	return curPod, nil
}

func VMIActivePodsCount(vmi *v1.VirtualMachineInstance, vmiPodIndexer cache.Indexer) int {

	objs, err := vmiPodIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
	if err != nil {
		return 0
	}

	running := 0
	for _, obj := range objs {
		pod := obj.(*k8sv1.Pod)

		if pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed {
			// not interested in terminated pods
			continue
		} else if !metav1.IsControlledBy(pod, vmi) {
			// not interested in pods not associated with the vmi
			continue
		}
		running++
	}

	return running
}

func GeneratePatchBytes(ops []string) []byte {
	return []byte(fmt.Sprintf("[%s]", strings.Join(ops, ", ")))
}

func SetVMIPhaseTransitionTimestamp(oldStatus *v1.VirtualMachineInstanceStatus, newStatus *v1.VirtualMachineInstanceStatus) {
	if oldStatus.Phase != newStatus.Phase {
		for _, transitionTimeStamp := range newStatus.PhaseTransitionTimestamps {
			if transitionTimeStamp.Phase == newStatus.Phase {
				// already exists.
				return
			}
		}

		now := metav1.NewTime(time.Now())
		newStatus.PhaseTransitionTimestamps = append(newStatus.PhaseTransitionTimestamps, v1.VirtualMachineInstancePhaseTransitionTimestamp{
			Phase:                    newStatus.Phase,
			PhaseTransitionTimestamp: now,
		})
	}
}

func SetVMIMigrationPhaseTransitionTimestamp(oldVMIMigration *v1.VirtualMachineInstanceMigration, newVMIMigration *v1.VirtualMachineInstanceMigration) {
	if oldVMIMigration.Status.Phase != newVMIMigration.Status.Phase {
		for _, transitionTimeStamp := range newVMIMigration.Status.PhaseTransitionTimestamps {
			if transitionTimeStamp.Phase == newVMIMigration.Status.Phase {
				// already exists.
				return
			}
		}

		now := metav1.NewTime(time.Now())
		newVMIMigration.Status.PhaseTransitionTimestamps = append(newVMIMigration.Status.PhaseTransitionTimestamps, v1.VirtualMachineInstanceMigrationPhaseTransitionTimestamp{
			Phase:                    newVMIMigration.Status.Phase,
			PhaseTransitionTimestamp: now,
		})
	}
}

func SetSourcePod(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) {
	if migration.Status.Phase != v1.MigrationPending {
		return
	}
	sourcePod, err := CurrentVMIPod(vmi, podIndexer)
	if err != nil {
		log.Log.Object(vmi).Reason(err).Warning("migration source pod not found")
	}
	if sourcePod != nil {
		if migration.Status.MigrationState == nil {
			migration.Status.MigrationState = &v1.VirtualMachineInstanceMigrationState{}
		}
		migration.Status.MigrationState.SourcePod = sourcePod.Name
	}
}

func VMIHasHotplugVolumes(vmi *v1.VirtualMachineInstance) bool {
	for _, volumeStatus := range vmi.Status.VolumeStatus {
		if volumeStatus.HotplugVolume != nil {
			return true
		}
	}
	for _, volume := range vmi.Spec.Volumes {
		if volume.DataVolume != nil && volume.DataVolume.Hotpluggable {
			return true
		}
		if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable {
			return true
		}
	}
	return false
}

func VMIHasUtilityVolumes(vmi *v1.VirtualMachineInstance) bool {
	return len(vmi.Spec.UtilityVolumes) > 0
}

func vmiHasCondition(vmi *v1.VirtualMachineInstance, conditionType v1.VirtualMachineInstanceConditionType) bool {
	vmiConditionManager := NewVirtualMachineInstanceConditionManager()
	return vmiConditionManager.HasCondition(vmi, conditionType)
}

func VMIHasHotplugCPU(vmi *v1.VirtualMachineInstance) bool {
	return vmiHasCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
}

func VMIHasHotplugMemory(vmi *v1.VirtualMachineInstance) bool {
	return vmiHasCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
}

func AttachmentPods(ownerPod *k8sv1.Pod, podIndexer cache.Indexer) ([]*k8sv1.Pod, error) {
	objs, err := podIndexer.ByIndex(cache.NamespaceIndex, ownerPod.Namespace)
	if err != nil {
		return nil, err
	}
	attachmentPods := []*k8sv1.Pod{}
	for _, obj := range objs {
		pod := obj.(*k8sv1.Pod)
		if !metav1.IsControlledBy(pod, ownerPod) {
			continue
		}
		attachmentPods = append(attachmentPods, pod)
	}
	return attachmentPods, nil
}

// IsPodReady treats the pod as ready to be handed over to virt-handler, as soon as all containers except
// the compute container are ready.
func IsPodReady(pod *k8sv1.Pod) bool {
	if IsPodDownOrGoingDown(pod) {
		return false
	}

	for _, containerStatus := range pod.Status.ContainerStatuses {
		// The compute container potentially holds a readiness probe for the VMI. Therefore
		// don't wait for the compute container to become ready (the VMI later on will trigger the change to ready)
		// and only check that the container started
		if containerStatus.Name == "compute" {
			if containerStatus.State.Running == nil {
				return false
			}
		} else if containerStatus.Name == "istio-proxy" {
			// When using istio the istio-proxy container will not be ready
			// until there is a service pointing to this pod.
			// We need to start the VM anyway
			if containerStatus.State.Running == nil {
				return false
			}

		} else if containerStatus.Ready == false {
			return false
		}
	}

	return pod.Status.Phase == k8sv1.PodRunning
}

func IsPodDownOrGoingDown(pod *k8sv1.Pod) bool {
	return PodIsDown(pod) || isComputeContainerDown(pod) || pod.DeletionTimestamp != nil
}

func IsPodFailedOrGoingDown(pod *k8sv1.Pod) bool {
	return isPodFailed(pod) || isComputeContainerFailed(pod) || pod.DeletionTimestamp != nil
}

func isComputeContainerDown(pod *k8sv1.Pod) bool {
	for _, containerStatus := range pod.Status.ContainerStatuses {
		if containerStatus.Name == "compute" {
			return containerStatus.State.Terminated != nil
		}
	}
	return false
}

func isComputeContainerFailed(pod *k8sv1.Pod) bool {
	for _, containerStatus := range pod.Status.ContainerStatuses {
		if containerStatus.Name == "compute" {
			return containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode != 0
		}
	}
	return false
}

func PodIsDown(pod *k8sv1.Pod) bool {
	return pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed
}

func isPodFailed(pod *k8sv1.Pod) bool {
	return pod.Status.Phase == k8sv1.PodFailed
}

func PodExists(pod *k8sv1.Pod) bool {
	return pod != nil
}

func GetHotplugVolumes(vmi *v1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod) []*v1.Volume {
	hotplugVolumes := make([]*v1.Volume, 0)
	podVolumes := virtlauncherPod.Spec.Volumes
	vmiVolumes := vmi.Spec.Volumes

	podVolumeMap := make(map[string]k8sv1.Volume)
	for _, podVolume := range podVolumes {
		podVolumeMap[podVolume.Name] = podVolume
	}
	for _, vmiVolume := range vmiVolumes {
		if _, ok := podVolumeMap[vmiVolume.Name]; !ok && (vmiVolume.DataVolume != nil || vmiVolume.PersistentVolumeClaim != nil || vmiVolume.MemoryDump != nil) {
			hotplugVolumes = append(hotplugVolumes, vmiVolume.DeepCopy())
		}
	}
	return hotplugVolumes
}

func SyncPodAnnotations(clientset kubecli.KubevirtClient, pod *k8sv1.Pod, newAnnotations map[string]string) (*k8sv1.Pod, error) {
	patchSet := patch.New()
	for key, newValue := range newAnnotations {
		if podAnnotationValue, keyExist := pod.Annotations[key]; !keyExist || podAnnotationValue != newValue {
			patchSet.AddOption(
				patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(key)), newValue),
			)
		}
	}
	if patchSet.IsEmpty() {
		return pod, nil
	}
	patchBytes, err := patchSet.GeneratePayload()
	if err != nil {
		return pod, fmt.Errorf("failed to generate patch payload: %w", err)
	}
	patchedPod, err := clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		log.Log.Object(pod).Errorf("failed to sync pod annotations: %v", err)
		return nil, err
	}
	return patchedPod, nil
}
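
// Illustrative sketch (not part of this file): a caller would typically feed
// SyncPodAnnotations the virt-launcher pod plus the annotations it wants to
// converge on. The client, pod variable and annotation key are assumed example
// values; only annotations that are missing or different are patched.
//
//	pod, err := SyncPodAnnotations(virtClient, launcherPod, map[string]string{
//		"example.kubevirt.io/owner": string(vmi.UID), // assumed annotation key
//	})
//	if err != nil {
//		return err
//	}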