• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

kubevirt / kubevirt / c987cb86-ce8b-4858-98a7-905a03cc91f8

24 Oct 2025 10:17AM UTC coverage: 70.351% (-0.002%) from 70.353%
c987cb86-ce8b-4858-98a7-905a03cc91f8

push

prow

web-flow
Merge pull request #15663 from oshoval/deviceinfo

virt-controller, migration: Add missing network-info

23 of 31 new or added lines in 4 files covered. (74.19%)

3 existing lines in 1 file now uncovered.

69124 of 98256 relevant lines covered (70.35%)

398.15 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.83
/pkg/controller/controller.go
1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright The KubeVirt Authors.
17
 *
18
 */
19

20
package controller
21

22
import (
23
        "context"
24
        "fmt"
25
        "runtime/debug"
26
        "strings"
27
        "time"
28

29
        k8sv1 "k8s.io/api/core/v1"
30
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31
        "k8s.io/apimachinery/pkg/fields"
32
        "k8s.io/apimachinery/pkg/labels"
33
        "k8s.io/apimachinery/pkg/runtime"
34
        "k8s.io/apimachinery/pkg/types"
35
        "k8s.io/apimachinery/pkg/watch"
36
        "k8s.io/client-go/tools/cache"
37
        "k8s.io/client-go/util/workqueue"
38

39
        v1 "kubevirt.io/api/core/v1"
40
        "kubevirt.io/client-go/kubecli"
41
        "kubevirt.io/client-go/log"
42
        cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
43

44
        "kubevirt.io/kubevirt/pkg/apimachinery/patch"
45
)
46

47
const (
	// BurstReplicas is the maximum amount of requests in a row for CRUD operations on resources by controllers,
	// to avoid an unintentional DoS against the API server.
	BurstReplicas uint = 250
)
52

53
// Reasons for vmi events
const (
	// FailedCreatePodReason is added in an event and in a vmi controller condition
	// when a pod for a vmi controller failed to be created.
	FailedCreatePodReason = "FailedCreate"
	// SuccessfulCreatePodReason is added in an event when a pod for a vmi controller
	// is successfully created.
	SuccessfulCreatePodReason = "SuccessfulCreate"
	// FailedDeletePodReason is added in an event and in a vmi controller condition
	// when a pod for a vmi controller failed to be deleted.
	FailedDeletePodReason = "FailedDelete"
	// SuccessfulDeletePodReason is added in an event when a pod for a vmi controller
	// is successfully deleted.
	SuccessfulDeletePodReason = "SuccessfulDelete"
	// FailedHandOverPodReason is added in an event and in a vmi controller condition
	// when transferring the pod ownership from the controller to virt-handler fails.
	FailedHandOverPodReason = "FailedHandOver"
	// FailedBackendStorageCreateReason is added when the creation of the backend storage PVC fails.
	FailedBackendStorageCreateReason = "FailedBackendStorageCreate"
	// FailedBackendStorageProbeReason is added when probing the backend storage PVC fails.
	FailedBackendStorageProbeReason = "FailedBackendStorageProbe"
	// BackendStorageNotReadyReason is added when the backend storage PVC is pending.
	BackendStorageNotReadyReason = "BackendStorageNotReady"
	// SuccessfulHandOverPodReason is added in an event
	// when the pod ownership transfer from the controller to virt-handler succeeds.
	SuccessfulHandOverPodReason = "SuccessfulHandOver"
	// FailedDataVolumeImportReason is added in an event when a dynamically generated
	// dataVolume reaches the failed status phase.
	FailedDataVolumeImportReason = "FailedDataVolumeImport"
	// FailedGuaranteePodResourcesReason is added in an event and in a vmi controller condition
	// when a pod has been created without Guaranteed resources.
	FailedGuaranteePodResourcesReason = "FailedGuaranteeResources"
	// FailedGatherhingClusterTopologyHints is added if the cluster topology hints can't be collected for a VMI by virt-controller
	FailedGatherhingClusterTopologyHints = "FailedGatherhingClusterTopologyHints"
	// FailedPvcNotFoundReason is added in an event
	// when a PVC for a volume was not found.
	FailedPvcNotFoundReason = "FailedPvcNotFound"
	// SuccessfulMigrationReason is added when a migration attempt completes successfully
	SuccessfulMigrationReason = "SuccessfulMigration"
	// FailedMigrationReason is added when a migration attempt fails
	FailedMigrationReason = "FailedMigration"
	// SuccessfulAbortMigrationReason is added when an attempt to abort migration completes successfully
	SuccessfulAbortMigrationReason = "SuccessfulAbortMigration"
	// MigrationTargetPodUnschedulable is added when a migration target pod enters Unschedulable phase
	MigrationTargetPodUnschedulable = "migrationTargetPodUnschedulable"
	// FailedAbortMigrationReason is added when an attempt to abort migration fails
	FailedAbortMigrationReason = "FailedAbortMigration"
	// MissingAttachmentPodReason is set when we have a hotplugged volume, but the attachment pod is missing
	MissingAttachmentPodReason = "MissingAttachmentPod"
	// PVCNotReadyReason is set when the PVC is not ready to be hot plugged.
	PVCNotReadyReason = "PVCNotReady"
	// FailedHotplugSyncReason is set when a hotplug specific failure occurs during sync
	FailedHotplugSyncReason = "FailedHotplugSync"
	// ErrImagePullReason is set when an error has occurred while pulling an image for a containerDisk VM volume.
	ErrImagePullReason = "ErrImagePull"
	// ImagePullBackOffReason is set when an error has occurred while pulling an image for a containerDisk VM volume,
	// and that kubelet is backing off before retrying.
	ImagePullBackOffReason = "ImagePullBackOff"
	// NoSuitableNodesForHostModelMigration is set when a VMI with host-model CPU mode tries to migrate but no node
	// is suitable for migration (since CPU model / required features are not supported)
	NoSuitableNodesForHostModelMigration = "NoSuitableNodesForHostModelMigration"
	// FailedPodPatchReason is set when a pod patch error occurs during sync
	FailedPodPatchReason = "FailedPodPatch"
	// MigrationBackoffReason is set when an error has occurred while migrating
	// and virt-controller is backing off before retrying.
	MigrationBackoffReason = "MigrationBackoff"
)
120

121
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, kubevirtNamespace and field selector.
// Both the list and the watch calls are constrained by the given field and label selectors.
func NewListWatchFromClient(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, labelSelector labels.Selector) *cache.ListWatch {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		// Apply the configured selectors to the list request.
		options.FieldSelector = fieldSelector.String()
		options.LabelSelector = labelSelector.String()
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Do(context.Background()).
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		// The watch uses the same selectors as the list call.
		options.FieldSelector = fieldSelector.String()
		options.LabelSelector = labelSelector.String()
		options.Watch = true
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Watch(context.Background())
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
145

146
// HandlePanic recovers from a panic in the current goroutine and logs the
// stack trace at FATAL level. Intended to be invoked as `defer HandlePanic()`
// at the top of controller goroutines; recover only works when called
// directly from a deferred function.
func HandlePanic() {
	if r := recover(); r != nil {
		// Ignoring error - There is nothing to do, if logging fails
		_ = log.Log.Level(log.FATAL).Log("stacktrace", debug.Stack(), "msg", r)
	}
}
152

153
func NewResourceEventHandlerFuncsForWorkqueue(queue workqueue.RateLimitingInterface) cache.ResourceEventHandlerFuncs {
×
154
        return cache.ResourceEventHandlerFuncs{
×
155
                AddFunc: func(obj interface{}) {
×
156
                        key, err := KeyFunc(obj)
×
157
                        if err == nil {
×
158
                                queue.Add(key)
×
159
                        }
×
160
                },
161
                UpdateFunc: func(old interface{}, new interface{}) {
×
162
                        key, err := KeyFunc(new)
×
163
                        if err == nil {
×
164
                                queue.Add(key)
×
165
                        }
×
166
                },
167
                DeleteFunc: func(obj interface{}) {
×
168
                        key, err := KeyFunc(obj)
×
169
                        if err == nil {
×
170
                                queue.Add(key)
×
171
                        }
×
172
                },
173
        }
174
}
175

176
func MigrationKey(migration *v1.VirtualMachineInstanceMigration) string {
53✔
177
        return fmt.Sprintf("%v/%v", migration.ObjectMeta.Namespace, migration.ObjectMeta.Name)
53✔
178
}
53✔
179

180
func VirtualMachineInstanceKey(vmi *v1.VirtualMachineInstance) string {
1,593✔
181
        return fmt.Sprintf("%v/%v", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name)
1,593✔
182
}
1,593✔
183

184
func VirtualMachineKey(vm *v1.VirtualMachine) string {
242✔
185
        return fmt.Sprintf("%v/%v", vm.ObjectMeta.Namespace, vm.ObjectMeta.Name)
242✔
186
}
242✔
187

188
func PodKey(pod *k8sv1.Pod) string {
54✔
189
        return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
54✔
190
}
54✔
191

192
func DataVolumeKey(dataVolume *cdiv1.DataVolume) string {
×
193
        return fmt.Sprintf("%v/%v", dataVolume.Namespace, dataVolume.Name)
×
194
}
×
195

196
func VirtualMachineInstanceKeys(vmis []*v1.VirtualMachineInstance) []string {
8✔
197
        keys := []string{}
8✔
198
        for _, vmi := range vmis {
28✔
199
                keys = append(keys, VirtualMachineInstanceKey(vmi))
20✔
200
        }
20✔
201
        return keys
8✔
202
}
203

204
func VirtualMachineKeys(vms []*v1.VirtualMachine) []string {
3✔
205
        keys := []string{}
3✔
206
        for _, vm := range vms {
21✔
207
                keys = append(keys, VirtualMachineKey(vm))
18✔
208
        }
18✔
209
        return keys
3✔
210
}
211

212
func HasFinalizer(object metav1.Object, finalizer string) bool {
613✔
213
        for _, f := range object.GetFinalizers() {
1,004✔
214
                if f == finalizer {
774✔
215
                        return true
383✔
216
                }
383✔
217
        }
218
        return false
230✔
219
}
220

221
func RemoveFinalizer(object metav1.Object, finalizer string) {
34✔
222
        filtered := []string{}
34✔
223
        for _, f := range object.GetFinalizers() {
66✔
224
                if f != finalizer {
43✔
225
                        filtered = append(filtered, f)
11✔
226
                }
11✔
227
        }
228
        object.SetFinalizers(filtered)
34✔
229
}
230

231
func AddFinalizer(object metav1.Object, finalizer string) {
160✔
232
        if HasFinalizer(object, finalizer) {
202✔
233
                return
42✔
234
        }
42✔
235
        object.SetFinalizers(append(object.GetFinalizers(), finalizer))
118✔
236
}
237

238
func ObservedLatestApiVersionAnnotation(object metav1.Object) bool {
545✔
239
        annotations := object.GetAnnotations()
545✔
240
        if annotations == nil {
545✔
241
                return false
×
242
        }
×
243

244
        version, ok := annotations[v1.ControllerAPILatestVersionObservedAnnotation]
545✔
245
        if !ok || version != v1.ApiLatestVersion {
545✔
246
                return false
×
247
        }
×
248
        return true
545✔
249
}
250

251
func SetLatestApiVersionAnnotation(object metav1.Object) {
932✔
252
        annotations := object.GetAnnotations()
932✔
253
        if annotations == nil {
1,864✔
254
                annotations = make(map[string]string)
932✔
255
        }
932✔
256

257
        annotations[v1.ControllerAPILatestVersionObservedAnnotation] = v1.ApiLatestVersion
932✔
258
        annotations[v1.ControllerAPIStorageVersionObservedAnnotation] = v1.ApiStorageVersion
932✔
259
        object.SetAnnotations(annotations)
932✔
260
}
261

262
func ApplyVolumeRequestOnVMISpec(vmiSpec *v1.VirtualMachineInstanceSpec, request *v1.VirtualMachineVolumeRequest) *v1.VirtualMachineInstanceSpec {
65✔
263
        if request.AddVolumeOptions != nil {
108✔
264
                alreadyAdded := false
43✔
265
                for _, volume := range vmiSpec.Volumes {
150✔
266
                        if volume.Name == request.AddVolumeOptions.Name {
114✔
267
                                alreadyAdded = true
7✔
268
                                break
7✔
269
                        }
270
                }
271

272
                if !alreadyAdded {
79✔
273
                        newVolume := v1.Volume{
36✔
274
                                Name: request.AddVolumeOptions.Name,
36✔
275
                        }
36✔
276

36✔
277
                        if request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim != nil {
53✔
278
                                pvcSource := request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim.DeepCopy()
17✔
279
                                pvcSource.Hotpluggable = true
17✔
280
                                newVolume.VolumeSource.PersistentVolumeClaim = pvcSource
17✔
281
                        } else if request.AddVolumeOptions.VolumeSource.DataVolume != nil {
37✔
282
                                dvSource := request.AddVolumeOptions.VolumeSource.DataVolume.DeepCopy()
1✔
283
                                dvSource.Hotpluggable = true
1✔
284
                                newVolume.VolumeSource.DataVolume = dvSource
1✔
285
                        }
1✔
286

287
                        vmiSpec.Volumes = append(vmiSpec.Volumes, newVolume)
36✔
288

36✔
289
                        if request.AddVolumeOptions.Disk != nil {
72✔
290
                                newDisk := request.AddVolumeOptions.Disk.DeepCopy()
36✔
291
                                newDisk.Name = request.AddVolumeOptions.Name
36✔
292

36✔
293
                                vmiSpec.Domain.Devices.Disks = append(vmiSpec.Domain.Devices.Disks, *newDisk)
36✔
294
                        }
36✔
295
                }
296

297
        } else if request.RemoveVolumeOptions != nil {
44✔
298

22✔
299
                newVolumesList := []v1.Volume{}
22✔
300
                newDisksList := []v1.Disk{}
22✔
301

22✔
302
                for _, volume := range vmiSpec.Volumes {
52✔
303
                        if volume.Name != request.RemoveVolumeOptions.Name {
40✔
304
                                newVolumesList = append(newVolumesList, volume)
10✔
305
                        }
10✔
306
                }
307

308
                for _, disk := range vmiSpec.Domain.Devices.Disks {
52✔
309
                        if disk.Name != request.RemoveVolumeOptions.Name {
40✔
310
                                newDisksList = append(newDisksList, disk)
10✔
311
                        }
10✔
312
                }
313

314
                vmiSpec.Volumes = newVolumesList
22✔
315
                vmiSpec.Domain.Devices.Disks = newDisksList
22✔
316
        }
317

318
        return vmiSpec
65✔
319
}
320

321
func CurrentVMIPod(vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) (*k8sv1.Pod, error) {
285✔
322

285✔
323
        // current pod is the most recent pod created on the current VMI node
285✔
324
        // OR the most recent pod created if no VMI node is set.
285✔
325

285✔
326
        // Get all pods from the namespace
285✔
327
        objs, err := podIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
285✔
328
        if err != nil {
285✔
329
                return nil, err
×
330
        }
×
331
        pods := []*k8sv1.Pod{}
285✔
332
        for _, obj := range objs {
5,678✔
333
                pod := obj.(*k8sv1.Pod)
5,393✔
334
                pods = append(pods, pod)
5,393✔
335
        }
5,393✔
336

337
        var curPod *k8sv1.Pod = nil
285✔
338
        for _, pod := range pods {
5,678✔
339
                if !metav1.IsControlledBy(pod, vmi) {
5,413✔
340
                        continue
20✔
341
                }
342

343
                if vmi.Status.NodeName != "" &&
5,373✔
344
                        vmi.Status.NodeName != pod.Spec.NodeName {
5,383✔
345
                        // This pod isn't scheduled to the current node.
10✔
346
                        // This can occur during the initial migration phases when
10✔
347
                        // a new target node is being prepared for the VMI.
10✔
348
                        continue
10✔
349
                }
350

351
                if curPod == nil || curPod.CreationTimestamp.Before(&pod.CreationTimestamp) {
5,588✔
352
                        curPod = pod
225✔
353
                }
225✔
354
        }
355

356
        return curPod, nil
285✔
357
}
358

359
func VMIActivePodsCount(vmi *v1.VirtualMachineInstance, vmiPodIndexer cache.Indexer) int {
36✔
360

36✔
361
        objs, err := vmiPodIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
36✔
362
        if err != nil {
36✔
363
                return 0
×
364
        }
×
365

366
        running := 0
36✔
367
        for _, obj := range objs {
72✔
368
                pod := obj.(*k8sv1.Pod)
36✔
369

36✔
370
                if pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed {
38✔
371
                        // not interested in terminated pods
2✔
372
                        continue
2✔
373
                } else if !metav1.IsControlledBy(pod, vmi) {
42✔
374
                        // not interested pods not associated with the vmi
8✔
375
                        continue
8✔
376
                }
377
                running++
26✔
378
        }
379

380
        return running
36✔
381
}
382

383
// GeneratePatchBytes joins the given JSON-patch operations into a single
// JSON-patch document of the form "[op1, op2, ...]" encoded as bytes.
func GeneratePatchBytes(ops []string) []byte {
	var b strings.Builder
	b.WriteString("[")
	b.WriteString(strings.Join(ops, ", "))
	b.WriteString("]")
	return []byte(b.String())
}
386

387
func SetVMIPhaseTransitionTimestamp(oldStatus *v1.VirtualMachineInstanceStatus, newStatus *v1.VirtualMachineInstanceStatus) {
138✔
388
        if oldStatus.Phase != newStatus.Phase {
196✔
389
                for _, transitionTimeStamp := range newStatus.PhaseTransitionTimestamps {
58✔
390
                        if transitionTimeStamp.Phase == newStatus.Phase {
×
391
                                // already exists.
×
392
                                return
×
393
                        }
×
394
                }
395

396
                now := metav1.NewTime(time.Now())
58✔
397
                newStatus.PhaseTransitionTimestamps = append(newStatus.PhaseTransitionTimestamps, v1.VirtualMachineInstancePhaseTransitionTimestamp{
58✔
398
                        Phase:                    newStatus.Phase,
58✔
399
                        PhaseTransitionTimestamp: now,
58✔
400
                })
58✔
401
        }
402
}
403

404
func SetVMIMigrationPhaseTransitionTimestamp(oldVMIMigration *v1.VirtualMachineInstanceMigration, newVMIMigration *v1.VirtualMachineInstanceMigration) {
107✔
405
        if oldVMIMigration.Status.Phase != newVMIMigration.Status.Phase {
150✔
406
                for _, transitionTimeStamp := range newVMIMigration.Status.PhaseTransitionTimestamps {
43✔
407
                        if transitionTimeStamp.Phase == newVMIMigration.Status.Phase {
×
408
                                // already exists.
×
409
                                return
×
410
                        }
×
411
                }
412

413
                now := metav1.NewTime(time.Now())
43✔
414
                newVMIMigration.Status.PhaseTransitionTimestamps = append(newVMIMigration.Status.PhaseTransitionTimestamps, v1.VirtualMachineInstanceMigrationPhaseTransitionTimestamp{
43✔
415
                        Phase:                    newVMIMigration.Status.Phase,
43✔
416
                        PhaseTransitionTimestamp: now,
43✔
417
                })
43✔
418
        }
419
}
420

421
func SetSourcePod(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) {
107✔
422
        if migration.Status.Phase != v1.MigrationPending {
189✔
423
                return
82✔
424
        }
82✔
425
        sourcePod, err := CurrentVMIPod(vmi, podIndexer)
25✔
426
        if err != nil {
25✔
427
                log.Log.Object(vmi).Reason(err).Warning("migration source pod not found")
×
428
        }
×
429
        if sourcePod != nil {
50✔
430
                if migration.Status.MigrationState == nil {
50✔
431
                        migration.Status.MigrationState = &v1.VirtualMachineInstanceMigrationState{}
25✔
432
                }
25✔
433
                migration.Status.MigrationState.SourcePod = sourcePod.Name
25✔
434
        }
435

436
}
437

438
func VMIHasHotplugVolumes(vmi *v1.VirtualMachineInstance) bool {
33✔
439
        for _, volumeStatus := range vmi.Status.VolumeStatus {
39✔
440
                if volumeStatus.HotplugVolume != nil {
12✔
441
                        return true
6✔
442
                }
6✔
443
        }
444
        for _, volume := range vmi.Spec.Volumes {
27✔
445
                if volume.DataVolume != nil && volume.DataVolume.Hotpluggable {
×
446
                        return true
×
447
                }
×
448
                if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable {
×
449
                        return true
×
450
                }
×
451
        }
452
        return false
27✔
453
}
454

455
func vmiHasCondition(vmi *v1.VirtualMachineInstance, conditionType v1.VirtualMachineInstanceConditionType) bool {
38✔
456
        vmiConditionManager := NewVirtualMachineInstanceConditionManager()
38✔
457
        return vmiConditionManager.HasCondition(vmi, conditionType)
38✔
458
}
38✔
459

460
// VMIHasHotplugCPU reports whether the VMI has a pending vCPU hotplug,
// i.e. the VirtualMachineInstanceVCPUChange condition is set.
func VMIHasHotplugCPU(vmi *v1.VirtualMachineInstance) bool {
	return vmiHasCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
}
463

464
// VMIHasHotplugMemory reports whether the VMI has a pending memory hotplug,
// i.e. the VirtualMachineInstanceMemoryChange condition is set.
func VMIHasHotplugMemory(vmi *v1.VirtualMachineInstance) bool {
	return vmiHasCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
}
467

468
func AttachmentPods(ownerPod *k8sv1.Pod, podIndexer cache.Indexer) ([]*k8sv1.Pod, error) {
215✔
469
        objs, err := podIndexer.ByIndex(cache.NamespaceIndex, ownerPod.Namespace)
215✔
470
        if err != nil {
215✔
471
                return nil, err
×
472
        }
×
473
        attachmentPods := []*k8sv1.Pod{}
215✔
474
        for _, obj := range objs {
543✔
475
                pod := obj.(*k8sv1.Pod)
328✔
476
                if !metav1.IsControlledBy(pod, ownerPod) {
633✔
477
                        continue
305✔
478
                }
479
                attachmentPods = append(attachmentPods, pod)
23✔
480
        }
481
        return attachmentPods, nil
215✔
482
}
483

484
// IsPodReady treats the pod as ready to be handed over to virt-handler, as soon as all pods except
485
// the compute pod are ready.
486
func IsPodReady(pod *k8sv1.Pod) bool {
165✔
487
        if IsPodDownOrGoingDown(pod) {
185✔
488
                return false
20✔
489
        }
20✔
490

491
        for _, containerStatus := range pod.Status.ContainerStatuses {
294✔
492
                // The compute container potentially holds a readiness probe for the VMI. Therefore
149✔
493
                // don't wait for the compute container to become ready (the VMI later on will trigger the change to ready)
149✔
494
                // and only check that the container started
149✔
495
                if containerStatus.Name == "compute" {
268✔
496
                        if containerStatus.State.Running == nil {
126✔
497
                                return false
7✔
498
                        }
7✔
499
                } else if containerStatus.Name == "istio-proxy" {
34✔
500
                        // When using istio the istio-proxy container will not be ready
4✔
501
                        // until there is a service pointing to this pod.
4✔
502
                        // We need to start the VM anyway
4✔
503
                        if containerStatus.State.Running == nil {
5✔
504
                                return false
1✔
505
                        }
1✔
506

507
                } else if containerStatus.Ready == false {
27✔
508
                        return false
1✔
509
                }
1✔
510
        }
511

512
        return pod.Status.Phase == k8sv1.PodRunning
136✔
513
}
514

515
// IsPodDownOrGoingDown reports whether the pod has reached a terminal phase,
// its compute container has terminated, or the pod is marked for deletion.
func IsPodDownOrGoingDown(pod *k8sv1.Pod) bool {
	return PodIsDown(pod) || isComputeContainerDown(pod) || pod.DeletionTimestamp != nil
}
518

519
// IsPodFailedOrGoingDown reports whether the pod failed, its compute
// container exited with a non-zero code, or the pod is marked for deletion.
func IsPodFailedOrGoingDown(pod *k8sv1.Pod) bool {
	return isPodFailed(pod) || isComputeContainerFailed(pod) || pod.DeletionTimestamp != nil
}
522

523
func isComputeContainerDown(pod *k8sv1.Pod) bool {
269✔
524
        for _, containerStatus := range pod.Status.ContainerStatuses {
520✔
525
                if containerStatus.Name == "compute" {
477✔
526
                        return containerStatus.State.Terminated != nil
226✔
527
                }
226✔
528
        }
529
        return false
43✔
530
}
531

532
func isComputeContainerFailed(pod *k8sv1.Pod) bool {
10✔
533
        for _, containerStatus := range pod.Status.ContainerStatuses {
15✔
534
                if containerStatus.Name == "compute" {
10✔
535
                        return containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode != 0
5✔
536
                }
5✔
537
        }
538
        return false
5✔
539
}
540

541
// PodIsDown reports whether the pod has reached a terminal phase
// (Succeeded or Failed).
func PodIsDown(pod *k8sv1.Pod) bool {
	return pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed
}
544

545
// isPodFailed reports whether the pod terminated in the Failed phase.
func isPodFailed(pod *k8sv1.Pod) bool {
	return pod.Status.Phase == k8sv1.PodFailed
}
548

549
// PodExists reports whether the given pod reference is non-nil.
func PodExists(pod *k8sv1.Pod) bool {
	return pod != nil
}
552

553
func GetHotplugVolumes(vmi *v1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod) []*v1.Volume {
143✔
554
        hotplugVolumes := make([]*v1.Volume, 0)
143✔
555
        podVolumes := virtlauncherPod.Spec.Volumes
143✔
556
        vmiVolumes := vmi.Spec.Volumes
143✔
557

143✔
558
        podVolumeMap := make(map[string]k8sv1.Volume)
143✔
559
        for _, podVolume := range podVolumes {
169✔
560
                podVolumeMap[podVolume.Name] = podVolume
26✔
561
        }
26✔
562
        for _, vmiVolume := range vmiVolumes {
185✔
563
                if _, ok := podVolumeMap[vmiVolume.Name]; !ok && (vmiVolume.DataVolume != nil || vmiVolume.PersistentVolumeClaim != nil || vmiVolume.MemoryDump != nil) {
61✔
564
                        hotplugVolumes = append(hotplugVolumes, vmiVolume.DeepCopy())
19✔
565
                }
19✔
566
        }
567
        return hotplugVolumes
143✔
568
}
569

570
func SyncPodAnnotations(clientset kubecli.KubevirtClient, pod *k8sv1.Pod, newAnnotations map[string]string) (*k8sv1.Pod, error) {
74✔
571
        patchSet := patch.New()
74✔
572
        for key, newValue := range newAnnotations {
132✔
573
                if podAnnotationValue, keyExist := pod.Annotations[key]; !keyExist || podAnnotationValue != newValue {
63✔
574
                        patchSet.AddOption(
5✔
575
                                patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(key)), newValue),
5✔
576
                        )
5✔
577
                }
5✔
578
        }
579
        if patchSet.IsEmpty() {
145✔
580
                return pod, nil
71✔
581
        }
71✔
582
        patchBytes, err := patchSet.GeneratePayload()
3✔
583
        if err != nil {
3✔
NEW
584
                return pod, fmt.Errorf("failed to generate patch payload: %w", err)
×
NEW
585
        }
×
586
        patchedPod, err := clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
3✔
587
        if err != nil {
3✔
NEW
588
                log.Log.Object(pod).Errorf("failed to sync pod annotations: %v", err)
×
NEW
589
                return nil, err
×
NEW
590
        }
×
591
        return patchedPod, nil
3✔
592
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc