kubevirt / kubevirt / 55c8662e-3cac-41c2-9b0b-b09f98b851a1

09 Dec 2025 08:21AM UTC coverage: 70.666% (-0.01%) from 70.68%
push · prow · web-flow
Merge pull request #16081 from ShellyKa13/vmbackup

VMBackup: introduce new VM backup API

1189 of 1731 new or added lines in 35 files covered. (68.69%)

12 existing lines in 4 files now uncovered.

71582 of 101296 relevant lines covered (70.67%)

416.77 hits per line

Source File
/pkg/virt-handler/vm.go (65.79% covered)
1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright The KubeVirt Authors.
17
 *
18
 */
19

20
package virthandler
21

22
import (
23
        "bytes"
24
        "context"
25
        goerror "errors"
26
        "fmt"
27
        "os"
28
        "path/filepath"
29
        "regexp"
30
        "sort"
31
        "strconv"
32
        "strings"
33
        "time"
34

35
        "github.com/mitchellh/go-ps"
36
        "github.com/opencontainers/runc/libcontainer/cgroups"
37
        "golang.org/x/sys/unix"
38
        "libvirt.org/go/libvirtxml"
39

40
        k8sv1 "k8s.io/api/core/v1"
41
        "k8s.io/apimachinery/pkg/api/equality"
42
        "k8s.io/apimachinery/pkg/api/resource"
43
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
44
        "k8s.io/apimachinery/pkg/util/errors"
45
        "k8s.io/apimachinery/pkg/util/wait"
46
        "k8s.io/client-go/tools/cache"
47
        "k8s.io/client-go/tools/record"
48
        "k8s.io/client-go/util/workqueue"
49

50
        v1 "kubevirt.io/api/core/v1"
51
        "kubevirt.io/client-go/kubecli"
52
        "kubevirt.io/client-go/log"
53

54
        "kubevirt.io/kubevirt/pkg/config"
55
        "kubevirt.io/kubevirt/pkg/controller"
56
        drautil "kubevirt.io/kubevirt/pkg/dra"
57
        "kubevirt.io/kubevirt/pkg/executor"
58
        hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
59
        hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
60
        "kubevirt.io/kubevirt/pkg/network/domainspec"
61
        neterrors "kubevirt.io/kubevirt/pkg/network/errors"
62
        netsetup "kubevirt.io/kubevirt/pkg/network/setup"
63
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
64
        "kubevirt.io/kubevirt/pkg/safepath"
65
        "kubevirt.io/kubevirt/pkg/storage/cbt"
66
        "kubevirt.io/kubevirt/pkg/storage/reservation"
67
        storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
68
        "kubevirt.io/kubevirt/pkg/util"
69
        "kubevirt.io/kubevirt/pkg/util/hardware"
70
        "kubevirt.io/kubevirt/pkg/util/migrations"
71
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
72
        "kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
73
        virtcache "kubevirt.io/kubevirt/pkg/virt-handler/cache"
74
        "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
75
        cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
76
        containerdisk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
77
        deviceManager "kubevirt.io/kubevirt/pkg/virt-handler/device-manager"
78
        "kubevirt.io/kubevirt/pkg/virt-handler/heartbeat"
79
        hotplugvolume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"
80
        "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
81
        launcherclients "kubevirt.io/kubevirt/pkg/virt-handler/launcher-clients"
82
        migrationproxy "kubevirt.io/kubevirt/pkg/virt-handler/migration-proxy"
83
        multipathmonitor "kubevirt.io/kubevirt/pkg/virt-handler/multipath-monitor"
84
        "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
85
        "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
86
)
87

88
type netstat interface {
89
        UpdateStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
90
        Teardown(vmi *v1.VirtualMachineInstance)
91
}
92

93
type downwardMetricsManager interface {
94
        Run(stopCh chan struct{})
95
        StartServer(vmi *v1.VirtualMachineInstance, pid int) error
96
        StopServer(vmi *v1.VirtualMachineInstance)
97
}
98

99
type VirtualMachineController struct {
100
        *BaseController
101
        capabilities             *libvirtxml.Caps
102
        clientset                kubecli.KubevirtClient
103
        containerDiskMounter     containerdisk.Mounter
104
        downwardMetricsManager   downwardMetricsManager
105
        hotplugVolumeMounter     hotplugvolume.VolumeMounter
106
        hostCpuModel             string
107
        ioErrorRetryManager      *FailRetryManager
108
        deviceManagerController  *deviceManager.DeviceController
109
        heartBeat                *heartbeat.HeartBeat
110
        heartBeatInterval        time.Duration
111
        netConf                  netconf
112
        sriovHotplugExecutorPool *executor.RateLimitedExecutorPool
113
        vmiExpectations          *controller.UIDTrackingControllerExpectations
114
        vmiGlobalStore           cache.Store
115
        multipathSocketMonitor   *multipathmonitor.MultipathSocketMonitor
116
}
117

118
var getCgroupManager = func(vmi *v1.VirtualMachineInstance, host string) (cgroup.Manager, error) {
×
119
        return cgroup.NewManagerFromVM(vmi, host)
×
120
}
×
121

122
func NewVirtualMachineController(
123
        recorder record.EventRecorder,
124
        clientset kubecli.KubevirtClient,
125
        nodeStore cache.Store,
126
        host string,
127
        virtPrivateDir string,
128
        kubeletPodsDir string,
129
        launcherClients launcherclients.LauncherClientsManager,
130
        vmiInformer cache.SharedIndexInformer,
131
        vmiGlobalStore cache.Store,
132
        domainInformer cache.SharedInformer,
133
        maxDevices int,
134
        clusterConfig *virtconfig.ClusterConfig,
135
        podIsolationDetector isolation.PodIsolationDetector,
136
        migrationProxy migrationproxy.ProxyManager,
137
        downwardMetricsManager downwardMetricsManager,
138
        capabilities *libvirtxml.Caps,
139
        hostCpuModel string,
140
        netConf netconf,
141
        netStat netstat,
142
) (*VirtualMachineController, error) {
114✔
143

114✔
144
        queue := workqueue.NewTypedRateLimitingQueueWithConfig[string](
114✔
145
                workqueue.DefaultTypedControllerRateLimiter[string](),
114✔
146
                workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-vm"},
114✔
147
        )
114✔
148
        logger := log.Log.With("controller", "vm")
114✔
149

114✔
150
        baseCtrl, err := NewBaseController(
114✔
151
                logger,
114✔
152
                host,
114✔
153
                recorder,
114✔
154
                clientset,
114✔
155
                queue,
114✔
156
                vmiInformer,
114✔
157
                domainInformer,
114✔
158
                clusterConfig,
114✔
159
                podIsolationDetector,
114✔
160
                launcherClients,
114✔
161
                migrationProxy,
114✔
162
                "/proc/%d/root/var/run",
114✔
163
                netStat,
114✔
164
        )
114✔
165
        if err != nil {
114✔
166
                return nil, err
×
167
        }
×
168

169
        containerDiskState := filepath.Join(virtPrivateDir, "container-disk-mount-state")
114✔
170
        if err := os.MkdirAll(containerDiskState, 0700); err != nil {
114✔
171
                return nil, err
×
172
        }
×
173

174
        hotplugState := filepath.Join(virtPrivateDir, "hotplug-volume-mount-state")
114✔
175
        if err := os.MkdirAll(hotplugState, 0700); err != nil {
114✔
176
                return nil, err
×
177
        }
×
178

179
        c := &VirtualMachineController{
114✔
180
                BaseController:           baseCtrl,
114✔
181
                capabilities:             capabilities,
114✔
182
                clientset:                clientset,
114✔
183
                containerDiskMounter:     containerdisk.NewMounter(podIsolationDetector, containerDiskState, clusterConfig),
114✔
184
                downwardMetricsManager:   downwardMetricsManager,
114✔
185
                hotplugVolumeMounter:     hotplugvolume.NewVolumeMounter(hotplugState, kubeletPodsDir, host),
114✔
186
                hostCpuModel:             hostCpuModel,
114✔
187
                ioErrorRetryManager:      NewFailRetryManager("io-error-retry", 10*time.Second, 3*time.Minute, 30*time.Second),
114✔
188
                heartBeatInterval:        1 * time.Minute,
114✔
189
                netConf:                  netConf,
114✔
190
                sriovHotplugExecutorPool: executor.NewRateLimitedExecutorPool(executor.NewExponentialLimitedBackoffCreator()),
114✔
191
                vmiExpectations:          controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
114✔
192
                vmiGlobalStore:           vmiGlobalStore,
114✔
193
                multipathSocketMonitor:   multipathmonitor.NewMultipathSocketMonitor(),
114✔
194
        }
114✔
195

114✔
196
        _, err = vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
114✔
197
                AddFunc:    c.addDeleteFunc,
114✔
198
                DeleteFunc: c.addDeleteFunc,
114✔
199
                UpdateFunc: c.updateFunc,
114✔
200
        })
114✔
201
        if err != nil {
114✔
202
                return nil, err
×
203
        }
×
204

205
        _, err = domainInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
114✔
206
                AddFunc:    c.addDomainFunc,
114✔
207
                DeleteFunc: c.deleteDomainFunc,
114✔
208
                UpdateFunc: c.updateDomainFunc,
114✔
209
        })
114✔
210
        if err != nil {
114✔
211
                return nil, err
×
212
        }
×
213

214
        permissions := "rw"
114✔
215
        if cgroups.IsCgroup2UnifiedMode() {
114✔
216
                // Need 'rwm' permissions otherwise ebpf filtering program attached by runc
×
217
                // will deny probing the device file with 'access' syscall. That in turn
×
218
                // will lead to virtqemud failure on VM startup.
×
219
                // This has been fixed upstream:
×
220
                //   https://github.com/opencontainers/runc/pull/2796
×
221
                // but the workaround is still needed to support previous versions without
×
222
                // the patch.
×
223
                permissions = "rwm"
×
224
        }
×
225

226
        c.deviceManagerController = deviceManager.NewDeviceController(
114✔
227
                c.host,
114✔
228
                maxDevices,
114✔
229
                permissions,
114✔
230
                deviceManager.PermanentHostDevicePlugins(maxDevices, permissions),
114✔
231
                clusterConfig,
114✔
232
                nodeStore)
114✔
233
        c.heartBeat = heartbeat.NewHeartBeat(clientset.CoreV1(), c.deviceManagerController, clusterConfig, host)
114✔
234

114✔
235
        return c, nil
114✔
236
}
237

238
func (c *VirtualMachineController) Run(threadiness int, stopCh chan struct{}) {
×
239
        defer c.queue.ShutDown()
×
240
        c.logger.Info("Starting virt-handler vms controller.")
×
241

×
242
        go c.deviceManagerController.Run(stopCh)
×
243

×
244
        go c.downwardMetricsManager.Run(stopCh)
×
245

×
246
        cache.WaitForCacheSync(stopCh, c.hasSynced)
×
247

×
248
        // queue keys for previous Domains on the host that no longer exist
×
249
        // in the cache. This ensures we perform local cleanup of deleted VMs.
×
250
        for _, domain := range c.domainStore.List() {
×
251
                d := domain.(*api.Domain)
×
252
                vmiRef := v1.NewVMIReferenceWithUUID(
×
253
                        d.ObjectMeta.Namespace,
×
254
                        d.ObjectMeta.Name,
×
255
                        d.Spec.Metadata.KubeVirt.UID)
×
256

×
257
                key := controller.VirtualMachineInstanceKey(vmiRef)
×
258

×
259
                _, exists, _ := c.vmiStore.GetByKey(key)
×
260
                if !exists {
×
261
                        c.queue.Add(key)
×
262
                }
×
263
        }
264
        c.multipathSocketMonitor.Run()
×
265

×
266
        heartBeatDone := c.heartBeat.Run(c.heartBeatInterval, stopCh)
×
267

×
268
        go c.ioErrorRetryManager.Run(stopCh)
×
269

×
270
        // Start the actual work
×
271
        for i := 0; i < threadiness; i++ {
×
272
                go wait.Until(c.runWorker, time.Second, stopCh)
×
273
        }
×
274

275
        <-heartBeatDone
×
276
        <-stopCh
×
277
        c.multipathSocketMonitor.Close()
×
278
        c.logger.Info("Stopping virt-handler vms controller.")
×
279
}
280

281
func (c *VirtualMachineController) runWorker() {
×
282
        for c.Execute() {
×
283
        }
×
284
}
285

286
func (c *VirtualMachineController) Execute() bool {
49✔
287
        key, quit := c.queue.Get()
49✔
288
        if quit {
49✔
289
                return false
×
290
        }
×
291
        defer c.queue.Done(key)
49✔
292
        if err := c.execute(key); err != nil {
55✔
293
                c.logger.Reason(err).Infof("re-enqueuing VirtualMachineInstance %v", key)
6✔
294
                c.queue.AddRateLimited(key)
6✔
295
        } else {
49✔
296
                c.logger.V(4).Infof("processed VirtualMachineInstance %v", key)
43✔
297
                c.queue.Forget(key)
43✔
298
        }
43✔
299
        return true
49✔
300
}
301

302
func (c *VirtualMachineController) execute(key string) error {
49✔
303
        vmi, vmiExists, err := c.getVMIFromCache(key)
49✔
304
        if err != nil {
50✔
305
                return err
1✔
306
        }
1✔
307

308
        if !vmiExists {
56✔
309
                // the vmiInformer probably has to catch up to the domainInformer
8✔
310
                // which already sees the vmi, so let's fetch it from the global
8✔
311
                // vmi informer to make sure the vmi has actually been deleted
8✔
312
                c.logger.V(4).Infof("fetching vmi for key %v from the global informer", key)
8✔
313
                obj, exists, err := c.vmiGlobalStore.GetByKey(key)
8✔
314
                if err != nil {
8✔
315
                        return err
×
316
                }
×
317
                if exists {
8✔
318
                        vmi = obj.(*v1.VirtualMachineInstance)
×
319
                }
×
320
                vmiExists = exists
8✔
321
        }
322

323
        if !vmiExists {
56✔
324
                c.vmiExpectations.DeleteExpectations(key)
8✔
325
        } else if !c.vmiExpectations.SatisfiedExpectations(key) {
48✔
326
                return nil
×
327
        }
×
328

329
        domain, domainExists, domainCachedUID, err := c.getDomainFromCache(key)
48✔
330
        if err != nil {
48✔
331
                return err
×
332
        }
×
333
        c.logger.Object(vmi).V(4).Infof("domain exists %v", domainExists)
48✔
334

48✔
335
        if !vmiExists && string(domainCachedUID) != "" {
56✔
336
                // it's possible to discover the UID from cache even if the domain
8✔
337
                // doesn't technically exist anymore
8✔
338
                vmi.UID = domainCachedUID
8✔
339
                c.logger.Object(vmi).Infof("Using cached UID for vmi found in domain cache")
8✔
340
        }
8✔
341

342
        // As a last effort, if the UID still can't be determined attempt
343
        // to retrieve it from the ghost record
344
        if string(vmi.UID) == "" {
49✔
345
                uid := virtcache.GhostRecordGlobalStore.LastKnownUID(key)
1✔
346
                if uid != "" {
1✔
347
                        c.logger.Object(vmi).V(3).Infof("ghost record cache provided %s as UID", uid)
×
348
                        vmi.UID = uid
×
349
                }
×
350
        }
351

352
        if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
49✔
353
                oldVMI := v1.NewVMIReferenceFromNameWithNS(vmi.Namespace, vmi.Name)
1✔
354
                oldVMI.UID = domain.Spec.Metadata.KubeVirt.UID
1✔
355
                expired, initialized, err := c.launcherClients.IsLauncherClientUnresponsive(oldVMI)
1✔
356
                if err != nil {
1✔
357
                        return err
×
358
                }
×
359
                // If we found an outdated domain which is also not alive anymore, clean up
360
                if !initialized {
1✔
361
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
×
362
                        return nil
×
363
                } else if expired {
1✔
364
                        c.logger.Object(oldVMI).Infof("Detected stale vmi %s that still needs cleanup before new vmi %s with identical name/namespace can be processed", oldVMI.UID, vmi.UID)
×
365
                        err = c.processVmCleanup(oldVMI)
×
366
                        if err != nil {
×
367
                                return err
×
368
                        }
×
369
                        // Make sure we re-enqueue the key to ensure this new VMI is processed
370
                        // after the stale domain is removed
371
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*5)
×
372
                }
373

374
                return nil
1✔
375
        }
376

377
        if domainExists &&
47✔
378
                (domainMigrated(domain) || domain.DeletionTimestamp != nil) {
47✔
379
                c.logger.Object(vmi).V(4).Info("detected orphan vmi")
×
380
                return c.deleteVM(vmi)
×
381
        }
×
382

383
        if migrations.IsMigrating(vmi) && (vmi.Status.Phase == v1.Failed) {
47✔
384
                c.logger.V(1).Infof("cleaning up VMI key %v as migration is in progress and the vmi is failed", key)
×
385
                err = c.processVmCleanup(vmi)
×
386
                if err != nil {
×
387
                        return err
×
388
                }
×
389
        }
390

391
        if vmi.DeletionTimestamp == nil && isMigrationInProgress(vmi, domain) {
49✔
392
                c.logger.V(4).Infof("ignoring key %v as migration is in progress", key)
2✔
393
                return nil
2✔
394
        }
2✔
395

396
        if vmiExists && !c.isVMIOwnedByNode(vmi) {
46✔
397
                c.logger.Object(vmi).V(4).Info("ignoring vmi as it is not owned by this node")
1✔
398
                return nil
1✔
399
        }
1✔
400

401
        if vmiExists && vmi.IsMigrationSource() {
44✔
402
                c.logger.Object(vmi).V(4).Info("ignoring vmi as it is a migration source")
×
403
                return nil
×
404
        }
×
405

406
        return c.sync(key,
44✔
407
                vmi.DeepCopy(),
44✔
408
                vmiExists,
44✔
409
                domain,
44✔
410
                domainExists)
44✔
411

412
}
413

414
type vmiIrrecoverableError struct {
415
        msg string
416
}
417

418
func (e *vmiIrrecoverableError) Error() string { return e.msg }
3✔
419

420
func formatIrrecoverableErrorMessage(domain *api.Domain) string {
1✔
421
        msg := "unknown reason"
1✔
422
        if domainPausedFailedPostCopy(domain) {
2✔
423
                msg = "VMI is irrecoverable due to failed post-copy migration"
1✔
424
        }
1✔
425
        return msg
1✔
426
}
427

428
func domainPausedFailedPostCopy(domain *api.Domain) bool {
32✔
429
        return domain != nil && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedPostcopyFailed
32✔
430
}
32✔
431

432
// teardownNetwork performs network cache cleanup for a specific VMI.
433
func (c *VirtualMachineController) teardownNetwork(vmi *v1.VirtualMachineInstance) {
5✔
434
        if string(vmi.UID) == "" {
5✔
435
                return
×
436
        }
×
437
        if err := c.netConf.Teardown(vmi); err != nil {
5✔
438
                c.logger.Reason(err).Errorf("failed to delete VMI Network cache files: %s", err.Error())
×
439
        }
×
440
        c.netStat.Teardown(vmi)
5✔
441
}
442

443
func canUpdateToMounted(currentPhase v1.VolumePhase) bool {
12✔
444
        return currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending || currentPhase == v1.HotplugVolumeAttachedToNode
12✔
445
}
12✔
446

447
func canUpdateToUnmounted(currentPhase v1.VolumePhase) bool {
8✔
448
        return currentPhase == v1.VolumeReady || currentPhase == v1.HotplugVolumeMounted || currentPhase == v1.HotplugVolumeAttachedToNode
8✔
449
}
8✔
450

451
func (c *VirtualMachineController) generateEventsForVolumeStatusChange(vmi *v1.VirtualMachineInstance, newStatusMap map[string]v1.VolumeStatus) {
29✔
452
        newStatusMapCopy := make(map[string]v1.VolumeStatus)
29✔
453
        for k, v := range newStatusMap {
61✔
454
                newStatusMapCopy[k] = v
32✔
455
        }
32✔
456
        for _, oldStatus := range vmi.Status.VolumeStatus {
59✔
457
                newStatus, ok := newStatusMap[oldStatus.Name]
30✔
458
                if !ok {
30✔
459
                        // status got removed
×
460
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, VolumeUnplugged, fmt.Sprintf("Volume %s has been unplugged", oldStatus.Name))
×
461
                        continue
×
462
                }
463
                if newStatus.Phase != oldStatus.Phase {
42✔
464
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, newStatus.Reason, newStatus.Message)
12✔
465
                }
12✔
466
                delete(newStatusMapCopy, newStatus.Name)
30✔
467
        }
468
        // Send events for any new statuses.
469
        for _, v := range newStatusMapCopy {
31✔
470
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v.Reason, v.Message)
2✔
471
        }
2✔
472
}
473

474
func (c *VirtualMachineController) updateHotplugVolumeStatus(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, specVolumeMap map[string]struct{}) (v1.VolumeStatus, bool) {
26✔
475
        needsRefresh := false
26✔
476
        if volumeStatus.Target == "" {
47✔
477
                needsRefresh = true
21✔
478
                mounted, err := c.hotplugVolumeMounter.IsMounted(vmi, volumeStatus.Name, volumeStatus.HotplugVolume.AttachPodUID)
21✔
479
                if err != nil {
21✔
480
                        c.logger.Object(vmi).Errorf("error occurred while checking if volume is mounted: %v", err)
×
481
                }
×
482
                if mounted {
33✔
483
                        if _, ok := specVolumeMap[volumeStatus.Name]; ok && canUpdateToMounted(volumeStatus.Phase) {
15✔
484
                                log.DefaultLogger().Infof("Marking volume %s as mounted in pod, it can now be attached", volumeStatus.Name)
3✔
485
                                // mounted, and still in spec, and in phase we can change, update status to mounted.
3✔
486
                                volumeStatus.Phase = v1.HotplugVolumeMounted
3✔
487
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been mounted in virt-launcher pod", volumeStatus.Name)
3✔
488
                                volumeStatus.Reason = VolumeMountedToPodReason
3✔
489
                        }
3✔
490
                } else {
9✔
491
                        // Not mounted, check if the volume is in the spec, if not update status
9✔
492
                        if _, ok := specVolumeMap[volumeStatus.Name]; !ok && canUpdateToUnmounted(volumeStatus.Phase) {
13✔
493
                                log.DefaultLogger().Infof("Marking volume %s as unmounted from pod, it can now be detached", volumeStatus.Name)
4✔
494
                                // Not mounted.
4✔
495
                                volumeStatus.Phase = v1.HotplugVolumeUnMounted
4✔
496
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been unmounted from virt-launcher pod", volumeStatus.Name)
4✔
497
                                volumeStatus.Reason = VolumeUnMountedFromPodReason
4✔
498
                        }
4✔
499
                }
500
        } else {
5✔
501
                // Successfully attached to VM.
5✔
502
                volumeStatus.Phase = v1.VolumeReady
5✔
503
                volumeStatus.Message = fmt.Sprintf("Successfully attach hotplugged volume %s to VM", volumeStatus.Name)
5✔
504
                volumeStatus.Reason = VolumeReadyReason
5✔
505
        }
5✔
506
        return volumeStatus, needsRefresh
26✔
507
}
508

509
func needToComputeChecksums(vmi *v1.VirtualMachineInstance) bool {
28✔
510
        containerDisks := map[string]*v1.Volume{}
28✔
511
        for _, volume := range vmi.Spec.Volumes {
31✔
512
                if volume.VolumeSource.ContainerDisk != nil {
5✔
513
                        containerDisks[volume.Name] = &volume
2✔
514
                }
2✔
515
        }
516

517
        for i := range vmi.Status.VolumeStatus {
32✔
518
                _, isContainerDisk := containerDisks[vmi.Status.VolumeStatus[i].Name]
4✔
519
                if !isContainerDisk {
7✔
520
                        continue
3✔
521
                }
522

523
                if vmi.Status.VolumeStatus[i].ContainerDiskVolume == nil ||
1✔
524
                        vmi.Status.VolumeStatus[i].ContainerDiskVolume.Checksum == 0 {
2✔
525
                        return true
1✔
526
                }
1✔
527
        }
528

529
        if util.HasKernelBootContainerImage(vmi) {
27✔
530
                if vmi.Status.KernelBootStatus == nil {
×
531
                        return true
×
532
                }
×
533

534
                kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
×
535

×
536
                if kernelBootContainer.KernelPath != "" &&
×
537
                        (vmi.Status.KernelBootStatus.KernelInfo == nil ||
×
538
                                vmi.Status.KernelBootStatus.KernelInfo.Checksum == 0) {
×
539
                        return true
×
540

×
541
                }
×
542

543
                if kernelBootContainer.InitrdPath != "" &&
×
544
                        (vmi.Status.KernelBootStatus.InitrdInfo == nil ||
×
545
                                vmi.Status.KernelBootStatus.InitrdInfo.Checksum == 0) {
×
546
                        return true
×
547

×
548
                }
×
549
        }
550

551
        return false
27✔
552
}
553

554
// updateChecksumInfo is kept for compatibility with older virt-handlers
555
// that validate checksum calculations in vmi.status. This validation was
556
// removed in PR #14021, but we had to keep the checksum calculations for upgrades.
557
// Once we're sure old handlers won't interrupt upgrades, this can be removed.
558
func (c *VirtualMachineController) updateChecksumInfo(vmi *v1.VirtualMachineInstance, syncError error) error {
35✔
559
        // If the imageVolume feature gate is enabled, upgrade support isn't required,
35✔
560
        // and we can skip the checksum calculation. By the time the feature gate is GA,
35✔
561
        // the checksum calculation should be removed.
35✔
562
        if syncError != nil || vmi.DeletionTimestamp != nil || !needToComputeChecksums(vmi) || c.clusterConfig.ImageVolumeEnabled() {
69✔
563
                return nil
34✔
564
        }
34✔
565

566
        diskChecksums, err := c.containerDiskMounter.ComputeChecksums(vmi)
1✔
567
        if goerror.Is(err, containerdisk.ErrDiskContainerGone) {
1✔
568
                c.logger.Errorf("cannot compute checksums as containerdisk/kernelboot containers seem to have been terminated")
×
569
                return nil
×
570
        }
×
571
        if err != nil {
1✔
572
                return err
×
573
        }
×
574

575
        // containerdisks
576
        for i := range vmi.Status.VolumeStatus {
2✔
577
                checksum, exists := diskChecksums.ContainerDiskChecksums[vmi.Status.VolumeStatus[i].Name]
1✔
578
                if !exists {
1✔
579
                        // not a containerdisk
×
580
                        continue
×
581
                }
582

583
                vmi.Status.VolumeStatus[i].ContainerDiskVolume = &v1.ContainerDiskInfo{
1✔
584
                        Checksum: checksum,
1✔
585
                }
1✔
586
        }
587

588
        // kernelboot
589
        if util.HasKernelBootContainerImage(vmi) {
2✔
590
                vmi.Status.KernelBootStatus = &v1.KernelBootStatus{}
1✔
591

1✔
592
                if diskChecksums.KernelBootChecksum.Kernel != nil {
2✔
593
                        vmi.Status.KernelBootStatus.KernelInfo = &v1.KernelInfo{
1✔
594
                                Checksum: *diskChecksums.KernelBootChecksum.Kernel,
1✔
595
                        }
1✔
596
                }
1✔
597

598
                if diskChecksums.KernelBootChecksum.Initrd != nil {
2✔
599
                        vmi.Status.KernelBootStatus.InitrdInfo = &v1.InitrdInfo{
1✔
600
                                Checksum: *diskChecksums.KernelBootChecksum.Initrd,
1✔
601
                        }
1✔
602
                }
1✔
603
        }
604

605
        return nil
1✔
606
}
607

608
func (c *VirtualMachineController) updateVolumeStatusesFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
60✔
609
        // The return value is only used by unit tests
60✔
610
        hasHotplug := false
60✔
611

60✔
612
        if len(vmi.Status.VolumeStatus) == 0 {
92✔
613
                return false
32✔
614
        }
32✔
615

616
        diskDeviceMap := make(map[string]string)
28✔
617
        if domain != nil {
55✔
618
                for _, disk := range domain.Spec.Devices.Disks {
35✔
619
                        // don't care about empty cdroms
8✔
620
                        if disk.Source.File != "" || disk.Source.Dev != "" {
14✔
621
                                diskDeviceMap[disk.Alias.GetName()] = disk.Target.Device
6✔
622
                        }
6✔
623
                }
624
        }
625
        specVolumeMap := make(map[string]struct{})
28✔
626
        for _, volume := range vmi.Spec.Volumes {
44✔
627
                specVolumeMap[volume.Name] = struct{}{}
16✔
628
        }
16✔
629
        for _, utilityVolume := range vmi.Spec.UtilityVolumes {
28✔
630
                specVolumeMap[utilityVolume.Name] = struct{}{}
×
631
        }
×
632
        newStatusMap := make(map[string]v1.VolumeStatus)
28✔
633
        var newStatuses []v1.VolumeStatus
28✔
634
        needsRefresh := false
28✔
635
        for _, volumeStatus := range vmi.Status.VolumeStatus {
57✔
636
                tmpNeedsRefresh := false
29✔
637
                // relying on the fact that target will be "" if not in the map
29✔
638
                // see updateHotplugVolumeStatus
29✔
639
                volumeStatus.Target = diskDeviceMap[volumeStatus.Name]
29✔
640
                if volumeStatus.HotplugVolume != nil {
55✔
641
                        hasHotplug = true
26✔
642
                        volumeStatus, tmpNeedsRefresh = c.updateHotplugVolumeStatus(vmi, volumeStatus, specVolumeMap)
26✔
643
                        needsRefresh = needsRefresh || tmpNeedsRefresh
26✔
644
                }
26✔
645
                if volumeStatus.MemoryDumpVolume != nil {
35✔
646
                        volumeStatus, tmpNeedsRefresh = c.updateMemoryDumpInfo(vmi, volumeStatus, domain)
6✔
647
                        needsRefresh = needsRefresh || tmpNeedsRefresh
6✔
648
                }
6✔
649
                newStatuses = append(newStatuses, volumeStatus)
29✔
650
                newStatusMap[volumeStatus.Name] = volumeStatus
29✔
651
        }
652
        sort.SliceStable(newStatuses, func(i, j int) bool {
29✔
653
                return strings.Compare(newStatuses[i].Name, newStatuses[j].Name) == -1
1✔
654
        })
1✔
655
        if needsRefresh {
49✔
656
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
21✔
657
        }
21✔
658
        c.generateEventsForVolumeStatusChange(vmi, newStatusMap)
28✔
659
        vmi.Status.VolumeStatus = newStatuses
28✔
660

28✔
661
        return hasHotplug
28✔
662
}
663

664
func (c *VirtualMachineController) updateGuestInfoFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
35✔
665

35✔
666
        if domain == nil || domain.Status.OSInfo.Name == "" || vmi.Status.GuestOSInfo.Name == domain.Status.OSInfo.Name {
69✔
667
                return
34✔
668
        }
34✔
669

670
        vmi.Status.GuestOSInfo.Name = domain.Status.OSInfo.Name
1✔
671
        vmi.Status.GuestOSInfo.Version = domain.Status.OSInfo.Version
1✔
672
        vmi.Status.GuestOSInfo.KernelRelease = domain.Status.OSInfo.KernelRelease
1✔
673
        vmi.Status.GuestOSInfo.PrettyName = domain.Status.OSInfo.PrettyName
1✔
674
        vmi.Status.GuestOSInfo.VersionID = domain.Status.OSInfo.VersionId
1✔
675
        vmi.Status.GuestOSInfo.KernelVersion = domain.Status.OSInfo.KernelVersion
1✔
676
        vmi.Status.GuestOSInfo.Machine = domain.Status.OSInfo.Machine
1✔
677
        vmi.Status.GuestOSInfo.ID = domain.Status.OSInfo.Id
1✔
678
}
679

680
func (c *VirtualMachineController) updateAccessCredentialConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
35✔
681

35✔
682
        if domain == nil || domain.Spec.Metadata.KubeVirt.AccessCredential == nil {
67✔
683
                return
32✔
684
        }
32✔
685

686
        message := domain.Spec.Metadata.KubeVirt.AccessCredential.Message
3✔
687
        status := k8sv1.ConditionFalse
3✔
688
        if domain.Spec.Metadata.KubeVirt.AccessCredential.Succeeded {
5✔
689
                status = k8sv1.ConditionTrue
2✔
690
        }
2✔
691

692
        add := false
3✔
693
        condition := condManager.GetCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
3✔
694
        if condition == nil {
4✔
695
                add = true
1✔
696
        } else if condition.Status != status || condition.Message != message {
4✔
697
                // if not as expected, remove, then add.
1✔
698
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
699
                add = true
1✔
700
        }
1✔
701
        if add {
5✔
702
                newCondition := v1.VirtualMachineInstanceCondition{
2✔
703
                        Type:               v1.VirtualMachineInstanceAccessCredentialsSynchronized,
2✔
704
                        LastTransitionTime: metav1.Now(),
2✔
705
                        Status:             status,
2✔
706
                        Message:            message,
2✔
707
                }
2✔
708
                vmi.Status.Conditions = append(vmi.Status.Conditions, newCondition)
2✔
709
                if status == k8sv1.ConditionTrue {
3✔
710
                        eventMessage := "Access credentials sync successful."
1✔
711
                        if message != "" {
1✔
712
                                eventMessage = fmt.Sprintf("Access credentials sync successful: %s", message)
×
713
                        }
×
714
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.AccessCredentialsSyncSuccess.String(), eventMessage)
1✔
715
                } else {
1✔
716
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.AccessCredentialsSyncFailed.String(),
1✔
717
                                fmt.Sprintf("Access credentials sync failed: %s", message),
1✔
718
                        )
1✔
719
                }
1✔
720
        }
721
}
722

723
func (c *VirtualMachineController) updateLiveMigrationConditions(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager) {
36✔
724
        // Calculate whether the VM is migratable
36✔
725
        liveMigrationCondition, isBlockMigration := c.calculateLiveMigrationCondition(vmi)
36✔
726
        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceIsMigratable) {
63✔
727
                vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
27✔
728
        } else {
36✔
729
                cond := condManager.GetCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
9✔
730
                if !equality.Semantic.DeepEqual(cond, liveMigrationCondition) {
10✔
731
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
1✔
732
                        vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
1✔
733
                }
1✔
734
        }
735
        // Set VMI Migration Method
736
        if isBlockMigration {
41✔
737
                vmi.Status.MigrationMethod = v1.BlockMigration
5✔
738
        } else {
36✔
739
                vmi.Status.MigrationMethod = v1.LiveMigration
31✔
740
        }
31✔
741
        storageLiveMigCond := c.calculateLiveStorageMigrationCondition(vmi)
36✔
742
        condManager.UpdateCondition(vmi, storageLiveMigCond)
36✔
743
        evictable := migrations.VMIMigratableOnEviction(c.clusterConfig, vmi)
36✔
744
        if evictable && liveMigrationCondition.Status == k8sv1.ConditionFalse {
37✔
745
                c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), "EvictionStrategy is set but vmi is not migratable; %s", liveMigrationCondition.Message)
1✔
746
        }
1✔
747
}
748

749
func (c *VirtualMachineController) updateGuestAgentConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
35✔
750

35✔
751
        // Update the condition when GA is connected
35✔
752
        channelConnected := false
35✔
753
        if domain != nil {
57✔
754
                for _, channel := range domain.Spec.Devices.Channels {
25✔
755
                        if channel.Target != nil {
6✔
756
                                c.logger.V(4).Infof("Channel: %s, %s", channel.Target.Name, channel.Target.State)
3✔
757
                                if channel.Target.Name == "org.qemu.guest_agent.0" {
6✔
758
                                        if channel.Target.State == "connected" {
5✔
759
                                                channelConnected = true
2✔
760
                                        }
2✔
761
                                }
762

763
                        }
764
                }
765
        }
766

767
        switch {
35✔
768
        case channelConnected && !condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected):
1✔
769
                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
770
                        Type:          v1.VirtualMachineInstanceAgentConnected,
1✔
771
                        LastProbeTime: metav1.Now(),
1✔
772
                        Status:        k8sv1.ConditionTrue,
1✔
773
                }
1✔
774
                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
775
        case !channelConnected:
33✔
776
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAgentConnected)
33✔
777
        }
778

779
        if condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected) {
37✔
780
                client, err := c.launcherClients.GetLauncherClient(vmi)
2✔
781
                if err != nil {
2✔
782
                        return err
×
783
                }
×
784

785
                guestInfo, err := client.GetGuestInfo()
2✔
786
                if err != nil {
2✔
787
                        return err
×
788
                }
×
789

790
                var supported = false
2✔
791
                var reason = ""
2✔
792

2✔
793
                // For current versions, virt-launcher's supported commands will always contain data.
2✔
794
                // For backwards compatibility: during upgrade from a previous version of KubeVirt,
2✔
795
                // virt-launcher might not provide any supported commands. If the list of supported
2✔
796
                // commands is empty, fall back to previous behavior.
2✔
797
                if len(guestInfo.SupportedCommands) > 0 {
2✔
798
                        supported, reason = isGuestAgentSupported(vmi, guestInfo.SupportedCommands)
×
799
                        c.logger.V(3).Object(vmi).Info(reason)
×
800
                } else {
2✔
801
                        for _, version := range c.clusterConfig.GetSupportedAgentVersions() {
10✔
802
                                supported = supported || regexp.MustCompile(version).MatchString(guestInfo.GAVersion)
8✔
803
                        }
8✔
804
                        if !supported {
4✔
805
                                reason = fmt.Sprintf("Guest agent version '%s' is not supported", guestInfo.GAVersion)
2✔
806
                        }
2✔
807
                }
808

809
                if !supported {
4✔
810
                        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent) {
3✔
811
                                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
812
                                        Type:          v1.VirtualMachineInstanceUnsupportedAgent,
1✔
813
                                        LastProbeTime: metav1.Now(),
1✔
814
                                        Status:        k8sv1.ConditionTrue,
1✔
815
                                        Reason:        reason,
1✔
816
                                }
1✔
817
                                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
818
                        }
1✔
819
                } else {
×
820
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent)
×
821
                }
×
822

823
        }
824
        return nil
35✔
825
}
826

827
func (c *VirtualMachineController) updatePausedConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
35✔
828

35✔
829
        // Update paused condition in case VMI was paused / unpaused
35✔
830
        if domain != nil && domain.Status.Status == api.Paused {
37✔
831
                if !condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
4✔
832
                        c.calculatePausedCondition(vmi, domain.Status.Reason)
2✔
833
                }
2✔
834
        } else if condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
34✔
835
                c.logger.Object(vmi).V(3).Info("Removing paused condition")
1✔
836
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstancePaused)
1✔
837
        }
1✔
838
}
839

840
func dumpTargetFile(vmiName, volName string) string {
7✔
841
        targetFileName := fmt.Sprintf("%s-%s-%s.memory.dump", vmiName, volName, time.Now().Format("20060102-150405"))
7✔
842
        return targetFileName
7✔
843
}
7✔
844

845
func (c *VirtualMachineController) updateMemoryDumpInfo(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, domain *api.Domain) (v1.VolumeStatus, bool) {
6✔
846
        needsRefresh := false
6✔
847
        switch volumeStatus.Phase {
6✔
848
        case v1.HotplugVolumeMounted:
1✔
849
                needsRefresh = true
1✔
850
                c.logger.Object(vmi).V(3).Infof("Memory dump volume %s attached, marking it in progress", volumeStatus.Name)
1✔
851
                volumeStatus.Phase = v1.MemoryDumpVolumeInProgress
1✔
852
                volumeStatus.Message = fmt.Sprintf("Memory dump Volume %s is attached, getting memory dump", volumeStatus.Name)
1✔
853
                volumeStatus.Reason = VolumeMountedToPodReason
1✔
854
                volumeStatus.MemoryDumpVolume.TargetFileName = dumpTargetFile(vmi.Name, volumeStatus.Name)
1✔
855
        case v1.MemoryDumpVolumeInProgress:
3✔
856
                var memoryDumpMetadata *api.MemoryDumpMetadata
3✔
857
                if domain != nil {
6✔
858
                        memoryDumpMetadata = domain.Spec.Metadata.KubeVirt.MemoryDump
3✔
859
                }
3✔
860
                if memoryDumpMetadata == nil || memoryDumpMetadata.FileName != volumeStatus.MemoryDumpVolume.TargetFileName {
4✔
861
                        // memory dump wasnt triggered yet
1✔
862
                        return volumeStatus, needsRefresh
1✔
863
                }
1✔
864
                needsRefresh = true
2✔
865
                if memoryDumpMetadata.StartTimestamp != nil {
4✔
866
                        volumeStatus.MemoryDumpVolume.StartTimestamp = memoryDumpMetadata.StartTimestamp
2✔
867
                }
2✔
868
                if memoryDumpMetadata.EndTimestamp != nil && memoryDumpMetadata.Failed {
3✔
869
                        c.logger.Object(vmi).Errorf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
870
                        volumeStatus.Message = fmt.Sprintf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
871
                        volumeStatus.Phase = v1.MemoryDumpVolumeFailed
1✔
872
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
873
                } else if memoryDumpMetadata.Completed {
3✔
874
                        c.logger.Object(vmi).V(3).Infof("Marking memory dump to volume %s has completed", volumeStatus.Name)
1✔
875
                        volumeStatus.Phase = v1.MemoryDumpVolumeCompleted
1✔
876
                        volumeStatus.Message = fmt.Sprintf("Memory dump to Volume %s has completed successfully", volumeStatus.Name)
1✔
877
                        volumeStatus.Reason = VolumeReadyReason
1✔
878
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
879
                }
1✔
880
        }
881

882
        return volumeStatus, needsRefresh
5✔
883
}
884

885
func (c *VirtualMachineController) updateFSFreezeStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
35✔
886

35✔
887
        if domain == nil || domain.Status.FSFreezeStatus.Status == "" {
68✔
888
                return
33✔
889
        }
33✔
890

891
        if domain.Status.FSFreezeStatus.Status == api.FSThawed {
3✔
892
                vmi.Status.FSFreezeStatus = ""
1✔
893
        } else {
2✔
894
                vmi.Status.FSFreezeStatus = domain.Status.FSFreezeStatus.Status
1✔
895
        }
1✔
896

897
}
898

899
func IsoGuestVolumePath(namespace, name string, volume *v1.Volume) string {
×
900
        const basepath = "/var/run"
×
901
        switch {
×
902
        case volume.CloudInitNoCloud != nil:
×
903
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "noCloud.iso")
×
904
        case volume.CloudInitConfigDrive != nil:
×
905
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "configdrive.iso")
×
906
        case volume.ConfigMap != nil:
×
907
                return config.GetConfigMapDiskPath(volume.Name)
×
908
        case volume.DownwardAPI != nil:
×
909
                return config.GetDownwardAPIDiskPath(volume.Name)
×
910
        case volume.Secret != nil:
×
911
                return config.GetSecretDiskPath(volume.Name)
×
912
        case volume.ServiceAccount != nil:
×
913
                return config.GetServiceAccountDiskPath()
×
914
        case volume.Sysprep != nil:
×
915
                return config.GetSysprepDiskPath(volume.Name)
×
916
        default:
×
917
                return ""
×
918
        }
919
}
920

921
func (c *VirtualMachineController) updateIsoSizeStatus(vmi *v1.VirtualMachineInstance) {
35✔
922
        var podUID string
35✔
923
        if vmi.Status.Phase != v1.Running {
52✔
924
                return
17✔
925
        }
17✔
926

927
        for k, v := range vmi.Status.ActivePods {
29✔
928
                if v == vmi.Status.NodeName {
11✔
929
                        podUID = string(k)
×
930
                        break
×
931
                }
932
        }
933
        if podUID == "" {
36✔
934
                log.DefaultLogger().Warningf("failed to find pod UID for VMI %s", vmi.Name)
18✔
935
                return
18✔
936
        }
18✔
937

938
        volumes := make(map[string]v1.Volume)
×
939
        for _, volume := range vmi.Spec.Volumes {
×
940
                volumes[volume.Name] = volume
×
941
        }
×
942

943
        for _, disk := range vmi.Spec.Domain.Devices.Disks {
×
944
                volume, ok := volumes[disk.Name]
×
945
                if !ok {
×
946
                        log.DefaultLogger().Warningf("No matching volume with name %s found", disk.Name)
×
947
                        continue
×
948
                }
949

950
                volPath := IsoGuestVolumePath(vmi.Namespace, vmi.Name, &volume)
×
951
                if volPath == "" {
×
952
                        continue
×
953
                }
954

955
                res, err := c.podIsolationDetector.Detect(vmi)
×
956
                if err != nil {
×
957
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
958
                        continue
×
959
                }
960

961
                rootPath, err := res.MountRoot()
×
962
                if err != nil {
×
963
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
964
                        continue
×
965
                }
966

967
                safeVolPath, err := rootPath.AppendAndResolveWithRelativeRoot(volPath)
×
968
                if err != nil {
×
969
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
970
                        continue
×
971
                }
972
                fileInfo, err := safepath.StatAtNoFollow(safeVolPath)
×
973
                if err != nil {
×
974
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
975
                        continue
×
976
                }
977

978
                for i := range vmi.Status.VolumeStatus {
×
979
                        if vmi.Status.VolumeStatus[i].Name == volume.Name {
×
980
                                vmi.Status.VolumeStatus[i].Size = fileInfo.Size()
×
981
                                continue
×
982
                        }
983
                }
984
        }
985
}
986

987
func (c *VirtualMachineController) updateSELinuxContext(vmi *v1.VirtualMachineInstance) error {
35✔
988
        _, present, err := selinux.NewSELinux()
35✔
989
        if err != nil {
70✔
990
                return err
35✔
991
        }
35✔
992
        if present {
×
993
                context, err := selinux.GetVirtLauncherContext(vmi)
×
994
                if err != nil {
×
995
                        return err
×
996
                }
×
997
                vmi.Status.SelinuxContext = context
×
998
        } else {
×
999
                vmi.Status.SelinuxContext = "none"
×
1000
        }
×
1001

1002
        return nil
×
1003
}
1004

1005
func (c *VirtualMachineController) updateMemoryInfo(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
35✔
1006
        if domain == nil || vmi == nil || domain.Spec.CurrentMemory == nil {
69✔
1007
                return nil
34✔
1008
        }
34✔
1009
        if vmi.Status.Memory == nil {
1✔
1010
                vmi.Status.Memory = &v1.MemoryStatus{}
×
1011
        }
×
1012
        currentGuest := parseLibvirtQuantity(int64(domain.Spec.CurrentMemory.Value), domain.Spec.CurrentMemory.Unit)
1✔
1013
        vmi.Status.Memory.GuestCurrent = currentGuest
1✔
1014
        return nil
1✔
1015
}
1016

1017
func (c *VirtualMachineController) updateVMIStatusFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
35✔
1018
        c.updateIsoSizeStatus(vmi)
35✔
1019
        err := c.updateSELinuxContext(vmi)
35✔
1020
        if err != nil {
70✔
1021
                c.logger.Reason(err).Errorf("couldn't find the SELinux context for %s", vmi.Name)
35✔
1022
        }
35✔
1023
        c.updateGuestInfoFromDomain(vmi, domain)
35✔
1024
        c.updateVolumeStatusesFromDomain(vmi, domain)
35✔
1025
        c.updateFSFreezeStatus(vmi, domain)
35✔
1026
        c.updateBackupStatus(vmi, domain)
35✔
1027
        c.updateMachineType(vmi, domain)
35✔
1028
        if err = c.updateMemoryInfo(vmi, domain); err != nil {
35✔
1029
                return err
×
1030
        }
×
1031
        cbt.SetChangedBlockTrackingOnVMIFromDomain(vmi, domain)
35✔
1032
        err = c.netStat.UpdateStatus(vmi, domain)
35✔
1033
        return err
35✔
1034
}
1035

1036
func (c *VirtualMachineController) updateVMIConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
35✔
1037
        c.updateAccessCredentialConditions(vmi, domain, condManager)
35✔
1038
        c.updateLiveMigrationConditions(vmi, condManager)
35✔
1039
        err := c.updateGuestAgentConditions(vmi, domain, condManager)
35✔
1040
        if err != nil {
35✔
1041
                return err
×
1042
        }
×
1043
        c.updatePausedConditions(vmi, domain, condManager)
35✔
1044

35✔
1045
        return nil
35✔
1046
}
1047

1048
func (c *VirtualMachineController) updateVMIStatus(oldStatus *v1.VirtualMachineInstanceStatus, vmi *v1.VirtualMachineInstance, domain *api.Domain, syncError error) (err error) {
36✔
1049
        condManager := controller.NewVirtualMachineInstanceConditionManager()
36✔
1050

36✔
1051
        // Don't update the VirtualMachineInstance if it is already in a final state
36✔
1052
        if vmi.IsFinal() {
37✔
1053
                return nil
1✔
1054
        }
1✔
1055

1056
        // Update VMI status fields based on what is reported on the domain
1057
        err = c.updateVMIStatusFromDomain(vmi, domain)
35✔
1058
        if err != nil {
35✔
1059
                return err
×
1060
        }
×
1061

1062
        // Calculate the new VirtualMachineInstance state based on what libvirt reported
1063
        err = c.setVmPhaseForStatusReason(domain, vmi)
35✔
1064
        if err != nil {
35✔
1065
                return err
×
1066
        }
×
1067

1068
        // Update conditions on VMI Status
1069
        err = c.updateVMIConditions(vmi, domain, condManager)
35✔
1070
        if err != nil {
35✔
1071
                return err
×
1072
        }
×
1073

1074
        // Store containerdisks and kernelboot checksums
1075
        if err := c.updateChecksumInfo(vmi, syncError); err != nil {
35✔
1076
                return err
×
1077
        }
×
1078

1079
        // Handle sync error
1080
        c.handleSyncError(vmi, condManager, syncError)
35✔
1081

35✔
1082
        controller.SetVMIPhaseTransitionTimestamp(oldStatus, &vmi.Status)
35✔
1083

35✔
1084
        // Only issue vmi update if status has changed
35✔
1085
        if !equality.Semantic.DeepEqual(*oldStatus, vmi.Status) {
69✔
1086
                key := controller.VirtualMachineInstanceKey(vmi)
34✔
1087
                c.vmiExpectations.SetExpectations(key, 1, 0)
34✔
1088
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
34✔
1089
                if err != nil {
34✔
1090
                        c.vmiExpectations.SetExpectations(key, 0, 0)
×
1091
                        return err
×
1092
                }
×
1093
        }
1094

1095
        // Record an event on the VMI when the VMI's phase changes
1096
        if oldStatus.Phase != vmi.Status.Phase {
45✔
1097
                c.recordPhaseChangeEvent(vmi)
10✔
1098
        }
10✔
1099

1100
        return nil
35✔
1101
}
1102

1103
type virtLauncherCriticalSecurebootError struct {
1104
        msg string
1105
}
1106

1107
func (e *virtLauncherCriticalSecurebootError) Error() string { return e.msg }
×
1108

1109
func (c *VirtualMachineController) handleSyncError(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager, syncError error) {
35✔
1110
        var criticalNetErr *neterrors.CriticalNetworkError
35✔
1111
        if goerror.As(syncError, &criticalNetErr) {
36✔
1112
                c.logger.Errorf("virt-launcher crashed due to a network error. Updating VMI %s status to Failed", vmi.Name)
1✔
1113
                vmi.Status.Phase = v1.Failed
1✔
1114
        }
1✔
1115
        if _, ok := syncError.(*virtLauncherCriticalSecurebootError); ok {
35✔
1116
                c.logger.Errorf("virt-launcher does not support the Secure Boot setting. Updating VMI %s status to Failed", vmi.Name)
×
1117
                vmi.Status.Phase = v1.Failed
×
1118
        }
×
1119

1120
        if _, ok := syncError.(*vmiIrrecoverableError); ok {
36✔
1121
                c.logger.Errorf("virt-launcher reached an irrecoverable error. Updating VMI %s status to Failed", vmi.Name)
1✔
1122
                vmi.Status.Phase = v1.Failed
1✔
1123
        }
1✔
1124
        condManager.CheckFailure(vmi, syncError, "Synchronizing with the Domain failed.")
35✔
1125
}
1126

1127
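// recordPhaseChangeEvent emits a Started, Stopped or Crashed event matching the new VMI phase.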
func (c *VirtualMachineController) recordPhaseChangeEvent(vmi *v1.VirtualMachineInstance) {
10✔
1128
        switch vmi.Status.Phase {
10✔
1129
        case v1.Running:
5✔
1130
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Started.String(), VMIStarted)
5✔
1131
        case v1.Succeeded:
×
1132
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Stopped.String(), VMIShutdown)
×
1133
        case v1.Failed:
5✔
1134
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Stopped.String(), VMICrashed)
5✔
1135
        }
1136
}
1137

1138
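// calculatePausedCondition appends a Paused condition whose reason reflects why the domain is
// paused (migration monitor, user request, or a low-level I/O error); other pause reasons are
// only logged.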
func (c *VirtualMachineController) calculatePausedCondition(vmi *v1.VirtualMachineInstance, reason api.StateChangeReason) {
2✔
1139
        now := metav1.NewTime(time.Now())
2✔
1140
        switch reason {
2✔
1141
        case api.ReasonPausedMigration:
×
1142
                if !isVMIPausedDuringMigration(vmi) || !c.isMigrationSource(vmi) {
×
1143
                        c.logger.Object(vmi).V(3).Infof("Domain is paused after migration by qemu, no condition needed")
×
1144
                        return
×
1145
                }
×
1146
                c.logger.Object(vmi).V(3).Info("Adding paused by migration monitor condition")
×
1147
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1148
                        Type:               v1.VirtualMachineInstancePaused,
×
1149
                        Status:             k8sv1.ConditionTrue,
×
1150
                        LastProbeTime:      now,
×
1151
                        LastTransitionTime: now,
×
1152
                        Reason:             "PausedByMigrationMonitor",
×
1153
                        Message:            "VMI was paused by the migration monitor",
×
1154
                })
×
1155
        case api.ReasonPausedUser:
1✔
1156
                c.logger.Object(vmi).V(3).Info("Adding paused condition")
1✔
1157
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
1✔
1158
                        Type:               v1.VirtualMachineInstancePaused,
1✔
1159
                        Status:             k8sv1.ConditionTrue,
1✔
1160
                        LastProbeTime:      now,
1✔
1161
                        LastTransitionTime: now,
1✔
1162
                        Reason:             "PausedByUser",
1✔
1163
                        Message:            "VMI was paused by user",
1✔
1164
                })
1✔
1165
        case api.ReasonPausedIOError:
×
1166
                c.logger.Object(vmi).V(3).Info("Adding paused condition")
×
1167
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1168
                        Type:               v1.VirtualMachineInstancePaused,
×
1169
                        Status:             k8sv1.ConditionTrue,
×
1170
                        LastProbeTime:      now,
×
1171
                        LastTransitionTime: now,
×
1172
                        Reason:             "PausedIOError",
×
1173
                        Message:            "VMI was paused, low-level IO error detected",
×
1174
                })
×
1175
        default:
1✔
1176
                c.logger.Object(vmi).V(3).Infof("Domain is paused for unknown reason, %s", reason)
1✔
1177
        }
1178
}
1179

1180
func newNonMigratableCondition(msg string, reason string) *v1.VirtualMachineInstanceCondition {
14✔
1181
        return &v1.VirtualMachineInstanceCondition{
14✔
1182
                Type:    v1.VirtualMachineInstanceIsMigratable,
14✔
1183
                Status:  k8sv1.ConditionFalse,
14✔
1184
                Message: msg,
14✔
1185
                Reason:  reason,
14✔
1186
        }
14✔
1187
}
14✔
1188

1189
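// calculateLiveMigrationCondition computes the IsMigratable condition and reports whether a
// migration would have to copy local storage (block migration).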
func (c *VirtualMachineController) calculateLiveMigrationCondition(vmi *v1.VirtualMachineInstance) (*v1.VirtualMachineInstanceCondition, bool) {
56✔
1190
        isBlockMigration, blockErr := c.checkVolumesForMigration(vmi)
56✔
1191

56✔
1192
        err := c.checkNetworkInterfacesForMigration(vmi)
56✔
1193
        if err != nil {
57✔
1194
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonInterfaceNotMigratable), isBlockMigration
1✔
1195
        }
1✔
1196

1197
        if err := c.isHostModelMigratable(vmi); err != nil {
55✔
1198
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonCPUModeNotMigratable), isBlockMigration
×
1199
        }
×
1200

1201
        if vmiContainsPCIHostDevice(vmi) {
57✔
1202
                return newNonMigratableCondition("VMI uses a PCI host devices", v1.VirtualMachineInstanceReasonHostDeviceNotMigratable), isBlockMigration
2✔
1203
        }
2✔
1204

1205
        if util.IsSEVVMI(vmi) {
54✔
1206
                return newNonMigratableCondition("VMI uses SEV", v1.VirtualMachineInstanceReasonSEVNotMigratable), isBlockMigration
1✔
1207
        } else if util.IsTDXVMI(vmi) {
54✔
1208
                return newNonMigratableCondition("VMI uses TDX", v1.VirtualMachineInstanceReasonTDXNotMigratable), isBlockMigration
1✔
1209
        }
1✔
1210

1211
        if util.IsSecureExecutionVMI(vmi) {
52✔
1212
                return newNonMigratableCondition("VMI uses Secure Execution", v1.VirtualMachineInstanceReasonSecureExecutionNotMigratable), isBlockMigration
1✔
1213
        }
1✔
1214

1215
        if reservation.HasVMIPersistentReservation(vmi) {
51✔
1216
                return newNonMigratableCondition("VMI uses SCSI persistent reservation", v1.VirtualMachineInstanceReasonPRNotMigratable), isBlockMigration
1✔
1217
        }
1✔
1218

1219
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
50✔
1220
                return newNonMigratableCondition(tscRequirement.Reason, v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable), isBlockMigration
1✔
1221
        }
1✔
1222

1223
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
49✔
1224
                return newNonMigratableCondition("VMI uses hyperv passthrough", v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable), isBlockMigration
1✔
1225
        }
1✔
1226

1227
        if blockErr != nil {
52✔
1228
                return newNonMigratableCondition(blockErr.Error(), v1.VirtualMachineInstanceReasonDisksNotMigratable), isBlockMigration
5✔
1229
        }
5✔
1230

1231
        return &v1.VirtualMachineInstanceCondition{
42✔
1232
                Type:   v1.VirtualMachineInstanceIsMigratable,
42✔
1233
                Status: k8sv1.ConditionTrue,
42✔
1234
        }, isBlockMigration
42✔
1235
}
1236

1237
func vmiContainsPCIHostDevice(vmi *v1.VirtualMachineInstance) bool {
91✔
1238
        return len(vmi.Spec.Domain.Devices.HostDevices) > 0 || len(vmi.Spec.Domain.Devices.GPUs) > 0
91✔
1239
}
91✔
1240

1241
type multipleNonMigratableCondition struct {
1242
        reasons []string
1243
        msgs    []string
1244
}
1245

1246
func newMultipleNonMigratableCondition() *multipleNonMigratableCondition {
36✔
1247
        return &multipleNonMigratableCondition{}
36✔
1248
}
36✔
1249

1250
func (cond *multipleNonMigratableCondition) addNonMigratableCondition(reason, msg string) {
1✔
1251
        cond.reasons = append(cond.reasons, reason)
1✔
1252
        cond.msgs = append(cond.msgs, msg)
1✔
1253
}
1✔
1254

1255
func (cond *multipleNonMigratableCondition) String() string {
1✔
1256
        var buffer bytes.Buffer
1✔
1257
        for i, c := range cond.reasons {
2✔
1258
                if i > 0 {
1✔
1259
                        buffer.WriteString(", ")
×
1260
                }
×
1261
                buffer.WriteString(fmt.Sprintf("%s: %s", c, cond.msgs[i]))
1✔
1262
        }
1263
        return buffer.String()
1✔
1264
}
1265

1266
func (cond *multipleNonMigratableCondition) generateStorageLiveMigrationCondition() *v1.VirtualMachineInstanceCondition {
36✔
1267
        switch len(cond.reasons) {
36✔
1268
        case 0:
35✔
1269
                return &v1.VirtualMachineInstanceCondition{
35✔
1270
                        Type:   v1.VirtualMachineInstanceIsStorageLiveMigratable,
35✔
1271
                        Status: k8sv1.ConditionTrue,
35✔
1272
                }
35✔
1273
        default:
1✔
1274
                return &v1.VirtualMachineInstanceCondition{
1✔
1275
                        Type:    v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1276
                        Status:  k8sv1.ConditionFalse,
1✔
1277
                        Message: cond.String(),
1✔
1278
                        Reason:  v1.VirtualMachineInstanceReasonNotMigratable,
1✔
1279
                }
1✔
1280
        }
1281
}
1282

1283
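// calculateLiveStorageMigrationCondition aggregates every reason that blocks storage live
// migration into a single IsStorageLiveMigratable condition.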
func (c *VirtualMachineController) calculateLiveStorageMigrationCondition(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstanceCondition {
36✔
1284
        multiCond := newMultipleNonMigratableCondition()
36✔
1285

36✔
1286
        if err := c.checkNetworkInterfacesForMigration(vmi); err != nil {
37✔
1287
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonInterfaceNotMigratable, err.Error())
1✔
1288
        }
1✔
1289

1290
        if err := c.isHostModelMigratable(vmi); err != nil {
36✔
1291
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonCPUModeNotMigratable, err.Error())
×
1292
        }
×
1293

1294
        if vmiContainsPCIHostDevice(vmi) {
36✔
1295
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHostDeviceNotMigratable, "VMI uses PCI host devices")
×
1296
        }
×
1297

1298
        if util.IsSEVVMI(vmi) {
36✔
1299
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonSEVNotMigratable, "VMI uses SEV")
×
1300
        } else if util.IsTDXVMI(vmi) {
36✔
1301
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonTDXNotMigratable, "VMI uses TDX")
×
1302
        }
×
1303

1304
        if reservation.HasVMIPersistentReservation(vmi) {
36✔
1305
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonPRNotMigratable, "VMI uses SCSI persistent reservation")
×
1306
        }
×
1307

1308
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
36✔
1309
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable, tscRequirement.Reason)
×
1310
        }
×
1311

1312
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
36✔
1313
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable, "VMI uses hyperv passthrough")
×
1314
        }
×
1315

1316
        return multiCond.generateStorageLiveMigrationCondition()
36✔
1317
}
1318

1319
func (c *VirtualMachineController) deleteVM(vmi *v1.VirtualMachineInstance) error {
4✔
1320
        err := c.processVmDelete(vmi)
4✔
1321
        if err != nil {
4✔
1322
                return err
×
1323
        }
×
1324
        // we can perform the cleanup immediately after
1325
        // the successful delete here because we don't have
1326
        // to report the deletion results on the VMI status
1327
        // in this case.
1328
        err = c.processVmCleanup(vmi)
4✔
1329
        if err != nil {
4✔
1330
                return err
×
1331
        }
×
1332

1333
        return nil
4✔
1334
}
1335

1336
// Determine if gracefulShutdown has been triggered by virt-launcher
1337
func (c *VirtualMachineController) hasGracefulShutdownTrigger(domain *api.Domain) bool {
44✔
1338
        if domain == nil {
58✔
1339
                return false
14✔
1340
        }
14✔
1341
        gracePeriod := domain.Spec.Metadata.KubeVirt.GracePeriod
30✔
1342

30✔
1343
        return gracePeriod != nil &&
30✔
1344
                gracePeriod.MarkedForGracefulShutdown != nil &&
30✔
1345
                *gracePeriod.MarkedForGracefulShutdown
30✔
1346
}
1347

1348
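// sync is the reconcile entry point for a single VMI/domain pair: it decides whether the domain
// should be shut down, deleted, or updated, performs that action, and then updates the VMI status.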
func (c *VirtualMachineController) sync(key string,
1349
        vmi *v1.VirtualMachineInstance,
1350
        vmiExists bool,
1351
        domain *api.Domain,
1352
        domainExists bool) error {
44✔
1353

44✔
1354
        oldStatus := vmi.Status.DeepCopy()
44✔
1355
        oldSpec := vmi.Spec.DeepCopy()
44✔
1356

44✔
1357
        // set to true when domain needs to be shutdown.
44✔
1358
        shouldShutdown := false
44✔
1359
        // set to true when domain needs to be removed from libvirt.
44✔
1360
        shouldDelete := false
44✔
1361
        // set to true when VirtualMachineInstance is active or about to become active.
44✔
1362
        shouldUpdate := false
44✔
1363
        // set to true when unrecoverable domain needs to be destroyed non-gracefully.
44✔
1364
        forceShutdownIrrecoverable := false
44✔
1365

44✔
1366
        c.logger.V(3).Infof("Processing event %v", key)
44✔
1367

44✔
1368
        if vmiExists && domainExists {
66✔
1369
                c.logger.Object(vmi).Infof("VMI is in phase: %v | Domain status: %v, reason: %v", vmi.Status.Phase, domain.Status.Status, domain.Status.Reason)
22✔
1370
        } else if vmiExists {
58✔
1371
                c.logger.Object(vmi).Infof("VMI is in phase: %v | Domain does not exist", vmi.Status.Phase)
14✔
1372
        } else if domainExists {
30✔
1373
                vmiRef := v1.NewVMIReferenceWithUUID(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name, domain.Spec.Metadata.KubeVirt.UID)
8✔
1374
                c.logger.Object(vmiRef).Infof("VMI does not exist | Domain status: %v, reason: %v", domain.Status.Status, domain.Status.Reason)
8✔
1375
        } else {
8✔
1376
                c.logger.Info("VMI does not exist | Domain does not exist")
×
1377
        }
×
1378

1379
        domainAlive := domainExists &&
44✔
1380
                domain.Status.Status != api.Shutoff &&
44✔
1381
                domain.Status.Status != api.Crashed &&
44✔
1382
                domain.Status.Status != ""
44✔
1383

44✔
1384
        forceShutdownIrrecoverable = domainExists && domainPausedFailedPostCopy(domain)
44✔
1385

44✔
1386
        gracefulShutdown := c.hasGracefulShutdownTrigger(domain)
44✔
1387
        if gracefulShutdown && vmi.IsRunning() {
44✔
1388
                if domainAlive {
×
1389
                        c.logger.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
×
1390
                        shouldShutdown = true
×
1391
                } else {
×
1392
                        shouldDelete = true
×
1393
                }
×
1394
        }
1395

1396
        // Determine if removal of the VirtualMachineInstance from the cache should result in deletion.
1397
        if !vmiExists {
52✔
1398
                if domainAlive {
14✔
1399
                        // The VirtualMachineInstance is deleted on the cluster, and domain is alive,
6✔
1400
                        // then shut down the domain.
6✔
1401
                        c.logger.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
6✔
1402
                        shouldShutdown = true
6✔
1403
                } else {
8✔
1404
                        // The VirtualMachineInstance is deleted on the cluster, and domain is not alive
2✔
1405
                        // then delete the domain.
2✔
1406
                        c.logger.Object(vmi).V(3).Info("Deleting domain for deleted VirtualMachineInstance object.")
2✔
1407
                        shouldDelete = true
2✔
1408
                }
2✔
1409
        }
1410

1411
        // Determine if VirtualMachineInstance is being deleted.
1412
        if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
46✔
1413
                if domainAlive {
3✔
1414
                        c.logger.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
1✔
1415
                        shouldShutdown = true
1✔
1416
                } else {
2✔
1417
                        c.logger.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
1✔
1418
                        shouldDelete = true
1✔
1419
                }
1✔
1420
        }
1421

1422
        // Determine if domain needs to be deleted as a result of VirtualMachineInstance
1423
        // shutting down naturally (guest internal invoked shutdown)
1424
        if vmiExists && vmi.IsFinal() {
45✔
1425
                c.logger.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
1✔
1426
                shouldDelete = true
1✔
1427
        }
1✔
1428

1429
        if !domainAlive && domainExists && !vmi.IsFinal() {
46✔
1430
                c.logger.Object(vmi).V(3).Info("Deleting inactive domain for vmi.")
2✔
1431
                shouldDelete = true
2✔
1432
        }
2✔
1433

1434
        // Determine if an active (or about to be active) VirtualMachineInstance should be updated.
1435
        if vmiExists && !vmi.IsFinal() {
79✔
1436
                // requiring the phase of the domain and VirtualMachineInstance to be in sync is an
35✔
1437
                // optimization that prevents unnecessary re-processing VMIs during the start flow.
35✔
1438
                phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
35✔
1439
                if err != nil {
35✔
1440
                        return err
×
1441
                }
×
1442
                if vmi.Status.Phase == phase {
61✔
1443
                        shouldUpdate = true
26✔
1444
                }
26✔
1445

1446
                if shouldDelay, delay := c.ioErrorRetryManager.ShouldDelay(string(vmi.UID), func() bool {
70✔
1447
                        return isIOError(shouldUpdate, domainExists, domain)
35✔
1448
                }); shouldDelay {
35✔
1449
                        shouldUpdate = false
×
1450
                        c.logger.Object(vmi).Infof("Delay vm update for %f seconds", delay.Seconds())
×
1451
                        c.queue.AddAfter(key, delay)
×
1452
                }
×
1453
        }
1454

1455
        var syncErr error
44✔
1456

44✔
1457
        // Process the VirtualMachineInstance update in this order.
44✔
1458
        // * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
44✔
1459
        // * Cleanup of already shutdown and Deleted VMIs
44✔
1460
        // * Update due to spec change and initial start flow.
44✔
1461
        switch {
44✔
1462
        case shouldShutdown:
7✔
1463
                c.logger.Object(vmi).V(3).Info("Processing shutdown.")
7✔
1464
                syncErr = c.processVmShutdown(vmi, domain)
7✔
1465
        case forceShutdownIrrecoverable:
1✔
1466
                msg := formatIrrecoverableErrorMessage(domain)
1✔
1467
                c.logger.Object(vmi).V(3).Infof("Processing a destruction of an irrecoverable domain - %s.", msg)
1✔
1468
                syncErr = c.processVmDestroy(vmi, domain)
1✔
1469
                if syncErr == nil {
2✔
1470
                        syncErr = &vmiIrrecoverableError{msg}
1✔
1471
                }
1✔
1472
        case shouldDelete:
4✔
1473
                c.logger.Object(vmi).V(3).Info("Processing deletion.")
4✔
1474
                syncErr = c.deleteVM(vmi)
4✔
1475
        case shouldUpdate:
24✔
1476
                c.logger.Object(vmi).V(3).Info("Processing vmi update")
24✔
1477
                syncErr = c.processVmUpdate(vmi, domain)
24✔
1478
        default:
8✔
1479
                c.logger.Object(vmi).V(3).Info("No update processing required")
8✔
1480
        }
1481
        if syncErr != nil && !vmi.IsFinal() {
49✔
1482
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.SyncFailed.String(), syncErr.Error())
5✔
1483

5✔
1484
                // `syncErr` will be propagated anyway, and it will be logged in `re-enqueueing`
5✔
1485
                // so there is no need to log it twice in hot path without increased verbosity.
5✔
1486
                c.logger.Object(vmi).Reason(syncErr).Error("Synchronizing the VirtualMachineInstance failed.")
5✔
1487
        }
5✔
1488

1489
        // Update the VirtualMachineInstance status, if the VirtualMachineInstance exists
1490
        if vmiExists {
80✔
1491
                vmi.Spec = *oldSpec
36✔
1492
                if err := c.updateVMIStatus(oldStatus, vmi, domain, syncErr); err != nil {
36✔
1493
                        c.logger.Object(vmi).Reason(err).Error("Updating the VirtualMachineInstance status failed.")
×
1494
                        return err
×
1495
                }
×
1496
        }
1497

1498
        if syncErr != nil {
49✔
1499
                return syncErr
5✔
1500
        }
5✔
1501

1502
        c.logger.Object(vmi).V(3).Info("Synchronization loop succeeded.")
39✔
1503
        return nil
39✔
1504

1505
}
1506

1507
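// processVmCleanup releases all node-local resources belonging to a VMI: migration proxy
// listeners, the downward metrics server, container disk and hotplug volume mounts, networking,
// and finally the launcher client and the cached domain entry.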
func (c *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstance) error {
5✔
1508
        vmiId := string(vmi.UID)
5✔
1509

5✔
1510
        c.logger.Object(vmi).Infof("Performing final local cleanup for vmi with uid %s", vmiId)
5✔
1511

5✔
1512
        c.migrationProxy.StopTargetListener(vmiId)
5✔
1513
        c.migrationProxy.StopSourceListener(vmiId)
5✔
1514

5✔
1515
        c.downwardMetricsManager.StopServer(vmi)
5✔
1516

5✔
1517
        // Unmount container disks and clean up remaining files
5✔
1518
        if err := c.containerDiskMounter.Unmount(vmi); err != nil {
5✔
1519
                return err
×
1520
        }
×
1521

1522
        // UnmountAll does the cleanup on a best-effort basis: it is
1523
        // safe to pass a nil cgroupManager.
1524
        cgroupManager, _ := getCgroupManager(vmi, c.host)
5✔
1525
        if err := c.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
5✔
1526
                return err
×
1527
        }
×
1528

1529
        c.teardownNetwork(vmi)
5✔
1530

5✔
1531
        c.sriovHotplugExecutorPool.Delete(vmi.UID)
5✔
1532

5✔
1533
        // Watch dog file and command client must be the last things removed here
5✔
1534
        c.launcherClients.CloseLauncherClient(vmi)
5✔
1535

5✔
1536
        // Remove the domain from cache in the event that we're performing
5✔
1537
        // a final cleanup and never received the "DELETE" event. This is
5✔
1538
        // possible if the VMI pod goes away before we receive the final domain
5✔
1539
        // "DELETE"
5✔
1540
        domain := api.NewDomainReferenceFromName(vmi.Namespace, vmi.Name)
5✔
1541
        c.logger.Object(domain).Infof("Removing domain from cache during final cleanup")
5✔
1542
        return c.domainStore.Delete(domain)
5✔
1543
}
1544

1545
func (c *VirtualMachineController) processVmDestroy(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
1546
        tryGracefully := false
1✔
1547
        return c.helperVmShutdown(vmi, domain, tryGracefully)
1✔
1548
}
1✔
1549

1550
func (c *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
7✔
1551
        tryGracefully := true
7✔
1552
        return c.helperVmShutdown(vmi, domain, tryGracefully)
7✔
1553
}
7✔
1554

1555
const firstGracefulShutdownAttempt = -1
1556

1557
// Determines if a domain's grace period has expired during shutdown.
1558
// If the grace period has started but not expired, timeLeft represents
1559
// the time in seconds left until the period expires.
1560
// If the grace period has not started, timeLeft will be set to -1.
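// For example, with a 30s grace period that started 10s ago, this returns (false, 20).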
1561
func (c *VirtualMachineController) hasGracePeriodExpired(terminationGracePeriod *int64, dom *api.Domain) (bool, int64) {
6✔
1562
        var hasExpired bool
6✔
1563
        var timeLeft int64
6✔
1564

6✔
1565
        gracePeriod := int64(0)
6✔
1566
        if terminationGracePeriod != nil {
7✔
1567
                gracePeriod = *terminationGracePeriod
1✔
1568
        } else if dom != nil && dom.Spec.Metadata.KubeVirt.GracePeriod != nil {
11✔
1569
                gracePeriod = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds
5✔
1570
        }
5✔
1571

1572
        // If gracePeriod == 0, then there will be no startTime set, deletion
1573
        // should occur immediately during shutdown.
1574
        if gracePeriod == 0 {
7✔
1575
                hasExpired = true
1✔
1576
                return hasExpired, timeLeft
1✔
1577
        }
1✔
1578

1579
        startTime := int64(0)
5✔
1580
        if dom != nil && dom.Spec.Metadata.KubeVirt.GracePeriod != nil && dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp != nil {
8✔
1581
                startTime = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp.UTC().Unix()
3✔
1582
        }
3✔
1583

1584
        if startTime == 0 {
7✔
1585
                // If gracePeriod > 0, then the shutdown signal needs to be sent
2✔
1586
                // and the gracePeriod start time needs to be set.
2✔
1587
                timeLeft = firstGracefulShutdownAttempt
2✔
1588
                return hasExpired, timeLeft
2✔
1589
        }
2✔
1590

1591
        now := time.Now().UTC().Unix()
3✔
1592
        diff := now - startTime
3✔
1593

3✔
1594
        if diff >= gracePeriod {
4✔
1595
                hasExpired = true
1✔
1596
                return hasExpired, timeLeft
1✔
1597
        }
1✔
1598

1599
        timeLeft = gracePeriod - diff
2✔
1600
        if timeLeft < 1 {
2✔
1601
                timeLeft = 1
×
1602
        }
×
1603
        return hasExpired, timeLeft
2✔
1604
}
1605

1606
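// helperVmShutdown shuts a domain down, preferring a graceful ACPI shutdown while the grace
// period has not expired and killing the domain otherwise.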
func (c *VirtualMachineController) helperVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, tryGracefully bool) error {
8✔
1607

8✔
1608
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
8✔
1609
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
8✔
1610
        if err != nil {
8✔
1611
                return err
×
1612
        }
×
1613

1614
        if domainHasGracePeriod(domain) && tryGracefully {
12✔
1615
                if expired, timeLeft := c.hasGracePeriodExpired(vmi.Spec.TerminationGracePeriodSeconds, domain); !expired {
7✔
1616
                        return c.handleVMIShutdown(vmi, domain, client, timeLeft)
3✔
1617
                }
3✔
1618
                c.logger.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
1✔
1619
        } else {
4✔
1620
                c.logger.Object(vmi).Infof("Graceful shutdown not set, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
4✔
1621
        }
4✔
1622

1623
        err = client.KillVirtualMachine(vmi)
5✔
1624
        if err != nil && !cmdclient.IsDisconnected(err) {
5✔
1625
                // Only report err if it wasn't the result of a disconnect.
×
1626
                //
×
1627
                // Both virt-launcher and virt-handler are trying to destroy
×
1628
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
1629
                // disconnected during the kill request, which shouldn't be
×
1630
                // considered an error.
×
1631
                return err
×
1632
        }
×
1633

1634
        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMIStopping)
5✔
1635

5✔
1636
        return nil
5✔
1637
}
1638

1639
func (c *VirtualMachineController) handleVMIShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, client cmdclient.LauncherClient, timeLeft int64) error {
3✔
1640
        if domain.Status.Status != api.Shutdown {
6✔
1641
                return c.shutdownVMI(vmi, client, timeLeft)
3✔
1642
        }
3✔
1643
        c.logger.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
×
1644
        return nil
×
1645
}
1646

1647
func (c *VirtualMachineController) shutdownVMI(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient, timeLeft int64) error {
3✔
1648
        err := client.ShutdownVirtualMachine(vmi)
3✔
1649
        if err != nil && !cmdclient.IsDisconnected(err) {
3✔
1650
                // Only report err if it wasn't the result of a disconnect.
×
1651
                //
×
1652
                // Both virt-launcher and virt-handler are trying to destroy
×
1653
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
1654
                // disconnected during the kill request, which shouldn't be
×
1655
                // considered an error.
×
1656
                return err
×
1657
        }
×
1658

1659
        c.logger.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
3✔
1660

3✔
1661
        // Only create a VMIGracefulShutdown event for the first attempt as we can
3✔
1662
        // easily hit the default burst limit of 25 for the
3✔
1663
        // EventSourceObjectSpamFilter when gracefully shutting down VMIs with a
3✔
1664
        // large TerminationGracePeriodSeconds value set. Hitting this limit can
3✔
1665
        // result in the eventual VMIShutdown event being dropped.
3✔
1666
        if timeLeft == firstGracefulShutdownAttempt {
5✔
1667
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), VMIGracefulShutdown)
2✔
1668
        }
2✔
1669

1670
        // Make sure that we don't hot-loop in case we send the first domain notification
1671
        if timeLeft == firstGracefulShutdownAttempt {
5✔
1672
                timeLeft = 5
2✔
1673
                if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
2✔
1674
                        timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
×
1675
                }
×
1676
        }
1677
        // In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
1678
        // That's important since a booting OS can miss ACPI signals
1679
        if timeLeft > 5 {
4✔
1680
                timeLeft = 5
1✔
1681
        }
1✔
1682

1683
        // pending graceful shutdown.
1684
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
3✔
1685
        return nil
3✔
1686
}
1687

1688
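// processVmDelete asks virt-launcher to delete the domain when a verified connection to the pod
// still exists; if the pod is already gone, the VMI is known to be down and nothing is signaled.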
func (c *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance) error {
4✔
1689

4✔
1690
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
4✔
1691
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
4✔
1692

4✔
1693
        // If the pod has been torn down, we know the VirtualMachineInstance is down.
4✔
1694
        if err == nil {
8✔
1695

4✔
1696
                c.logger.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())
4✔
1697

4✔
1698
                // pending deletion.
4✔
1699
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMISignalDeletion)
4✔
1700

4✔
1701
                err = client.DeleteDomain(vmi)
4✔
1702
                if err != nil && !cmdclient.IsDisconnected(err) {
4✔
1703
                        // Only report err if it wasn't the result of a disconnect.
×
1704
                        //
×
1705
                        // Both virt-launcher and virt-handler are trying to destroy
×
1706
                        // the VirtualMachineInstance at the same time. It's possible the client may get
×
1707
                        // disconnected during the kill request, which shouldn't be
×
1708
                        // considered an error.
×
1709
                        return err
×
1710
                }
×
1711
        }
1712

1713
        return nil
4✔
1714

1715
}
1716

1717
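// isVMIOwnedByNode reports whether this handler's node owns the VMI, based on the node-name
// label or, as a fallback, the status node name.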
func (c *VirtualMachineController) isVMIOwnedByNode(vmi *v1.VirtualMachineInstance) bool {
37✔
1718
        nodeName, ok := vmi.Labels[v1.NodeNameLabel]
37✔
1719

37✔
1720
        if ok && nodeName != "" && nodeName == c.host {
73✔
1721
                return true
36✔
1722
        }
36✔
1723

1724
        return vmi.Status.NodeName != "" && vmi.Status.NodeName == c.host
1✔
1725
}
1726

1727
func (c *VirtualMachineController) checkNetworkInterfacesForMigration(vmi *v1.VirtualMachineInstance) error {
95✔
1728
        return netvmispec.VerifyVMIMigratable(vmi, c.clusterConfig.GetNetworkBindings())
95✔
1729
}
95✔
1730

1731
func isReadOnlyDisk(disk *v1.Disk) bool {
8✔
1732
        isReadOnlyCDRom := disk.CDRom != nil && (disk.CDRom.ReadOnly == nil || *disk.CDRom.ReadOnly)
8✔
1733

8✔
1734
        return isReadOnlyCDRom
8✔
1735
}
8✔
1736

1737
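// checkVolumesForMigration inspects every VMI volume to decide whether a live migration would
// need to copy storage (block migration) and returns an error for volumes that prevent migration
// entirely, such as non-shared PVCs or non-shared host disks.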
func (c *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachineInstance) (blockMigrate bool, err error) {
67✔
1738
        volumeStatusMap := make(map[string]v1.VolumeStatus)
67✔
1739

67✔
1740
        for _, volumeStatus := range vmi.Status.VolumeStatus {
82✔
1741
                volumeStatusMap[volumeStatus.Name] = volumeStatus
15✔
1742
        }
15✔
1743

1744
        if len(vmi.Status.MigratedVolumes) > 0 {
67✔
1745
                blockMigrate = true
×
1746
        }
×
1747

1748
        filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)
67✔
1749

67✔
1750
        // Check if all VMI volumes can be shared between the source and the destination
67✔
1751
        // of a live migration. blockMigrate will be returned as false only if all volumes
67✔
1752
        // are shared and the VMI has no local disks
67✔
1753
        // Some combinations of disks make the VMI unsuitable for live migration.
67✔
1754
        // A relevant error will be returned in this case.
67✔
1755
        for _, volume := range vmi.Spec.Volumes {
96✔
1756
                volSrc := volume.VolumeSource
29✔
1757
                if volSrc.PersistentVolumeClaim != nil || volSrc.DataVolume != nil {
41✔
1758
                        var claimName string
12✔
1759
                        if volSrc.PersistentVolumeClaim != nil {
19✔
1760
                                claimName = volSrc.PersistentVolumeClaim.ClaimName
7✔
1761
                        } else {
12✔
1762
                                claimName = volSrc.DataVolume.Name
5✔
1763
                        }
5✔
1764

1765
                        volumeStatus, ok := volumeStatusMap[volume.Name]
12✔
1766

12✔
1767
                        if !ok || volumeStatus.PersistentVolumeClaimInfo == nil {
13✔
1768
                                return true, fmt.Errorf("cannot migrate VMI: Unable to determine if PVC %v is shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
1✔
1769
                        } else if !storagetypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !storagetypes.IsMigratedVolume(volumeStatus.Name, vmi) {
18✔
1770
                                return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
6✔
1771
                        }
6✔
1772

1773
                } else if volSrc.HostDisk != nil {
20✔
1774
                        // Check if this is a translated PVC.
3✔
1775
                        volumeStatus, ok := volumeStatusMap[volume.Name]
3✔
1776
                        if ok && volumeStatus.PersistentVolumeClaimInfo != nil {
3✔
1777
                                if !storagetypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !storagetypes.IsMigratedVolume(volumeStatus.Name, vmi) {
×
1778
                                        return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", volumeStatus.PersistentVolumeClaimInfo.ClaimName)
×
1779
                                } else {
×
1780
                                        continue
×
1781
                                }
1782
                        }
1783

1784
                        shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
3✔
1785
                        if !shared {
4✔
1786
                                return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
1✔
1787
                        }
1✔
1788
                } else {
14✔
1789
                        if _, ok := filesystems[volume.Name]; ok {
18✔
1790
                                c.logger.Object(vmi).Infof("Volume %s is shared with virtiofs, allow live migration", volume.Name)
4✔
1791
                                continue
4✔
1792
                        }
1793

1794
                        isVolumeUsedByReadOnlyDisk := false
10✔
1795
                        for _, disk := range vmi.Spec.Domain.Devices.Disks {
18✔
1796
                                if isReadOnlyDisk(&disk) && disk.Name == volume.Name {
10✔
1797
                                        isVolumeUsedByReadOnlyDisk = true
2✔
1798
                                        break
2✔
1799
                                }
1800
                        }
1801

1802
                        if isVolumeUsedByReadOnlyDisk {
12✔
1803
                                continue
2✔
1804
                        }
1805

1806
                        if vmi.Status.MigrationMethod == "" || vmi.Status.MigrationMethod == v1.LiveMigration {
16✔
1807
                                c.logger.Object(vmi).Infof("migration is block migration because of %s volume", volume.Name)
8✔
1808
                        }
8✔
1809
                        blockMigrate = true
8✔
1810
                }
1811
        }
1812
        return
59✔
1813
}
1814

1815
func isVMIPausedDuringMigration(vmi *v1.VirtualMachineInstance) bool {
×
1816
        return vmi.Status.MigrationState != nil &&
×
1817
                vmi.Status.MigrationState.Mode == v1.MigrationPaused &&
×
1818
                !vmi.Status.MigrationState.Completed
×
1819
}
×
1820

1821
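// affinePitThread pins the KVM PIT thread of the QEMU process to the same CPU set as vCPU 0 and,
// for realtime VMIs, gives it FIFO scheduling.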
func (c *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstance) error {
×
1822
        res, err := c.podIsolationDetector.Detect(vmi)
×
1823
        if err != nil {
×
1824
                return err
×
1825
        }
×
1826
        var Mask unix.CPUSet
×
1827
        Mask.Zero()
×
1828
        qemuprocess, err := res.GetQEMUProcess()
×
1829
        if err != nil {
×
1830
                return err
×
1831
        }
×
1832
        qemupid := qemuprocess.Pid()
×
1833
        if qemupid == -1 {
×
1834
                return nil
×
1835
        }
×
1836

1837
        pitpid, err := res.KvmPitPid()
×
1838
        if err != nil {
×
1839
                return err
×
1840
        }
×
1841
        if pitpid == -1 {
×
1842
                return nil
×
1843
        }
×
1844
        if vmi.IsRealtimeEnabled() {
×
1845
                param := schedParam{priority: 2}
×
1846
                err = schedSetScheduler(pitpid, schedFIFO, param)
×
1847
                if err != nil {
×
1848
                        return fmt.Errorf("failed to set FIFO scheduling and priority 2 for thread %d: %w", pitpid, err)
×
1849
                }
×
1850
        }
1851
        vcpus, err := getVCPUThreadIDs(qemupid)
×
1852
        if err != nil {
×
1853
                return err
×
1854
        }
×
1855
        vpid, ok := vcpus["0"]
×
1856
        if !ok {
×
1857
                return nil
×
1858
        }
×
1859
        vcpupid, err := strconv.Atoi(vpid)
×
1860
        if err != nil {
×
1861
                return err
×
1862
        }
×
1863
        err = unix.SchedGetaffinity(vcpupid, &Mask)
×
1864
        if err != nil {
×
1865
                return err
×
1866
        }
×
1867
        return unix.SchedSetaffinity(pitpid, &Mask)
×
1868
}
1869

1870
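// configureHousekeepingCgroup creates a "housekeeping" cpuset child cgroup pinned to the
// emulator CPU set and moves all non-vCPU threads of the VMI into it.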
func (c *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
×
1871
        if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
×
1872
                c.logger.Reason(err).Error("CreateChildCgroup ")
×
1873
                return err
×
1874
        }
×
1875

1876
        key := controller.VirtualMachineInstanceKey(vmi)
×
1877
        domain, domainExists, _, err := c.getDomainFromCache(key)
×
1878
        if err != nil {
×
1879
                return err
×
1880
        }
×
1881
        // bail out if domain does not exist
1882
        if domainExists == false {
×
1883
                return nil
×
1884
        }
×
1885

1886
        if domain.Spec.CPUTune == nil || domain.Spec.CPUTune.EmulatorPin == nil {
×
1887
                return nil
×
1888
        }
×
1889

1890
        hkcpus, err := hardware.ParseCPUSetLine(domain.Spec.CPUTune.EmulatorPin.CPUSet, 100)
×
1891
        if err != nil {
×
1892
                return err
×
1893
        }
×
1894

1895
        c.logger.V(3).Object(vmi).Infof("housekeeping cpu: %v", hkcpus)
×
1896

×
1897
        err = cgroupManager.SetCpuSet("housekeeping", hkcpus)
×
1898
        if err != nil {
×
1899
                return err
×
1900
        }
×
1901

1902
        tids, err := cgroupManager.GetCgroupThreads()
×
1903
        if err != nil {
×
1904
                return err
×
1905
        }
×
1906
        hktids := make([]int, 0, 10)
×
1907

×
1908
        for _, tid := range tids {
×
1909
                proc, err := ps.FindProcess(tid)
×
1910
                if err != nil {
×
1911
                        c.logger.Object(vmi).Errorf("Failure to find process: %s", err.Error())
×
1912
                        return err
×
1913
                }
×
1914
                if proc == nil {
×
1915
                        return fmt.Errorf("failed to find process with tid: %d", tid)
×
1916
                }
×
1917
                comm := proc.Executable()
×
1918
                if strings.Contains(comm, "CPU ") && strings.Contains(comm, "KVM") {
×
1919
                        continue
×
1920
                }
1921
                hktids = append(hktids, tid)
×
1922
        }
1923

1924
        c.logger.V(3).Object(vmi).Infof("hk thread ids: %v", hktids)
×
1925
        for _, tid := range hktids {
×
1926
                err = cgroupManager.AttachTID("cpuset", "housekeeping", tid)
×
1927
                if err != nil {
×
1928
                        c.logger.Object(vmi).Errorf("Error attaching tid %d: %v", tid, err.Error())
×
1929
                        return err
×
1930
                }
×
1931
        }
1932

1933
        return nil
×
1934
}
1935

1936
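// vmUpdateHelperDefault performs the default VMI update flow: it prepares host disks, volumes,
// cgroups and networking for the VMI's current state, synchronizes the domain through the
// launcher client, and then runs post-sync housekeeping.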
func (c *VirtualMachineController) vmUpdateHelperDefault(vmi *v1.VirtualMachineInstance, domainExists bool) error {
23✔
1937
        client, err := c.launcherClients.GetLauncherClient(vmi)
23✔
1938
        if err != nil {
23✔
1939
                return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
×
1940
        }
×
1941

1942
        preallocatedVolumes := c.getPreallocatedVolumes(vmi)
23✔
1943

23✔
1944
        err = hostdisk.ReplacePVCByHostDisk(vmi)
23✔
1945
        if err != nil {
23✔
1946
                return err
×
1947
        }
×
1948

1949
        cgroupManager, err := getCgroupManager(vmi, c.host)
23✔
1950
        if err != nil {
23✔
1951
                return err
×
1952
        }
×
1953

1954
        var errorTolerantFeaturesError []error
23✔
1955
        readyToProceed, err := c.handleVMIState(vmi, cgroupManager, &errorTolerantFeaturesError)
23✔
1956
        if err != nil {
27✔
1957
                return err
4✔
1958
        }
4✔
1959

1960
        if !readyToProceed {
21✔
1961
                return nil
2✔
1962
        }
2✔
1963

1964
        // Synchronize the VirtualMachineInstance state
1965
        err = c.syncVirtualMachine(client, vmi, preallocatedVolumes)
17✔
1966
        if err != nil {
17✔
1967
                return err
×
1968
        }
×
1969

1970
        // Post-sync housekeeping
1971
        err = c.handleHousekeeping(vmi, cgroupManager, domainExists)
17✔
1972
        if err != nil {
17✔
1973
                return err
×
1974
        }
×
1975

1976
        return errors.NewAggregate(errorTolerantFeaturesError)
17✔
1977
}
1978

1979
// handleVMIState decides whether to call handleRunningVMI or handleStartingVMI based on the VMI's state.
1980
func (c *VirtualMachineController) handleVMIState(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) (bool, error) {
23✔
1981
        if vmi.IsRunning() {
38✔
1982
                return true, c.handleRunningVMI(vmi, cgroupManager, errorTolerantFeaturesError)
15✔
1983
        } else if !vmi.IsFinal() {
31✔
1984
                return c.handleStartingVMI(vmi, cgroupManager)
8✔
1985
        }
8✔
1986
        return true, nil
×
1987
}
1988

1989
// handleRunningVMI contains the logic specifically for running VMs (hotplugging in running state, metrics, network updates)
1990
func (c *VirtualMachineController) handleRunningVMI(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) error {
15✔
1991
        if err := c.hotplugSriovInterfaces(vmi); err != nil {
15✔
1992
                c.logger.Object(vmi).Error(err.Error())
×
1993
        }
×
1994

1995
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
16✔
1996
                if !goerror.Is(err, os.ErrNotExist) {
2✔
1997
                        return err
1✔
1998
                }
1✔
1999
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "HotplugFailed", err.Error())
×
2000
        }
2001

2002
        if err := c.getMemoryDump(vmi); err != nil {
14✔
2003
                return err
×
2004
        }
×
2005

2006
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
14✔
2007
        if err != nil {
14✔
2008
                return fmt.Errorf(failedDetectIsolationFmt, err)
×
2009
        }
×
2010

2011
        if err := c.downwardMetricsManager.StartServer(vmi, isolationRes.Pid()); err != nil {
14✔
2012
                return err
×
2013
        }
×
2014

2015
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForLiveUpdate(vmi), c.netConf); err != nil {
14✔
2016
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "NicHotplug", err.Error())
×
2017
                *errorTolerantFeaturesError = append(*errorTolerantFeaturesError, err)
×
2018
        }
×
2019

2020
        return nil
14✔
2021
}
2022

2023
// handleStartingVMI contains the logic for starting VMs (container disks, initial network setup, device ownership).
2024
func (c *VirtualMachineController) handleStartingVMI(
2025
        vmi *v1.VirtualMachineInstance,
2026
        cgroupManager cgroup.Manager,
2027
) (bool, error) {
8✔
2028
        // give containerDisks some time to become ready before throwing errors on retries
8✔
2029
        info := c.launcherClients.GetLauncherClientInfo(vmi)
8✔
2030
        if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
10✔
2031
                if err != nil {
3✔
2032
                        return false, err
1✔
2033
                }
1✔
2034
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
2035
                return false, nil
1✔
2036
        }
2037

2038
        var err error
6✔
2039
        err = c.containerDiskMounter.MountAndVerify(vmi)
6✔
2040
        if err != nil {
7✔
2041
                return false, err
1✔
2042
        }
1✔
2043

2044
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
5✔
2045
                if !goerror.Is(err, os.ErrNotExist) {
×
2046
                        return false, err
×
2047
                }
×
2048
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "HotplugFailed", err.Error())
×
2049
        }
2050

2051
        if !c.hotplugVolumesReady(vmi) {
6✔
2052
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
2053
                return false, nil
1✔
2054
        }
1✔
2055

2056
        if c.clusterConfig.GPUsWithDRAGateEnabled() {
4✔
2057
                if !drautil.IsAllDRAGPUsReconciled(vmi, vmi.Status.DeviceStatus) {
×
2058
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, "WaitingForDRAGPUAttributes",
×
2059
                                "Waiting for Dynamic Resource Allocation GPU attributes to be reconciled")
×
2060
                        return false, nil
×
2061
                }
×
2062
        }
2063

2064
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForVMStartup(vmi), c.netConf); err != nil {
5✔
2065
                return false, fmt.Errorf("failed to configure vmi network: %w", err)
1✔
2066
        }
1✔
2067

2068
        if err := c.setupDevicesOwnerships(vmi, c.recorder); err != nil {
3✔
2069
                return false, err
×
2070
        }
×
2071

2072
        if err := c.adjustResources(vmi); err != nil {
3✔
2073
                return false, err
×
2074
        }
×
2075

2076
        if c.shouldWaitForSEVAttestation(vmi) {
3✔
2077
                return false, nil
×
2078
        }
×
2079

2080
        return true, nil
3✔
2081
}
2082

2083
func (c *VirtualMachineController) adjustResources(vmi *v1.VirtualMachineInstance) error {
3✔
2084
        err := c.podIsolationDetector.AdjustResources(vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
3✔
2085
        if err != nil {
3✔
2086
                return fmt.Errorf("failed to adjust resources: %v", err)
×
2087
        }
×
2088
        return nil
3✔
2089
}
2090

2091
func (c *VirtualMachineController) shouldWaitForSEVAttestation(vmi *v1.VirtualMachineInstance) bool {
3✔
2092
        if util.IsSEVAttestationRequested(vmi) {
3✔
2093
                sev := vmi.Spec.Domain.LaunchSecurity.SEV
×
2094
                // Wait for the session parameters to be provided
×
2095
                return sev.Session == "" || sev.DHCert == ""
×
2096
        }
×
2097
        return false
3✔
2098
}
2099

2100
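// syncVirtualMachine builds the virt-launcher options from the cluster config and asks the
// launcher client to sync the domain, translating a missing EFI OVMF ROM into a critical
// Secure Boot error.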
func (c *VirtualMachineController) syncVirtualMachine(client cmdclient.LauncherClient, vmi *v1.VirtualMachineInstance, preallocatedVolumes []string) error {
17✔
2101
        smbios := c.clusterConfig.GetSMBIOS()
17✔
2102
        period := c.clusterConfig.GetMemBalloonStatsPeriod()
17✔
2103

17✔
2104
        options := virtualMachineOptions(smbios, period, preallocatedVolumes, c.capabilities, c.clusterConfig)
17✔
2105
        options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
17✔
2106

17✔
2107
        err := client.SyncVirtualMachine(vmi, options)
17✔
2108
        if err != nil {
17✔
2109
                if strings.Contains(err.Error(), "EFI OVMF rom missing") {
×
2110
                        return &virtLauncherCriticalSecurebootError{fmt.Sprintf("mismatch of Secure Boot setting and bootloaders: %v", err)}
×
2111
                }
×
2112
        }
2113

2114
        return err
17✔
2115
}
2116

2117
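// handleHousekeeping performs post-sync work: housekeeping cgroup setup for isolated emulator
// threads, realtime vCPU scheduling, PIT thread affinity, the Created event for new domains, and
// unmounting of removed hotplug volumes.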
func (c *VirtualMachineController) handleHousekeeping(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, domainExists bool) error {
17✔
2118
        if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
17✔
2119
                err := c.configureHousekeepingCgroup(vmi, cgroupManager)
×
2120
                if err != nil {
×
2121
                        return err
×
2122
                }
×
2123
        }
2124

2125
        // Configure vcpu scheduler for realtime workloads and affine PIT thread for dedicated CPU
2126
        if vmi.IsRealtimeEnabled() && !vmi.IsRunning() && !vmi.IsFinal() {
17✔
2127
                c.logger.Object(vmi).Info("Configuring vcpus for real time workloads")
×
2128
                if err := c.configureVCPUScheduler(vmi); err != nil {
×
2129
                        return err
×
2130
                }
×
2131
        }
2132
        if vmi.IsCPUDedicated() && !vmi.IsRunning() && !vmi.IsFinal() {
17✔
2133
                c.logger.V(3).Object(vmi).Info("Affining PIT thread")
×
2134
                if err := c.affinePitThread(vmi); err != nil {
×
2135
                        return err
×
2136
                }
×
2137
        }
2138
        if !domainExists {
20✔
2139
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Created.String(), VMIDefined)
3✔
2140
        }
3✔
2141

2142
        if vmi.IsRunning() {
31✔
2143
                // Unmount any disks that are no longer mounted
14✔
2144
                if err := c.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
14✔
2145
                        return err
×
2146
                }
×
2147
        }
2148
        return nil
17✔
2149
}
2150

2151
func (c *VirtualMachineController) getPreallocatedVolumes(vmi *v1.VirtualMachineInstance) []string {
23✔
2152
        var preallocatedVolumes []string
23✔
2153
        for _, volumeStatus := range vmi.Status.VolumeStatus {
27✔
2154
                if volumeStatus.PersistentVolumeClaimInfo != nil && volumeStatus.PersistentVolumeClaimInfo.Preallocated {
4✔
2155
                        preallocatedVolumes = append(preallocatedVolumes, volumeStatus.Name)
×
2156
                }
×
2157
        }
2158
        return preallocatedVolumes
23✔
2159
}
2160

2161
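// hotplugSriovInterfaces issues a rate-limited host-device hotplug request when SR-IOV
// interfaces reported by Multus have not yet been attached to the domain.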
func (c *VirtualMachineController) hotplugSriovInterfaces(vmi *v1.VirtualMachineInstance) error {
15✔
2162
        sriovSpecInterfaces := netvmispec.FilterSRIOVInterfaces(vmi.Spec.Domain.Devices.Interfaces)
15✔
2163

15✔
2164
        sriovSpecIfacesNames := netvmispec.IndexInterfaceSpecByName(sriovSpecInterfaces)
15✔
2165
        attachedSriovStatusIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
15✔
2166
                _, exist := sriovSpecIfacesNames[iface.Name]
×
2167
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceDomain) &&
×
2168
                        netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
2169
        })
×
2170

2171
        desiredSriovMultusPluggedIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
15✔
2172
                _, exist := sriovSpecIfacesNames[iface.Name]
×
2173
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
2174
        })
×
2175

2176
        if len(desiredSriovMultusPluggedIfaces) == len(attachedSriovStatusIfaces) {
30✔
2177
                c.sriovHotplugExecutorPool.Delete(vmi.UID)
15✔
2178
                return nil
15✔
2179
        }
15✔
2180

2181
        rateLimitedExecutor := c.sriovHotplugExecutorPool.LoadOrStore(vmi.UID)
×
2182
        return rateLimitedExecutor.Exec(func() error {
×
2183
                return c.hotplugSriovInterfacesCommand(vmi)
×
2184
        })
×
2185
}
2186

2187
func (c *VirtualMachineController) hotplugSriovInterfacesCommand(vmi *v1.VirtualMachineInstance) error {
×
2188
        const errMsgPrefix = "failed to hot-plug SR-IOV interfaces"
×
2189

×
2190
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
×
2191
        if err != nil {
×
2192
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2193
        }
×
2194

2195
        if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
×
2196
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), err.Error())
×
2197
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2198
        }
×
2199

2200
        c.logger.V(3).Object(vmi).Info("sending hot-plug host-devices command")
×
2201
        if err := client.HotplugHostDevices(vmi); err != nil {
×
2202
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2203
        }
×
2204

2205
        return nil
×
2206
}
2207

2208
func memoryDumpPath(volumeStatus v1.VolumeStatus) string {
×
2209
        target := hotplugdisk.GetVolumeMountDir(volumeStatus.Name)
×
2210
        dumpPath := filepath.Join(target, volumeStatus.MemoryDumpVolume.TargetFileName)
×
2211
        return dumpPath
×
2212
}
×
2213

2214
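// getMemoryDump triggers a memory dump through virt-launcher for every volume whose memory dump
// is currently in progress.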
func (c *VirtualMachineController) getMemoryDump(vmi *v1.VirtualMachineInstance) error {
14✔
2215
        const errMsgPrefix = "failed to getting memory dump"
14✔
2216

14✔
2217
        for _, volumeStatus := range vmi.Status.VolumeStatus {
17✔
2218
                if volumeStatus.MemoryDumpVolume == nil || volumeStatus.Phase != v1.MemoryDumpVolumeInProgress {
6✔
2219
                        continue
3✔
2220
                }
2221
                client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
×
2222
                if err != nil {
×
2223
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2224
                }
×
2225

2226
                c.logger.V(3).Object(vmi).Info("sending memory dump command")
×
2227
                err = client.VirtualMachineMemoryDump(vmi, memoryDumpPath(volumeStatus))
×
2228
                if err != nil {
×
2229
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2230
                }
×
2231
        }
2232

2233
        return nil
14✔
2234
}
2235

2236
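// hotplugVolumesReady reports whether all hotplug volumes (including utility
// volumes) have been mounted and are ready. It returns true immediately when the
// VMI has no hotplug volumes at all.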
func (c *VirtualMachineController) hotplugVolumesReady(vmi *v1.VirtualMachineInstance) bool {
        hasHotplugVolume := false
        for _, v := range vmi.Spec.Volumes {
                if storagetypes.IsHotplugVolume(&v) {
                        hasHotplugVolume = true
                        break
                }
        }
        if len(vmi.Spec.UtilityVolumes) > 0 {
                hasHotplugVolume = true
        }
        if !hasHotplugVolume {
                return true
        }
        if len(vmi.Status.VolumeStatus) == 0 {
                return false
        }
        for _, vs := range vmi.Status.VolumeStatus {
                if vs.HotplugVolume != nil && !(vs.Phase == v1.VolumeReady || vs.Phase == v1.HotplugVolumeMounted) {
                        // wait for volume to be mounted
                        return false
                }
        }
        return true
}

func (c *VirtualMachineController) processVmUpdate(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        shouldReturn, err := c.checkLauncherClient(vmi)
        if shouldReturn {
                return err
        }

        return c.vmUpdateHelperDefault(vmi, domain != nil)
}

func (c *VirtualMachineController) setVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) error {
        phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
        if err != nil {
                return err
        }
        vmi.Status.Phase = phase
        return nil
}

func vmiHasTerminationGracePeriod(vmi *v1.VirtualMachineInstance) bool {
        // if not set, the default grace period is used
        return vmi.Spec.TerminationGracePeriodSeconds == nil ||
                (vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds != 0)
}

func domainHasGracePeriod(domain *api.Domain) bool {
        return domain != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds != 0
}

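// isACPIEnabled reports whether a graceful ACPI shutdown is possible for the
// domain: the domain must expose the ACPI feature and the effective termination
// grace period must not be zero.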
func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
        return (vmiHasTerminationGracePeriod(vmi) || (vmi.Spec.TerminationGracePeriodSeconds == nil && domainHasGracePeriod(domain))) &&
                domain != nil &&
                domain.Spec.Features != nil &&
                domain.Spec.Features.ACPI != nil
}

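// calculateVmPhaseForStatusReason derives the VMI phase from the libvirt domain
// state and its status reason. When the domain is gone, the phase is inferred
// from the current VMI phase and the responsiveness of virt-launcher.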
func (c *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) (v1.VirtualMachineInstancePhase, error) {

        if domain == nil {
                switch {
                case vmi.IsScheduled():
                        isUnresponsive, isInitialized, err := c.launcherClients.IsLauncherClientUnresponsive(vmi)

                        if err != nil {
                                return vmi.Status.Phase, err
                        }
                        if !isInitialized {
                                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
                                return vmi.Status.Phase, err
                        } else if isUnresponsive {
                                // virt-launcher is gone and the VirtualMachineInstance never transitioned
                                // from Scheduled to Running.
                                return v1.Failed, nil
                        }
                        return v1.Scheduled, nil
                case !vmi.IsRunning() && !vmi.IsFinal():
                        return v1.Scheduled, nil
                case !vmi.IsFinal():
                        // This is unexpected: we should not be able to delete a VirtualMachineInstance before stopping it.
                        // However, it is possible if someone interacts with libvirt directly.
                        return v1.Failed, nil
                }
        } else {
                switch domain.Status.Status {
                case api.Shutoff, api.Crashed:
                        switch domain.Status.Reason {
                        case api.ReasonCrashed, api.ReasonPanicked:
                                return v1.Failed, nil
                        case api.ReasonDestroyed:
                                if isACPIEnabled(vmi, domain) {
                                        // When ACPI is available, a graceful shutdown was attempted, so a destroyed
                                        // domain means it was destroyed after the grace period expired.
                                        // Without ACPI, a destroyed domain is expected.
                                        return v1.Failed, nil
                                }
                                if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.Failed && vmi.Status.MigrationState.Mode == v1.MigrationPostCopy {
                                        // A VMI that failed a post-copy migration should never succeed
                                        return v1.Failed, nil
                                }
                                return v1.Succeeded, nil
                        case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
                                return v1.Succeeded, nil
                        case api.ReasonMigrated:
                                // if the domain migrated, we no longer know the phase.
                                return vmi.Status.Phase, nil
                        }
                case api.Paused:
                        switch domain.Status.Reason {
                        case api.ReasonPausedPostcopyFailed:
                                return v1.Failed, nil
                        default:
                                return v1.Running, nil
                        }
                case api.Running, api.Blocked, api.PMSuspended:
                        return v1.Running, nil
                }
        }
        return vmi.Status.Phase, nil
}

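// The handlers below are informer callbacks: they enqueue the key of the affected
// VMI or domain so the controller reconciles it on its work queue.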
func (c *VirtualMachineController) addDeleteFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.SetExpectations(key, 0, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) updateFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.vmiExpectations.SetExpectations(key, 0, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) addDomainFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteDomainFunc(obj interface{}) {
        domain, ok := obj.(*api.Domain)
        if !ok {
                tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                        c.logger.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
                        return
                }
                domain, ok = tombstone.Obj.(*api.Domain)
                if !ok {
                        c.logger.Reason(fmt.Errorf("tombstone contained object that is not a domain %#v", obj)).Error("Failed to process delete notification")
                        return
                }
        }
        c.logger.V(3).Object(domain).Info("Domain deleted")
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateDomainFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) isHostModelMigratable(vmi *v1.VirtualMachineInstance) error {
        if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == v1.CPUModeHostModel {
                if c.hostCpuModel == "" {
                        err := fmt.Errorf("the node \"%s\" does not allow migration with host-model", vmi.Status.NodeName)
                        c.logger.Object(vmi).Errorf("%s", err.Error())
                        return err
                }
        }
        return nil
}

func isIOError(shouldUpdate, domainExists bool, domain *api.Domain) bool {
        return shouldUpdate && domainExists && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedIOError
}

func (c *VirtualMachineController) updateMachineType(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil || vmi == nil {
                return
        }
        if domain.Spec.OS.Type.Machine != "" {
                vmi.Status.Machine = &v1.Machine{Type: domain.Spec.OS.Type.Machine}
        }
}

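// parseLibvirtQuantity converts a libvirt size value plus unit into a
// resource.Quantity. Decimal units ("KB", "MB", ...) map to powers of 1000 with
// DecimalSI formatting, while binary units ("k"/"KiB", "M"/"MiB", ...) map to
// powers of 1024 with BinarySI formatting; an unknown unit yields nil.
// For example, parseLibvirtQuantity(2, "MB") is 2000000 bytes, whereas
// parseLibvirtQuantity(2, "MiB") is 2097152 bytes.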
func parseLibvirtQuantity(value int64, unit string) *resource.Quantity {
        switch unit {
        case "b", "bytes":
                return resource.NewQuantity(value, resource.BinarySI)
        case "KB":
                return resource.NewQuantity(value*1000, resource.DecimalSI)
        case "MB":
                return resource.NewQuantity(value*1000*1000, resource.DecimalSI)
        case "GB":
                return resource.NewQuantity(value*1000*1000*1000, resource.DecimalSI)
        case "TB":
                return resource.NewQuantity(value*1000*1000*1000*1000, resource.DecimalSI)
        case "k", "KiB":
                return resource.NewQuantity(value*1024, resource.BinarySI)
        case "M", "MiB":
                return resource.NewQuantity(value*1024*1024, resource.BinarySI)
        case "G", "GiB":
                return resource.NewQuantity(value*1024*1024*1024, resource.BinarySI)
        case "T", "TiB":
                return resource.NewQuantity(value*1024*1024*1024*1024, resource.BinarySI)
        }
        return nil
}

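// updateBackupStatus copies the backup progress recorded by virt-launcher in the
// domain's KubeVirt metadata (completion flag, start/end timestamps, and message)
// into the VMI's ChangedBlockTracking backup status.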
func (c *VirtualMachineController) updateBackupStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil ||
                domain.Spec.Metadata.KubeVirt.Backup == nil ||
                vmi.Status.ChangedBlockTracking == nil ||
                vmi.Status.ChangedBlockTracking.BackupStatus == nil {
                return
        }
        backupMetadata := domain.Spec.Metadata.KubeVirt.Backup
        vmi.Status.ChangedBlockTracking.BackupStatus.Completed = backupMetadata.Completed
        if backupMetadata.StartTimestamp != nil {
                vmi.Status.ChangedBlockTracking.BackupStatus.StartTimestamp = backupMetadata.StartTimestamp
        }
        if backupMetadata.EndTimestamp != nil {
                vmi.Status.ChangedBlockTracking.BackupStatus.EndTimestamp = backupMetadata.EndTimestamp
        }
        if backupMetadata.BackupMsg != "" {
                vmi.Status.ChangedBlockTracking.BackupStatus.BackupMsg = &backupMetadata.BackupMsg
        }
        // TODO: Handle backup failure (backupMetadata.Failed) and abort status (backupMetadata.AbortStatus)
}