
kubevirt / kubevirt / d81f3799-2d0d-4f91-b833-b0d10a101bab

16 Nov 2025 03:02PM UTC coverage: 70.444% (+0.05%) from 70.392%
Build d81f3799-2d0d-4f91-b833-b0d10a101bab: push build, reported by prow, committed via web-flow

Merge pull request #15922 from ShellyKa13/utility-volumes
VEP 90: Add new Utility volumes type in VMI spec

384 of 440 new or added lines in 14 files covered (87.27%)
23 existing lines in 5 files now uncovered
70115 of 99533 relevant lines covered (70.44%)

434.66 hits per line
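
For orientation, the UtilityVolumes handling this pull request introduces (the loop at file lines 628-630 in the listing below, whose body the report flags as new and uncovered in updateVolumeStatusesFromDomain) amounts to folding utility volume names into the same name set as ordinary spec volumes. The following is a minimal, self-contained sketch of that idea only; the namedVolume and vmiSpec types here are simplified stand-ins for the real kubevirt.io/api types, not the actual API.

package main

import "fmt"

// Simplified stand-ins for the kubevirt.io/api volume types; only the Name
// field matters for this sketch.
type namedVolume struct{ Name string }

type vmiSpec struct {
	Volumes        []namedVolume
	UtilityVolumes []namedVolume // the new field introduced by VEP 90
}

// expectedVolumeNames mirrors the idea of the new loop: utility volumes are
// added to the same name set as ordinary spec volumes, so later volume-status
// updates treat them uniformly.
func expectedVolumeNames(spec vmiSpec) map[string]struct{} {
	names := make(map[string]struct{})
	for _, v := range spec.Volumes {
		names[v.Name] = struct{}{}
	}
	for _, uv := range spec.UtilityVolumes {
		names[uv.Name] = struct{}{}
	}
	return names
}

func main() {
	spec := vmiSpec{
		Volumes:        []namedVolume{{Name: "rootdisk"}},
		UtilityVolumes: []namedVolume{{Name: "tools"}},
	}
	fmt.Println(expectedVolumeNames(spec)) // map[rootdisk:{} tools:{}]
}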

Source File

/pkg/virt-handler/vm.go (66.03% covered)
1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright The KubeVirt Authors.
17
 *
18
 */
19

20
package virthandler
21

22
import (
23
        "bytes"
24
        "context"
25
        goerror "errors"
26
        "fmt"
27
        "os"
28
        "path/filepath"
29
        "regexp"
30
        "sort"
31
        "strconv"
32
        "strings"
33
        "time"
34

35
        "github.com/mitchellh/go-ps"
36
        "github.com/opencontainers/runc/libcontainer/cgroups"
37
        "golang.org/x/sys/unix"
38
        "libvirt.org/go/libvirtxml"
39

40
        k8sv1 "k8s.io/api/core/v1"
41
        "k8s.io/apimachinery/pkg/api/equality"
42
        "k8s.io/apimachinery/pkg/api/resource"
43
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
44
        "k8s.io/apimachinery/pkg/util/errors"
45
        "k8s.io/apimachinery/pkg/util/wait"
46
        "k8s.io/client-go/tools/cache"
47
        "k8s.io/client-go/tools/record"
48
        "k8s.io/client-go/util/workqueue"
49

50
        v1 "kubevirt.io/api/core/v1"
51
        "kubevirt.io/client-go/kubecli"
52
        "kubevirt.io/client-go/log"
53

54
        "kubevirt.io/kubevirt/pkg/config"
55
        "kubevirt.io/kubevirt/pkg/controller"
56
        drautil "kubevirt.io/kubevirt/pkg/dra"
57
        "kubevirt.io/kubevirt/pkg/executor"
58
        hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
59
        hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
60
        "kubevirt.io/kubevirt/pkg/network/domainspec"
61
        neterrors "kubevirt.io/kubevirt/pkg/network/errors"
62
        netsetup "kubevirt.io/kubevirt/pkg/network/setup"
63
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
64
        "kubevirt.io/kubevirt/pkg/safepath"
65
        "kubevirt.io/kubevirt/pkg/storage/cbt"
66
        "kubevirt.io/kubevirt/pkg/storage/reservation"
67
        storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
68
        "kubevirt.io/kubevirt/pkg/util"
69
        "kubevirt.io/kubevirt/pkg/util/hardware"
70
        "kubevirt.io/kubevirt/pkg/util/migrations"
71
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
72
        "kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
73
        virtcache "kubevirt.io/kubevirt/pkg/virt-handler/cache"
74
        "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
75
        cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
76
        containerdisk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
77
        deviceManager "kubevirt.io/kubevirt/pkg/virt-handler/device-manager"
78
        "kubevirt.io/kubevirt/pkg/virt-handler/heartbeat"
79
        hotplugvolume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"
80
        "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
81
        launcherclients "kubevirt.io/kubevirt/pkg/virt-handler/launcher-clients"
82
        migrationproxy "kubevirt.io/kubevirt/pkg/virt-handler/migration-proxy"
83
        multipathmonitor "kubevirt.io/kubevirt/pkg/virt-handler/multipath-monitor"
84
        "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
85
        "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
86
)
87

88
type netstat interface {
89
        UpdateStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
90
        Teardown(vmi *v1.VirtualMachineInstance)
91
}
92

93
type downwardMetricsManager interface {
94
        Run(stopCh chan struct{})
95
        StartServer(vmi *v1.VirtualMachineInstance, pid int) error
96
        StopServer(vmi *v1.VirtualMachineInstance)
97
}
98

99
type VirtualMachineController struct {
100
        *BaseController
101
        capabilities             *libvirtxml.Caps
102
        clientset                kubecli.KubevirtClient
103
        containerDiskMounter     containerdisk.Mounter
104
        downwardMetricsManager   downwardMetricsManager
105
        hotplugVolumeMounter     hotplugvolume.VolumeMounter
106
        hostCpuModel             string
107
        ioErrorRetryManager      *FailRetryManager
108
        deviceManagerController  *deviceManager.DeviceController
109
        heartBeat                *heartbeat.HeartBeat
110
        heartBeatInterval        time.Duration
111
        netConf                  netconf
112
        sriovHotplugExecutorPool *executor.RateLimitedExecutorPool
113
        vmiExpectations          *controller.UIDTrackingControllerExpectations
114
        vmiGlobalStore           cache.Store
115
        multipathSocketMonitor   *multipathmonitor.MultipathSocketMonitor
116
}
117

118
var getCgroupManager = func(vmi *v1.VirtualMachineInstance, host string) (cgroup.Manager, error) {
×
119
        return cgroup.NewManagerFromVM(vmi, host)
×
120
}
×
121

122
func NewVirtualMachineController(
123
        recorder record.EventRecorder,
124
        clientset kubecli.KubevirtClient,
125
        host string,
126
        virtPrivateDir string,
127
        kubeletPodsDir string,
128
        launcherClients launcherclients.LauncherClientsManager,
129
        vmiInformer cache.SharedIndexInformer,
130
        vmiGlobalStore cache.Store,
131
        domainInformer cache.SharedInformer,
132
        maxDevices int,
133
        clusterConfig *virtconfig.ClusterConfig,
134
        podIsolationDetector isolation.PodIsolationDetector,
135
        migrationProxy migrationproxy.ProxyManager,
136
        downwardMetricsManager downwardMetricsManager,
137
        capabilities *libvirtxml.Caps,
138
        hostCpuModel string,
139
        netConf netconf,
140
        netStat netstat,
141
) (*VirtualMachineController, error) {
114✔
142

114✔
143
        queue := workqueue.NewTypedRateLimitingQueueWithConfig[string](
114✔
144
                workqueue.DefaultTypedControllerRateLimiter[string](),
114✔
145
                workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-vm"},
114✔
146
        )
114✔
147
        logger := log.Log.With("controller", "vm")
114✔
148

114✔
149
        baseCtrl, err := NewBaseController(
114✔
150
                logger,
114✔
151
                host,
114✔
152
                recorder,
114✔
153
                clientset,
114✔
154
                queue,
114✔
155
                vmiInformer,
114✔
156
                domainInformer,
114✔
157
                clusterConfig,
114✔
158
                podIsolationDetector,
114✔
159
                launcherClients,
114✔
160
                migrationProxy,
114✔
161
                "/proc/%d/root/var/run",
114✔
162
                netStat,
114✔
163
        )
114✔
164
        if err != nil {
114✔
165
                return nil, err
×
166
        }
×
167

168
        containerDiskState := filepath.Join(virtPrivateDir, "container-disk-mount-state")
114✔
169
        if err := os.MkdirAll(containerDiskState, 0700); err != nil {
114✔
170
                return nil, err
×
171
        }
×
172

173
        hotplugState := filepath.Join(virtPrivateDir, "hotplug-volume-mount-state")
114✔
174
        if err := os.MkdirAll(hotplugState, 0700); err != nil {
114✔
175
                return nil, err
×
176
        }
×
177

178
        c := &VirtualMachineController{
114✔
179
                BaseController:           baseCtrl,
114✔
180
                capabilities:             capabilities,
114✔
181
                clientset:                clientset,
114✔
182
                containerDiskMounter:     containerdisk.NewMounter(podIsolationDetector, containerDiskState, clusterConfig),
114✔
183
                downwardMetricsManager:   downwardMetricsManager,
114✔
184
                hotplugVolumeMounter:     hotplugvolume.NewVolumeMounter(hotplugState, kubeletPodsDir, host),
114✔
185
                hostCpuModel:             hostCpuModel,
114✔
186
                ioErrorRetryManager:      NewFailRetryManager("io-error-retry", 10*time.Second, 3*time.Minute, 30*time.Second),
114✔
187
                heartBeatInterval:        1 * time.Minute,
114✔
188
                netConf:                  netConf,
114✔
189
                sriovHotplugExecutorPool: executor.NewRateLimitedExecutorPool(executor.NewExponentialLimitedBackoffCreator()),
114✔
190
                vmiExpectations:          controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
114✔
191
                vmiGlobalStore:           vmiGlobalStore,
114✔
192
                multipathSocketMonitor:   multipathmonitor.NewMultipathSocketMonitor(),
114✔
193
        }
114✔
194

114✔
195
        _, err = vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
114✔
196
                AddFunc:    c.addDeleteFunc,
114✔
197
                DeleteFunc: c.addDeleteFunc,
114✔
198
                UpdateFunc: c.updateFunc,
114✔
199
        })
114✔
200
        if err != nil {
114✔
201
                return nil, err
×
202
        }
×
203

204
        _, err = domainInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
114✔
205
                AddFunc:    c.addDomainFunc,
114✔
206
                DeleteFunc: c.deleteDomainFunc,
114✔
207
                UpdateFunc: c.updateDomainFunc,
114✔
208
        })
114✔
209
        if err != nil {
114✔
210
                return nil, err
×
211
        }
×
212

213
        permissions := "rw"
114✔
214
        if cgroups.IsCgroup2UnifiedMode() {
114✔
215
                // Need 'rwm' permissions otherwise ebpf filtering program attached by runc
×
216
                // will deny probing the device file with 'access' syscall. That in turn
×
217
                // will lead to virtqemud failure on VM startup.
×
218
                // This has been fixed upstream:
×
219
                //   https://github.com/opencontainers/runc/pull/2796
×
220
                // but the workaround is still needed to support previous versions without
×
221
                // the patch.
×
222
                permissions = "rwm"
×
223
        }
×
224

225
        c.deviceManagerController = deviceManager.NewDeviceController(
114✔
226
                c.host,
114✔
227
                maxDevices,
114✔
228
                permissions,
114✔
229
                deviceManager.PermanentHostDevicePlugins(maxDevices, permissions),
114✔
230
                clusterConfig,
114✔
231
                clientset.CoreV1())
114✔
232
        c.heartBeat = heartbeat.NewHeartBeat(clientset.CoreV1(), c.deviceManagerController, clusterConfig, host)
114✔
233

114✔
234
        return c, nil
114✔
235
}
236

237
func (c *VirtualMachineController) Run(threadiness int, stopCh chan struct{}) {
×
238
        defer c.queue.ShutDown()
×
239
        c.logger.Info("Starting virt-handler vms controller.")
×
240

×
241
        go c.deviceManagerController.Run(stopCh)
×
242

×
243
        go c.downwardMetricsManager.Run(stopCh)
×
244

×
245
        cache.WaitForCacheSync(stopCh, c.hasSynced)
×
246

×
247
        // queue keys for previous Domains on the host that no longer exist
×
248
        // in the cache. This ensures we perform local cleanup of deleted VMs.
×
249
        for _, domain := range c.domainStore.List() {
×
250
                d := domain.(*api.Domain)
×
251
                vmiRef := v1.NewVMIReferenceWithUUID(
×
252
                        d.ObjectMeta.Namespace,
×
253
                        d.ObjectMeta.Name,
×
254
                        d.Spec.Metadata.KubeVirt.UID)
×
255

×
256
                key := controller.VirtualMachineInstanceKey(vmiRef)
×
257

×
258
                _, exists, _ := c.vmiStore.GetByKey(key)
×
259
                if !exists {
×
260
                        c.queue.Add(key)
×
261
                }
×
262
        }
263
        c.multipathSocketMonitor.Run()
×
264

×
265
        heartBeatDone := c.heartBeat.Run(c.heartBeatInterval, stopCh)
×
266

×
267
        go c.ioErrorRetryManager.Run(stopCh)
×
268

×
269
        // Start the actual work
×
270
        for i := 0; i < threadiness; i++ {
×
271
                go wait.Until(c.runWorker, time.Second, stopCh)
×
272
        }
×
273

274
        <-heartBeatDone
×
275
        <-stopCh
×
276
        c.multipathSocketMonitor.Close()
×
277
        c.logger.Info("Stopping virt-handler vms controller.")
×
278
}
279

280
func (c *VirtualMachineController) runWorker() {
×
281
        for c.Execute() {
×
282
        }
×
283
}
284

285
func (c *VirtualMachineController) Execute() bool {
49✔
286
        key, quit := c.queue.Get()
49✔
287
        if quit {
49✔
288
                return false
×
289
        }
×
290
        defer c.queue.Done(key)
49✔
291
        if err := c.execute(key); err != nil {
55✔
292
                c.logger.Reason(err).Infof("re-enqueuing VirtualMachineInstance %v", key)
6✔
293
                c.queue.AddRateLimited(key)
6✔
294
        } else {
49✔
295
                c.logger.V(4).Infof("processed VirtualMachineInstance %v", key)
43✔
296
                c.queue.Forget(key)
43✔
297
        }
43✔
298
        return true
49✔
299
}
300

301
func (c *VirtualMachineController) execute(key string) error {
49✔
302
        vmi, vmiExists, err := c.getVMIFromCache(key)
49✔
303
        if err != nil {
50✔
304
                return err
1✔
305
        }
1✔
306

307
        if !vmiExists {
56✔
308
                // the vmiInformer probably has to catch up to the domainInformer
8✔
309
                // which already sees the vmi, so let's fetch it from the global
8✔
310
                // vmi informer to make sure the vmi has actually been deleted
8✔
311
                c.logger.V(4).Infof("fetching vmi for key %v from the global informer", key)
8✔
312
                obj, exists, err := c.vmiGlobalStore.GetByKey(key)
8✔
313
                if err != nil {
8✔
314
                        return err
×
315
                }
×
316
                if exists {
8✔
317
                        vmi = obj.(*v1.VirtualMachineInstance)
×
318
                }
×
319
                vmiExists = exists
8✔
320
        }
321

322
        if !vmiExists {
56✔
323
                c.vmiExpectations.DeleteExpectations(key)
8✔
324
        } else if !c.vmiExpectations.SatisfiedExpectations(key) {
48✔
325
                return nil
×
326
        }
×
327

328
        domain, domainExists, domainCachedUID, err := c.getDomainFromCache(key)
48✔
329
        if err != nil {
48✔
330
                return err
×
331
        }
×
332
        c.logger.Object(vmi).V(4).Infof("domain exists %v", domainExists)
48✔
333

48✔
334
        if !vmiExists && string(domainCachedUID) != "" {
56✔
335
                // it's possible to discover the UID from cache even if the domain
8✔
336
                // doesn't technically exist anymore
8✔
337
                vmi.UID = domainCachedUID
8✔
338
                c.logger.Object(vmi).Infof("Using cached UID for vmi found in domain cache")
8✔
339
        }
8✔
340

341
        // As a last effort, if the UID still can't be determined attempt
342
        // to retrieve it from the ghost record
343
        if string(vmi.UID) == "" {
49✔
344
                uid := virtcache.GhostRecordGlobalStore.LastKnownUID(key)
1✔
345
                if uid != "" {
1✔
346
                        c.logger.Object(vmi).V(3).Infof("ghost record cache provided %s as UID", uid)
×
347
                        vmi.UID = uid
×
348
                }
×
349
        }
350

351
        if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
49✔
352
                oldVMI := v1.NewVMIReferenceFromNameWithNS(vmi.Namespace, vmi.Name)
1✔
353
                oldVMI.UID = domain.Spec.Metadata.KubeVirt.UID
1✔
354
                expired, initialized, err := c.launcherClients.IsLauncherClientUnresponsive(oldVMI)
1✔
355
                if err != nil {
1✔
356
                        return err
×
357
                }
×
358
                // If we found an outdated domain which is also not alive anymore, clean up
359
                if !initialized {
1✔
360
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
×
361
                        return nil
×
362
                } else if expired {
1✔
363
                        c.logger.Object(oldVMI).Infof("Detected stale vmi %s that still needs cleanup before new vmi %s with identical name/namespace can be processed", oldVMI.UID, vmi.UID)
×
364
                        err = c.processVmCleanup(oldVMI)
×
365
                        if err != nil {
×
366
                                return err
×
367
                        }
×
368
                        // Make sure we re-enqueue the key to ensure this new VMI is processed
369
                        // after the stale domain is removed
370
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*5)
×
371
                }
372

373
                return nil
1✔
374
        }
375

376
        if domainExists &&
47✔
377
                (domainMigrated(domain) || domain.DeletionTimestamp != nil) {
47✔
378
                c.logger.Object(vmi).V(4).Info("detected orphan vmi")
×
379
                return c.deleteVM(vmi)
×
380
        }
×
381

382
        if migrations.IsMigrating(vmi) && (vmi.Status.Phase == v1.Failed) {
47✔
383
                c.logger.V(1).Infof("cleaning up VMI key %v as migration is in progress and the vmi is failed", key)
×
384
                err = c.processVmCleanup(vmi)
×
385
                if err != nil {
×
386
                        return err
×
387
                }
×
388
        }
389

390
        if vmi.DeletionTimestamp == nil && isMigrationInProgress(vmi, domain) {
49✔
391
                c.logger.V(4).Infof("ignoring key %v as migration is in progress", key)
2✔
392
                return nil
2✔
393
        }
2✔
394

395
        if vmiExists && !c.isVMIOwnedByNode(vmi) {
46✔
396
                c.logger.Object(vmi).V(4).Info("ignoring vmi as it is not owned by this node")
1✔
397
                return nil
1✔
398
        }
1✔
399

400
        if vmiExists && vmi.IsMigrationSource() {
44✔
401
                c.logger.Object(vmi).V(4).Info("ignoring vmi as it is a migration source")
×
402
                return nil
×
403
        }
×
404

405
        return c.sync(key,
44✔
406
                vmi.DeepCopy(),
44✔
407
                vmiExists,
44✔
408
                domain,
44✔
409
                domainExists)
44✔
410

411
}
412

413
type vmiIrrecoverableError struct {
414
        msg string
415
}
416

417
func (e *vmiIrrecoverableError) Error() string { return e.msg }
3✔
418

419
func formatIrrecoverableErrorMessage(domain *api.Domain) string {
1✔
420
        msg := "unknown reason"
1✔
421
        if domainPausedFailedPostCopy(domain) {
2✔
422
                msg = "VMI is irrecoverable due to failed post-copy migration"
1✔
423
        }
1✔
424
        return msg
1✔
425
}
426

427
func domainPausedFailedPostCopy(domain *api.Domain) bool {
32✔
428
        return domain != nil && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedPostcopyFailed
32✔
429
}
32✔
430

431
// teardownNetwork performs network cache cleanup for a specific VMI.
432
func (c *VirtualMachineController) teardownNetwork(vmi *v1.VirtualMachineInstance) {
5✔
433
        if string(vmi.UID) == "" {
5✔
434
                return
×
435
        }
×
436
        if err := c.netConf.Teardown(vmi); err != nil {
5✔
437
                c.logger.Reason(err).Errorf("failed to delete VMI Network cache files: %s", err.Error())
×
438
        }
×
439
        c.netStat.Teardown(vmi)
5✔
440
}
441

442
func canUpdateToMounted(currentPhase v1.VolumePhase) bool {
12✔
443
        return currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending || currentPhase == v1.HotplugVolumeAttachedToNode
12✔
444
}
12✔
445

446
func canUpdateToUnmounted(currentPhase v1.VolumePhase) bool {
8✔
447
        return currentPhase == v1.VolumeReady || currentPhase == v1.HotplugVolumeMounted || currentPhase == v1.HotplugVolumeAttachedToNode
8✔
448
}
8✔
449

450
func (c *VirtualMachineController) generateEventsForVolumeStatusChange(vmi *v1.VirtualMachineInstance, newStatusMap map[string]v1.VolumeStatus) {
29✔
451
        newStatusMapCopy := make(map[string]v1.VolumeStatus)
29✔
452
        for k, v := range newStatusMap {
61✔
453
                newStatusMapCopy[k] = v
32✔
454
        }
32✔
455
        for _, oldStatus := range vmi.Status.VolumeStatus {
59✔
456
                newStatus, ok := newStatusMap[oldStatus.Name]
30✔
457
                if !ok {
30✔
458
                        // status got removed
×
459
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, VolumeUnplugged, fmt.Sprintf("Volume %s has been unplugged", oldStatus.Name))
×
460
                        continue
×
461
                }
462
                if newStatus.Phase != oldStatus.Phase {
42✔
463
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, newStatus.Reason, newStatus.Message)
12✔
464
                }
12✔
465
                delete(newStatusMapCopy, newStatus.Name)
30✔
466
        }
467
        // Send events for any new statuses.
468
        for _, v := range newStatusMapCopy {
31✔
469
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v.Reason, v.Message)
2✔
470
        }
2✔
471
}
472

473
func (c *VirtualMachineController) updateHotplugVolumeStatus(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, specVolumeMap map[string]struct{}) (v1.VolumeStatus, bool) {
26✔
474
        needsRefresh := false
26✔
475
        if volumeStatus.Target == "" {
47✔
476
                needsRefresh = true
21✔
477
                mounted, err := c.hotplugVolumeMounter.IsMounted(vmi, volumeStatus.Name, volumeStatus.HotplugVolume.AttachPodUID)
21✔
478
                if err != nil {
21✔
479
                        c.logger.Object(vmi).Errorf("error occurred while checking if volume is mounted: %v", err)
×
480
                }
×
481
                if mounted {
33✔
482
                        if _, ok := specVolumeMap[volumeStatus.Name]; ok && canUpdateToMounted(volumeStatus.Phase) {
15✔
483
                                log.DefaultLogger().Infof("Marking volume %s as mounted in pod, it can now be attached", volumeStatus.Name)
3✔
484
                                // mounted, and still in spec, and in phase we can change, update status to mounted.
3✔
485
                                volumeStatus.Phase = v1.HotplugVolumeMounted
3✔
486
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been mounted in virt-launcher pod", volumeStatus.Name)
3✔
487
                                volumeStatus.Reason = VolumeMountedToPodReason
3✔
488
                        }
3✔
489
                } else {
9✔
490
                        // Not mounted, check if the volume is in the spec, if not update status
9✔
491
                        if _, ok := specVolumeMap[volumeStatus.Name]; !ok && canUpdateToUnmounted(volumeStatus.Phase) {
13✔
492
                                log.DefaultLogger().Infof("Marking volume %s as unmounted from pod, it can now be detached", volumeStatus.Name)
4✔
493
                                // Not mounted.
4✔
494
                                volumeStatus.Phase = v1.HotplugVolumeUnMounted
4✔
495
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been unmounted from virt-launcher pod", volumeStatus.Name)
4✔
496
                                volumeStatus.Reason = VolumeUnMountedFromPodReason
4✔
497
                        }
4✔
498
                }
499
        } else {
5✔
500
                // Successfully attached to VM.
5✔
501
                volumeStatus.Phase = v1.VolumeReady
5✔
502
                volumeStatus.Message = fmt.Sprintf("Successfully attach hotplugged volume %s to VM", volumeStatus.Name)
5✔
503
                volumeStatus.Reason = VolumeReadyReason
5✔
504
        }
5✔
505
        return volumeStatus, needsRefresh
26✔
506
}
507

508
func needToComputeChecksums(vmi *v1.VirtualMachineInstance) bool {
28✔
509
        containerDisks := map[string]*v1.Volume{}
28✔
510
        for _, volume := range vmi.Spec.Volumes {
31✔
511
                if volume.VolumeSource.ContainerDisk != nil {
5✔
512
                        containerDisks[volume.Name] = &volume
2✔
513
                }
2✔
514
        }
515

516
        for i := range vmi.Status.VolumeStatus {
32✔
517
                _, isContainerDisk := containerDisks[vmi.Status.VolumeStatus[i].Name]
4✔
518
                if !isContainerDisk {
7✔
519
                        continue
3✔
520
                }
521

522
                if vmi.Status.VolumeStatus[i].ContainerDiskVolume == nil ||
1✔
523
                        vmi.Status.VolumeStatus[i].ContainerDiskVolume.Checksum == 0 {
2✔
524
                        return true
1✔
525
                }
1✔
526
        }
527

528
        if util.HasKernelBootContainerImage(vmi) {
27✔
529
                if vmi.Status.KernelBootStatus == nil {
×
530
                        return true
×
531
                }
×
532

533
                kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
×
534

×
535
                if kernelBootContainer.KernelPath != "" &&
×
536
                        (vmi.Status.KernelBootStatus.KernelInfo == nil ||
×
537
                                vmi.Status.KernelBootStatus.KernelInfo.Checksum == 0) {
×
538
                        return true
×
539

×
540
                }
×
541

542
                if kernelBootContainer.InitrdPath != "" &&
×
543
                        (vmi.Status.KernelBootStatus.InitrdInfo == nil ||
×
544
                                vmi.Status.KernelBootStatus.InitrdInfo.Checksum == 0) {
×
545
                        return true
×
546

×
547
                }
×
548
        }
549

550
        return false
27✔
551
}
552

553
// updateChecksumInfo is kept for compatibility with older virt-handlers
554
// that validate checksum calculations in vmi.status. This validation was
555
// removed in PR #14021, but we had to keep the checksum calculations for upgrades.
556
// Once we're sure old handlers won't interrupt upgrades, this can be removed.
557
func (c *VirtualMachineController) updateChecksumInfo(vmi *v1.VirtualMachineInstance, syncError error) error {
35✔
558
        // If the imageVolume feature gate is enabled, upgrade support isn't required,
35✔
559
        // and we can skip the checksum calculation. By the time the feature gate is GA,
35✔
560
        // the checksum calculation should be removed.
35✔
561
        if syncError != nil || vmi.DeletionTimestamp != nil || !needToComputeChecksums(vmi) || c.clusterConfig.ImageVolumeEnabled() {
69✔
562
                return nil
34✔
563
        }
34✔
564

565
        diskChecksums, err := c.containerDiskMounter.ComputeChecksums(vmi)
1✔
566
        if goerror.Is(err, containerdisk.ErrDiskContainerGone) {
1✔
567
                c.logger.Errorf("cannot compute checksums as containerdisk/kernelboot containers seem to have been terminated")
×
568
                return nil
×
569
        }
×
570
        if err != nil {
1✔
571
                return err
×
572
        }
×
573

574
        // containerdisks
575
        for i := range vmi.Status.VolumeStatus {
2✔
576
                checksum, exists := diskChecksums.ContainerDiskChecksums[vmi.Status.VolumeStatus[i].Name]
1✔
577
                if !exists {
1✔
578
                        // not a containerdisk
×
579
                        continue
×
580
                }
581

582
                vmi.Status.VolumeStatus[i].ContainerDiskVolume = &v1.ContainerDiskInfo{
1✔
583
                        Checksum: checksum,
1✔
584
                }
1✔
585
        }
586

587
        // kernelboot
588
        if util.HasKernelBootContainerImage(vmi) {
2✔
589
                vmi.Status.KernelBootStatus = &v1.KernelBootStatus{}
1✔
590

1✔
591
                if diskChecksums.KernelBootChecksum.Kernel != nil {
2✔
592
                        vmi.Status.KernelBootStatus.KernelInfo = &v1.KernelInfo{
1✔
593
                                Checksum: *diskChecksums.KernelBootChecksum.Kernel,
1✔
594
                        }
1✔
595
                }
1✔
596

597
                if diskChecksums.KernelBootChecksum.Initrd != nil {
2✔
598
                        vmi.Status.KernelBootStatus.InitrdInfo = &v1.InitrdInfo{
1✔
599
                                Checksum: *diskChecksums.KernelBootChecksum.Initrd,
1✔
600
                        }
1✔
601
                }
1✔
602
        }
603

604
        return nil
1✔
605
}
606

607
func (c *VirtualMachineController) updateVolumeStatusesFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
60✔
608
        // The return value is only used by unit tests
60✔
609
        hasHotplug := false
60✔
610

60✔
611
        if len(vmi.Status.VolumeStatus) == 0 {
92✔
612
                return false
32✔
613
        }
32✔
614

615
        diskDeviceMap := make(map[string]string)
28✔
616
        if domain != nil {
55✔
617
                for _, disk := range domain.Spec.Devices.Disks {
35✔
618
                        // don't care about empty cdroms
8✔
619
                        if disk.Source.File != "" || disk.Source.Dev != "" {
14✔
620
                                diskDeviceMap[disk.Alias.GetName()] = disk.Target.Device
6✔
621
                        }
6✔
622
                }
623
        }
624
        specVolumeMap := make(map[string]struct{})
28✔
625
        for _, volume := range vmi.Spec.Volumes {
44✔
626
                specVolumeMap[volume.Name] = struct{}{}
16✔
627
        }
16✔
628
        for _, utilityVolume := range vmi.Spec.UtilityVolumes {
28✔
NEW
629
                specVolumeMap[utilityVolume.Name] = struct{}{}
×
UNCOV
630
        }
×
631
        newStatusMap := make(map[string]v1.VolumeStatus)
28✔
632
        var newStatuses []v1.VolumeStatus
28✔
633
        needsRefresh := false
28✔
634
        for _, volumeStatus := range vmi.Status.VolumeStatus {
57✔
635
                tmpNeedsRefresh := false
29✔
636
                // relying on the fact that target will be "" if not in the map
29✔
637
                // see updateHotplugVolumeStatus
29✔
638
                volumeStatus.Target = diskDeviceMap[volumeStatus.Name]
29✔
639
                if volumeStatus.HotplugVolume != nil {
55✔
640
                        hasHotplug = true
26✔
641
                        volumeStatus, tmpNeedsRefresh = c.updateHotplugVolumeStatus(vmi, volumeStatus, specVolumeMap)
26✔
642
                        needsRefresh = needsRefresh || tmpNeedsRefresh
26✔
643
                }
26✔
644
                if volumeStatus.MemoryDumpVolume != nil {
35✔
645
                        volumeStatus, tmpNeedsRefresh = c.updateMemoryDumpInfo(vmi, volumeStatus, domain)
6✔
646
                        needsRefresh = needsRefresh || tmpNeedsRefresh
6✔
647
                }
6✔
648
                newStatuses = append(newStatuses, volumeStatus)
29✔
649
                newStatusMap[volumeStatus.Name] = volumeStatus
29✔
650
        }
651
        sort.SliceStable(newStatuses, func(i, j int) bool {
29✔
652
                return strings.Compare(newStatuses[i].Name, newStatuses[j].Name) == -1
1✔
653
        })
1✔
654
        if needsRefresh {
49✔
655
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
21✔
656
        }
21✔
657
        c.generateEventsForVolumeStatusChange(vmi, newStatusMap)
28✔
658
        vmi.Status.VolumeStatus = newStatuses
28✔
659

28✔
660
        return hasHotplug
28✔
661
}
662

663
func (c *VirtualMachineController) updateGuestInfoFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
35✔
664

35✔
665
        if domain == nil || domain.Status.OSInfo.Name == "" || vmi.Status.GuestOSInfo.Name == domain.Status.OSInfo.Name {
69✔
666
                return
34✔
667
        }
34✔
668

669
        vmi.Status.GuestOSInfo.Name = domain.Status.OSInfo.Name
1✔
670
        vmi.Status.GuestOSInfo.Version = domain.Status.OSInfo.Version
1✔
671
        vmi.Status.GuestOSInfo.KernelRelease = domain.Status.OSInfo.KernelRelease
1✔
672
        vmi.Status.GuestOSInfo.PrettyName = domain.Status.OSInfo.PrettyName
1✔
673
        vmi.Status.GuestOSInfo.VersionID = domain.Status.OSInfo.VersionId
1✔
674
        vmi.Status.GuestOSInfo.KernelVersion = domain.Status.OSInfo.KernelVersion
1✔
675
        vmi.Status.GuestOSInfo.Machine = domain.Status.OSInfo.Machine
1✔
676
        vmi.Status.GuestOSInfo.ID = domain.Status.OSInfo.Id
1✔
677
}
678

679
func (c *VirtualMachineController) updateAccessCredentialConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
35✔
680

35✔
681
        if domain == nil || domain.Spec.Metadata.KubeVirt.AccessCredential == nil {
67✔
682
                return
32✔
683
        }
32✔
684

685
        message := domain.Spec.Metadata.KubeVirt.AccessCredential.Message
3✔
686
        status := k8sv1.ConditionFalse
3✔
687
        if domain.Spec.Metadata.KubeVirt.AccessCredential.Succeeded {
5✔
688
                status = k8sv1.ConditionTrue
2✔
689
        }
2✔
690

691
        add := false
3✔
692
        condition := condManager.GetCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
3✔
693
        if condition == nil {
4✔
694
                add = true
1✔
695
        } else if condition.Status != status || condition.Message != message {
4✔
696
                // if not as expected, remove, then add.
1✔
697
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
698
                add = true
1✔
699
        }
1✔
700
        if add {
5✔
701
                newCondition := v1.VirtualMachineInstanceCondition{
2✔
702
                        Type:               v1.VirtualMachineInstanceAccessCredentialsSynchronized,
2✔
703
                        LastTransitionTime: metav1.Now(),
2✔
704
                        Status:             status,
2✔
705
                        Message:            message,
2✔
706
                }
2✔
707
                vmi.Status.Conditions = append(vmi.Status.Conditions, newCondition)
2✔
708
                if status == k8sv1.ConditionTrue {
3✔
709
                        eventMessage := "Access credentials sync successful."
1✔
710
                        if message != "" {
1✔
711
                                eventMessage = fmt.Sprintf("Access credentials sync successful: %s", message)
×
712
                        }
×
713
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.AccessCredentialsSyncSuccess.String(), eventMessage)
1✔
714
                } else {
1✔
715
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.AccessCredentialsSyncFailed.String(),
1✔
716
                                fmt.Sprintf("Access credentials sync failed: %s", message),
1✔
717
                        )
1✔
718
                }
1✔
719
        }
720
}
721

722
func (c *VirtualMachineController) updateLiveMigrationConditions(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager) {
36✔
723
        // Calculate whether the VM is migratable
36✔
724
        liveMigrationCondition, isBlockMigration := c.calculateLiveMigrationCondition(vmi)
36✔
725
        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceIsMigratable) {
63✔
726
                vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
27✔
727
        } else {
36✔
728
                cond := condManager.GetCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
9✔
729
                if !equality.Semantic.DeepEqual(cond, liveMigrationCondition) {
10✔
730
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
1✔
731
                        vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
1✔
732
                }
1✔
733
        }
734
        // Set VMI Migration Method
735
        if isBlockMigration {
41✔
736
                vmi.Status.MigrationMethod = v1.BlockMigration
5✔
737
        } else {
36✔
738
                vmi.Status.MigrationMethod = v1.LiveMigration
31✔
739
        }
31✔
740
        storageLiveMigCond := c.calculateLiveStorageMigrationCondition(vmi)
36✔
741
        condManager.UpdateCondition(vmi, storageLiveMigCond)
36✔
742
        evictable := migrations.VMIMigratableOnEviction(c.clusterConfig, vmi)
36✔
743
        if evictable && liveMigrationCondition.Status == k8sv1.ConditionFalse {
37✔
744
                c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), "EvictionStrategy is set but vmi is not migratable; %s", liveMigrationCondition.Message)
1✔
745
        }
1✔
746
}
747

748
func (c *VirtualMachineController) updateGuestAgentConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
35✔
749

35✔
750
        // Update the condition when GA is connected
35✔
751
        channelConnected := false
35✔
752
        if domain != nil {
57✔
753
                for _, channel := range domain.Spec.Devices.Channels {
25✔
754
                        if channel.Target != nil {
6✔
755
                                c.logger.V(4).Infof("Channel: %s, %s", channel.Target.Name, channel.Target.State)
3✔
756
                                if channel.Target.Name == "org.qemu.guest_agent.0" {
6✔
757
                                        if channel.Target.State == "connected" {
5✔
758
                                                channelConnected = true
2✔
759
                                        }
2✔
760
                                }
761

762
                        }
763
                }
764
        }
765

766
        switch {
35✔
767
        case channelConnected && !condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected):
1✔
768
                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
769
                        Type:          v1.VirtualMachineInstanceAgentConnected,
1✔
770
                        LastProbeTime: metav1.Now(),
1✔
771
                        Status:        k8sv1.ConditionTrue,
1✔
772
                }
1✔
773
                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
774
        case !channelConnected:
33✔
775
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAgentConnected)
33✔
776
        }
777

778
        if condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected) {
37✔
779
                client, err := c.launcherClients.GetLauncherClient(vmi)
2✔
780
                if err != nil {
2✔
781
                        return err
×
782
                }
×
783

784
                guestInfo, err := client.GetGuestInfo()
2✔
785
                if err != nil {
2✔
786
                        return err
×
787
                }
×
788

789
                var supported = false
2✔
790
                var reason = ""
2✔
791

2✔
792
                // For current versions, virt-launcher's supported commands will always contain data.
2✔
793
                // For backwards compatibility: during upgrade from a previous version of KubeVirt,
2✔
794
                // virt-launcher might not provide any supported commands. If the list of supported
2✔
795
                // commands is empty, fall back to previous behavior.
2✔
796
                if len(guestInfo.SupportedCommands) > 0 {
2✔
797
                        supported, reason = isGuestAgentSupported(vmi, guestInfo.SupportedCommands)
×
798
                        c.logger.V(3).Object(vmi).Info(reason)
×
799
                } else {
2✔
800
                        for _, version := range c.clusterConfig.GetSupportedAgentVersions() {
10✔
801
                                supported = supported || regexp.MustCompile(version).MatchString(guestInfo.GAVersion)
8✔
802
                        }
8✔
803
                        if !supported {
4✔
804
                                reason = fmt.Sprintf("Guest agent version '%s' is not supported", guestInfo.GAVersion)
2✔
805
                        }
2✔
806
                }
807

808
                if !supported {
4✔
809
                        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent) {
3✔
810
                                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
811
                                        Type:          v1.VirtualMachineInstanceUnsupportedAgent,
1✔
812
                                        LastProbeTime: metav1.Now(),
1✔
813
                                        Status:        k8sv1.ConditionTrue,
1✔
814
                                        Reason:        reason,
1✔
815
                                }
1✔
816
                                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
817
                        }
1✔
818
                } else {
×
819
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent)
×
820
                }
×
821

822
        }
823
        return nil
35✔
824
}
825

826
func (c *VirtualMachineController) updatePausedConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
35✔
827

35✔
828
        // Update paused condition in case VMI was paused / unpaused
35✔
829
        if domain != nil && domain.Status.Status == api.Paused {
37✔
830
                if !condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
4✔
831
                        c.calculatePausedCondition(vmi, domain.Status.Reason)
2✔
832
                }
2✔
833
        } else if condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
34✔
834
                c.logger.Object(vmi).V(3).Info("Removing paused condition")
1✔
835
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstancePaused)
1✔
836
        }
1✔
837
}
838

839
func dumpTargetFile(vmiName, volName string) string {
7✔
840
        targetFileName := fmt.Sprintf("%s-%s-%s.memory.dump", vmiName, volName, time.Now().Format("20060102-150405"))
7✔
841
        return targetFileName
7✔
842
}
7✔
843

844
func (c *VirtualMachineController) updateMemoryDumpInfo(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, domain *api.Domain) (v1.VolumeStatus, bool) {
6✔
845
        needsRefresh := false
6✔
846
        switch volumeStatus.Phase {
6✔
847
        case v1.HotplugVolumeMounted:
1✔
848
                needsRefresh = true
1✔
849
                c.logger.Object(vmi).V(3).Infof("Memory dump volume %s attached, marking it in progress", volumeStatus.Name)
1✔
850
                volumeStatus.Phase = v1.MemoryDumpVolumeInProgress
1✔
851
                volumeStatus.Message = fmt.Sprintf("Memory dump Volume %s is attached, getting memory dump", volumeStatus.Name)
1✔
852
                volumeStatus.Reason = VolumeMountedToPodReason
1✔
853
                volumeStatus.MemoryDumpVolume.TargetFileName = dumpTargetFile(vmi.Name, volumeStatus.Name)
1✔
854
        case v1.MemoryDumpVolumeInProgress:
3✔
855
                var memoryDumpMetadata *api.MemoryDumpMetadata
3✔
856
                if domain != nil {
6✔
857
                        memoryDumpMetadata = domain.Spec.Metadata.KubeVirt.MemoryDump
3✔
858
                }
3✔
859
                if memoryDumpMetadata == nil || memoryDumpMetadata.FileName != volumeStatus.MemoryDumpVolume.TargetFileName {
4✔
860
                        // memory dump wasnt triggered yet
1✔
861
                        return volumeStatus, needsRefresh
1✔
862
                }
1✔
863
                needsRefresh = true
2✔
864
                if memoryDumpMetadata.StartTimestamp != nil {
4✔
865
                        volumeStatus.MemoryDumpVolume.StartTimestamp = memoryDumpMetadata.StartTimestamp
2✔
866
                }
2✔
867
                if memoryDumpMetadata.EndTimestamp != nil && memoryDumpMetadata.Failed {
3✔
868
                        c.logger.Object(vmi).Errorf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
869
                        volumeStatus.Message = fmt.Sprintf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
870
                        volumeStatus.Phase = v1.MemoryDumpVolumeFailed
1✔
871
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
872
                } else if memoryDumpMetadata.Completed {
3✔
873
                        c.logger.Object(vmi).V(3).Infof("Marking memory dump to volume %s has completed", volumeStatus.Name)
1✔
874
                        volumeStatus.Phase = v1.MemoryDumpVolumeCompleted
1✔
875
                        volumeStatus.Message = fmt.Sprintf("Memory dump to Volume %s has completed successfully", volumeStatus.Name)
1✔
876
                        volumeStatus.Reason = VolumeReadyReason
1✔
877
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
878
                }
1✔
879
        }
880

881
        return volumeStatus, needsRefresh
5✔
882
}
883

884
func (c *VirtualMachineController) updateFSFreezeStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
35✔
885

35✔
886
        if domain == nil || domain.Status.FSFreezeStatus.Status == "" {
68✔
887
                return
33✔
888
        }
33✔
889

890
        if domain.Status.FSFreezeStatus.Status == api.FSThawed {
3✔
891
                vmi.Status.FSFreezeStatus = ""
1✔
892
        } else {
2✔
893
                vmi.Status.FSFreezeStatus = domain.Status.FSFreezeStatus.Status
1✔
894
        }
1✔
895

896
}
897

898
func IsoGuestVolumePath(namespace, name string, volume *v1.Volume) string {
×
899
        const basepath = "/var/run"
×
900
        switch {
×
901
        case volume.CloudInitNoCloud != nil:
×
902
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "noCloud.iso")
×
903
        case volume.CloudInitConfigDrive != nil:
×
904
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "configdrive.iso")
×
905
        case volume.ConfigMap != nil:
×
906
                return config.GetConfigMapDiskPath(volume.Name)
×
907
        case volume.DownwardAPI != nil:
×
908
                return config.GetDownwardAPIDiskPath(volume.Name)
×
909
        case volume.Secret != nil:
×
910
                return config.GetSecretDiskPath(volume.Name)
×
911
        case volume.ServiceAccount != nil:
×
912
                return config.GetServiceAccountDiskPath()
×
913
        case volume.Sysprep != nil:
×
914
                return config.GetSysprepDiskPath(volume.Name)
×
915
        default:
×
916
                return ""
×
917
        }
918
}
919

920
func (c *VirtualMachineController) updateIsoSizeStatus(vmi *v1.VirtualMachineInstance) {
35✔
921
        var podUID string
35✔
922
        if vmi.Status.Phase != v1.Running {
52✔
923
                return
17✔
924
        }
17✔
925

926
        for k, v := range vmi.Status.ActivePods {
29✔
927
                if v == vmi.Status.NodeName {
11✔
928
                        podUID = string(k)
×
929
                        break
×
930
                }
931
        }
932
        if podUID == "" {
36✔
933
                log.DefaultLogger().Warningf("failed to find pod UID for VMI %s", vmi.Name)
18✔
934
                return
18✔
935
        }
18✔
936

937
        volumes := make(map[string]v1.Volume)
×
938
        for _, volume := range vmi.Spec.Volumes {
×
939
                volumes[volume.Name] = volume
×
940
        }
×
941

942
        for _, disk := range vmi.Spec.Domain.Devices.Disks {
×
943
                volume, ok := volumes[disk.Name]
×
944
                if !ok {
×
945
                        log.DefaultLogger().Warningf("No matching volume with name %s found", disk.Name)
×
946
                        continue
×
947
                }
948

949
                volPath := IsoGuestVolumePath(vmi.Namespace, vmi.Name, &volume)
×
950
                if volPath == "" {
×
951
                        continue
×
952
                }
953

954
                res, err := c.podIsolationDetector.Detect(vmi)
×
955
                if err != nil {
×
956
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
957
                        continue
×
958
                }
959

960
                rootPath, err := res.MountRoot()
×
961
                if err != nil {
×
962
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
963
                        continue
×
964
                }
965

966
                safeVolPath, err := rootPath.AppendAndResolveWithRelativeRoot(volPath)
×
967
                if err != nil {
×
968
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
969
                        continue
×
970
                }
971
                fileInfo, err := safepath.StatAtNoFollow(safeVolPath)
×
972
                if err != nil {
×
973
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
974
                        continue
×
975
                }
976

977
                for i := range vmi.Status.VolumeStatus {
×
978
                        if vmi.Status.VolumeStatus[i].Name == volume.Name {
×
979
                                vmi.Status.VolumeStatus[i].Size = fileInfo.Size()
×
980
                                continue
×
981
                        }
982
                }
983
        }
984
}
985

986
func (c *VirtualMachineController) updateSELinuxContext(vmi *v1.VirtualMachineInstance) error {
35✔
987
        _, present, err := selinux.NewSELinux()
35✔
988
        if err != nil {
70✔
989
                return err
35✔
990
        }
35✔
991
        if present {
×
992
                context, err := selinux.GetVirtLauncherContext(vmi)
×
993
                if err != nil {
×
994
                        return err
×
995
                }
×
996
                vmi.Status.SelinuxContext = context
×
997
        } else {
×
998
                vmi.Status.SelinuxContext = "none"
×
999
        }
×
1000

1001
        return nil
×
1002
}
1003

1004
func (c *VirtualMachineController) updateMemoryInfo(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
35✔
1005
        if domain == nil || vmi == nil || domain.Spec.CurrentMemory == nil {
69✔
1006
                return nil
34✔
1007
        }
34✔
1008
        if vmi.Status.Memory == nil {
1✔
1009
                vmi.Status.Memory = &v1.MemoryStatus{}
×
1010
        }
×
1011
        currentGuest := parseLibvirtQuantity(int64(domain.Spec.CurrentMemory.Value), domain.Spec.CurrentMemory.Unit)
1✔
1012
        vmi.Status.Memory.GuestCurrent = currentGuest
1✔
1013
        return nil
1✔
1014
}
1015

1016
func (c *VirtualMachineController) updateVMIStatusFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
35✔
1017
        c.updateIsoSizeStatus(vmi)
35✔
1018
        err := c.updateSELinuxContext(vmi)
35✔
1019
        if err != nil {
70✔
1020
                c.logger.Reason(err).Errorf("couldn't find the SELinux context for %s", vmi.Name)
35✔
1021
        }
35✔
1022
        c.updateGuestInfoFromDomain(vmi, domain)
35✔
1023
        c.updateVolumeStatusesFromDomain(vmi, domain)
35✔
1024
        c.updateFSFreezeStatus(vmi, domain)
35✔
1025
        c.updateMachineType(vmi, domain)
35✔
1026
        if err = c.updateMemoryInfo(vmi, domain); err != nil {
35✔
1027
                return err
×
1028
        }
×
1029
        cbt.SetChangedBlockTrackingOnVMIFromDomain(vmi, domain)
35✔
1030
        err = c.netStat.UpdateStatus(vmi, domain)
35✔
1031
        return err
35✔
1032
}
1033

1034
func (c *VirtualMachineController) updateVMIConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
35✔
1035
        c.updateAccessCredentialConditions(vmi, domain, condManager)
35✔
1036
        c.updateLiveMigrationConditions(vmi, condManager)
35✔
1037
        err := c.updateGuestAgentConditions(vmi, domain, condManager)
35✔
1038
        if err != nil {
35✔
1039
                return err
×
1040
        }
×
1041
        c.updatePausedConditions(vmi, domain, condManager)
35✔
1042

35✔
1043
        return nil
35✔
1044
}
1045

1046
func (c *VirtualMachineController) updateVMIStatus(oldStatus *v1.VirtualMachineInstanceStatus, vmi *v1.VirtualMachineInstance, domain *api.Domain, syncError error) (err error) {
36✔
1047
        condManager := controller.NewVirtualMachineInstanceConditionManager()
36✔
1048

36✔
1049
        // Don't update the VirtualMachineInstance if it is already in a final state
36✔
1050
        if vmi.IsFinal() {
37✔
1051
                return nil
1✔
1052
        }
1✔
1053

1054
        // Update VMI status fields based on what is reported on the domain
1055
        err = c.updateVMIStatusFromDomain(vmi, domain)
35✔
1056
        if err != nil {
35✔
1057
                return err
×
1058
        }
×
1059

1060
        // Calculate the new VirtualMachineInstance state based on what libvirt reported
1061
        err = c.setVmPhaseForStatusReason(domain, vmi)
35✔
1062
        if err != nil {
35✔
1063
                return err
×
1064
        }
×
1065

1066
        // Update conditions on VMI Status
1067
        err = c.updateVMIConditions(vmi, domain, condManager)
35✔
1068
        if err != nil {
35✔
1069
                return err
×
1070
        }
×
1071

1072
        // Store containerdisks and kernelboot checksums
1073
        if err := c.updateChecksumInfo(vmi, syncError); err != nil {
35✔
1074
                return err
×
1075
        }
×
1076

1077
        // Handle sync error
1078
        c.handleSyncError(vmi, condManager, syncError)
35✔
1079

35✔
1080
        controller.SetVMIPhaseTransitionTimestamp(oldStatus, &vmi.Status)
35✔
1081

35✔
1082
        // Only issue vmi update if status has changed
35✔
1083
        if !equality.Semantic.DeepEqual(*oldStatus, vmi.Status) {
69✔
1084
                key := controller.VirtualMachineInstanceKey(vmi)
34✔
1085
                c.vmiExpectations.SetExpectations(key, 1, 0)
34✔
1086
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
34✔
1087
                if err != nil {
34✔
1088
                        c.vmiExpectations.SetExpectations(key, 0, 0)
×
1089
                        return err
×
1090
                }
×
1091
        }
1092

1093
        // Record an event on the VMI when the VMI's phase changes
1094
        if oldStatus.Phase != vmi.Status.Phase {
45✔
1095
                c.recordPhaseChangeEvent(vmi)
10✔
1096
        }
10✔
1097

1098
        return nil
35✔
1099
}
1100

1101
type virtLauncherCriticalSecurebootError struct {
1102
        msg string
1103
}
1104

1105
func (e *virtLauncherCriticalSecurebootError) Error() string { return e.msg }
×
1106

1107
func (c *VirtualMachineController) handleSyncError(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager, syncError error) {
35✔
1108
        var criticalNetErr *neterrors.CriticalNetworkError
35✔
1109
        if goerror.As(syncError, &criticalNetErr) {
36✔
1110
                c.logger.Errorf("virt-launcher crashed due to a network error. Updating VMI %s status to Failed", vmi.Name)
1✔
1111
                vmi.Status.Phase = v1.Failed
1✔
1112
        }
1✔
1113
        if _, ok := syncError.(*virtLauncherCriticalSecurebootError); ok {
35✔
1114
                c.logger.Errorf("virt-launcher does not support the Secure Boot setting. Updating VMI %s status to Failed", vmi.Name)
×
1115
                vmi.Status.Phase = v1.Failed
×
1116
        }
×
1117

1118
        if _, ok := syncError.(*vmiIrrecoverableError); ok {
36✔
1119
                c.logger.Errorf("virt-launcher reached an irrecoverable error. Updating VMI %s status to Failed", vmi.Name)
1✔
1120
                vmi.Status.Phase = v1.Failed
1✔
1121
        }
1✔
1122
        condManager.CheckFailure(vmi, syncError, "Synchronizing with the Domain failed.")
35✔
1123
}
1124

1125
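// recordPhaseChangeEvent emits an event matching the VMI's new phase: started, shut down, or crashed.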
func (c *VirtualMachineController) recordPhaseChangeEvent(vmi *v1.VirtualMachineInstance) {
10✔
1126
        switch vmi.Status.Phase {
10✔
1127
        case v1.Running:
5✔
1128
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Started.String(), VMIStarted)
5✔
1129
        case v1.Succeeded:
×
1130
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Stopped.String(), VMIShutdown)
×
1131
        case v1.Failed:
5✔
1132
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Stopped.String(), VMICrashed)
5✔
1133
        }
1134
}
1135

1136
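// calculatePausedCondition appends a Paused condition reflecting why the domain was paused
// (migration monitor, user request, or low-level I/O error); pauses applied by qemu itself
// during migration get no condition.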
func (c *VirtualMachineController) calculatePausedCondition(vmi *v1.VirtualMachineInstance, reason api.StateChangeReason) {
2✔
1137
        now := metav1.NewTime(time.Now())
2✔
1138
        switch reason {
2✔
1139
        case api.ReasonPausedMigration:
×
1140
                if !isVMIPausedDuringMigration(vmi) || !c.isMigrationSource(vmi) {
×
1141
                        c.logger.Object(vmi).V(3).Infof("Domain is paused after migration by qemu, no condition needed")
×
1142
                        return
×
1143
                }
×
1144
                c.logger.Object(vmi).V(3).Info("Adding paused by migration monitor condition")
×
1145
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1146
                        Type:               v1.VirtualMachineInstancePaused,
×
1147
                        Status:             k8sv1.ConditionTrue,
×
1148
                        LastProbeTime:      now,
×
1149
                        LastTransitionTime: now,
×
1150
                        Reason:             "PausedByMigrationMonitor",
×
1151
                        Message:            "VMI was paused by the migration monitor",
×
1152
                })
×
1153
        case api.ReasonPausedUser:
1✔
1154
                c.logger.Object(vmi).V(3).Info("Adding paused condition")
1✔
1155
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
1✔
1156
                        Type:               v1.VirtualMachineInstancePaused,
1✔
1157
                        Status:             k8sv1.ConditionTrue,
1✔
1158
                        LastProbeTime:      now,
1✔
1159
                        LastTransitionTime: now,
1✔
1160
                        Reason:             "PausedByUser",
1✔
1161
                        Message:            "VMI was paused by user",
1✔
1162
                })
1✔
1163
        case api.ReasonPausedIOError:
×
1164
                c.logger.Object(vmi).V(3).Info("Adding paused condition")
×
1165
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1166
                        Type:               v1.VirtualMachineInstancePaused,
×
1167
                        Status:             k8sv1.ConditionTrue,
×
1168
                        LastProbeTime:      now,
×
1169
                        LastTransitionTime: now,
×
1170
                        Reason:             "PausedIOError",
×
1171
                        Message:            "VMI was paused, low-level IO error detected",
×
1172
                })
×
1173
        default:
1✔
1174
                c.logger.Object(vmi).V(3).Infof("Domain is paused for unknown reason, %s", reason)
1✔
1175
        }
1176
}
1177

1178
func newNonMigratableCondition(msg string, reason string) *v1.VirtualMachineInstanceCondition {
14✔
1179
        return &v1.VirtualMachineInstanceCondition{
14✔
1180
                Type:    v1.VirtualMachineInstanceIsMigratable,
14✔
1181
                Status:  k8sv1.ConditionFalse,
14✔
1182
                Message: msg,
14✔
1183
                Reason:  reason,
14✔
1184
        }
14✔
1185
}
14✔
1186

1187
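// calculateLiveMigrationCondition determines whether the VMI is live-migratable, returning the
// IsMigratable condition together with a flag indicating whether a block migration would be required.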
func (c *VirtualMachineController) calculateLiveMigrationCondition(vmi *v1.VirtualMachineInstance) (*v1.VirtualMachineInstanceCondition, bool) {
56✔
1188
        isBlockMigration, blockErr := c.checkVolumesForMigration(vmi)
56✔
1189

56✔
1190
        err := c.checkNetworkInterfacesForMigration(vmi)
56✔
1191
        if err != nil {
57✔
1192
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonInterfaceNotMigratable), isBlockMigration
1✔
1193
        }
1✔
1194

1195
        if err := c.isHostModelMigratable(vmi); err != nil {
55✔
1196
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonCPUModeNotMigratable), isBlockMigration
×
1197
        }
×
1198

1199
        if vmiContainsPCIHostDevice(vmi) {
57✔
1200
                return newNonMigratableCondition("VMI uses a PCI host devices", v1.VirtualMachineInstanceReasonHostDeviceNotMigratable), isBlockMigration
2✔
1201
        }
2✔
1202

1203
        if util.IsSEVVMI(vmi) {
54✔
1204
                return newNonMigratableCondition("VMI uses SEV", v1.VirtualMachineInstanceReasonSEVNotMigratable), isBlockMigration
1✔
1205
        } else if util.IsTDXVMI(vmi) {
54✔
1206
                return newNonMigratableCondition("VMI uses TDX", v1.VirtualMachineInstanceReasonTDXNotMigratable), isBlockMigration
1✔
1207
        }
1✔
1208

1209
        if util.IsSecureExecutionVMI(vmi) {
52✔
1210
                return newNonMigratableCondition("VMI uses Secure Execution", v1.VirtualMachineInstanceReasonSecureExecutionNotMigratable), isBlockMigration
1✔
1211
        }
1✔
1212

1213
        if reservation.HasVMIPersistentReservation(vmi) {
51✔
1214
                return newNonMigratableCondition("VMI uses SCSI persistent reservation", v1.VirtualMachineInstanceReasonPRNotMigratable), isBlockMigration
1✔
1215
        }
1✔
1216

1217
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
50✔
1218
                return newNonMigratableCondition(tscRequirement.Reason, v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable), isBlockMigration
1✔
1219
        }
1✔
1220

1221
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
49✔
1222
                return newNonMigratableCondition("VMI uses hyperv passthrough", v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable), isBlockMigration
1✔
1223
        }
1✔
1224

1225
        if blockErr != nil {
52✔
1226
                return newNonMigratableCondition(blockErr.Error(), v1.VirtualMachineInstanceReasonDisksNotMigratable), isBlockMigration
5✔
1227
        }
5✔
1228

1229
        return &v1.VirtualMachineInstanceCondition{
42✔
1230
                Type:   v1.VirtualMachineInstanceIsMigratable,
42✔
1231
                Status: k8sv1.ConditionTrue,
42✔
1232
        }, isBlockMigration
42✔
1233
}
1234

1235
func vmiContainsPCIHostDevice(vmi *v1.VirtualMachineInstance) bool {
91✔
1236
        return len(vmi.Spec.Domain.Devices.HostDevices) > 0 || len(vmi.Spec.Domain.Devices.GPUs) > 0
91✔
1237
}
91✔
1238

1239
type multipleNonMigratableCondition struct {
1240
        reasons []string
1241
        msgs    []string
1242
}
1243

1244
func newMultipleNonMigratableCondition() *multipleNonMigratableCondition {
36✔
1245
        return &multipleNonMigratableCondition{}
36✔
1246
}
36✔
1247

1248
func (cond *multipleNonMigratableCondition) addNonMigratableCondition(reason, msg string) {
1✔
1249
        cond.reasons = append(cond.reasons, reason)
1✔
1250
        cond.msgs = append(cond.msgs, msg)
1✔
1251
}
1✔
1252

1253
func (cond *multipleNonMigratableCondition) String() string {
1✔
1254
        var buffer bytes.Buffer
1✔
1255
        for i, c := range cond.reasons {
2✔
1256
                if i > 0 {
1✔
1257
                        buffer.WriteString(", ")
×
1258
                }
×
1259
                buffer.WriteString(fmt.Sprintf("%s: %s", c, cond.msgs[i]))
1✔
1260
        }
1261
        return buffer.String()
1✔
1262
}
1263

1264
func (cond *multipleNonMigratableCondition) generateStorageLiveMigrationCondition() *v1.VirtualMachineInstanceCondition {
36✔
1265
        switch len(cond.reasons) {
36✔
1266
        case 0:
35✔
1267
                return &v1.VirtualMachineInstanceCondition{
35✔
1268
                        Type:   v1.VirtualMachineInstanceIsStorageLiveMigratable,
35✔
1269
                        Status: k8sv1.ConditionTrue,
35✔
1270
                }
35✔
1271
        default:
1✔
1272
                return &v1.VirtualMachineInstanceCondition{
1✔
1273
                        Type:    v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1274
                        Status:  k8sv1.ConditionFalse,
1✔
1275
                        Message: cond.String(),
1✔
1276
                        Reason:  v1.VirtualMachineInstanceReasonNotMigratable,
1✔
1277
                }
1✔
1278
        }
1279
}
1280

1281
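// calculateLiveStorageMigrationCondition aggregates every reason that prevents storage live
// migration into a single IsStorageLiveMigratable condition.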
func (c *VirtualMachineController) calculateLiveStorageMigrationCondition(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstanceCondition {
36✔
1282
        multiCond := newMultipleNonMigratableCondition()
36✔
1283

36✔
1284
        if err := c.checkNetworkInterfacesForMigration(vmi); err != nil {
37✔
1285
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonInterfaceNotMigratable, err.Error())
1✔
1286
        }
1✔
1287

1288
        if err := c.isHostModelMigratable(vmi); err != nil {
36✔
1289
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonCPUModeNotMigratable, err.Error())
×
1290
        }
×
1291

1292
        if vmiContainsPCIHostDevice(vmi) {
36✔
1293
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHostDeviceNotMigratable, "VMI uses a PCI host device")
×
1294
        }
×
1295

1296
        if util.IsSEVVMI(vmi) {
36✔
1297
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonSEVNotMigratable, "VMI uses SEV")
×
1298
        } else if util.IsTDXVMI(vmi) {
36✔
1299
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonTDXNotMigratable, "VMI uses TDX")
×
1300
        }
×
1301

1302
        if reservation.HasVMIPersistentReservation(vmi) {
36✔
1303
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonPRNotMigratable, "VMI uses SCSI persistent reservation")
×
1304
        }
×
1305

1306
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
36✔
1307
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable, tscRequirement.Reason)
×
1308
        }
×
1309

1310
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
36✔
1311
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable, "VMI uses hyperv passthrough")
×
1312
        }
×
1313

1314
        return multiCond.generateStorageLiveMigrationCondition()
36✔
1315
}
1316

1317
func (c *VirtualMachineController) deleteVM(vmi *v1.VirtualMachineInstance) error {
4✔
1318
        err := c.processVmDelete(vmi)
4✔
1319
        if err != nil {
4✔
1320
                return err
×
1321
        }
×
1322
        // we can perform the cleanup immediately after
1323
        // the successful delete here because we don't have
1324
        // to report the deletion results on the VMI status
1325
        // in this case.
1326
        err = c.processVmCleanup(vmi)
4✔
1327
        if err != nil {
4✔
1328
                return err
×
1329
        }
×
1330

1331
        return nil
4✔
1332
}
1333

1334
// Determine if gracefulShutdown has been triggered by virt-launcher
1335
func (c *VirtualMachineController) hasGracefulShutdownTrigger(domain *api.Domain) bool {
44✔
1336
        if domain == nil {
58✔
1337
                return false
14✔
1338
        }
14✔
1339
        gracePeriod := domain.Spec.Metadata.KubeVirt.GracePeriod
30✔
1340

30✔
1341
        return gracePeriod != nil &&
30✔
1342
                gracePeriod.MarkedForGracefulShutdown != nil &&
30✔
1343
                *gracePeriod.MarkedForGracefulShutdown
30✔
1344
}
1345

1346
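// sync is the per-VMI reconciliation step: it decides whether the domain should be shut down,
// destroyed, deleted, or updated, performs that action, and then updates the VMI status accordingly.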
func (c *VirtualMachineController) sync(key string,
1347
        vmi *v1.VirtualMachineInstance,
1348
        vmiExists bool,
1349
        domain *api.Domain,
1350
        domainExists bool) error {
44✔
1351

44✔
1352
        oldStatus := vmi.Status.DeepCopy()
44✔
1353
        oldSpec := vmi.Spec.DeepCopy()
44✔
1354

44✔
1355
        // set to true when domain needs to be shutdown.
44✔
1356
        shouldShutdown := false
44✔
1357
        // set to true when domain needs to be removed from libvirt.
44✔
1358
        shouldDelete := false
44✔
1359
        // set to true when VirtualMachineInstance is active or about to become active.
44✔
1360
        shouldUpdate := false
44✔
1361
        // set to true when unrecoverable domain needs to be destroyed non-gracefully.
44✔
1362
        forceShutdownIrrecoverable := false
44✔
1363

44✔
1364
        c.logger.V(3).Infof("Processing event %v", key)
44✔
1365

44✔
1366
        if vmiExists && domainExists {
66✔
1367
                c.logger.Object(vmi).Infof("VMI is in phase: %v | Domain status: %v, reason: %v", vmi.Status.Phase, domain.Status.Status, domain.Status.Reason)
22✔
1368
        } else if vmiExists {
58✔
1369
                c.logger.Object(vmi).Infof("VMI is in phase: %v | Domain does not exist", vmi.Status.Phase)
14✔
1370
        } else if domainExists {
30✔
1371
                vmiRef := v1.NewVMIReferenceWithUUID(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name, domain.Spec.Metadata.KubeVirt.UID)
8✔
1372
                c.logger.Object(vmiRef).Infof("VMI does not exist | Domain status: %v, reason: %v", domain.Status.Status, domain.Status.Reason)
8✔
1373
        } else {
8✔
1374
                c.logger.Info("VMI does not exist | Domain does not exist")
×
1375
        }
×
1376

1377
        domainAlive := domainExists &&
44✔
1378
                domain.Status.Status != api.Shutoff &&
44✔
1379
                domain.Status.Status != api.Crashed &&
44✔
1380
                domain.Status.Status != ""
44✔
1381

44✔
1382
        forceShutdownIrrecoverable = domainExists && domainPausedFailedPostCopy(domain)
44✔
1383

44✔
1384
        gracefulShutdown := c.hasGracefulShutdownTrigger(domain)
44✔
1385
        if gracefulShutdown && vmi.IsRunning() {
44✔
1386
                if domainAlive {
×
1387
                        c.logger.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
×
1388
                        shouldShutdown = true
×
1389
                } else {
×
1390
                        shouldDelete = true
×
1391
                }
×
1392
        }
1393

1394
        // Determine whether removal of the VirtualMachineInstance from the cache should result in deletion.
1395
        if !vmiExists {
52✔
1396
                if domainAlive {
14✔
1397
                        // The VirtualMachineInstance is deleted on the cluster, and domain is alive,
6✔
1398
                        // then shut down the domain.
6✔
1399
                        c.logger.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
6✔
1400
                        shouldShutdown = true
6✔
1401
                } else {
8✔
1402
                        // The VirtualMachineInstance is deleted on the cluster, and domain is not alive
2✔
1403
                        // then delete the domain.
2✔
1404
                        c.logger.Object(vmi).V(3).Info("Deleting domain for deleted VirtualMachineInstance object.")
2✔
1405
                        shouldDelete = true
2✔
1406
                }
2✔
1407
        }
1408

1409
        // Determine if VirtualMachineInstance is being deleted.
1410
        if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
46✔
1411
                if domainAlive {
3✔
1412
                        c.logger.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
1✔
1413
                        shouldShutdown = true
1✔
1414
                } else {
2✔
1415
                        c.logger.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
1✔
1416
                        shouldDelete = true
1✔
1417
                }
1✔
1418
        }
1419

1420
        // Determine if domain needs to be deleted as a result of VirtualMachineInstance
1421
        // shutting down naturally (guest internal invoked shutdown)
1422
        if vmiExists && vmi.IsFinal() {
45✔
1423
                c.logger.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
1✔
1424
                shouldDelete = true
1✔
1425
        }
1✔
1426

1427
        if !domainAlive && domainExists && !vmi.IsFinal() {
46✔
1428
                c.logger.Object(vmi).V(3).Info("Deleting inactive domain for vmi.")
2✔
1429
                shouldDelete = true
2✔
1430
        }
2✔
1431

1432
        // Determine if an active (or about to be active) VirtualMachineInstance should be updated.
1433
        if vmiExists && !vmi.IsFinal() {
79✔
1434
                // requiring the phase of the domain and VirtualMachineInstance to be in sync is an
35✔
1435
                // optimization that prevents unnecessary re-processing of VMIs during the start flow.
35✔
1436
                phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
35✔
1437
                if err != nil {
35✔
1438
                        return err
×
1439
                }
×
1440
                if vmi.Status.Phase == phase {
61✔
1441
                        shouldUpdate = true
26✔
1442
                }
26✔
1443

1444
                if shouldDelay, delay := c.ioErrorRetryManager.ShouldDelay(string(vmi.UID), func() bool {
70✔
1445
                        return isIOError(shouldUpdate, domainExists, domain)
35✔
1446
                }); shouldDelay {
35✔
1447
                        shouldUpdate = false
×
1448
                        c.logger.Object(vmi).Infof("Delay vm update for %f seconds", delay.Seconds())
×
1449
                        c.queue.AddAfter(key, delay)
×
1450
                }
×
1451
        }
1452

1453
        var syncErr error
44✔
1454

44✔
1455
        // Process the VirtualMachineInstance update in this order.
44✔
1456
        // * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
44✔
1457
        // * Cleanup of already shutdown and Deleted VMIs
44✔
1458
        // * Update due to spec change and initial start flow.
44✔
1459
        switch {
44✔
1460
        case shouldShutdown:
7✔
1461
                c.logger.Object(vmi).V(3).Info("Processing shutdown.")
7✔
1462
                syncErr = c.processVmShutdown(vmi, domain)
7✔
1463
        case forceShutdownIrrecoverable:
1✔
1464
                msg := formatIrrecoverableErrorMessage(domain)
1✔
1465
                c.logger.Object(vmi).V(3).Infof("Processing a destruction of an irrecoverable domain - %s.", msg)
1✔
1466
                syncErr = c.processVmDestroy(vmi, domain)
1✔
1467
                if syncErr == nil {
2✔
1468
                        syncErr = &vmiIrrecoverableError{msg}
1✔
1469
                }
1✔
1470
        case shouldDelete:
4✔
1471
                c.logger.Object(vmi).V(3).Info("Processing deletion.")
4✔
1472
                syncErr = c.deleteVM(vmi)
4✔
1473
        case shouldUpdate:
24✔
1474
                c.logger.Object(vmi).V(3).Info("Processing vmi update")
24✔
1475
                syncErr = c.processVmUpdate(vmi, domain)
24✔
1476
        default:
8✔
1477
                c.logger.Object(vmi).V(3).Info("No update processing required")
8✔
1478
        }
1479
        if syncErr != nil && !vmi.IsFinal() {
49✔
1480
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.SyncFailed.String(), syncErr.Error())
5✔
1481

5✔
1482
                // `syncErr` will be propagated anyway, and it will be logged in `re-enqueueing`
5✔
1483
                // so there is no need to log it twice in hot path without increased verbosity.
5✔
1484
                c.logger.Object(vmi).Reason(syncErr).Error("Synchronizing the VirtualMachineInstance failed.")
5✔
1485
        }
5✔
1486

1487
        // Update the VirtualMachineInstance status, if the VirtualMachineInstance exists
1488
        if vmiExists {
80✔
1489
                vmi.Spec = *oldSpec
36✔
1490
                if err := c.updateVMIStatus(oldStatus, vmi, domain, syncErr); err != nil {
36✔
1491
                        c.logger.Object(vmi).Reason(err).Error("Updating the VirtualMachineInstance status failed.")
×
1492
                        return err
×
1493
                }
×
1494
        }
1495

1496
        if syncErr != nil {
49✔
1497
                return syncErr
5✔
1498
        }
5✔
1499

1500
        c.logger.Object(vmi).V(3).Info("Synchronization loop succeeded.")
39✔
1501
        return nil
39✔
1502

1503
}
1504

1505
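// processVmCleanup performs the final local cleanup for a VMI: it stops migration proxies and the
// downward metrics server, unmounts container and hotplug disks, tears down networking, closes the
// launcher client, and removes the domain from the cache.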
func (c *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstance) error {
5✔
1506
        vmiId := string(vmi.UID)
5✔
1507

5✔
1508
        c.logger.Object(vmi).Infof("Performing final local cleanup for vmi with uid %s", vmiId)
5✔
1509

5✔
1510
        c.migrationProxy.StopTargetListener(vmiId)
5✔
1511
        c.migrationProxy.StopSourceListener(vmiId)
5✔
1512

5✔
1513
        c.downwardMetricsManager.StopServer(vmi)
5✔
1514

5✔
1515
        // Unmount container disks and clean up remaining files
5✔
1516
        if err := c.containerDiskMounter.Unmount(vmi); err != nil {
5✔
1517
                return err
×
1518
        }
×
1519

1520
        // UnmountAll does the cleanup on the "best effort" basis: it is
1521
        // safe to pass a nil cgroupManager.
1522
        cgroupManager, _ := getCgroupManager(vmi, c.host)
5✔
1523
        if err := c.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
5✔
1524
                return err
×
1525
        }
×
1526

1527
        c.teardownNetwork(vmi)
5✔
1528

5✔
1529
        c.sriovHotplugExecutorPool.Delete(vmi.UID)
5✔
1530

5✔
1531
        // Watchdog file and command client must be the last things removed here
5✔
1532
        c.launcherClients.CloseLauncherClient(vmi)
5✔
1533

5✔
1534
        // Remove the domain from cache in the event that we're performing
5✔
1535
        // a final cleanup and never received the "DELETE" event. This is
5✔
1536
        // possible if the VMI pod goes away before we receive the final domain
5✔
1537
        // "DELETE"
5✔
1538
        domain := api.NewDomainReferenceFromName(vmi.Namespace, vmi.Name)
5✔
1539
        c.logger.Object(domain).Infof("Removing domain from cache during final cleanup")
5✔
1540
        return c.domainStore.Delete(domain)
5✔
1541
}
1542

1543
func (c *VirtualMachineController) processVmDestroy(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
1544
        tryGracefully := false
1✔
1545
        return c.helperVmShutdown(vmi, domain, tryGracefully)
1✔
1546
}
1✔
1547

1548
func (c *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
7✔
1549
        tryGracefully := true
7✔
1550
        return c.helperVmShutdown(vmi, domain, tryGracefully)
7✔
1551
}
7✔
1552

1553
const firstGracefulShutdownAttempt = -1
1554

1555
// Determines if a domain's grace period has expired during shutdown.
1556
// If the grace period has started but not expired, timeLeft represents
1557
// the time in seconds left until the period expires.
1558
// If the grace period has not started, timeLeft will be set to -1.
1559
func (c *VirtualMachineController) hasGracePeriodExpired(terminationGracePeriod *int64, dom *api.Domain) (bool, int64) {
6✔
1560
        var hasExpired bool
6✔
1561
        var timeLeft int64
6✔
1562

6✔
1563
        gracePeriod := int64(0)
6✔
1564
        if terminationGracePeriod != nil {
7✔
1565
                gracePeriod = *terminationGracePeriod
1✔
1566
        } else if dom != nil && dom.Spec.Metadata.KubeVirt.GracePeriod != nil {
11✔
1567
                gracePeriod = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds
5✔
1568
        }
5✔
1569

1570
        // If gracePeriod == 0, then there will be no startTime set, deletion
1571
        // should occur immediately during shutdown.
1572
        if gracePeriod == 0 {
7✔
1573
                hasExpired = true
1✔
1574
                return hasExpired, timeLeft
1✔
1575
        }
1✔
1576

1577
        startTime := int64(0)
5✔
1578
        if dom != nil && dom.Spec.Metadata.KubeVirt.GracePeriod != nil && dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp != nil {
8✔
1579
                startTime = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp.UTC().Unix()
3✔
1580
        }
3✔
1581

1582
        if startTime == 0 {
7✔
1583
                // If gracePeriod > 0, then the shutdown signal needs to be sent
2✔
1584
                // and the gracePeriod start time needs to be set.
2✔
1585
                timeLeft = firstGracefulShutdownAttempt
2✔
1586
                return hasExpired, timeLeft
2✔
1587
        }
2✔
1588

1589
        now := time.Now().UTC().Unix()
3✔
1590
        diff := now - startTime
3✔
1591

3✔
1592
        if diff >= gracePeriod {
4✔
1593
                hasExpired = true
1✔
1594
                return hasExpired, timeLeft
1✔
1595
        }
1✔
1596

1597
        timeLeft = gracePeriod - diff
2✔
1598
        if timeLeft < 1 {
2✔
1599
                timeLeft = 1
×
1600
        }
×
1601
        return hasExpired, timeLeft
2✔
1602
}
1603

1604
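// helperVmShutdown stops the domain: while tryGracefully is set and the grace period has not
// expired it keeps signaling a graceful shutdown, otherwise it kills the domain immediately.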
func (c *VirtualMachineController) helperVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, tryGracefully bool) error {
8✔
1605

8✔
1606
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
8✔
1607
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
8✔
1608
        if err != nil {
8✔
1609
                return err
×
1610
        }
×
1611

1612
        if domainHasGracePeriod(domain) && tryGracefully {
12✔
1613
                if expired, timeLeft := c.hasGracePeriodExpired(vmi.Spec.TerminationGracePeriodSeconds, domain); !expired {
7✔
1614
                        return c.handleVMIShutdown(vmi, domain, client, timeLeft)
3✔
1615
                }
3✔
1616
                c.logger.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
1✔
1617
        } else {
4✔
1618
                c.logger.Object(vmi).Infof("Graceful shutdown not set, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
4✔
1619
        }
4✔
1620

1621
        err = client.KillVirtualMachine(vmi)
5✔
1622
        if err != nil && !cmdclient.IsDisconnected(err) {
5✔
1623
                // Only report err if it wasn't the result of a disconnect.
×
1624
                //
×
1625
                // Both virt-launcher and virt-handler are trying to destroy
×
1626
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
1627
                // disconnected during the kill request, which shouldn't be
×
1628
                // considered an error.
×
1629
                return err
×
1630
        }
×
1631

1632
        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMIStopping)
5✔
1633

5✔
1634
        return nil
5✔
1635
}
1636

1637
func (c *VirtualMachineController) handleVMIShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, client cmdclient.LauncherClient, timeLeft int64) error {
3✔
1638
        if domain.Status.Status != api.Shutdown {
6✔
1639
                return c.shutdownVMI(vmi, client, timeLeft)
3✔
1640
        }
3✔
1641
        c.logger.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
×
1642
        return nil
×
1643
}
1644

1645
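// shutdownVMI signals a graceful shutdown to virt-launcher and re-queues the VMI with a capped
// delay so the signal is repeated until the grace period expires.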
func (c *VirtualMachineController) shutdownVMI(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient, timeLeft int64) error {
3✔
1646
        err := client.ShutdownVirtualMachine(vmi)
3✔
1647
        if err != nil && !cmdclient.IsDisconnected(err) {
3✔
1648
                // Only report err if it wasn't the result of a disconnect.
×
1649
                //
×
1650
                // Both virt-launcher and virt-handler are trying to destroy
×
1651
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
1652
                // disconnected during the shutdown request, which shouldn't be
×
1653
                // considered an error.
×
1654
                return err
×
1655
        }
×
1656

1657
        c.logger.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
3✔
1658

3✔
1659
        // Only create a VMIGracefulShutdown event for the first attempt as we can
3✔
1660
        // easily hit the default burst limit of 25 for the
3✔
1661
        // EventSourceObjectSpamFilter when gracefully shutting down VMIs with a
3✔
1662
        // large TerminationGracePeriodSeconds value set. Hitting this limit can
3✔
1663
        // result in the eventual VMIShutdown event being dropped.
3✔
1664
        if timeLeft == firstGracefulShutdownAttempt {
5✔
1665
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), VMIGracefulShutdown)
2✔
1666
        }
2✔
1667

1668
        // Make sure that we don't hot-loop in case we send the first domain notification
1669
        if timeLeft == firstGracefulShutdownAttempt {
5✔
1670
                timeLeft = 5
2✔
1671
                if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
2✔
1672
                        timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
×
1673
                }
×
1674
        }
1675
        // In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
1676
        // That's important since a booting OS can miss ACPI signals
1677
        if timeLeft > 5 {
4✔
1678
                timeLeft = 5
1✔
1679
        }
1✔
1680

1681
        // pending graceful shutdown.
1682
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
3✔
1683
        return nil
3✔
1684
}
1685

1686
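// processVmDelete signals virt-launcher to delete the domain while a launcher connection still
// exists, tolerating disconnects that happen when the pod is already being torn down.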
func (c *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance) error {
4✔
1687

4✔
1688
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
4✔
1689
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
4✔
1690

4✔
1691
        // If the pod has been torn down, we know the VirtualMachineInstance is down.
4✔
1692
        if err == nil {
8✔
1693

4✔
1694
                c.logger.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())
4✔
1695

4✔
1696
                // pending deletion.
4✔
1697
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMISignalDeletion)
4✔
1698

4✔
1699
                err = client.DeleteDomain(vmi)
4✔
1700
                if err != nil && !cmdclient.IsDisconnected(err) {
4✔
1701
                        // Only report err if it wasn't the result of a disconnect.
×
1702
                        //
×
1703
                        // Both virt-launcher and virt-handler are trying to destroy
×
1704
                        // the VirtualMachineInstance at the same time. It's possible the client may get
×
1705
                        // disconnected during the delete request, which shouldn't be
×
1706
                        // considered an error.
×
1707
                        return err
×
1708
                }
×
1709
        }
1710

1711
        return nil
4✔
1712

1713
}
1714

1715
func (c *VirtualMachineController) isVMIOwnedByNode(vmi *v1.VirtualMachineInstance) bool {
37✔
1716
        nodeName, ok := vmi.Labels[v1.NodeNameLabel]
37✔
1717

37✔
1718
        if ok && nodeName != "" && nodeName == c.host {
73✔
1719
                return true
36✔
1720
        }
36✔
1721

1722
        return vmi.Status.NodeName != "" && vmi.Status.NodeName == c.host
1✔
1723
}
1724

1725
func (c *VirtualMachineController) checkNetworkInterfacesForMigration(vmi *v1.VirtualMachineInstance) error {
95✔
1726
        return netvmispec.VerifyVMIMigratable(vmi, c.clusterConfig.GetNetworkBindings())
95✔
1727
}
95✔
1728

1729
func isReadOnlyDisk(disk *v1.Disk) bool {
8✔
1730
        isReadOnlyCDRom := disk.CDRom != nil && (disk.CDRom.ReadOnly == nil || *disk.CDRom.ReadOnly)
8✔
1731

8✔
1732
        return isReadOnlyCDRom
8✔
1733
}
8✔
1734

1735
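// checkVolumesForMigration inspects the VMI volumes and reports whether a live migration would
// require block migration, and returns an error when a volume makes the VMI non-migratable.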
func (c *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachineInstance) (blockMigrate bool, err error) {
67✔
1736
        volumeStatusMap := make(map[string]v1.VolumeStatus)
67✔
1737

67✔
1738
        for _, volumeStatus := range vmi.Status.VolumeStatus {
82✔
1739
                volumeStatusMap[volumeStatus.Name] = volumeStatus
15✔
1740
        }
15✔
1741

1742
        if len(vmi.Status.MigratedVolumes) > 0 {
67✔
1743
                blockMigrate = true
×
1744
        }
×
1745

1746
        filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)
67✔
1747

67✔
1748
        // Check if all VMI volumes can be shared between the source and the destination
67✔
1749
        // of a live migration. blockMigrate will be returned as false only if all volumes
67✔
1750
        // are shared and the VMI has no local disks
67✔
1751
        // Some combinations of disks make the VMI unsuitable for live migration.
67✔
1752
        // A relevant error will be returned in this case.
67✔
1753
        for _, volume := range vmi.Spec.Volumes {
96✔
1754
                volSrc := volume.VolumeSource
29✔
1755
                if volSrc.PersistentVolumeClaim != nil || volSrc.DataVolume != nil {
41✔
1756
                        var claimName string
12✔
1757
                        if volSrc.PersistentVolumeClaim != nil {
19✔
1758
                                claimName = volSrc.PersistentVolumeClaim.ClaimName
7✔
1759
                        } else {
12✔
1760
                                claimName = volSrc.DataVolume.Name
5✔
1761
                        }
5✔
1762

1763
                        volumeStatus, ok := volumeStatusMap[volume.Name]
12✔
1764

12✔
1765
                        if !ok || volumeStatus.PersistentVolumeClaimInfo == nil {
13✔
1766
                                return true, fmt.Errorf("cannot migrate VMI: Unable to determine if PVC %v is shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
1✔
1767
                        } else if !storagetypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !storagetypes.IsMigratedVolume(volumeStatus.Name, vmi) {
18✔
1768
                                return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
6✔
1769
                        }
6✔
1770

1771
                } else if volSrc.HostDisk != nil {
20✔
1772
                        // Check if this is a translated PVC.
3✔
1773
                        volumeStatus, ok := volumeStatusMap[volume.Name]
3✔
1774
                        if ok && volumeStatus.PersistentVolumeClaimInfo != nil {
3✔
NEW
1775
                                if !storagetypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !storagetypes.IsMigratedVolume(volumeStatus.Name, vmi) {
×
1776
                                        return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", volumeStatus.PersistentVolumeClaimInfo.ClaimName)
×
1777
                                } else {
×
1778
                                        continue
×
1779
                                }
1780
                        }
1781

1782
                        shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
3✔
1783
                        if !shared {
4✔
1784
                                return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
1✔
1785
                        }
1✔
1786
                } else {
14✔
1787
                        if _, ok := filesystems[volume.Name]; ok {
18✔
1788
                                c.logger.Object(vmi).Infof("Volume %s is shared with virtiofs, allow live migration", volume.Name)
4✔
1789
                                continue
4✔
1790
                        }
1791

1792
                        isVolumeUsedByReadOnlyDisk := false
10✔
1793
                        for _, disk := range vmi.Spec.Domain.Devices.Disks {
18✔
1794
                                if isReadOnlyDisk(&disk) && disk.Name == volume.Name {
10✔
1795
                                        isVolumeUsedByReadOnlyDisk = true
2✔
1796
                                        break
2✔
1797
                                }
1798
                        }
1799

1800
                        if isVolumeUsedByReadOnlyDisk {
12✔
1801
                                continue
2✔
1802
                        }
1803

1804
                        if vmi.Status.MigrationMethod == "" || vmi.Status.MigrationMethod == v1.LiveMigration {
16✔
1805
                                c.logger.Object(vmi).Infof("migration is block migration because of %s volume", volume.Name)
8✔
1806
                        }
8✔
1807
                        blockMigrate = true
8✔
1808
                }
1809
        }
1810
        return
59✔
1811
}
1812

1813
func isVMIPausedDuringMigration(vmi *v1.VirtualMachineInstance) bool {
×
1814
        return vmi.Status.MigrationState != nil &&
×
1815
                vmi.Status.MigrationState.Mode == v1.MigrationPaused &&
×
1816
                !vmi.Status.MigrationState.Completed
×
1817
}
×
1818

1819
func (c *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstance) error {
×
1820
        res, err := c.podIsolationDetector.Detect(vmi)
×
1821
        if err != nil {
×
1822
                return err
×
1823
        }
×
1824
        var Mask unix.CPUSet
×
1825
        Mask.Zero()
×
1826
        qemuprocess, err := res.GetQEMUProcess()
×
1827
        if err != nil {
×
1828
                return err
×
1829
        }
×
1830
        qemupid := qemuprocess.Pid()
×
1831
        if qemupid == -1 {
×
1832
                return nil
×
1833
        }
×
1834

1835
        pitpid, err := res.KvmPitPid()
×
1836
        if err != nil {
×
1837
                return err
×
1838
        }
×
1839
        if pitpid == -1 {
×
1840
                return nil
×
1841
        }
×
1842
        if vmi.IsRealtimeEnabled() {
×
1843
                param := schedParam{priority: 2}
×
1844
                err = schedSetScheduler(pitpid, schedFIFO, param)
×
1845
                if err != nil {
×
1846
                        return fmt.Errorf("failed to set FIFO scheduling and priority 2 for thread %d: %w", pitpid, err)
×
1847
                }
×
1848
        }
1849
        vcpus, err := getVCPUThreadIDs(qemupid)
×
1850
        if err != nil {
×
1851
                return err
×
1852
        }
×
1853
        vpid, ok := vcpus["0"]
×
1854
        if !ok {
×
1855
                return nil
×
1856
        }
×
1857
        vcpupid, err := strconv.Atoi(vpid)
×
1858
        if err != nil {
×
1859
                return err
×
1860
        }
×
1861
        err = unix.SchedGetaffinity(vcpupid, &Mask)
×
1862
        if err != nil {
×
1863
                return err
×
1864
        }
×
1865
        return unix.SchedSetaffinity(pitpid, &Mask)
×
1866
}
1867

1868
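// configureHousekeepingCgroup creates a dedicated "housekeeping" cpuset child cgroup pinned to the
// emulator CPUs and moves all non-vCPU launcher threads into it.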
func (c *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
×
1869
        if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
×
1870
                c.logger.Reason(err).Error("CreateChildCgroup ")
×
1871
                return err
×
1872
        }
×
1873

1874
        key := controller.VirtualMachineInstanceKey(vmi)
×
1875
        domain, domainExists, _, err := c.getDomainFromCache(key)
×
1876
        if err != nil {
×
1877
                return err
×
1878
        }
×
1879
        // bail out if domain does not exist
1880
        if !domainExists {
×
1881
                return nil
×
1882
        }
×
1883

1884
        if domain.Spec.CPUTune == nil || domain.Spec.CPUTune.EmulatorPin == nil {
×
1885
                return nil
×
1886
        }
×
1887

1888
        hkcpus, err := hardware.ParseCPUSetLine(domain.Spec.CPUTune.EmulatorPin.CPUSet, 100)
×
1889
        if err != nil {
×
1890
                return err
×
1891
        }
×
1892

1893
        c.logger.V(3).Object(vmi).Infof("housekeeping cpu: %v", hkcpus)
×
1894

×
1895
        err = cgroupManager.SetCpuSet("housekeeping", hkcpus)
×
1896
        if err != nil {
×
1897
                return err
×
1898
        }
×
1899

1900
        tids, err := cgroupManager.GetCgroupThreads()
×
1901
        if err != nil {
×
1902
                return err
×
1903
        }
×
1904
        hktids := make([]int, 0, 10)
×
1905

×
1906
        for _, tid := range tids {
×
1907
                proc, err := ps.FindProcess(tid)
×
1908
                if err != nil {
×
1909
                        c.logger.Object(vmi).Errorf("Failure to find process: %s", err.Error())
×
1910
                        return err
×
1911
                }
×
1912
                if proc == nil {
×
1913
                        return fmt.Errorf("failed to find process with tid: %d", tid)
×
1914
                }
×
1915
                comm := proc.Executable()
×
1916
                if strings.Contains(comm, "CPU ") && strings.Contains(comm, "KVM") {
×
1917
                        continue
×
1918
                }
1919
                hktids = append(hktids, tid)
×
1920
        }
1921

1922
        c.logger.V(3).Object(vmi).Infof("hk thread ids: %v", hktids)
×
1923
        for _, tid := range hktids {
×
1924
                err = cgroupManager.AttachTID("cpuset", "housekeeping", tid)
×
1925
                if err != nil {
×
1926
                        c.logger.Object(vmi).Errorf("Error attaching tid %d: %v", tid, err.Error())
×
1927
                        return err
×
1928
                }
×
1929
        }
1930

1931
        return nil
×
1932
}
1933

1934
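// vmUpdateHelperDefault drives the default VMI update path: it resolves the launcher client,
// prepares host disks and cgroups, runs the state-specific handling, synchronizes the domain with
// virt-launcher, and performs post-sync housekeeping.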
func (c *VirtualMachineController) vmUpdateHelperDefault(vmi *v1.VirtualMachineInstance, domainExists bool) error {
23✔
1935
        client, err := c.launcherClients.GetLauncherClient(vmi)
23✔
1936
        if err != nil {
23✔
1937
                return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
×
1938
        }
×
1939

1940
        preallocatedVolumes := c.getPreallocatedVolumes(vmi)
23✔
1941

23✔
1942
        err = hostdisk.ReplacePVCByHostDisk(vmi)
23✔
1943
        if err != nil {
23✔
1944
                return err
×
1945
        }
×
1946

1947
        cgroupManager, err := getCgroupManager(vmi, c.host)
23✔
1948
        if err != nil {
23✔
1949
                return err
×
1950
        }
×
1951

1952
        var errorTolerantFeaturesError []error
23✔
1953
        readyToProceed, err := c.handleVMIState(vmi, cgroupManager, &errorTolerantFeaturesError)
23✔
1954
        if err != nil {
27✔
1955
                return err
4✔
1956
        }
4✔
1957

1958
        if !readyToProceed {
21✔
1959
                return nil
2✔
1960
        }
2✔
1961

1962
        // Synchronize the VirtualMachineInstance state
1963
        err = c.syncVirtualMachine(client, vmi, preallocatedVolumes)
17✔
1964
        if err != nil {
17✔
1965
                return err
×
1966
        }
×
1967

1968
        // Post-sync housekeeping
1969
        err = c.handleHousekeeping(vmi, cgroupManager, domainExists)
17✔
1970
        if err != nil {
17✔
1971
                return err
×
1972
        }
×
1973

1974
        return errors.NewAggregate(errorTolerantFeaturesError)
17✔
1975
}
1976

1977
// handleVMIState decides whether to call handleRunningVMI or handleStartingVMI based on the VMI's state.
1978
func (c *VirtualMachineController) handleVMIState(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) (bool, error) {
23✔
1979
        if vmi.IsRunning() {
38✔
1980
                return true, c.handleRunningVMI(vmi, cgroupManager, errorTolerantFeaturesError)
15✔
1981
        } else if !vmi.IsFinal() {
31✔
1982
                return c.handleStartingVMI(vmi, cgroupManager)
8✔
1983
        }
8✔
1984
        return true, nil
×
1985
}
1986

1987
// handleRunningVMI contains the logic specifically for running VMs (hotplugging in running state, metrics, network updates)
1988
func (c *VirtualMachineController) handleRunningVMI(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) error {
15✔
1989
        if err := c.hotplugSriovInterfaces(vmi); err != nil {
15✔
1990
                c.logger.Object(vmi).Error(err.Error())
×
1991
        }
×
1992

1993
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
16✔
1994
                if !goerror.Is(err, os.ErrNotExist) {
2✔
1995
                        return err
1✔
1996
                }
1✔
1997
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "HotplugFailed", err.Error())
×
1998
        }
1999

2000
        if err := c.getMemoryDump(vmi); err != nil {
14✔
2001
                return err
×
2002
        }
×
2003

2004
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
14✔
2005
        if err != nil {
14✔
2006
                return fmt.Errorf(failedDetectIsolationFmt, err)
×
2007
        }
×
2008

2009
        if err := c.downwardMetricsManager.StartServer(vmi, isolationRes.Pid()); err != nil {
14✔
2010
                return err
×
2011
        }
×
2012

2013
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForLiveUpdate(vmi), c.netConf); err != nil {
14✔
2014
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "NicHotplug", err.Error())
×
2015
                *errorTolerantFeaturesError = append(*errorTolerantFeaturesError, err)
×
2016
        }
×
2017

2018
        return nil
14✔
2019
}
2020

2021
// handleStartingVMI contains the logic for starting VMs (container disks, initial network setup, device ownership).
2022
func (c *VirtualMachineController) handleStartingVMI(
2023
        vmi *v1.VirtualMachineInstance,
2024
        cgroupManager cgroup.Manager,
2025
) (bool, error) {
8✔
2026
        // give containerDisks some time to become ready before throwing errors on retries
8✔
2027
        info := c.launcherClients.GetLauncherClientInfo(vmi)
8✔
2028
        if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
10✔
2029
                if err != nil {
3✔
2030
                        return false, err
1✔
2031
                }
1✔
2032
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
2033
                return false, nil
1✔
2034
        }
2035

2036
        var err error
6✔
2037
        err = c.containerDiskMounter.MountAndVerify(vmi)
6✔
2038
        if err != nil {
7✔
2039
                return false, err
1✔
2040
        }
1✔
2041

2042
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
5✔
2043
                if !goerror.Is(err, os.ErrNotExist) {
×
2044
                        return false, err
×
2045
                }
×
2046
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "HotplugFailed", err.Error())
×
2047
        }
2048

2049
        if !c.hotplugVolumesReady(vmi) {
6✔
2050
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
2051
                return false, nil
1✔
2052
        }
1✔
2053

2054
        if c.clusterConfig.GPUsWithDRAGateEnabled() {
4✔
2055
                if !drautil.IsAllDRAGPUsReconciled(vmi, vmi.Status.DeviceStatus) {
×
2056
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, "WaitingForDRAGPUAttributes",
×
2057
                                "Waiting for Dynamic Resource Allocation GPU attributes to be reconciled")
×
2058
                        return false, nil
×
2059
                }
×
2060
        }
2061

2062
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForVMStartup(vmi), c.netConf); err != nil {
5✔
2063
                return false, fmt.Errorf("failed to configure vmi network: %w", err)
1✔
2064
        }
1✔
2065

2066
        if err := c.setupDevicesOwnerships(vmi, c.recorder); err != nil {
3✔
2067
                return false, err
×
2068
        }
×
2069

2070
        if err := c.adjustResources(vmi); err != nil {
3✔
2071
                return false, err
×
2072
        }
×
2073

2074
        if c.shouldWaitForSEVAttestation(vmi) {
3✔
2075
                return false, nil
×
2076
        }
×
2077

2078
        return true, nil
3✔
2079
}
2080

2081
func (c *VirtualMachineController) adjustResources(vmi *v1.VirtualMachineInstance) error {
3✔
2082
        err := c.podIsolationDetector.AdjustResources(vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
3✔
2083
        if err != nil {
3✔
2084
                return fmt.Errorf("failed to adjust resources: %v", err)
×
2085
        }
×
2086
        return nil
3✔
2087
}
2088

2089
func (c *VirtualMachineController) shouldWaitForSEVAttestation(vmi *v1.VirtualMachineInstance) bool {
3✔
2090
        if util.IsSEVAttestationRequested(vmi) {
3✔
2091
                sev := vmi.Spec.Domain.LaunchSecurity.SEV
×
2092
                // Wait for the session parameters to be provided
×
2093
                return sev.Session == "" || sev.DHCert == ""
×
2094
        }
×
2095
        return false
3✔
2096
}
2097

2098
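// syncVirtualMachine sends the SyncVirtualMachine command to virt-launcher with the computed
// options, translating a missing EFI OVMF rom into a critical Secure Boot error.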
func (c *VirtualMachineController) syncVirtualMachine(client cmdclient.LauncherClient, vmi *v1.VirtualMachineInstance, preallocatedVolumes []string) error {
17✔
2099
        smbios := c.clusterConfig.GetSMBIOS()
17✔
2100
        period := c.clusterConfig.GetMemBalloonStatsPeriod()
17✔
2101

17✔
2102
        options := virtualMachineOptions(smbios, period, preallocatedVolumes, c.capabilities, c.clusterConfig)
17✔
2103
        options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
17✔
2104

17✔
2105
        err := client.SyncVirtualMachine(vmi, options)
17✔
2106
        if err != nil {
17✔
2107
                if strings.Contains(err.Error(), "EFI OVMF rom missing") {
×
2108
                        return &virtLauncherCriticalSecurebootError{fmt.Sprintf("mismatch of Secure Boot setting and bootloaders: %v", err)}
×
2109
                }
×
2110
        }
2111

2112
        return err
17✔
2113
}
2114

2115
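// handleHousekeeping applies post-sync tuning: the housekeeping cgroup for isolated emulator
// threads, realtime vCPU scheduling, PIT thread affinity for dedicated CPUs, a Created event for
// new domains, and unmounting of hotplug volumes that are no longer needed.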
func (c *VirtualMachineController) handleHousekeeping(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, domainExists bool) error {
17✔
2116
        if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
17✔
2117
                err := c.configureHousekeepingCgroup(vmi, cgroupManager)
×
2118
                if err != nil {
×
2119
                        return err
×
2120
                }
×
2121
        }
2122

2123
        // Configure vcpu scheduler for realtime workloads and affine PIT thread for dedicated CPU
2124
        if vmi.IsRealtimeEnabled() && !vmi.IsRunning() && !vmi.IsFinal() {
17✔
2125
                c.logger.Object(vmi).Info("Configuring vcpus for real time workloads")
×
2126
                if err := c.configureVCPUScheduler(vmi); err != nil {
×
2127
                        return err
×
2128
                }
×
2129
        }
2130
        if vmi.IsCPUDedicated() && !vmi.IsRunning() && !vmi.IsFinal() {
17✔
2131
                c.logger.V(3).Object(vmi).Info("Affining PIT thread")
×
2132
                if err := c.affinePitThread(vmi); err != nil {
×
2133
                        return err
×
2134
                }
×
2135
        }
2136
        if !domainExists {
20✔
2137
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Created.String(), VMIDefined)
3✔
2138
        }
3✔
2139

2140
        if vmi.IsRunning() {
31✔
2141
                // Unmount any disks that should no longer be mounted
14✔
2142
                if err := c.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
14✔
2143
                        return err
×
2144
                }
×
2145
        }
2146
        return nil
17✔
2147
}
2148

2149
func (c *VirtualMachineController) getPreallocatedVolumes(vmi *v1.VirtualMachineInstance) []string {
23✔
2150
        var preallocatedVolumes []string
23✔
2151
        for _, volumeStatus := range vmi.Status.VolumeStatus {
27✔
2152
                if volumeStatus.PersistentVolumeClaimInfo != nil && volumeStatus.PersistentVolumeClaimInfo.Preallocated {
4✔
2153
                        preallocatedVolumes = append(preallocatedVolumes, volumeStatus.Name)
×
2154
                }
×
2155
        }
2156
        return preallocatedVolumes
23✔
2157
}
2158

2159
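// hotplugSriovInterfaces triggers a rate-limited hot-plug of SR-IOV host devices when interfaces
// reported by multus are not yet attached to the domain.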
func (c *VirtualMachineController) hotplugSriovInterfaces(vmi *v1.VirtualMachineInstance) error {
15✔
2160
        sriovSpecInterfaces := netvmispec.FilterSRIOVInterfaces(vmi.Spec.Domain.Devices.Interfaces)
15✔
2161

15✔
2162
        sriovSpecIfacesNames := netvmispec.IndexInterfaceSpecByName(sriovSpecInterfaces)
15✔
2163
        attachedSriovStatusIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
15✔
2164
                _, exist := sriovSpecIfacesNames[iface.Name]
×
2165
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceDomain) &&
×
2166
                        netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
2167
        })
×
2168

2169
        desiredSriovMultusPluggedIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
15✔
2170
                _, exist := sriovSpecIfacesNames[iface.Name]
×
2171
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
2172
        })
×
2173

2174
        if len(desiredSriovMultusPluggedIfaces) == len(attachedSriovStatusIfaces) {
30✔
2175
                c.sriovHotplugExecutorPool.Delete(vmi.UID)
15✔
2176
                return nil
15✔
2177
        }
15✔
2178

2179
        rateLimitedExecutor := c.sriovHotplugExecutorPool.LoadOrStore(vmi.UID)
×
2180
        return rateLimitedExecutor.Exec(func() error {
×
2181
                return c.hotplugSriovInterfacesCommand(vmi)
×
2182
        })
×
2183
}
2184

2185
func (c *VirtualMachineController) hotplugSriovInterfacesCommand(vmi *v1.VirtualMachineInstance) error {
×
2186
        const errMsgPrefix = "failed to hot-plug SR-IOV interfaces"
×
2187

×
2188
        client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
×
2189
        if err != nil {
×
2190
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2191
        }
×
2192

2193
        if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
×
2194
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), err.Error())
×
2195
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2196
        }
×
2197

2198
        c.logger.V(3).Object(vmi).Info("sending hot-plug host-devices command")
×
2199
        if err := client.HotplugHostDevices(vmi); err != nil {
×
2200
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2201
        }
×
2202

2203
        return nil
×
2204
}
2205

2206
func memoryDumpPath(volumeStatus v1.VolumeStatus) string {
×
2207
        target := hotplugdisk.GetVolumeMountDir(volumeStatus.Name)
×
2208
        dumpPath := filepath.Join(target, volumeStatus.MemoryDumpVolume.TargetFileName)
×
2209
        return dumpPath
×
2210
}
×
2211

2212
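// getMemoryDump asks virt-launcher to dump guest memory for every volume whose memory dump is
// still in progress.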
func (c *VirtualMachineController) getMemoryDump(vmi *v1.VirtualMachineInstance) error {
14✔
2213
        const errMsgPrefix = "failed to getting memory dump"
14✔
2214

14✔
2215
        for _, volumeStatus := range vmi.Status.VolumeStatus {
17✔
2216
                if volumeStatus.MemoryDumpVolume == nil || volumeStatus.Phase != v1.MemoryDumpVolumeInProgress {
6✔
2217
                        continue
3✔
2218
                }
2219
                client, err := c.launcherClients.GetVerifiedLauncherClient(vmi)
×
2220
                if err != nil {
×
2221
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2222
                }
×
2223

2224
                c.logger.V(3).Object(vmi).Info("sending memory dump command")
×
2225
                err = client.VirtualMachineMemoryDump(vmi, memoryDumpPath(volumeStatus))
×
2226
                if err != nil {
×
2227
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
2228
                }
×
2229
        }
2230

2231
        return nil
14✔
2232
}
2233

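// hotplugVolumesReady reports whether all hotplug volumes (utility volumes included)
// are mounted or ready; it returns true immediately when the VMI has no hotplug volumes.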
func (c *VirtualMachineController) hotplugVolumesReady(vmi *v1.VirtualMachineInstance) bool {
        hasHotplugVolume := false
        for _, v := range vmi.Spec.Volumes {
                if storagetypes.IsHotplugVolume(&v) {
                        hasHotplugVolume = true
                        break
                }
        }
        if len(vmi.Spec.UtilityVolumes) > 0 {
                hasHotplugVolume = true
        }
        if !hasHotplugVolume {
                return true
        }
        if len(vmi.Status.VolumeStatus) == 0 {
                return false
        }
        for _, vs := range vmi.Status.VolumeStatus {
                if vs.HotplugVolume != nil && !(vs.Phase == v1.VolumeReady || vs.Phase == v1.HotplugVolumeMounted) {
                        // wait for volume to be mounted
                        return false
                }
        }
        return true
}

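// processVmUpdate verifies that the launcher client is usable and then runs the
// default VMI update helper, passing whether a domain already exists.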
func (c *VirtualMachineController) processVmUpdate(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        shouldReturn, err := c.checkLauncherClient(vmi)
        if shouldReturn {
                return err
        }

        return c.vmUpdateHelperDefault(vmi, domain != nil)
}

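// setVmPhaseForStatusReason recalculates the VMI phase from the domain state and
// stores it in the VMI status.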
func (c *VirtualMachineController) setVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) error {
        phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
        if err != nil {
                return err
        }
        vmi.Status.Phase = phase
        return nil
}

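// vmiHasTerminationGracePeriod reports whether the VMI has a non-zero termination
// grace period (an unset value falls back to the default grace period).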
func vmiHasTerminationGracePeriod(vmi *v1.VirtualMachineInstance) bool {
        // if not set, the default grace period applies
        return vmi.Spec.TerminationGracePeriodSeconds == nil ||
                (vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds != 0)
}

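// domainHasGracePeriod reports whether the domain metadata carries a non-zero
// deletion grace period.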
func domainHasGracePeriod(domain *api.Domain) bool {
        return domain != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds != 0
}

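// isACPIEnabled reports whether a graceful ACPI shutdown applies: the domain must
// expose the ACPI feature and a non-zero grace period must be in effect, taken from
// the VMI spec or, when unset there, from the domain metadata.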
func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
        return (vmiHasTerminationGracePeriod(vmi) || (vmi.Spec.TerminationGracePeriodSeconds == nil && domainHasGracePeriod(domain))) &&
                domain != nil &&
                domain.Spec.Features != nil &&
                domain.Spec.Features.ACPI != nil
}

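// calculateVmPhaseForStatusReason maps the libvirt domain state (or its absence)
// to the corresponding VirtualMachineInstance phase.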
2297
func (c *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) (v1.VirtualMachineInstancePhase, error) {
70✔
2298

70✔
2299
        if domain == nil {
96✔
2300
                switch {
26✔
2301
                case vmi.IsScheduled():
24✔
2302
                        isUnresponsive, isInitialized, err := c.launcherClients.IsLauncherClientUnresponsive(vmi)
24✔
2303

24✔
2304
                        if err != nil {
24✔
2305
                                return vmi.Status.Phase, err
×
2306
                        }
×
2307
                        if !isInitialized {
26✔
2308
                                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
2✔
2309
                                return vmi.Status.Phase, err
2✔
2310
                        } else if isUnresponsive {
28✔
2311
                                // virt-launcher is gone and VirtualMachineInstance never transitioned
4✔
2312
                                // from scheduled to Running.
4✔
2313
                                return v1.Failed, nil
4✔
2314
                        }
4✔
2315
                        return v1.Scheduled, nil
18✔
2316
                case !vmi.IsRunning() && !vmi.IsFinal():
×
2317
                        return v1.Scheduled, nil
×
2318
                case !vmi.IsFinal():
2✔
2319
                        // That is unexpected. We should not be able to delete a VirtualMachineInstance before we stop it.
2✔
2320
                        // However, if someone directly interacts with libvirt it is possible
2✔
2321
                        return v1.Failed, nil
2✔
2322
                }
2323
        } else {
44✔
2324
                switch domain.Status.Status {
44✔
2325
                case api.Shutoff, api.Crashed:
×
2326
                        switch domain.Status.Reason {
×
2327
                        case api.ReasonCrashed, api.ReasonPanicked:
×
2328
                                return v1.Failed, nil
×
2329
                        case api.ReasonDestroyed:
×
2330
                                if isACPIEnabled(vmi, domain) {
×
2331
                                        // When ACPI is available, the domain was tried to be shutdown,
×
2332
                                        // and destroyed means that the domain was destroyed after the graceperiod expired.
×
2333
                                        // Without ACPI a destroyed domain is ok.
×
2334
                                        return v1.Failed, nil
×
2335
                                }
×
2336
                                if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.Failed && vmi.Status.MigrationState.Mode == v1.MigrationPostCopy {
×
2337
                                        // A VMI that failed a post-copy migration should never succeed
×
2338
                                        return v1.Failed, nil
×
2339
                                }
×
2340
                                return v1.Succeeded, nil
×
2341
                        case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
×
2342
                                return v1.Succeeded, nil
×
2343
                        case api.ReasonMigrated:
×
2344
                                // if the domain migrated, we no longer know the phase.
×
2345
                                return vmi.Status.Phase, nil
×
2346
                        }
2347
                case api.Paused:
4✔
2348
                        switch domain.Status.Reason {
4✔
2349
                        case api.ReasonPausedPostcopyFailed:
2✔
2350
                                return v1.Failed, nil
2✔
2351
                        default:
2✔
2352
                                return v1.Running, nil
2✔
2353
                        }
2354
                case api.Running, api.Blocked, api.PMSuspended:
40✔
2355
                        return v1.Running, nil
40✔
2356
                }
2357
        }
2358
        return vmi.Status.Phase, nil
×
2359
}
2360

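// addDeleteFunc handles VMI add and delete notifications by resetting the
// expectations for the key and enqueueing it for reconciliation.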
func (c *VirtualMachineController) addDeleteFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.SetExpectations(key, 0, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) updateFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.vmiExpectations.SetExpectations(key, 0, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) addDomainFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteDomainFunc(obj interface{}) {
        domain, ok := obj.(*api.Domain)
        if !ok {
                tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                        c.logger.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
                        return
                }
                domain, ok = tombstone.Obj.(*api.Domain)
                if !ok {
                        c.logger.Reason(fmt.Errorf("tombstone contained object that is not a domain %#v", obj)).Error("Failed to process delete notification")
                        return
                }
        }
        c.logger.V(3).Object(domain).Info("Domain deleted")
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateDomainFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.queue.Add(key)
        }
}

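// isHostModelMigratable returns an error when the VMI requests the host-model CPU
// but no host CPU model is known for this node, which blocks migration.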
func (c *VirtualMachineController) isHostModelMigratable(vmi *v1.VirtualMachineInstance) error {
        if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == v1.CPUModeHostModel {
                if c.hostCpuModel == "" {
                        err := fmt.Errorf("the node \"%s\" does not allow migration with host-model", vmi.Status.NodeName)
                        c.logger.Object(vmi).Errorf("%s", err.Error())
                        return err
                }
        }
        return nil
}

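// isIOError reports whether the existing domain is paused due to an I/O error.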
func isIOError(shouldUpdate, domainExists bool, domain *api.Domain) bool {
        return shouldUpdate && domainExists && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedIOError
}

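// updateMachineType copies the machine type reported by the domain into the VMI status.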
func (c *VirtualMachineController) updateMachineType(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil || vmi == nil {
                return
        }
        if domain.Spec.OS.Type.Machine != "" {
                vmi.Status.Machine = &v1.Machine{Type: domain.Spec.OS.Type.Machine}
        }
}

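// parseLibvirtQuantity converts a libvirt value/unit pair into a resource.Quantity,
// handling decimal units (KB, MB, ...) and binary units (KiB, MiB, ...) separately;
// it returns nil for unknown units.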
func parseLibvirtQuantity(value int64, unit string) *resource.Quantity {
        switch unit {
        case "b", "bytes":
                return resource.NewQuantity(value, resource.BinarySI)
        case "KB":
                return resource.NewQuantity(value*1000, resource.DecimalSI)
        case "MB":
                return resource.NewQuantity(value*1000*1000, resource.DecimalSI)
        case "GB":
                return resource.NewQuantity(value*1000*1000*1000, resource.DecimalSI)
        case "TB":
                return resource.NewQuantity(value*1000*1000*1000*1000, resource.DecimalSI)
        case "k", "KiB":
                return resource.NewQuantity(value*1024, resource.BinarySI)
        case "M", "MiB":
                return resource.NewQuantity(value*1024*1024, resource.BinarySI)
        case "G", "GiB":
                return resource.NewQuantity(value*1024*1024*1024, resource.BinarySI)
        case "T", "TiB":
                return resource.NewQuantity(value*1024*1024*1024*1024, resource.BinarySI)
        }
        return nil
}