kubevirt / kubevirt — build 31fe7e0e-87b3-448f-b95e-00b206aeb4e9

06 Mar 2025 01:19PM UTC coverage: 71.479% (+0.001%) from 71.478%

push · prow · web-flow
Merge pull request #14109 from orelmisan/test-old-lanes-ref

test.sh: Rm leftover references to old network lanes

61928 of 86638 relevant lines covered (71.48%)

0.8 hits per line

Source File

/pkg/virt-handler/vm.go — 64.77% covered
1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright 2017 Red Hat, Inc.
17
 *
18
 */
19

20
package virthandler
21

22
import (
23
        "bytes"
24
        "context"
25
        "encoding/json"
26
        goerror "errors"
27
        "fmt"
28
        "io"
29
        "net"
30
        "os"
31
        "path/filepath"
32
        "regexp"
33
        "runtime"
34
        "sort"
35
        "strconv"
36
        "strings"
37
        "time"
38

39
        "github.com/mitchellh/go-ps"
40
        "github.com/opencontainers/runc/libcontainer/cgroups"
41
        "golang.org/x/sys/unix"
42
        "libvirt.org/go/libvirtxml"
43

44
        k8sv1 "k8s.io/api/core/v1"
45
        "k8s.io/apimachinery/pkg/api/equality"
46
        "k8s.io/apimachinery/pkg/api/resource"
47
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
48
        "k8s.io/apimachinery/pkg/types"
49
        "k8s.io/apimachinery/pkg/util/errors"
50
        "k8s.io/apimachinery/pkg/util/wait"
51
        "k8s.io/client-go/tools/cache"
52
        "k8s.io/client-go/tools/record"
53
        "k8s.io/client-go/util/workqueue"
54

55
        v1 "kubevirt.io/api/core/v1"
56
        "kubevirt.io/client-go/kubecli"
57
        "kubevirt.io/client-go/log"
58

59
        "kubevirt.io/kubevirt/pkg/config"
60
        "kubevirt.io/kubevirt/pkg/controller"
61
        diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
62
        "kubevirt.io/kubevirt/pkg/executor"
63
        cmdv1 "kubevirt.io/kubevirt/pkg/handler-launcher-com/cmd/v1"
64
        hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
65
        hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
66
        netcache "kubevirt.io/kubevirt/pkg/network/cache"
67
        "kubevirt.io/kubevirt/pkg/network/domainspec"
68
        neterrors "kubevirt.io/kubevirt/pkg/network/errors"
69
        netsetup "kubevirt.io/kubevirt/pkg/network/setup"
70
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
71
        "kubevirt.io/kubevirt/pkg/pointer"
72
        "kubevirt.io/kubevirt/pkg/safepath"
73
        "kubevirt.io/kubevirt/pkg/storage/reservation"
74
        pvctypes "kubevirt.io/kubevirt/pkg/storage/types"
75
        storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
76
        "kubevirt.io/kubevirt/pkg/util"
77
        virtutil "kubevirt.io/kubevirt/pkg/util"
78
        "kubevirt.io/kubevirt/pkg/util/hardware"
79
        "kubevirt.io/kubevirt/pkg/util/migrations"
80
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
81
        "kubevirt.io/kubevirt/pkg/virt-controller/services"
82
        "kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
83
        virtcache "kubevirt.io/kubevirt/pkg/virt-handler/cache"
84
        "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
85
        cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
86
        container_disk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
87
        device_manager "kubevirt.io/kubevirt/pkg/virt-handler/device-manager"
88
        "kubevirt.io/kubevirt/pkg/virt-handler/heartbeat"
89
        hotplug_volume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"
90
        "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
91
        migrationproxy "kubevirt.io/kubevirt/pkg/virt-handler/migration-proxy"
92
        "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
93
        "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
94
        "kubevirt.io/kubevirt/pkg/virtiofs"
95
)
96

97
type netconf interface {
98
        Setup(vmi *v1.VirtualMachineInstance, networks []v1.Network, launcherPid int) error
99
        Teardown(vmi *v1.VirtualMachineInstance) error
100
}
101

102
type netstat interface {
103
        UpdateStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
104
        Teardown(vmi *v1.VirtualMachineInstance)
105
        PodInterfaceVolatileDataIsCached(vmi *v1.VirtualMachineInstance, ifaceName string) bool
106
        CachePodInterfaceVolatileData(vmi *v1.VirtualMachineInstance, ifaceName string, data *netcache.PodIfaceCacheData)
107
}
108

109
type downwardMetricsManager interface {
110
        Run(stopCh chan struct{})
111
        StartServer(vmi *v1.VirtualMachineInstance, pid int) error
112
        StopServer(vmi *v1.VirtualMachineInstance)
113
}
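
// The three interfaces above capture only what this controller needs from the
// network setup, network status, and downward-metrics subsystems, which keeps
// them easy to replace in tests. A minimal sketch of such a test double follows;
// exampleNetConfStub is a hypothetical name and is not part of the production wiring.
type exampleNetConfStub struct {
        setupCalls    int
        teardownCalls int
}

func (s *exampleNetConfStub) Setup(vmi *v1.VirtualMachineInstance, networks []v1.Network, launcherPid int) error {
        // record the call and pretend the pod network was configured successfully
        s.setupCalls++
        return nil
}

func (s *exampleNetConfStub) Teardown(vmi *v1.VirtualMachineInstance) error {
        // record the call and pretend the VMI's network cache was cleaned up
        s.teardownCalls++
        return nil
}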
114

115
const (
116
        failedDetectIsolationFmt              = "failed to detect isolation for launcher pod: %v"
117
        unableCreateVirtLauncherConnectionFmt = "unable to create virt-launcher client connection: %v"
118
        // This value was determined after consulting with libvirt developers and performing extensive testing.
119
        parallelMultifdMigrationThreads = uint(8)
120
)
121

122
const (
123
        //VolumeReadyReason is the reason set when the volume is ready.
124
        VolumeReadyReason = "VolumeReady"
125
        //VolumeUnMountedFromPodReason is the reason set when the volume is unmounted from the virtlauncher pod
126
        VolumeUnMountedFromPodReason = "VolumeUnMountedFromPod"
127
        //VolumeMountedToPodReason is the reason set when the volume is mounted to the virtlauncher pod
128
        VolumeMountedToPodReason = "VolumeMountedToPod"
129
        //VolumeUnplugged is the reason set when the volume is completely unplugged from the VMI
130
        VolumeUnplugged = "VolumeUnplugged"
131
        //VMIDefined is the reason set when a VMI is defined
132
        VMIDefined = "VirtualMachineInstance defined."
133
        //VMIStarted is the reason set when a VMI is started
134
        VMIStarted = "VirtualMachineInstance started."
135
        //VMIShutdown is the reason set when a VMI is shutdown
136
        VMIShutdown = "The VirtualMachineInstance was shut down."
137
        //VMICrashed is the reason set when a VMI crashed
138
        VMICrashed = "The VirtualMachineInstance crashed."
139
        //VMIAbortingMigration is the reason set when migration is being aborted
140
        VMIAbortingMigration = "VirtualMachineInstance is aborting migration."
141
        //VMIMigrating is the reason set when the VMI is migrating
142
        VMIMigrating = "VirtualMachineInstance is migrating."
143
        //VMIMigrationTargetPrepared is the reason set when the migration target has been prepared
144
        VMIMigrationTargetPrepared = "VirtualMachineInstance Migration Target Prepared."
145
        //VMIStopping is the reason set when the VMI is stopping
146
        VMIStopping = "VirtualMachineInstance stopping"
147
        //VMIGracefulShutdown is the reason set when the VMI is gracefully shut down
148
        VMIGracefulShutdown = "Signaled Graceful Shutdown"
149
        //VMISignalDeletion is the reason set when the VMI has signal deletion
150
        VMISignalDeletion = "Signaled Deletion"
151

152
        // memoryHotplugFailedReason is the reason set when the VM cannot hotplug memory
153
        memoryHotplugFailedReason = "Memory Hotplug Failed"
154
)
155

156
var getCgroupManager = func(vmi *v1.VirtualMachineInstance) (cgroup.Manager, error) {
×
157
        return cgroup.NewManagerFromVM(vmi)
×
158
}
×
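
// Example (hypothetical, not part of this file): because getCgroupManager is a
// package-level function variable rather than a plain function, tests can swap it
// for a stub and restore the original afterwards. The helper name and the idea of
// passing in a stub manager are illustrative only.
func overrideCgroupManagerForTest(stub cgroup.Manager) (restore func()) {
        orig := getCgroupManager
        getCgroupManager = func(_ *v1.VirtualMachineInstance) (cgroup.Manager, error) {
                // hand back the caller-provided stub instead of resolving the VMI's real cgroup
                return stub, nil
        }
        return func() { getCgroupManager = orig }
}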
159

160
func NewController(
161
        recorder record.EventRecorder,
162
        clientset kubecli.KubevirtClient,
163
        host string,
164
        migrationIpAddress string,
165
        virtShareDir string,
166
        virtPrivateDir string,
167
        kubeletPodsDir string,
168
        vmiSourceInformer cache.SharedIndexInformer,
169
        vmiTargetInformer cache.SharedIndexInformer,
170
        domainInformer cache.SharedInformer,
171
        maxDevices int,
172
        clusterConfig *virtconfig.ClusterConfig,
173
        podIsolationDetector isolation.PodIsolationDetector,
174
        migrationProxy migrationproxy.ProxyManager,
175
        downwardMetricsManager downwardMetricsManager,
176
        capabilities *libvirtxml.Caps,
177
        hostCpuModel string,
178
        netConf netconf,
179
        netStat netstat,
180
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator,
181
) (*VirtualMachineController, error) {
1✔
182

1✔
183
        queue := workqueue.NewTypedRateLimitingQueueWithConfig[string](
1✔
184
                workqueue.DefaultTypedControllerRateLimiter[string](),
1✔
185
                workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-vm"},
1✔
186
        )
1✔
187

1✔
188
        containerDiskState := filepath.Join(virtPrivateDir, "container-disk-mount-state")
1✔
189
        if err := os.MkdirAll(containerDiskState, 0700); err != nil {
1✔
190
                return nil, err
×
191
        }
×
192

193
        hotplugState := filepath.Join(virtPrivateDir, "hotplug-volume-mount-state")
1✔
194
        if err := os.MkdirAll(hotplugState, 0700); err != nil {
1✔
195
                return nil, err
×
196
        }
×
197

198
        c := &VirtualMachineController{
1✔
199
                queue:                            queue,
1✔
200
                recorder:                         recorder,
1✔
201
                clientset:                        clientset,
1✔
202
                host:                             host,
1✔
203
                migrationIpAddress:               migrationIpAddress,
1✔
204
                virtShareDir:                     virtShareDir,
1✔
205
                vmiSourceStore:                   vmiSourceInformer.GetStore(),
1✔
206
                vmiTargetStore:                   vmiTargetInformer.GetStore(),
1✔
207
                domainStore:                      domainInformer.GetStore(),
1✔
208
                heartBeatInterval:                1 * time.Minute,
1✔
209
                migrationProxy:                   migrationProxy,
1✔
210
                podIsolationDetector:             podIsolationDetector,
1✔
211
                containerDiskMounter:             container_disk.NewMounter(podIsolationDetector, containerDiskState, clusterConfig),
1✔
212
                hotplugVolumeMounter:             hotplug_volume.NewVolumeMounter(hotplugState, kubeletPodsDir),
1✔
213
                clusterConfig:                    clusterConfig,
1✔
214
                virtLauncherFSRunDirPattern:      "/proc/%d/root/var/run",
1✔
215
                capabilities:                     capabilities,
1✔
216
                hostCpuModel:                     hostCpuModel,
1✔
217
                vmiExpectations:                  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
1✔
218
                sriovHotplugExecutorPool:         executor.NewRateLimitedExecutorPool(executor.NewExponentialLimitedBackoffCreator()),
1✔
219
                ioErrorRetryManager:              NewFailRetryManager("io-error-retry", 10*time.Second, 3*time.Minute, 30*time.Second),
1✔
220
                netConf:                          netConf,
1✔
221
                netStat:                          netStat,
1✔
222
                netBindingPluginMemoryCalculator: netBindingPluginMemoryCalculator,
1✔
223
        }
1✔
224

1✔
225
        c.hasSynced = func() bool {
1✔
226
                return domainInformer.HasSynced() && vmiSourceInformer.HasSynced() && vmiTargetInformer.HasSynced()
×
227
        }
×
228

229
        _, err := vmiSourceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
230
                AddFunc:    c.addFunc,
1✔
231
                DeleteFunc: c.deleteFunc,
1✔
232
                UpdateFunc: c.updateFunc,
1✔
233
        })
1✔
234
        if err != nil {
1✔
235
                return nil, err
×
236
        }
×
237

238
        _, err = vmiTargetInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
239
                AddFunc:    c.addFunc,
1✔
240
                DeleteFunc: c.deleteFunc,
1✔
241
                UpdateFunc: c.updateFunc,
1✔
242
        })
1✔
243
        if err != nil {
1✔
244
                return nil, err
×
245
        }
×
246

247
        _, err = domainInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
248
                AddFunc:    c.addDomainFunc,
1✔
249
                DeleteFunc: c.deleteDomainFunc,
1✔
250
                UpdateFunc: c.updateDomainFunc,
1✔
251
        })
1✔
252
        if err != nil {
1✔
253
                return nil, err
×
254
        }
×
255

256
        c.launcherClients = virtcache.LauncherClientInfoByVMI{}
1✔
257

1✔
258
        c.downwardMetricsManager = downwardMetricsManager
1✔
259

1✔
260
        c.domainNotifyPipes = make(map[string]string)
1✔
261

1✔
262
        permissions := "rw"
1✔
263
        if cgroups.IsCgroup2UnifiedMode() {
1✔
264
                // Need 'rwm' permissions otherwise ebpf filtering program attached by runc
×
265
                // will deny probing the device file with 'access' syscall. That in turn
×
266
                // will lead to virtqemud failure on VM startup.
×
267
                // This has been fixed upstream:
×
268
                //   https://github.com/opencontainers/runc/pull/2796
×
269
                // but the workaround is still needed to support previous versions without
×
270
                // the patch.
×
271
                permissions = "rwm"
×
272
        }
×
273

274
        c.deviceManagerController = device_manager.NewDeviceController(
1✔
275
                c.host,
1✔
276
                maxDevices,
1✔
277
                permissions,
1✔
278
                device_manager.PermanentHostDevicePlugins(maxDevices, permissions),
1✔
279
                clusterConfig,
1✔
280
                clientset.CoreV1())
1✔
281
        c.heartBeat = heartbeat.NewHeartBeat(clientset.CoreV1(), c.deviceManagerController, clusterConfig, host)
1✔
282

1✔
283
        return c, nil
1✔
284
}
285

286
type netBindingPluginMemoryCalculator interface {
287
        Calculate(vmi *v1.VirtualMachineInstance, registeredPlugins map[string]v1.InterfaceBindingPlugin) resource.Quantity
288
}
289

290
type VirtualMachineController struct {
291
        recorder                 record.EventRecorder
292
        clientset                kubecli.KubevirtClient
293
        host                     string
294
        migrationIpAddress       string
295
        virtShareDir             string
296
        virtPrivateDir           string
297
        queue                    workqueue.TypedRateLimitingInterface[string]
298
        vmiSourceStore           cache.Store
299
        vmiTargetStore           cache.Store
300
        domainStore              cache.Store
301
        launcherClients          virtcache.LauncherClientInfoByVMI
302
        heartBeatInterval        time.Duration
303
        deviceManagerController  *device_manager.DeviceController
304
        migrationProxy           migrationproxy.ProxyManager
305
        podIsolationDetector     isolation.PodIsolationDetector
306
        containerDiskMounter     container_disk.Mounter
307
        hotplugVolumeMounter     hotplug_volume.VolumeMounter
308
        clusterConfig            *virtconfig.ClusterConfig
309
        sriovHotplugExecutorPool *executor.RateLimitedExecutorPool
310
        downwardMetricsManager   downwardMetricsManager
311

312
        netConf                          netconf
313
        netStat                          netstat
314
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator
315

316
        domainNotifyPipes           map[string]string
317
        virtLauncherFSRunDirPattern string
318
        heartBeat                   *heartbeat.HeartBeat
319
        capabilities                *libvirtxml.Caps
320
        hostCpuModel                string
321
        vmiExpectations             *controller.UIDTrackingControllerExpectations
322
        ioErrorRetryManager         *FailRetryManager
323
        hasSynced                   func() bool
324
}
325

326
type virtLauncherCriticalSecurebootError struct {
327
        msg string
328
}
329

330
func (e *virtLauncherCriticalSecurebootError) Error() string { return e.msg }
×
331

332
type vmiIrrecoverableError struct {
333
        msg string
334
}
335

336
func (e *vmiIrrecoverableError) Error() string { return e.msg }
×
337

338
func formatIrrecoverableErrorMessage(domain *api.Domain) string {
×
339
        msg := "unknown reason"
×
340
        if domainPausedFailedPostCopy(domain) {
×
341
                msg = "VMI is irrecoverable due to failed post-copy migration"
×
342
        }
×
343
        return msg
×
344
}
345

346
func handleDomainNotifyPipe(domainPipeStopChan chan struct{}, ln net.Listener, virtShareDir string, vmi *v1.VirtualMachineInstance) {
1✔
347

1✔
348
        fdChan := make(chan net.Conn, 100)
1✔
349

1✔
350
        // Close listener and exit when stop encountered
1✔
351
        go func() {
2✔
352
                <-domainPipeStopChan
1✔
353
                log.Log.Object(vmi).Infof("closing notify pipe listener for vmi")
1✔
354
                if err := ln.Close(); err != nil {
1✔
355
                        log.Log.Object(vmi).Infof("failed closing notify pipe listener for vmi: %v", err)
×
356
                }
×
357
        }()
358

359
        // Listen for new connections.
360
        go func(vmi *v1.VirtualMachineInstance, ln net.Listener, domainPipeStopChan chan struct{}) {
2✔
361
                for {
2✔
362
                        fd, err := ln.Accept()
1✔
363
                        if err != nil {
2✔
364
                                if goerror.Is(err, net.ErrClosed) {
2✔
365
                                        // As Accept blocks, closing it is our mechanism to exit this loop
1✔
366
                                        return
1✔
367
                                }
1✔
368
                                log.Log.Reason(err).Error("Domain pipe accept error encountered.")
×
369
                                // keep listening until stop invoked
×
370
                                time.Sleep(1 * time.Second)
×
371
                        } else {
1✔
372
                                fdChan <- fd
1✔
373
                        }
1✔
374
                }
375
        }(vmi, ln, domainPipeStopChan)
376

377
        // Process new connections
378
        // exit when stop encountered
379
        go func(vmi *v1.VirtualMachineInstance, fdChan chan net.Conn, domainPipeStopChan chan struct{}) {
2✔
380
                for {
2✔
381
                        select {
1✔
382
                        case <-domainPipeStopChan:
1✔
383
                                return
1✔
384
                        case fd := <-fdChan:
1✔
385
                                go func(vmi *v1.VirtualMachineInstance) {
2✔
386
                                        defer fd.Close()
1✔
387

1✔
388
                                        // pipe the VMI domain-notify.sock to the virt-handler domain-notify.sock
1✔
389
                                        // so virt-handler receives notifications from the VMI
1✔
390
                                        conn, err := net.Dial("unix", filepath.Join(virtShareDir, "domain-notify.sock"))
1✔
391
                                        if err != nil {
2✔
392
                                                log.Log.Reason(err).Error("error connecting to domain-notify.sock for proxy connection")
1✔
393
                                                return
1✔
394
                                        }
1✔
395
                                        defer conn.Close()
1✔
396

1✔
397
                                        log.Log.Object(vmi).Infof("Accepted new notify pipe connection for vmi")
1✔
398
                                        copyErr := make(chan error, 2)
1✔
399
                                        go func() {
2✔
400
                                                _, err := io.Copy(fd, conn)
1✔
401
                                                copyErr <- err
1✔
402
                                        }()
1✔
403
                                        go func() {
2✔
404
                                                _, err := io.Copy(conn, fd)
1✔
405
                                                copyErr <- err
1✔
406
                                        }()
1✔
407

408
                                        // wait until one of the copy routines exit then
409
                                        // let the fd close
410
                                        err = <-copyErr
1✔
411
                                        if err != nil {
2✔
412
                                                log.Log.Object(vmi).Infof("closing notify pipe connection for vmi with error: %v", err)
1✔
413
                                        } else {
2✔
414
                                                log.Log.Object(vmi).Infof("gracefully closed notify pipe connection for vmi")
1✔
415
                                        }
1✔
416

417
                                }(vmi)
418
                        }
419
                }
420
        }(vmi, fdChan, domainPipeStopChan)
421
}
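
// Illustrative sketch (hypothetical, not part of this file): the bidirectional
// copy pattern used by the connection handler above, reduced to its core. The
// function returns as soon as either direction finishes copying; the caller's
// deferred Close calls then unblock the remaining goroutine. The name
// exampleProxyConns is made up for this sketch.
func exampleProxyConns(a, b net.Conn) error {
        copyErr := make(chan error, 2)
        go func() {
                // copy bytes flowing from b to a
                _, err := io.Copy(a, b)
                copyErr <- err
        }()
        go func() {
                // copy bytes flowing from a to b
                _, err := io.Copy(b, a)
                copyErr <- err
        }()
        // wait for whichever side finishes (or errors) first
        return <-copyErr
}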
422

423
func (c *VirtualMachineController) startDomainNotifyPipe(domainPipeStopChan chan struct{}, vmi *v1.VirtualMachineInstance) error {
×
424

×
425
        res, err := c.podIsolationDetector.Detect(vmi)
×
426
        if err != nil {
×
427
                return fmt.Errorf("failed to detect isolation for launcher pod when setting up notify pipe: %v", err)
×
428
        }
×
429

430
        // inject the domain-notify.sock into the VMI pod.
431
        root, err := res.MountRoot()
×
432
        if err != nil {
×
433
                return err
×
434
        }
×
435
        socketDir, err := root.AppendAndResolveWithRelativeRoot(c.virtShareDir)
×
436
        if err != nil {
×
437
                return err
×
438
        }
×
439

440
        listener, err := safepath.ListenUnixNoFollow(socketDir, "domain-notify-pipe.sock")
×
441
        if err != nil {
×
442
                log.Log.Reason(err).Error("failed to create unix socket for proxy service")
×
443
                return err
×
444
        }
×
445
        socketPath, err := safepath.JoinNoFollow(socketDir, "domain-notify-pipe.sock")
×
446
        if err != nil {
×
447
                return err
×
448
        }
×
449

450
        if util.IsNonRootVMI(vmi) {
×
451
                err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath)
×
452
                if err != nil {
×
453
                        log.Log.Reason(err).Error("unable to change ownership for domain notify")
×
454
                        return err
×
455
                }
×
456
        }
457

458
        handleDomainNotifyPipe(domainPipeStopChan, listener, c.virtShareDir, vmi)
×
459

×
460
        return nil
×
461
}
462

463
// Determines if a domain's grace period has expired during shutdown.
464
// If the grace period has started but not expired, timeLeft represents
465
// the time in seconds left until the period expires.
466
// If the grace period has not started, timeLeft will be set to -1.
467
func (c *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasExpired bool, timeLeft int64) {
1✔
468

1✔
469
        hasExpired = false
1✔
470
        timeLeft = 0
1✔
471

1✔
472
        if dom == nil {
1✔
473
                hasExpired = true
×
474
                return
×
475
        }
×
476

477
        startTime := int64(0)
1✔
478
        if dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp != nil {
2✔
479
                startTime = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp.UTC().Unix()
1✔
480
        }
1✔
481
        gracePeriod := dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds
1✔
482

1✔
483
        // If gracePeriod == 0, then there will be no startTime set, deletion
1✔
484
        // should occur immediately during shutdown.
1✔
485
        if gracePeriod == 0 {
1✔
486
                hasExpired = true
×
487
                return
×
488
        } else if startTime == 0 {
2✔
489
                // If gracePeriod > 0, then the shutdown signal needs to be sent
1✔
490
                // and the gracePeriod start time needs to be set.
1✔
491
                timeLeft = -1
1✔
492
                return
1✔
493
        }
1✔
494

495
        now := time.Now().UTC().Unix()
1✔
496
        diff := now - startTime
1✔
497

1✔
498
        if diff >= gracePeriod {
2✔
499
                hasExpired = true
1✔
500
                return
1✔
501
        }
1✔
502

503
        timeLeft = int64(gracePeriod - diff)
×
504
        if timeLeft < 1 {
×
505
                timeLeft = 1
×
506
        }
×
507
        return
×
508
}
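
// Hypothetical usage sketch (not part of this file): how a caller can act on the
// (hasExpired, timeLeft) pair returned by hasGracePeriodExpired. The method name
// is illustrative; the real callers sit in this controller's shutdown/sync path.
func (c *VirtualMachineController) exampleGracePeriodAction(vmi *v1.VirtualMachineInstance, domain *api.Domain) (shutdownNow bool) {
        expired, timeLeft := c.hasGracePeriodExpired(domain)
        switch {
        case expired:
                // grace period is over (or never applied): force the shutdown immediately
                return true
        case timeLeft == -1:
                // grace period has not started: a graceful shutdown still needs to be
                // signaled so the start time gets recorded in the domain metadata
                return false
        default:
                // still counting down: re-queue the VMI so it is re-evaluated once the
                // remaining seconds have elapsed
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
                return false
        }
}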
509

510
func (c *VirtualMachineController) hasTargetDetectedReadyDomain(vmi *v1.VirtualMachineInstance) (bool, int64) {
1✔
511
        // give the target node 60 seconds to discover the libvirt domain via the domain informer
1✔
512
        // before allowing the VMI to be processed. This closes the gap between the
1✔
513
        // VMI's status getting updated to reflect the new source node, and the domain
1✔
514
        // informer firing the event to alert the source node of the new domain.
1✔
515
        migrationTargetDelayTimeout := 60
1✔
516

1✔
517
        if vmi.Status.MigrationState != nil &&
1✔
518
                vmi.Status.MigrationState.TargetNodeDomainDetected &&
1✔
519
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp != nil {
2✔
520

1✔
521
                return true, 0
1✔
522
        }
1✔
523

524
        nowUnix := time.Now().UTC().Unix()
×
525
        migrationEndUnix := vmi.Status.MigrationState.EndTimestamp.Time.UTC().Unix()
×
526

×
527
        diff := nowUnix - migrationEndUnix
×
528

×
529
        if diff > int64(migrationTargetDelayTimeout) {
×
530
                return false, 0
×
531
        }
×
532

533
        timeLeft := int64(migrationTargetDelayTimeout) - diff
×
534

×
535
        enqueueTime := timeLeft
×
536
        if enqueueTime < 5 {
×
537
                enqueueTime = 5
×
538
        }
×
539

540
        // re-enqueue the key to ensure it gets processed again within the right time.
541
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(enqueueTime)*time.Second)
×
542

×
543
        return false, timeLeft
×
544
}
545

546
// teardownNetwork performs network cache cleanup for a specific VMI.
547
func (c *VirtualMachineController) teardownNetwork(vmi *v1.VirtualMachineInstance) {
1✔
548
        if string(vmi.UID) == "" {
2✔
549
                return
1✔
550
        }
1✔
551
        if err := c.netConf.Teardown(vmi); err != nil {
1✔
552
                log.Log.Reason(err).Errorf("failed to delete VMI Network cache files: %s", err.Error())
×
553
        }
×
554
        c.netStat.Teardown(vmi)
1✔
555
}
556

557
func domainPausedFailedPostCopy(domain *api.Domain) bool {
1✔
558
        return domain != nil && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedPostcopyFailed
1✔
559
}
1✔
560

561
func domainMigrated(domain *api.Domain) bool {
1✔
562
        return domain != nil && domain.Status.Status == api.Shutoff && domain.Status.Reason == api.ReasonMigrated
1✔
563
}
1✔
564

565
func canUpdateToMounted(currentPhase v1.VolumePhase) bool {
1✔
566
        return currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending || currentPhase == v1.HotplugVolumeAttachedToNode
1✔
567
}
1✔
568

569
func canUpdateToUnmounted(currentPhase v1.VolumePhase) bool {
1✔
570
        return currentPhase == v1.VolumeReady || currentPhase == v1.HotplugVolumeMounted || currentPhase == v1.HotplugVolumeAttachedToNode
1✔
571
}
1✔
572

573
func (c *VirtualMachineController) setMigrationProgressStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
574
        if domain == nil ||
1✔
575
                domain.Spec.Metadata.KubeVirt.Migration == nil ||
1✔
576
                vmi.Status.MigrationState == nil ||
1✔
577
                !c.isMigrationSource(vmi) {
2✔
578
                return
1✔
579
        }
1✔
580

581
        migrationMetadata := domain.Spec.Metadata.KubeVirt.Migration
1✔
582
        if migrationMetadata.UID != vmi.Status.MigrationState.MigrationUID {
1✔
583
                return
×
584
        }
×
585

586
        if vmi.Status.MigrationState.EndTimestamp == nil && migrationMetadata.EndTimestamp != nil {
2✔
587
                if migrationMetadata.Failed {
1✔
588
                        vmi.Status.MigrationState.FailureReason = migrationMetadata.FailureReason
×
589
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("VirtualMachineInstance migration uid %s failed. reason:%s", string(migrationMetadata.UID), migrationMetadata.FailureReason))
×
590
                }
×
591
        }
592

593
        if vmi.Status.MigrationState.StartTimestamp == nil {
2✔
594
                vmi.Status.MigrationState.StartTimestamp = migrationMetadata.StartTimestamp
1✔
595
        }
1✔
596
        if vmi.Status.MigrationState.EndTimestamp == nil {
2✔
597
                vmi.Status.MigrationState.EndTimestamp = migrationMetadata.EndTimestamp
1✔
598
        }
1✔
599
        vmi.Status.MigrationState.AbortStatus = v1.MigrationAbortStatus(migrationMetadata.AbortStatus)
1✔
600
        vmi.Status.MigrationState.Completed = migrationMetadata.Completed
1✔
601
        vmi.Status.MigrationState.Failed = migrationMetadata.Failed
1✔
602
        vmi.Status.MigrationState.Mode = migrationMetadata.Mode
1✔
603
}
604

605
func (c *VirtualMachineController) migrationSourceUpdateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
606

1✔
607
        vmi := origVMI.DeepCopy()
1✔
608
        oldStatus := vmi.DeepCopy().Status
1✔
609

1✔
610
        // if a migration happens very quickly, it's possible parts of the in
1✔
611
        // progress status weren't set. We need to make sure we set this even
1✔
612
        // if the migration has completed
1✔
613
        c.setMigrationProgressStatus(vmi, domain)
1✔
614

1✔
615
        // handle migrations differently than normal status updates.
1✔
616
        //
1✔
617
        // When a successful migration is detected, we must transfer ownership of the VMI
1✔
618
        // from the source node (this node) to the target node (node the domain was migrated to).
1✔
619
        //
1✔
620
        // Transfer ownership by...
1✔
621
        // 1. Marking vmi.Status.MigrationState as completed
1✔
622
        // 2. Update the vmi.Status.NodeName to reflect the target node's name
1✔
623
        // 3. Update the VMI's NodeNameLabel annotation to reflect the target node's name
1✔
624
        // 4. Clear the LauncherContainerImageVersion which virt-controller will detect
1✔
625
        //    and accurately set based on the version used on the target pod
1✔
626
        //
1✔
627
        // After a migration, the VMI's phase is no longer owned by this node. Only the
1✔
628
        // MigrationState status field is eligible to be mutated.
1✔
629
        migrationHost := ""
1✔
630
        if vmi.Status.MigrationState != nil {
2✔
631
                migrationHost = vmi.Status.MigrationState.TargetNode
1✔
632
        }
1✔
633

634
        if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.EndTimestamp == nil {
1✔
635
                now := metav1.NewTime(time.Now())
×
636
                vmi.Status.MigrationState.EndTimestamp = &now
×
637
        }
×
638

639
        targetNodeDetectedDomain, timeLeft := c.hasTargetDetectedReadyDomain(vmi)
1✔
640
        // If we can't detect where the migration went to, then we have no
1✔
641
        // way of transferring ownership. The only option here is to move the
1✔
642
        // vmi to failed.  The cluster vmi controller will then tear down the
1✔
643
        // resulting pods.
1✔
644
        if migrationHost == "" {
1✔
645
                // migrated to unknown host.
×
646
                vmi.Status.Phase = v1.Failed
×
647
                vmi.Status.MigrationState.Completed = true
×
648
                vmi.Status.MigrationState.Failed = true
×
649

×
650
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to unknown host."))
×
651
        } else if !targetNodeDetectedDomain {
1✔
652
                if timeLeft <= 0 {
×
653
                        vmi.Status.Phase = v1.Failed
×
654
                        vmi.Status.MigrationState.Completed = true
×
655
                        vmi.Status.MigrationState.Failed = true
×
656

×
657
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance's domain was never observed on the target after the migration completed within the timeout period."))
×
658
                } else {
×
659
                        log.Log.Object(vmi).Info("Waiting on the target node to observe the migrated domain before performing the handoff")
×
660
                }
×
661
        } else if vmi.Status.MigrationState != nil {
2✔
662
                // this is the migration ACK.
1✔
663
                // At this point we know that the migration has completed and that
1✔
664
                // the target node has seen the domain event.
1✔
665
                vmi.Labels[v1.NodeNameLabel] = migrationHost
1✔
666
                delete(vmi.Labels, v1.OutdatedLauncherImageLabel)
1✔
667
                vmi.Status.LauncherContainerImageVersion = ""
1✔
668
                vmi.Status.NodeName = migrationHost
1✔
669
                // clear the evacuation node name since we have already migrated to a new node
1✔
670
                vmi.Status.EvacuationNodeName = ""
1✔
671
                vmi.Status.MigrationState.Completed = true
1✔
672
                // update the vmi migrationTransport to indicate that the next migration should use the unix URI
1✔
673
                // new workloads will set the migrationTransport on their creation, however, legacy workloads
1✔
674
                // can make the switch only after the first migration
1✔
675
                vmi.Status.MigrationTransport = v1.MigrationTransportUnix
1✔
676
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to node %s.", migrationHost))
1✔
677
                log.Log.Object(vmi).Infof("migration completed to node %s", migrationHost)
1✔
678
        }
1✔
679

680
        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
2✔
681
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
682
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
683
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
1✔
684
                if err != nil {
1✔
685
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
×
686
                        return err
×
687
                }
×
688
        }
689
        return nil
1✔
690
}
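
// Illustrative helper (hypothetical, not part of this file): the expectation-tracked
// status update used above and again in migrationTargetUpdateVMIStatus. The expectation
// is raised before the API write and lowered again if the write fails, so the controller
// does not act on a stale informer cache while the update is in flight.
func (c *VirtualMachineController) exampleTrackedVMIUpdate(vmi *v1.VirtualMachineInstance) error {
        key := controller.VirtualMachineInstanceKey(vmi)
        c.vmiExpectations.SetExpectations(key, 1, 0)
        if _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{}); err != nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                return err
        }
        return nil
}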
691

692
func domainIsActiveOnTarget(domain *api.Domain) bool {
1✔
693

1✔
694
        if domain == nil {
1✔
695
                return false
×
696
        }
×
697

698
        // It's possible for the domain to be active on the target node if the domain is
699
        // 1. Running
700
        // 2. User initiated Paused
701
        if domain.Status.Status == api.Running {
2✔
702
                return true
1✔
703
        } else if domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedUser {
1✔
704
                return true
×
705
        }
×
706
        return false
×
707

708
}
709

710
func (c *VirtualMachineController) migrationTargetUpdateVMIStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
711

1✔
712
        vmiCopy := vmi.DeepCopy()
1✔
713

1✔
714
        if migrations.MigrationFailed(vmi) {
2✔
715
                // nothing left to report on the target node if the migration failed
1✔
716
                return nil
1✔
717
        }
1✔
718

719
        domainExists := domain != nil
1✔
720

1✔
721
        // Handle post migration
1✔
722
        if domainExists && vmi.Status.MigrationState != nil && !vmi.Status.MigrationState.TargetNodeDomainDetected {
2✔
723
                // record that we've seen the domain populated on the target node
1✔
724
                log.Log.Object(vmi).Info("The target node received the migrated domain")
1✔
725
                vmiCopy.Status.MigrationState.TargetNodeDomainDetected = true
1✔
726

1✔
727
                // adjust QEMU process memlock limits in order to enable old virt-launcher pods to
1✔
728
                // hotplug host devices after migration.
1✔
729
                if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
1✔
730
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "Failed to update target node qemu memory limits during live migration")
×
731
                }
×
732

733
        }
734

735
        if domainExists &&
1✔
736
                domainIsActiveOnTarget(domain) &&
1✔
737
                vmi.Status.MigrationState != nil &&
1✔
738
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp == nil {
2✔
739

1✔
740
                // record the moment we detected the domain is running.
1✔
741
                // This is used as a trigger to help coordinate when CNI drivers
1✔
742
                // fail over the IP to the new pod.
1✔
743
                log.Log.Object(vmi).Info("The target node received the running migrated domain")
1✔
744
                now := metav1.Now()
1✔
745
                vmiCopy.Status.MigrationState.TargetNodeDomainReadyTimestamp = &now
1✔
746
                c.finalizeMigration(vmiCopy)
1✔
747
        }
1✔
748

749
        if !migrations.IsMigrating(vmi) {
2✔
750
                destSrcPortsMap := c.migrationProxy.GetTargetListenerPorts(string(vmi.UID))
1✔
751
                if len(destSrcPortsMap) == 0 {
1✔
752
                        msg := "target migration listener is not up for this vmi"
×
753
                        log.Log.Object(vmi).Error(msg)
×
754
                        return fmt.Errorf(msg)
×
755
                }
×
756

757
                hostAddress := ""
1✔
758
                // advertise the listener address to the source node
1✔
759
                if vmi.Status.MigrationState != nil {
2✔
760
                        hostAddress = vmi.Status.MigrationState.TargetNodeAddress
1✔
761
                }
1✔
762
                if hostAddress != c.migrationIpAddress {
2✔
763
                        portsList := make([]string, 0, len(destSrcPortsMap))
1✔
764

1✔
765
                        for k := range destSrcPortsMap {
2✔
766
                                portsList = append(portsList, k)
1✔
767
                        }
1✔
768
                        portsStrList := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(portsList)), ","), "[]")
1✔
769
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), fmt.Sprintf("Migration Target is listening at %s, on ports: %s", c.migrationIpAddress, portsStrList))
1✔
770
                        vmiCopy.Status.MigrationState.TargetNodeAddress = c.migrationIpAddress
1✔
771
                        vmiCopy.Status.MigrationState.TargetDirectMigrationNodePorts = destSrcPortsMap
1✔
772
                }
773

774
                // If the migrated VMI requires dedicated CPUs, report the new pod CPU set to the source node
775
                // via the VMI migration status in order to patch the domain pre migration
776
                if vmi.IsCPUDedicated() {
1✔
777
                        err := c.reportDedicatedCPUSetForMigratingVMI(vmiCopy)
×
778
                        if err != nil {
×
779
                                return err
×
780
                        }
×
781
                        err = c.reportTargetTopologyForMigratingVMI(vmiCopy)
×
782
                        if err != nil {
×
783
                                return err
×
784
                        }
×
785
                }
786
        }
787

788
        // update the VMI if necessary
789
        if !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) {
2✔
790
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
791
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
792
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmiCopy, metav1.UpdateOptions{})
1✔
793
                if err != nil {
1✔
794
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
×
795
                        return err
×
796
                }
×
797
        }
798

799
        return nil
1✔
800
}
801

802
func (c *VirtualMachineController) generateEventsForVolumeStatusChange(vmi *v1.VirtualMachineInstance, newStatusMap map[string]v1.VolumeStatus) {
1✔
803
        newStatusMapCopy := make(map[string]v1.VolumeStatus)
1✔
804
        for k, v := range newStatusMap {
2✔
805
                newStatusMapCopy[k] = v
1✔
806
        }
1✔
807
        for _, oldStatus := range vmi.Status.VolumeStatus {
2✔
808
                newStatus, ok := newStatusMap[oldStatus.Name]
1✔
809
                if !ok {
1✔
810
                        // status got removed
×
811
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, VolumeUnplugged, fmt.Sprintf("Volume %s has been unplugged", oldStatus.Name))
×
812
                        continue
×
813
                }
814
                if newStatus.Phase != oldStatus.Phase {
2✔
815
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, newStatus.Reason, newStatus.Message)
1✔
816
                }
1✔
817
                delete(newStatusMapCopy, newStatus.Name)
1✔
818
        }
819
        // Send events for any new statuses.
820
        for _, v := range newStatusMapCopy {
2✔
821
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v.Reason, v.Message)
1✔
822
        }
1✔
823
}
824

825
func (c *VirtualMachineController) updateHotplugVolumeStatus(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, specVolumeMap map[string]v1.Volume) (v1.VolumeStatus, bool) {
1✔
826
        needsRefresh := false
1✔
827
        if volumeStatus.Target == "" {
2✔
828
                needsRefresh = true
1✔
829
                mounted, err := c.hotplugVolumeMounter.IsMounted(vmi, volumeStatus.Name, volumeStatus.HotplugVolume.AttachPodUID)
1✔
830
                if err != nil {
1✔
831
                        log.Log.Object(vmi).Errorf("error occurred while checking if volume is mounted: %v", err)
×
832
                }
×
833
                if mounted {
2✔
834
                        if _, ok := specVolumeMap[volumeStatus.Name]; ok && canUpdateToMounted(volumeStatus.Phase) {
2✔
835
                                log.DefaultLogger().Infof("Marking volume %s as mounted in pod, it can now be attached", volumeStatus.Name)
1✔
836
                                // mounted, and still in spec, and in phase we can change, update status to mounted.
1✔
837
                                volumeStatus.Phase = v1.HotplugVolumeMounted
1✔
838
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been mounted in virt-launcher pod", volumeStatus.Name)
1✔
839
                                volumeStatus.Reason = VolumeMountedToPodReason
1✔
840
                        }
1✔
841
                } else {
1✔
842
                        // Not mounted; if the volume is no longer in the spec, update the status accordingly
1✔
843
                        if _, ok := specVolumeMap[volumeStatus.Name]; !ok && canUpdateToUnmounted(volumeStatus.Phase) {
2✔
844
                                log.DefaultLogger().Infof("Marking volume %s as unmounted from pod, it can now be detached", volumeStatus.Name)
1✔
845
                                // Not mounted.
1✔
846
                                volumeStatus.Phase = v1.HotplugVolumeUnMounted
1✔
847
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been unmounted from virt-launcher pod", volumeStatus.Name)
1✔
848
                                volumeStatus.Reason = VolumeUnMountedFromPodReason
1✔
849
                        }
1✔
850
                }
851
        } else {
1✔
852
                // Successfully attached to VM.
1✔
853
                volumeStatus.Phase = v1.VolumeReady
1✔
854
                volumeStatus.Message = fmt.Sprintf("Successfully attach hotplugged volume %s to VM", volumeStatus.Name)
1✔
855
                volumeStatus.Reason = VolumeReadyReason
1✔
856
        }
1✔
857
        return volumeStatus, needsRefresh
1✔
858
}
859

860
func needToComputeChecksums(vmi *v1.VirtualMachineInstance) bool {
1✔
861
        containerDisks := map[string]*v1.Volume{}
1✔
862
        for _, volume := range vmi.Spec.Volumes {
2✔
863
                if volume.VolumeSource.ContainerDisk != nil {
2✔
864
                        containerDisks[volume.Name] = &volume
1✔
865
                }
1✔
866
        }
867

868
        for i := range vmi.Status.VolumeStatus {
2✔
869
                _, isContainerDisk := containerDisks[vmi.Status.VolumeStatus[i].Name]
1✔
870
                if !isContainerDisk {
2✔
871
                        continue
1✔
872
                }
873

874
                if vmi.Status.VolumeStatus[i].ContainerDiskVolume == nil ||
1✔
875
                        vmi.Status.VolumeStatus[i].ContainerDiskVolume.Checksum == 0 {
2✔
876
                        return true
1✔
877
                }
1✔
878
        }
879

880
        if util.HasKernelBootContainerImage(vmi) {
1✔
881
                if vmi.Status.KernelBootStatus == nil {
×
882
                        return true
×
883
                }
×
884

885
                kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
×
886

×
887
                if kernelBootContainer.KernelPath != "" &&
×
888
                        (vmi.Status.KernelBootStatus.KernelInfo == nil ||
×
889
                                vmi.Status.KernelBootStatus.KernelInfo.Checksum == 0) {
×
890
                        return true
×
891

×
892
                }
×
893

894
                if kernelBootContainer.InitrdPath != "" &&
×
895
                        (vmi.Status.KernelBootStatus.InitrdInfo == nil ||
×
896
                                vmi.Status.KernelBootStatus.InitrdInfo.Checksum == 0) {
×
897
                        return true
×
898

×
899
                }
×
900
        }
901

902
        return false
1✔
903
}
904

905
func (c *VirtualMachineController) updateChecksumInfo(vmi *v1.VirtualMachineInstance, syncError error) error {
1✔
906

1✔
907
        if syncError != nil || vmi.DeletionTimestamp != nil || !needToComputeChecksums(vmi) {
2✔
908
                return nil
1✔
909
        }
1✔
910

911
        diskChecksums, err := c.containerDiskMounter.ComputeChecksums(vmi)
1✔
912
        if goerror.Is(err, container_disk.ErrDiskContainerGone) {
1✔
913
                log.Log.Errorf("cannot compute checksums as containerdisk/kernelboot containers seem to have been terminated")
×
914
                return nil
×
915
        }
×
916
        if err != nil {
1✔
917
                return err
×
918
        }
×
919

920
        // containerdisks
921
        for i := range vmi.Status.VolumeStatus {
2✔
922
                checksum, exists := diskChecksums.ContainerDiskChecksums[vmi.Status.VolumeStatus[i].Name]
1✔
923
                if !exists {
1✔
924
                        // not a containerdisk
×
925
                        continue
×
926
                }
927

928
                vmi.Status.VolumeStatus[i].ContainerDiskVolume = &v1.ContainerDiskInfo{
1✔
929
                        Checksum: checksum,
1✔
930
                }
1✔
931
        }
932

933
        // kernelboot
934
        if util.HasKernelBootContainerImage(vmi) {
2✔
935
                vmi.Status.KernelBootStatus = &v1.KernelBootStatus{}
1✔
936

1✔
937
                if diskChecksums.KernelBootChecksum.Kernel != nil {
2✔
938
                        vmi.Status.KernelBootStatus.KernelInfo = &v1.KernelInfo{
1✔
939
                                Checksum: *diskChecksums.KernelBootChecksum.Kernel,
1✔
940
                        }
1✔
941
                }
1✔
942

943
                if diskChecksums.KernelBootChecksum.Initrd != nil {
2✔
944
                        vmi.Status.KernelBootStatus.InitrdInfo = &v1.InitrdInfo{
1✔
945
                                Checksum: *diskChecksums.KernelBootChecksum.Initrd,
1✔
946
                        }
1✔
947
                }
1✔
948
        }
949

950
        return nil
1✔
951
}
952

953
func (c *VirtualMachineController) updateVolumeStatusesFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
1✔
954
        // used by unit test
1✔
955
        hasHotplug := false
1✔
956

1✔
957
        if domain == nil {
2✔
958
                return hasHotplug
1✔
959
        }
1✔
960

961
        if len(vmi.Status.VolumeStatus) > 0 {
2✔
962
                diskDeviceMap := make(map[string]string)
1✔
963
                for _, disk := range domain.Spec.Devices.Disks {
2✔
964
                        diskDeviceMap[disk.Alias.GetName()] = disk.Target.Device
1✔
965
                }
1✔
966
                specVolumeMap := make(map[string]v1.Volume)
1✔
967
                for _, volume := range vmi.Spec.Volumes {
2✔
968
                        specVolumeMap[volume.Name] = volume
1✔
969
                }
1✔
970
                newStatusMap := make(map[string]v1.VolumeStatus)
1✔
971
                newStatuses := make([]v1.VolumeStatus, 0)
1✔
972
                needsRefresh := false
1✔
973
                for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
974
                        tmpNeedsRefresh := false
1✔
975
                        if _, ok := diskDeviceMap[volumeStatus.Name]; ok {
2✔
976
                                volumeStatus.Target = diskDeviceMap[volumeStatus.Name]
1✔
977
                        }
1✔
978
                        if volumeStatus.HotplugVolume != nil {
2✔
979
                                hasHotplug = true
1✔
980
                                volumeStatus, tmpNeedsRefresh = c.updateHotplugVolumeStatus(vmi, volumeStatus, specVolumeMap)
1✔
981
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
982
                        }
1✔
983
                        if volumeStatus.MemoryDumpVolume != nil {
2✔
984
                                volumeStatus, tmpNeedsRefresh = c.updateMemoryDumpInfo(vmi, volumeStatus, domain)
1✔
985
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
986
                        }
1✔
987
                        newStatuses = append(newStatuses, volumeStatus)
1✔
988
                        newStatusMap[volumeStatus.Name] = volumeStatus
1✔
989
                }
990
                sort.SliceStable(newStatuses, func(i, j int) bool {
2✔
991
                        return strings.Compare(newStatuses[i].Name, newStatuses[j].Name) == -1
1✔
992
                })
1✔
993
                if needsRefresh {
2✔
994
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
1✔
995
                }
1✔
996
                c.generateEventsForVolumeStatusChange(vmi, newStatusMap)
1✔
997
                vmi.Status.VolumeStatus = newStatuses
1✔
998
        }
999
        return hasHotplug
1✔
1000
}
1001

1002
func (c *VirtualMachineController) updateGuestInfoFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
1003

1✔
1004
        if domain == nil {
2✔
1005
                return
1✔
1006
        }
1✔
1007

1008
        if vmi.Status.GuestOSInfo.Name != domain.Status.OSInfo.Name {
2✔
1009
                vmi.Status.GuestOSInfo.Name = domain.Status.OSInfo.Name
1✔
1010
                vmi.Status.GuestOSInfo.Version = domain.Status.OSInfo.Version
1✔
1011
                vmi.Status.GuestOSInfo.KernelRelease = domain.Status.OSInfo.KernelRelease
1✔
1012
                vmi.Status.GuestOSInfo.PrettyName = domain.Status.OSInfo.PrettyName
1✔
1013
                vmi.Status.GuestOSInfo.VersionID = domain.Status.OSInfo.VersionId
1✔
1014
                vmi.Status.GuestOSInfo.KernelVersion = domain.Status.OSInfo.KernelVersion
1✔
1015
                vmi.Status.GuestOSInfo.Machine = domain.Status.OSInfo.Machine
1✔
1016
                vmi.Status.GuestOSInfo.ID = domain.Status.OSInfo.Id
1✔
1017
        }
1✔
1018
}
1019

1020
func (c *VirtualMachineController) updateAccessCredentialConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1021

1✔
1022
        if domain == nil || domain.Spec.Metadata.KubeVirt.AccessCredential == nil {
2✔
1023
                return
1✔
1024
        }
1✔
1025

1026
        message := domain.Spec.Metadata.KubeVirt.AccessCredential.Message
1✔
1027
        status := k8sv1.ConditionFalse
1✔
1028
        if domain.Spec.Metadata.KubeVirt.AccessCredential.Succeeded {
2✔
1029
                status = k8sv1.ConditionTrue
1✔
1030
        }
1✔
1031

1032
        add := false
1✔
1033
        condition := condManager.GetCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1034
        if condition == nil {
2✔
1035
                add = true
1✔
1036
        } else if condition.Status != status || condition.Message != message {
3✔
1037
                // if not as expected, remove, then add.
1✔
1038
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1039
                add = true
1✔
1040
        }
1✔
1041
        if add {
2✔
1042
                newCondition := v1.VirtualMachineInstanceCondition{
1✔
1043
                        Type:               v1.VirtualMachineInstanceAccessCredentialsSynchronized,
1✔
1044
                        LastTransitionTime: metav1.Now(),
1✔
1045
                        Status:             status,
1✔
1046
                        Message:            message,
1✔
1047
                }
1✔
1048
                vmi.Status.Conditions = append(vmi.Status.Conditions, newCondition)
1✔
1049
                if status == k8sv1.ConditionTrue {
2✔
1050
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.AccessCredentialsSyncSuccess.String(), message)
1✔
1051
                } else {
2✔
1052
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.AccessCredentialsSyncFailed.String(), message)
1✔
1053
                }
1✔
1054
        }
1055
}
1056

1057
func (c *VirtualMachineController) updateLiveMigrationConditions(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager) {
        // Calculate whether the VM is migratable
        liveMigrationCondition, isBlockMigration := c.calculateLiveMigrationCondition(vmi)
        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceIsMigratable) {
                vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
                // Set VMI Migration Method
                if isBlockMigration {
                        vmi.Status.MigrationMethod = v1.BlockMigration
                } else {
                        vmi.Status.MigrationMethod = v1.LiveMigration
                }
        } else {
                cond := condManager.GetCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
                if !equality.Semantic.DeepEqual(cond, liveMigrationCondition) {
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
                        vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
                }
        }
        storageLiveMigCond := c.calculateLiveStorageMigrationCondition(vmi)
        condManager.UpdateCondition(vmi, storageLiveMigCond)
        evictable := migrations.VMIMigratableOnEviction(c.clusterConfig, vmi)
        if evictable && liveMigrationCondition.Status == k8sv1.ConditionFalse {
                c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), "EvictionStrategy is set but vmi is not migratable; %s", liveMigrationCondition.Message)
        }
}

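// updateGuestAgentConditions sets or clears the AgentConnected condition based on
// the qemu guest agent channel state, and marks connected agents whose version or
// command set is unsupported with the UnsupportedAgent condition.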
func (c *VirtualMachineController) updateGuestAgentConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {

        // Update the condition when GA is connected
        channelConnected := false
        if domain != nil {
                for _, channel := range domain.Spec.Devices.Channels {
                        if channel.Target != nil {
                                log.Log.V(4).Infof("Channel: %s, %s", channel.Target.Name, channel.Target.State)
                                if channel.Target.Name == "org.qemu.guest_agent.0" {
                                        if channel.Target.State == "connected" {
                                                channelConnected = true
                                        }
                                }

                        }
                }
        }

        switch {
        case channelConnected && !condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected):
                agentCondition := v1.VirtualMachineInstanceCondition{
                        Type:          v1.VirtualMachineInstanceAgentConnected,
                        LastProbeTime: metav1.Now(),
                        Status:        k8sv1.ConditionTrue,
                }
                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
        case !channelConnected:
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAgentConnected)
        }

        if condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected) {
                client, err := c.getLauncherClient(vmi)
                if err != nil {
                        return err
                }

                guestInfo, err := client.GetGuestInfo()
                if err != nil {
                        return err
                }

                var supported = false
                var reason = ""

                // For current versions, virt-launcher's supported commands will always contain data.
                // For backwards compatibility: during upgrade from a previous version of KubeVirt,
                // virt-launcher might not provide any supported commands. If the list of supported
                // commands is empty, fall back to previous behavior.
                if len(guestInfo.SupportedCommands) > 0 {
                        supported, reason = isGuestAgentSupported(vmi, guestInfo.SupportedCommands)
                        log.Log.V(3).Object(vmi).Info(reason)
                } else {
                        for _, version := range c.clusterConfig.GetSupportedAgentVersions() {
                                supported = supported || regexp.MustCompile(version).MatchString(guestInfo.GAVersion)
                        }
                        if !supported {
                                reason = fmt.Sprintf("Guest agent version '%s' is not supported", guestInfo.GAVersion)
                        }
                }

                if !supported {
                        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent) {
                                agentCondition := v1.VirtualMachineInstanceCondition{
                                        Type:          v1.VirtualMachineInstanceUnsupportedAgent,
                                        LastProbeTime: metav1.Now(),
                                        Status:        k8sv1.ConditionTrue,
                                        Reason:        reason,
                                }
                                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
                        }
                } else {
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent)
                }

        }
        return nil
}

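// updatePausedConditions adds the Paused condition while the domain is reported
// as paused and removes it once the domain is no longer paused.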
func (c *VirtualMachineController) updatePausedConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {

        // Update paused condition in case VMI was paused / unpaused
        if domain != nil && domain.Status.Status == api.Paused {
                if !condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
                        reason := domain.Status.Reason
                        if c.isVMIPausedDuringMigration(vmi) {
                                reason = api.ReasonPausedMigration
                        }
                        calculatePausedCondition(vmi, reason)
                }
        } else if condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
                log.Log.Object(vmi).V(3).Info("Removing paused condition")
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstancePaused)
        }
}

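// dumpTargetFile builds a timestamped memory dump file name for the given VMI and volume.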
func dumpTargetFile(vmiName, volName string) string {
        targetFileName := fmt.Sprintf("%s-%s-%s.memory.dump", vmiName, volName, time.Now().Format("20060102-150405"))
        return targetFileName
}

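// updateMemoryDumpInfo advances the phase of a memory dump volume based on the
// memory dump metadata reported by the domain, returning the updated volume
// status and whether it changed and needs a refresh.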
func (c *VirtualMachineController) updateMemoryDumpInfo(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, domain *api.Domain) (v1.VolumeStatus, bool) {
        needsRefresh := false
        switch volumeStatus.Phase {
        case v1.HotplugVolumeMounted:
                needsRefresh = true
                log.Log.Object(vmi).V(3).Infof("Memory dump volume %s attached, marking it in progress", volumeStatus.Name)
                volumeStatus.Phase = v1.MemoryDumpVolumeInProgress
                volumeStatus.Message = fmt.Sprintf("Memory dump Volume %s is attached, getting memory dump", volumeStatus.Name)
                volumeStatus.Reason = VolumeMountedToPodReason
                volumeStatus.MemoryDumpVolume.TargetFileName = dumpTargetFile(vmi.Name, volumeStatus.Name)
        case v1.MemoryDumpVolumeInProgress:
                memoryDumpMetadata := domain.Spec.Metadata.KubeVirt.MemoryDump
                if memoryDumpMetadata == nil || memoryDumpMetadata.FileName != volumeStatus.MemoryDumpVolume.TargetFileName {
                        // memory dump wasn't triggered yet
                        return volumeStatus, needsRefresh
                }
                needsRefresh = true
                if memoryDumpMetadata.StartTimestamp != nil {
                        volumeStatus.MemoryDumpVolume.StartTimestamp = memoryDumpMetadata.StartTimestamp
                }
                if memoryDumpMetadata.EndTimestamp != nil && memoryDumpMetadata.Failed {
                        log.Log.Object(vmi).Errorf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
                        volumeStatus.Message = fmt.Sprintf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
                        volumeStatus.Phase = v1.MemoryDumpVolumeFailed
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
                } else if memoryDumpMetadata.Completed {
                        log.Log.Object(vmi).V(3).Infof("Marking memory dump to volume %s as completed", volumeStatus.Name)
                        volumeStatus.Phase = v1.MemoryDumpVolumeCompleted
                        volumeStatus.Message = fmt.Sprintf("Memory dump to Volume %s has completed successfully", volumeStatus.Name)
                        volumeStatus.Reason = VolumeReadyReason
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
                }
        }

        return volumeStatus, needsRefresh
}

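// updateFSFreezeStatus mirrors the domain filesystem freeze status into the VMI
// status, clearing it when the filesystems are thawed.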
func (c *VirtualMachineController) updateFSFreezeStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {

        if domain == nil || domain.Status.FSFreezeStatus.Status == "" {
                return
        }

        if domain.Status.FSFreezeStatus.Status == api.FSThawed {
                vmi.Status.FSFreezeStatus = ""
        } else {
                vmi.Status.FSFreezeStatus = domain.Status.FSFreezeStatus.Status
        }

}

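// IsoGuestVolumePath returns the path of the generated ISO for config-style
// volumes (cloud-init, ConfigMap, DownwardAPI, Secret, ServiceAccount, Sysprep),
// or an empty string for volume types that have no generated ISO.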
func IsoGuestVolumePath(namespace, name string, volume *v1.Volume) string {
        const basepath = "/var/run"
        switch {
        case volume.CloudInitNoCloud != nil:
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "noCloud.iso")
        case volume.CloudInitConfigDrive != nil:
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "configdrive.iso")
        case volume.ConfigMap != nil:
                return config.GetConfigMapDiskPath(volume.Name)
        case volume.DownwardAPI != nil:
                return config.GetDownwardAPIDiskPath(volume.Name)
        case volume.Secret != nil:
                return config.GetSecretDiskPath(volume.Name)
        case volume.ServiceAccount != nil:
                return config.GetServiceAccountDiskPath()
        case volume.Sysprep != nil:
                return config.GetSysprepDiskPath(volume.Name)
        default:
                return ""
        }
}

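// updateIsoSizeStatus records the on-disk size of each generated ISO in the
// matching entry of the VMI volume statuses while the VMI is running.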
func (c *VirtualMachineController) updateIsoSizeStatus(vmi *v1.VirtualMachineInstance) {
        var podUID string
        if vmi.Status.Phase != v1.Running {
                return
        }

        for k, v := range vmi.Status.ActivePods {
                if v == vmi.Status.NodeName {
                        podUID = string(k)
                        break
                }
        }
        if podUID == "" {
                log.DefaultLogger().Warningf("failed to find pod UID for VMI %s", vmi.Name)
                return
        }

        volumes := make(map[string]v1.Volume)
        for _, volume := range vmi.Spec.Volumes {
                volumes[volume.Name] = volume
        }

        for _, disk := range vmi.Spec.Domain.Devices.Disks {
                volume, ok := volumes[disk.Name]
                if !ok {
                        log.DefaultLogger().Warningf("No matching volume with name %s found", disk.Name)
                        continue
                }

                volPath := IsoGuestVolumePath(vmi.Namespace, vmi.Name, &volume)
                if volPath == "" {
                        continue
                }

                res, err := c.podIsolationDetector.Detect(vmi)
                if err != nil {
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
                        continue
                }

                rootPath, err := res.MountRoot()
                if err != nil {
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
                        continue
                }

                safeVolPath, err := rootPath.AppendAndResolveWithRelativeRoot(volPath)
                if err != nil {
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
                        continue
                }
                fileInfo, err := safepath.StatAtNoFollow(safeVolPath)
                if err != nil {
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
                        continue
                }

                for i := range vmi.Status.VolumeStatus {
                        if vmi.Status.VolumeStatus[i].Name == volume.Name {
                                vmi.Status.VolumeStatus[i].Size = fileInfo.Size()
                                continue
                        }
                }
        }
}

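// updateSELinuxContext stores the SELinux context of the virt-launcher process in
// the VMI status, or "none" when SELinux is not present on the node.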
func (c *VirtualMachineController) updateSELinuxContext(vmi *v1.VirtualMachineInstance) error {
        _, present, err := selinux.NewSELinux()
        if err != nil {
                return err
        }
        if present {
                context, err := selinux.GetVirtLauncherContext(vmi)
                if err != nil {
                        return err
                }
                vmi.Status.SelinuxContext = context
        } else {
                vmi.Status.SelinuxContext = "none"
        }

        return nil
}

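// updateVMIStatusFromDomain updates the VMI status fields that are derived from
// the domain and the local node, such as guest info, volume statuses, filesystem
// freeze state, machine type, memory and network status.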
func (c *VirtualMachineController) updateVMIStatusFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        c.updateIsoSizeStatus(vmi)
        err := c.updateSELinuxContext(vmi)
        if err != nil {
                log.Log.Reason(err).Errorf("couldn't find the SELinux context for %s", vmi.Name)
        }
        c.setMigrationProgressStatus(vmi, domain)
        c.updateGuestInfoFromDomain(vmi, domain)
        c.updateVolumeStatusesFromDomain(vmi, domain)
        c.updateFSFreezeStatus(vmi, domain)
        c.updateMachineType(vmi, domain)
        if err = c.updateMemoryInfo(vmi, domain); err != nil {
                return err
        }
        err = c.netStat.UpdateStatus(vmi, domain)
        return err
}

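// updateVMIConditions refreshes the access credential, live migration, guest
// agent and paused conditions of the VMI.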
func (c *VirtualMachineController) updateVMIConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
        c.updateAccessCredentialConditions(vmi, domain, condManager)
        c.updateLiveMigrationConditions(vmi, condManager)
        err := c.updateGuestAgentConditions(vmi, domain, condManager)
        if err != nil {
                return err
        }
        c.updatePausedConditions(vmi, domain, condManager)

        return nil
}

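// updateVMIStatus recomputes the VMI status and conditions from the domain,
// handles any sync error, and issues an update to the API server when the status
// has changed, recording an event on phase transitions.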
func (c *VirtualMachineController) updateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain, syncError error) (err error) {
        condManager := controller.NewVirtualMachineInstanceConditionManager()

        // Don't update the VirtualMachineInstance if it is already in a final state
        if origVMI.IsFinal() {
                return nil
        } else if origVMI.Status.NodeName != "" && origVMI.Status.NodeName != c.host {
                // Only update the VMI's phase if this node owns the VMI.
                // not owned by this host, likely the result of a migration
                return nil
        } else if domainMigrated(domain) {
                return c.migrationSourceUpdateVMIStatus(origVMI, domain)
        }

        vmi := origVMI.DeepCopy()
        oldStatus := *vmi.Status.DeepCopy()

        // Update VMI status fields based on what is reported on the domain
        err = c.updateVMIStatusFromDomain(vmi, domain)
        if err != nil {
                return err
        }

        // Calculate the new VirtualMachineInstance state based on what libvirt reported
        err = c.setVmPhaseForStatusReason(domain, vmi)
        if err != nil {
                return err
        }

        // Update conditions on VMI Status
        err = c.updateVMIConditions(vmi, domain, condManager)
        if err != nil {
                return err
        }

        // Store containerdisks and kernelboot checksums
        if err := c.updateChecksumInfo(vmi, syncError); err != nil {
                return err
        }

        // Handle sync error
        handleSyncError(vmi, condManager, syncError)

        controller.SetVMIPhaseTransitionTimestamp(origVMI, vmi)

        // Only issue vmi update if status has changed
        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
                key := controller.VirtualMachineInstanceKey(vmi)
                c.vmiExpectations.SetExpectations(key, 1, 0)
                _, err = c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
                if err != nil {
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
                        return err
                }
        }

        // Record an event on the VMI when the VMI's phase changes
        if oldStatus.Phase != vmi.Status.Phase {
                c.recordPhaseChangeEvent(vmi)
        }

        return nil
}

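// handleSyncError marks the VMI as Failed for critical virt-launcher errors and
// records the sync failure as a condition on the VMI.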
func handleSyncError(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager, syncError error) {
        var criticalNetErr *neterrors.CriticalNetworkError
        if goerror.As(syncError, &criticalNetErr) {
                log.Log.Errorf("virt-launcher crashed due to a network error. Updating VMI %s status to Failed", vmi.Name)
                vmi.Status.Phase = v1.Failed
        }
        if _, ok := syncError.(*virtLauncherCriticalSecurebootError); ok {
                log.Log.Errorf("virt-launcher does not support the Secure Boot setting. Updating VMI %s status to Failed", vmi.Name)
                vmi.Status.Phase = v1.Failed
        }

        if _, ok := syncError.(*vmiIrrecoverableError); ok {
                log.Log.Errorf("virt-launcher reached an irrecoverable error. Updating VMI %s status to Failed", vmi.Name)
                vmi.Status.Phase = v1.Failed
        }
        condManager.CheckFailure(vmi, syncError, "Synchronizing with the Domain failed.")
}

func (c *VirtualMachineController) recordPhaseChangeEvent(vmi *v1.VirtualMachineInstance) {
        switch vmi.Status.Phase {
        case v1.Running:
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Started.String(), VMIStarted)
        case v1.Succeeded:
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Stopped.String(), VMIShutdown)
        case v1.Failed:
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Stopped.String(), VMICrashed)
        }
}

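// calculatePausedCondition appends a Paused condition whose reason and message
// reflect why the domain was paused (migration monitor, user request or IO error).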
func calculatePausedCondition(vmi *v1.VirtualMachineInstance, reason api.StateChangeReason) {
        now := metav1.NewTime(time.Now())
        switch reason {
        case api.ReasonPausedMigration:
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
                        Type:               v1.VirtualMachineInstancePaused,
                        Status:             k8sv1.ConditionTrue,
                        LastProbeTime:      now,
                        LastTransitionTime: now,
                        Reason:             "PausedByMigrationMonitor",
                        Message:            "VMI was paused by the migration monitor",
                })
        case api.ReasonPausedUser:
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
                        Type:               v1.VirtualMachineInstancePaused,
                        Status:             k8sv1.ConditionTrue,
                        LastProbeTime:      now,
                        LastTransitionTime: now,
                        Reason:             "PausedByUser",
                        Message:            "VMI was paused by user",
                })
        case api.ReasonPausedIOError:
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
                        Type:               v1.VirtualMachineInstancePaused,
                        Status:             k8sv1.ConditionTrue,
                        LastProbeTime:      now,
                        LastTransitionTime: now,
                        Reason:             "PausedIOError",
                        Message:            "VMI was paused, low-level IO error detected",
                })
        default:
                log.Log.Object(vmi).V(3).Infof("Domain is paused for unknown reason, %s", reason)
        }
}

func newNonMigratableCondition(msg string, reason string) *v1.VirtualMachineInstanceCondition {
        return &v1.VirtualMachineInstanceCondition{
                Type:    v1.VirtualMachineInstanceIsMigratable,
                Status:  k8sv1.ConditionFalse,
                Message: msg,
                Reason:  reason,
        }
}

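// calculateLiveMigrationCondition determines whether the VMI is live migratable,
// returning the IsMigratable condition together with a flag indicating whether a
// block migration would be required.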
func (c *VirtualMachineController) calculateLiveMigrationCondition(vmi *v1.VirtualMachineInstance) (*v1.VirtualMachineInstanceCondition, bool) {
        isBlockMigration, err := c.checkVolumesForMigration(vmi)
        if err != nil {
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonDisksNotMigratable), isBlockMigration
        }

        err = c.checkNetworkInterfacesForMigration(vmi)
        if err != nil {
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonInterfaceNotMigratable), isBlockMigration
        }

        if err := c.isHostModelMigratable(vmi); err != nil {
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonCPUModeNotMigratable), isBlockMigration
        }

        if vmiContainsPCIHostDevice(vmi) {
                return newNonMigratableCondition("VMI uses a PCI host device", v1.VirtualMachineInstanceReasonHostDeviceNotMigratable), isBlockMigration
        }

        if util.IsSEVVMI(vmi) {
                return newNonMigratableCondition("VMI uses SEV", v1.VirtualMachineInstanceReasonSEVNotMigratable), isBlockMigration
        }

        if reservation.HasVMIPersistentReservation(vmi) {
                return newNonMigratableCondition("VMI uses SCSI persistent reservation", v1.VirtualMachineInstanceReasonPRNotMigratable), isBlockMigration
        }

        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
                return newNonMigratableCondition(tscRequirement.Reason, v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable), isBlockMigration
        }

        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
                return newNonMigratableCondition("VMI uses hyperv passthrough", v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable), isBlockMigration
        }

        return &v1.VirtualMachineInstanceCondition{
                Type:   v1.VirtualMachineInstanceIsMigratable,
                Status: k8sv1.ConditionTrue,
        }, isBlockMigration
}

func vmiContainsPCIHostDevice(vmi *v1.VirtualMachineInstance) bool {
        return len(vmi.Spec.Domain.Devices.HostDevices) > 0 || len(vmi.Spec.Domain.Devices.GPUs) > 0
}

type multipleNonMigratableCondition struct {
        reasons []string
        msgs    []string
}

func newMultipleNonMigratableCondition() *multipleNonMigratableCondition {
        return &multipleNonMigratableCondition{}
}

func (cond *multipleNonMigratableCondition) addNonMigratableCondition(reason, msg string) {
        cond.reasons = append(cond.reasons, reason)
        cond.msgs = append(cond.msgs, msg)
}

func (cond *multipleNonMigratableCondition) String() string {
        var buffer bytes.Buffer
        for i, c := range cond.reasons {
                if i > 0 {
                        buffer.WriteString(", ")
                }
                buffer.WriteString(fmt.Sprintf("%s: %s", c, cond.msgs[i]))
        }
        return buffer.String()
}

func (cond *multipleNonMigratableCondition) generateStorageLiveMigrationCondition() *v1.VirtualMachineInstanceCondition {
        switch len(cond.reasons) {
        case 0:
                return &v1.VirtualMachineInstanceCondition{
                        Type:   v1.VirtualMachineInstanceIsStorageLiveMigratable,
                        Status: k8sv1.ConditionTrue,
                }
        default:
                return &v1.VirtualMachineInstanceCondition{
                        Type:    v1.VirtualMachineInstanceIsStorageLiveMigratable,
                        Status:  k8sv1.ConditionFalse,
                        Message: cond.String(),
                        Reason:  v1.VirtualMachineInstanceReasonNotMigratable,
                }
        }
}

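// calculateLiveStorageMigrationCondition evaluates all reasons that would prevent
// a storage live migration and aggregates them into a single
// StorageLiveMigratable condition.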
func (c *VirtualMachineController) calculateLiveStorageMigrationCondition(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstanceCondition {
        multiCond := newMultipleNonMigratableCondition()

        if err := c.checkNetworkInterfacesForMigration(vmi); err != nil {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonInterfaceNotMigratable, err.Error())
        }

        if err := c.isHostModelMigratable(vmi); err != nil {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonCPUModeNotMigratable, err.Error())
        }

        if vmiContainsPCIHostDevice(vmi) {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHostDeviceNotMigratable, "VMI uses a PCI host device")
        }

        if util.IsSEVVMI(vmi) {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonSEVNotMigratable, "VMI uses SEV")
        }

        if reservation.HasVMIPersistentReservation(vmi) {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonPRNotMigratable, "VMI uses SCSI persistent reservation")
        }

        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable, tscRequirement.Reason)
        }

        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable, "VMI uses hyperv passthrough")
        }

        return multiCond.generateStorageLiveMigrationCondition()
}

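// Run starts the virt-handler controller: it launches the auxiliary managers,
// waits for the caches to sync, queues leftover domains for cleanup, and then
// runs the worker loops until the stop channel is closed.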
func (c *VirtualMachineController) Run(threadiness int, stopCh chan struct{}) {
        defer c.queue.ShutDown()
        log.Log.Info("Starting virt-handler controller.")

        go c.deviceManagerController.Run(stopCh)

        go c.downwardMetricsManager.Run(stopCh)

        cache.WaitForCacheSync(stopCh, c.hasSynced)

        // queue keys for previous Domains on the host that no longer exist
        // in the cache. This ensures we perform local cleanup of deleted VMs.
        for _, domain := range c.domainStore.List() {
                d := domain.(*api.Domain)
                vmiRef := v1.NewVMIReferenceWithUUID(
                        d.ObjectMeta.Namespace,
                        d.ObjectMeta.Name,
                        d.Spec.Metadata.KubeVirt.UID)

                key := controller.VirtualMachineInstanceKey(vmiRef)

                _, exists, _ := c.vmiSourceStore.GetByKey(key)
                if !exists {
                        c.queue.Add(key)
                }
        }

        heartBeatDone := c.heartBeat.Run(c.heartBeatInterval, stopCh)

        go c.ioErrorRetryManager.Run(stopCh)

        // Start the actual work
        for i := 0; i < threadiness; i++ {
                go wait.Until(c.runWorker, time.Second, stopCh)
        }

        <-heartBeatDone
        <-stopCh
        log.Log.Info("Stopping virt-handler controller.")
}

func (c *VirtualMachineController) runWorker() {
        for c.Execute() {
        }
}

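// Execute processes a single key from the work queue and re-enqueues it with
// rate limiting when processing fails.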
func (c *VirtualMachineController) Execute() bool {
        key, quit := c.queue.Get()
        if quit {
                return false
        }
        defer c.queue.Done(key)
        if err := c.execute(key); err != nil {
                log.Log.Reason(err).Infof("re-enqueuing VirtualMachineInstance %v", key)
                c.queue.AddRateLimited(key)
        } else {
                log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
                c.queue.Forget(key)
        }
        return true
}

func (c *VirtualMachineController) getVMIFromCache(key string) (vmi *v1.VirtualMachineInstance, exists bool, err error) {

        // Fetch the latest VMI state from cache
        obj, exists, err := c.vmiSourceStore.GetByKey(key)
        if err != nil {
                return nil, false, err
        }

        if !exists {
                obj, exists, err = c.vmiTargetStore.GetByKey(key)
                if err != nil {
                        return nil, false, err
                }
        }

        // Retrieve the VirtualMachineInstance
        if !exists {
                namespace, name, err := cache.SplitMetaNamespaceKey(key)
                if err != nil {
                        // TODO log and don't retry
                        return nil, false, err
                }
                vmi = v1.NewVMIReferenceFromNameWithNS(namespace, name)
        } else {
                vmi = obj.(*v1.VirtualMachineInstance)
        }
        return vmi, exists, nil
}

func (c *VirtualMachineController) getDomainFromCache(key string) (domain *api.Domain, exists bool, cachedUID types.UID, err error) {

        obj, exists, err := c.domainStore.GetByKey(key)

        if err != nil {
                return nil, false, "", err
        }

        if exists {
                domain = obj.(*api.Domain)
                cachedUID = domain.Spec.Metadata.KubeVirt.UID

                // We're using the DeletionTimestamp to signify that the
                // Domain is deleted rather than sending the DELETE watch event.
                if domain.ObjectMeta.DeletionTimestamp != nil {
                        exists = false
                        domain = nil
                }
        }
        return domain, exists, cachedUID, nil
}

func (c *VirtualMachineController) migrationOrphanedSourceNodeExecute(vmi *v1.VirtualMachineInstance, domainExists bool) error {

        if domainExists {
                err := c.processVmDelete(vmi)
                if err != nil {
                        return err
                }
                // we can perform the cleanup immediately after
                // the successful delete here because we don't have
                // to report the deletion results on the VMI status
                // in this case.
                err = c.processVmCleanup(vmi)
                if err != nil {
                        return err
                }
        } else {
                err := c.processVmCleanup(vmi)
                if err != nil {
                        return err
                }
        }
        return nil
}

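// migrationTargetExecute runs on the migration target node: it prepares the local
// environment for an incoming migration, or aborts and cleans up when the VMI is
// gone, final, or has stale client connections.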
func (c *VirtualMachineController) migrationTargetExecute(vmi *v1.VirtualMachineInstance, vmiExists bool, domain *api.Domain) error {

        // set to true when preparation of migration target should be aborted.
        shouldAbort := false
        // set to true when VirtualMachineInstance migration target needs to be prepared
        shouldUpdate := false
        // set to true when the current migration target has exited and needs to be cleaned up.
        shouldCleanUp := false

        if vmiExists && vmi.IsRunning() {
                shouldUpdate = true
        }

        if !vmiExists || vmi.DeletionTimestamp != nil {
                shouldAbort = true
        } else if vmi.IsFinal() {
                shouldAbort = true
        } else if c.hasStaleClientConnections(vmi) {
                // if stale client exists, force cleanup.
                // This can happen as a result of a previously
                // failed attempt to migrate the vmi to this node.
                shouldCleanUp = true
        }

        domainExists := domain != nil
        if shouldAbort {
                if domainExists {
                        err := c.processVmDelete(vmi)
                        if err != nil {
                                return err
                        }
                }

                err := c.processVmCleanup(vmi)
                if err != nil {
                        return err
                }
        } else if shouldCleanUp {
                log.Log.Object(vmi).Infof("Stale client for migration target found. Cleaning up.")

                err := c.processVmCleanup(vmi)
                if err != nil {
                        return err
                }

                // if we're still the migration target, we need to keep trying until the migration fails.
                // it's possible we're simply waiting for another target pod to come online.
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)

        } else if shouldUpdate {
                log.Log.Object(vmi).Info("Processing vmi migration target update")

                // prepare the POD for the migration
                err := c.processVmUpdate(vmi, domain)
                if err != nil {
                        return err
                }

                err = c.migrationTargetUpdateVMIStatus(vmi, domain)
                if err != nil {
                        return err
                }
        }

        return nil
}

// Determine if gracefulShutdown has been triggered by virt-launcher
func (c *VirtualMachineController) hasGracefulShutdownTrigger(domain *api.Domain) bool {
        if domain == nil {
                return false
        }
        gracePeriod := domain.Spec.Metadata.KubeVirt.GracePeriod

        return gracePeriod != nil &&
                gracePeriod.MarkedForGracefulShutdown != nil &&
                *gracePeriod.MarkedForGracefulShutdown
}

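// defaultExecute is the main per-VMI reconciliation path: it decides whether the
// domain should be shut down, deleted, cleaned up or updated, performs that
// action, and then updates the VMI status accordingly.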
func (c *VirtualMachineController) defaultExecute(key string,
        vmi *v1.VirtualMachineInstance,
        vmiExists bool,
        domain *api.Domain,
        domainExists bool) error {

        // set to true when domain needs to be shut down.
        shouldShutdown := false
        // set to true when domain needs to be removed from libvirt.
        shouldDelete := false
        // optimization. set to true when processing already deleted domain.
        shouldCleanUp := false
        // set to true when VirtualMachineInstance is active or about to become active.
        shouldUpdate := false
        // set true to ensure that no updates to the current VirtualMachineInstance state will occur
        forceIgnoreSync := false
        // set to true when unrecoverable domain needs to be destroyed non-gracefully.
        forceShutdownIrrecoverable := false

        log.Log.V(3).Infof("Processing event %v", key)

        if vmiExists && domainExists {
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain status: %v, reason: %v", vmi.Status.Phase, domain.Status.Status, domain.Status.Reason)
        } else if vmiExists {
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain does not exist", vmi.Status.Phase)
        } else if domainExists {
                vmiRef := v1.NewVMIReferenceWithUUID(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name, domain.Spec.Metadata.KubeVirt.UID)
                log.Log.Object(vmiRef).Infof("VMI does not exist | Domain status: %v, reason: %v", domain.Status.Status, domain.Status.Reason)
        } else {
                log.Log.Info("VMI does not exist | Domain does not exist")
        }

        domainAlive := domainExists &&
                domain.Status.Status != api.Shutoff &&
                domain.Status.Status != api.Crashed &&
                domain.Status.Status != ""

        domainMigrated := domainExists && domainMigrated(domain)
        forceShutdownIrrecoverable = domainExists && domainPausedFailedPostCopy(domain)

        gracefulShutdown := c.hasGracefulShutdownTrigger(domain)
        if gracefulShutdown && vmi.IsRunning() {
                if domainAlive {
                        log.Log.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
                        shouldShutdown = true
                } else {
                        shouldDelete = true
                }
        }

        // Determine if removal of the VirtualMachineInstance from the cache should result in deletion.
        if !vmiExists {
                switch {
                case domainAlive:
                        // The VirtualMachineInstance is deleted on the cluster, and domain is alive,
                        // then shut down the domain.
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
                        shouldShutdown = true
                case domainExists:
                        // The VirtualMachineInstance is deleted on the cluster, and domain is not alive
                        // then delete the domain.
                        log.Log.Object(vmi).V(3).Info("Deleting domain for deleted VirtualMachineInstance object.")
                        shouldDelete = true
                default:
                        // If neither the domain nor the vmi object exist locally,
                        // then ensure any remaining local ephemeral data is cleaned up.
                        shouldCleanUp = true
                }
        }

        // Determine if VirtualMachineInstance is being deleted.
        if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
                switch {
                case domainAlive:
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
                        shouldShutdown = true
                case domainExists:
                        log.Log.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
                        shouldDelete = true
                default:
                        if vmi.IsFinal() {
                                shouldCleanUp = true
                        }
                }
        }

        // Determine if domain needs to be deleted as a result of VirtualMachineInstance
        // shutting down naturally (guest internal invoked shutdown)
        if domainExists && vmiExists && vmi.IsFinal() {
                log.Log.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
                shouldDelete = true
        } else if !domainExists && vmiExists && vmi.IsFinal() {
                log.Log.Object(vmi).V(3).Info("Cleaning up local data for finalized vmi.")
                shouldCleanUp = true
        }

        // Determine if an active (or about to be active) VirtualMachineInstance should be updated.
        if vmiExists && !vmi.IsFinal() {
                // requiring the phase of the domain and VirtualMachineInstance to be in sync is an
                // optimization that prevents unnecessary re-processing of VMIs during the start flow.
                phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
                if err != nil {
                        return err
                }
                if vmi.Status.Phase == phase {
                        shouldUpdate = true
                }

                if shouldDelay, delay := c.ioErrorRetryManager.ShouldDelay(string(vmi.UID), func() bool {
                        return isIOError(shouldUpdate, domainExists, domain)
                }); shouldDelay {
                        shouldUpdate = false
                        log.Log.Object(vmi).Infof("Delay vm update for %f seconds", delay.Seconds())
                        c.queue.AddAfter(key, delay)
                }
        }

        // NOTE: This must be the last check that occurs before checking the sync booleans!
        //
        // Special logic for domains migrated from a source node.
        // Don't delete/destroy domain until the handoff occurs.
        if domainMigrated {
                // only allow the sync to occur on the domain once we've done
                // the node handoff. Otherwise we potentially lose the fact that
                // the domain migrated because we'll attempt to delete the locally
                // shut off domain during the sync.
                if vmiExists &&
                        !vmi.IsFinal() &&
                        vmi.DeletionTimestamp == nil &&
                        vmi.Status.NodeName != "" &&
                        vmi.Status.NodeName == c.host {

                        // If the domain migrated but the VMI still thinks this node
                        // is the host, force ignore the sync until the VMI's status
                        // is updated to reflect the node the domain migrated to.
                        forceIgnoreSync = true
                }
        }

        var syncErr error

        // Process the VirtualMachineInstance update in this order.
        // * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
        // * Cleanup of already shutdown and Deleted VMIs
        // * Update due to spec change and initial start flow.
        switch {
        case forceIgnoreSync:
                log.Log.Object(vmi).V(3).Info("No update processing required: forced ignore")
        case shouldShutdown:
                log.Log.Object(vmi).V(3).Info("Processing shutdown.")
                syncErr = c.processVmShutdown(vmi, domain)
        case forceShutdownIrrecoverable:
                msg := formatIrrecoverableErrorMessage(domain)
                log.Log.Object(vmi).V(3).Infof("Processing a destruction of an irrecoverable domain - %s.", msg)
                syncErr = c.processVmDestroy(vmi, domain)
                if syncErr == nil {
                        syncErr = &vmiIrrecoverableError{msg}
                }
        case shouldDelete:
                log.Log.Object(vmi).V(3).Info("Processing deletion.")
                syncErr = c.processVmDelete(vmi)
        case shouldCleanUp:
                log.Log.Object(vmi).V(3).Info("Processing local ephemeral data cleanup for shutdown domain.")
                syncErr = c.processVmCleanup(vmi)
        case shouldUpdate:
                log.Log.Object(vmi).V(3).Info("Processing vmi update")
                syncErr = c.processVmUpdate(vmi, domain)
        default:
                log.Log.Object(vmi).V(3).Info("No update processing required")
        }

        if syncErr != nil && !vmi.IsFinal() {
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.SyncFailed.String(), syncErr.Error())

                // `syncErr` will be propagated anyway, and it will be logged in `re-enqueueing`
                // so there is no need to log it twice in hot path without increased verbosity.
                log.Log.Object(vmi).Reason(syncErr).Error("Synchronizing the VirtualMachineInstance failed.")
        }

        // Update the VirtualMachineInstance status, if the VirtualMachineInstance exists
        if vmiExists {
                if err := c.updateVMIStatus(vmi, domain, syncErr); err != nil {
                        log.Log.Object(vmi).Reason(err).Error("Updating the VirtualMachineInstance status failed.")
                        return err
                }
        }

        if syncErr != nil {
                return syncErr
        }

        log.Log.Object(vmi).V(3).Info("Synchronization loop succeeded.")
        return nil

}

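// execute resolves the VMI and domain for a queue key, recovers the VMI UID when
// necessary, and dispatches to the migration target, orphaned source, or default
// reconciliation path.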
func (c *VirtualMachineController) execute(key string) error {
        vmi, vmiExists, err := c.getVMIFromCache(key)
        if err != nil {
                return err
        }

        if !vmiExists {
                c.vmiExpectations.DeleteExpectations(key)
        } else if !c.vmiExpectations.SatisfiedExpectations(key) {
                return nil
        }

        domain, domainExists, domainCachedUID, err := c.getDomainFromCache(key)
        if err != nil {
                return err
        }

        if !vmiExists && string(domainCachedUID) != "" {
                // it's possible to discover the UID from cache even if the domain
                // doesn't technically exist anymore
                vmi.UID = domainCachedUID
                log.Log.Object(vmi).Infof("Using cached UID for vmi found in domain cache")
        }

        // As a last effort, if the UID still can't be determined attempt
        // to retrieve it from the ghost record
        if string(vmi.UID) == "" {
                uid := virtcache.GhostRecordGlobalStore.LastKnownUID(key)
                if uid != "" {
                        log.Log.Object(vmi).V(3).Infof("ghost record cache provided %s as UID", uid)
                        vmi.UID = uid
                }
        }

        if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
                oldVMI := v1.NewVMIReferenceFromNameWithNS(vmi.Namespace, vmi.Name)
                oldVMI.UID = domain.Spec.Metadata.KubeVirt.UID
                expired, initialized, err := c.isLauncherClientUnresponsive(oldVMI)
                if err != nil {
                        return err
                }
                // If we found an outdated domain which is also not alive anymore, clean up
                if !initialized {
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
                        return nil
                } else if expired {
                        log.Log.Object(oldVMI).Infof("Detected stale vmi %s that still needs cleanup before new vmi %s with identical name/namespace can be processed", oldVMI.UID, vmi.UID)
                        err = c.processVmCleanup(oldVMI)
                        if err != nil {
                                return err
                        }
                        // Make sure we re-enqueue the key to ensure this new VMI is processed
                        // after the stale domain is removed
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*5)
                }

                return nil
        }

        // Take different execution paths depending on the state of the migration and the
        // node this is executed on.

        if vmiExists && c.isPreMigrationTarget(vmi) {
                // 1. PRE-MIGRATION TARGET PREPARATION PATH
                //
                // If this node is the target of the vmi's migration, take
                // a different execute path. The target execute path prepares
                // the local environment for the migration, but does not
                // start the VMI
                return c.migrationTargetExecute(vmi, vmiExists, domain)
        } else if vmiExists && c.isOrphanedMigrationSource(vmi) {
                // 3. POST-MIGRATION SOURCE CLEANUP
                //
                // After a migration, the migrated domain still exists in the old
                // source's domain cache. Ensure that any node that isn't currently
                // the target or owner of the VMI handles deleting the domain locally.
                return c.migrationOrphanedSourceNodeExecute(vmi, domainExists)
        }
        return c.defaultExecute(key,
                vmi,
                vmiExists,
                domain,
                domainExists)

}

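// processVmCleanup tears down all local, per-VMI resources: migration proxies,
// the downward metrics server, mounted disks, networking, the launcher client
// connection, and the cached domain entry.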
func (c *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstance) error {
1✔
2130

1✔
2131
        vmiId := string(vmi.UID)
1✔
2132

1✔
2133
        log.Log.Object(vmi).Infof("Performing final local cleanup for vmi with uid %s", vmiId)
1✔
2134

1✔
2135
        c.migrationProxy.StopTargetListener(vmiId)
1✔
2136
        c.migrationProxy.StopSourceListener(vmiId)
1✔
2137

1✔
2138
        c.downwardMetricsManager.StopServer(vmi)
1✔
2139

1✔
2140
        // Unmount container disks and clean up remaining files
1✔
2141
        if err := c.containerDiskMounter.Unmount(vmi); err != nil {
1✔
2142
                return err
×
2143
        }
×
2144

2145
        // UnmountAll does the cleanup on the "best effort" basis: it is
2146
        // safe to pass a nil cgroupManager.
2147
        cgroupManager, _ := getCgroupManager(vmi)
1✔
2148
        if err := c.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
1✔
2149
                return err
×
2150
        }
×
2151

2152
        c.teardownNetwork(vmi)
1✔
2153

1✔
2154
        c.sriovHotplugExecutorPool.Delete(vmi.UID)
1✔
2155

1✔
2156
        // Watch dog file and command client must be the last things removed here
1✔
2157
        if err := c.closeLauncherClient(vmi); err != nil {
1✔
2158
                return err
×
2159
        }
×
2160

2161
        // Remove the domain from cache in the event that we're performing
2162
        // a final cleanup and never received the "DELETE" event. This is
2163
        // possible if the VMI pod goes away before we receive the final domain
2164
        // "DELETE"
2165
        domain := api.NewDomainReferenceFromName(vmi.Namespace, vmi.Name)
1✔
2166
        log.Log.Object(domain).Infof("Removing domain from cache during final cleanup")
1✔
2167
        return c.domainStore.Delete(domain)
1✔
2168
}
2169

2170
func (c *VirtualMachineController) closeLauncherClient(vmi *v1.VirtualMachineInstance) error {

	// UID is required in order to close socket
	if string(vmi.GetUID()) == "" {
		return nil
	}

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists && clientInfo.Client != nil {
		clientInfo.Client.Close()
		close(clientInfo.DomainPipeStopChan)
	}

	virtcache.GhostRecordGlobalStore.Delete(vmi.Namespace, vmi.Name)
	c.launcherClients.Delete(vmi.UID)
	return nil
}

// used by unit tests to add mock clients
func (c *VirtualMachineController) addLauncherClient(vmUID types.UID, info *virtcache.LauncherClientInfo) error {
	c.launcherClients.Store(vmUID, info)
	return nil
}

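// isLauncherClientUnresponsive reports whether the virt-launcher command socket of the
// given VMI is unresponsive. The second return value indicates whether the client is
// considered initialized; a missing socket is tolerated for up to three minutes while
// the launcher pod directory is still present.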
func (c *VirtualMachineController) isLauncherClientUnresponsive(vmi *v1.VirtualMachineInstance) (unresponsive bool, initialized bool, err error) {
	var socketFile string

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists {
		if clientInfo.Ready {
			// use cached socket if we previously established a connection
			socketFile = clientInfo.SocketFile
		} else {
			socketFile, err = cmdclient.FindSocketOnHost(vmi)
			if err != nil {
				// socket does not exist, but let's see if the pod is still there
				if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
					// no pod means that waiting for it to initialize makes no sense
					return true, true, nil
				}
				// pod is still there, if there is no socket let's wait for it to become ready
				if clientInfo.NotInitializedSince.Before(time.Now().Add(-3 * time.Minute)) {
					return true, true, nil
				}
				return false, false, nil
			}
			clientInfo.Ready = true
			clientInfo.SocketFile = socketFile
		}
	} else {
		clientInfo := &virtcache.LauncherClientInfo{
			NotInitializedSince: time.Now(),
			Ready:               false,
		}
		c.launcherClients.Store(vmi.UID, clientInfo)
		// attempt to find the socket if the established connection doesn't currently exist.
		socketFile, err = cmdclient.FindSocketOnHost(vmi)
		// no socket file, no VMI, so it's unresponsive
		if err != nil {
			// socket does not exist, but let's see if the pod is still there
			if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
				// no pod means that waiting for it to initialize makes no sense
				return true, true, nil
			}
			return false, false, nil
		}
		clientInfo.Ready = true
		clientInfo.SocketFile = socketFile
	}
	return cmdclient.IsSocketUnresponsive(socketFile), true, nil
}

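// getLauncherClient returns the cached launcher client for the VMI or, if none exists,
// establishes a new connection to the launcher socket, registers a ghost record and
// starts the domain notify pipe.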
func (c *VirtualMachineController) getLauncherClient(vmi *v1.VirtualMachineInstance) (cmdclient.LauncherClient, error) {
	var err error

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists && clientInfo.Client != nil {
		return clientInfo.Client, nil
	}

	socketFile, err := cmdclient.FindSocketOnHost(vmi)
	if err != nil {
		return nil, err
	}

	err = virtcache.GhostRecordGlobalStore.Add(vmi.Namespace, vmi.Name, socketFile, vmi.UID)
	if err != nil {
		return nil, err
	}

	client, err := cmdclient.NewClient(socketFile)
	if err != nil {
		return nil, err
	}

	domainPipeStopChan := make(chan struct{})
	// we pipe the domain socket into the VMI's filesystem
	err = c.startDomainNotifyPipe(domainPipeStopChan, vmi)
	if err != nil {
		client.Close()
		close(domainPipeStopChan)
		return nil, err
	}

	c.launcherClients.Store(vmi.UID, &virtcache.LauncherClientInfo{
		Client:              client,
		SocketFile:          socketFile,
		DomainPipeStopChan:  domainPipeStopChan,
		NotInitializedSince: time.Now(),
		Ready:               true,
	})

	return client, nil
}

func (c *VirtualMachineController) processVmDestroy(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
	tryGracefully := false
	return c.helperVmShutdown(vmi, domain, tryGracefully)
}

func (c *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
	tryGracefully := true
	return c.helperVmShutdown(vmi, domain, tryGracefully)
}

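// helperVmShutdown shuts a VMI down, either gracefully (while the domain advertises a
// grace period that has not yet expired) or by killing the domain outright.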
func (c *VirtualMachineController) helperVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, tryGracefully bool) error {

	// Only attempt to shutdown/destroy if we still have a connection established with the pod.
	client, err := c.getVerifiedLauncherClient(vmi)
	if err != nil {
		return err
	}

	if domainHasGracePeriod(domain) && tryGracefully {
		if expired, timeLeft := c.hasGracePeriodExpired(domain); !expired {
			return c.handleVMIShutdown(vmi, domain, client, timeLeft)
		}
		log.Log.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
	} else {
		log.Log.Object(vmi).Infof("Graceful shutdown not set, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
	}

	err = client.KillVirtualMachine(vmi)
	if err != nil && !cmdclient.IsDisconnected(err) {
		// Only report err if it wasn't the result of a disconnect.
		//
		// Both virt-launcher and virt-handler are trying to destroy
		// the VirtualMachineInstance at the same time. It's possible the client may get
		// disconnected during the kill request, which shouldn't be
		// considered an error.
		return err
	}

	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMIStopping)

	return nil
}

func (c *VirtualMachineController) handleVMIShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, client cmdclient.LauncherClient, timeLeft int64) error {
	if domain.Status.Status != api.Shutdown {
		return c.shutdownVMI(vmi, client, timeLeft)
	}
	log.Log.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
	return nil
}

func (c *VirtualMachineController) shutdownVMI(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient, timeLeft int64) error {
	err := client.ShutdownVirtualMachine(vmi)
	if err != nil && !cmdclient.IsDisconnected(err) {
		// Only report err if it wasn't the result of a disconnect.
		//
		// Both virt-launcher and virt-handler are trying to destroy
		// the VirtualMachineInstance at the same time. It's possible the client may get
		// disconnected during the kill request, which shouldn't be
		// considered an error.
		return err
	}

	log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())

	// Make sure that we don't hot-loop in case we send the first domain notification
	if timeLeft == -1 {
		timeLeft = 5
		if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
			timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
		}
	}
	// In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
	// That's important since a booting OS can miss ACPI signals
	if timeLeft > 5 {
		timeLeft = 5
	}

	// pending graceful shutdown.
	c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), VMIGracefulShutdown)
	return nil
}

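// processVmDelete signals deletion of the VMI's domain through the launcher client,
// if a connection to the pod can still be established.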
func (c *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance) error {

	// Only attempt to shutdown/destroy if we still have a connection established with the pod.
	client, err := c.getVerifiedLauncherClient(vmi)

	// If the pod has been torn down, we know the VirtualMachineInstance is down.
	if err == nil {

		log.Log.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())

		// pending deletion.
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMISignalDeletion)

		err = client.DeleteDomain(vmi)
		if err != nil && !cmdclient.IsDisconnected(err) {
			// Only report err if it wasn't the result of a disconnect.
			//
			// Both virt-launcher and virt-handler are trying to destroy
			// the VirtualMachineInstance at the same time. It's possible the client may get
			// disconnected during the kill request, which shouldn't be
			// considered an error.
			return err
		}
	}

	return nil

}

func (c *VirtualMachineController) hasStaleClientConnections(vmi *v1.VirtualMachineInstance) bool {
	_, err := c.getVerifiedLauncherClient(vmi)
	if err == nil {
		// current client connection is good.
		return false
	}

	// no connection, but ghost file exists.
	if virtcache.GhostRecordGlobalStore.Exists(vmi.Namespace, vmi.Name) {
		return true
	}

	return false

}

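// getVerifiedLauncherClient returns a launcher client for the VMI and verifies the
// connection with a ping before handing it out.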
func (c *VirtualMachineController) getVerifiedLauncherClient(vmi *v1.VirtualMachineInstance) (client cmdclient.LauncherClient, err error) {
	client, err = c.getLauncherClient(vmi)
	if err != nil {
		return
	}

	// Verify connectivity.
	// It's possible the pod has already been torn down along with the VirtualMachineInstance.
	err = client.Ping()
	return
}

func (c *VirtualMachineController) isOrphanedMigrationSource(vmi *v1.VirtualMachineInstance) bool {
	nodeName, ok := vmi.Labels[v1.NodeNameLabel]

	if ok && nodeName != "" && nodeName != c.host {
		return true
	}

	return false
}

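// isPreMigrationTarget reports whether this node is the declared migration target of
// the VMI while the VMI is still assigned to a different source node.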
func (c *VirtualMachineController) isPreMigrationTarget(vmi *v1.VirtualMachineInstance) bool {

	migrationTargetNodeName, ok := vmi.Labels[v1.MigrationTargetNodeNameLabel]

	if ok &&
		migrationTargetNodeName != "" &&
		migrationTargetNodeName != vmi.Status.NodeName &&
		migrationTargetNodeName == c.host {
		return true
	}

	return false
}

func (c *VirtualMachineController) checkNetworkInterfacesForMigration(vmi *v1.VirtualMachineInstance) error {
	return netvmispec.VerifyVMIMigratable(vmi, c.clusterConfig.GetNetworkBindings())
}

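// checkVolumesForMigration determines whether a live migration must be a block
// migration, and returns an error for volume configurations that cannot be
// migrated at all (non-shared PVCs or host disks).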
func (c *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachineInstance) (blockMigrate bool, err error) {
	volumeStatusMap := make(map[string]v1.VolumeStatus)

	for _, volumeStatus := range vmi.Status.VolumeStatus {
		volumeStatusMap[volumeStatus.Name] = volumeStatus
	}

	if len(vmi.Status.MigratedVolumes) > 0 {
		blockMigrate = true
	}

	filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)

	// Check if all VMI volumes can be shared between the source and the destination
	// of a live migration. blockMigrate will be returned as false only if all volumes
	// are shared and the VMI has no local disks.
	// Some combinations of disks make the VMI not suitable for live migration;
	// a relevant error will be returned in this case.
	for _, volume := range vmi.Spec.Volumes {
		volSrc := volume.VolumeSource
		if volSrc.PersistentVolumeClaim != nil || volSrc.DataVolume != nil {

			var claimName string
			if volSrc.PersistentVolumeClaim != nil {
				claimName = volSrc.PersistentVolumeClaim.ClaimName
			} else {
				claimName = volSrc.DataVolume.Name
			}

			volumeStatus, ok := volumeStatusMap[volume.Name]

			if !ok || volumeStatus.PersistentVolumeClaimInfo == nil {
				return true, fmt.Errorf("cannot migrate VMI: Unable to determine if PVC %v is shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
			} else if !pvctypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !pvctypes.IsMigratedVolume(volumeStatus.Name, vmi) {
				return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
			}

		} else if volSrc.HostDisk != nil {
			shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
			if !shared {
				return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
			}
		} else {
			if _, ok := filesystems[volume.Name]; ok {
				log.Log.Object(vmi).Infof("Volume %s is shared with virtiofs, allow live migration", volume.Name)
				continue
			}

			isVolumeUsedByReadOnlyDisk := false
			for _, disk := range vmi.Spec.Domain.Devices.Disks {
				if isReadOnlyDisk(&disk) && disk.Name == volume.Name {
					isVolumeUsedByReadOnlyDisk = true
					break
				}
			}

			if isVolumeUsedByReadOnlyDisk {
				continue
			}

			if vmi.Status.MigrationMethod == "" || vmi.Status.MigrationMethod == v1.LiveMigration {
				log.Log.Object(vmi).Infof("migration is block migration because of %s volume", volume.Name)
			}
			blockMigrate = true
		}
	}
	return
}

func (c *VirtualMachineController) isVMIPausedDuringMigration(vmi *v1.VirtualMachineInstance) bool {
	return vmi.Status.MigrationState != nil &&
		vmi.Status.MigrationState.Mode == v1.MigrationPaused &&
		!vmi.Status.MigrationState.Completed
}

func (c *VirtualMachineController) isMigrationSource(vmi *v1.VirtualMachineInstance) bool {

	if vmi.Status.MigrationState != nil &&
		vmi.Status.MigrationState.SourceNode == c.host &&
		vmi.Status.MigrationState.TargetNodeAddress != "" &&
		!vmi.Status.MigrationState.Completed {

		return true
	}
	return false

}

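// handleTargetMigrationProxy starts the target migration proxy listener, proxying the
// libvirt connection socket and the per-port migration sockets of the destination pod.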
func (c *VirtualMachineController) handleTargetMigrationProxy(vmi *v1.VirtualMachineInstance) error {
	// handle starting/stopping target migration proxy
	migrationTargetSockets := []string{}
	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}

	// Get the libvirt connection socket file on the destination pod.
	socketFile := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "libvirt/virtqemud-sock"), res.Pid())
	// the migration-proxy is no longer shared via host mount, so we
	// pass in the virt-launcher's baseDir to reach the unix sockets.
	baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
	migrationTargetSockets = append(migrationTargetSockets, socketFile)

	migrationPortsRange := migrationproxy.GetMigrationPortsList(vmi.IsBlockMigration())
	for _, port := range migrationPortsRange {
		key := migrationproxy.ConstructProxyKey(string(vmi.UID), port)
		// a proxy between the target direct qemu channel and the connector in the destination pod
		destSocketFile := migrationproxy.SourceUnixFile(baseDir, key)
		migrationTargetSockets = append(migrationTargetSockets, destSocketFile)
	}
	err = c.migrationProxy.StartTargetListener(string(vmi.UID), migrationTargetSockets)
	if err != nil {
		return err
	}
	return nil
}

func (c *VirtualMachineController) handlePostMigrationProxyCleanup(vmi *v1.VirtualMachineInstance) {
	if vmi.Status.MigrationState == nil || vmi.Status.MigrationState.Completed || vmi.Status.MigrationState.Failed {
		c.migrationProxy.StopTargetListener(string(vmi.UID))
		c.migrationProxy.StopSourceListener(string(vmi.UID))
	}
}

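// handleSourceMigrationProxy stops any target listener for the VMI and starts the
// source proxy listener pointing at the migration target's node address and ports.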
func (c *VirtualMachineController) handleSourceMigrationProxy(vmi *v1.VirtualMachineInstance) error {

	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}
	// the migration-proxy is no longer shared via host mount, so we
	// pass in the virt-launcher's baseDir to reach the unix sockets.
	baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
	c.migrationProxy.StopTargetListener(string(vmi.UID))
	if vmi.Status.MigrationState.TargetDirectMigrationNodePorts == nil {
		msg := "No migration proxy has been created for this vmi"
		return fmt.Errorf("%s", msg)
	}
	err = c.migrationProxy.StartSourceListener(
		string(vmi.UID),
		vmi.Status.MigrationState.TargetNodeAddress,
		vmi.Status.MigrationState.TargetDirectMigrationNodePorts,
		baseDir,
	)
	if err != nil {
		return err
	}

	return nil
}

func (c *VirtualMachineController) getLauncherClientInfo(vmi *v1.VirtualMachineInstance) *virtcache.LauncherClientInfo {
	launcherInfo, exists := c.launcherClients.Load(vmi.UID)
	if !exists {
		return nil
	}
	return launcherInfo
}

func isMigrationInProgress(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
	var domainMigrationMetadata *api.MigrationMetadata

	if domain == nil ||
		vmi.Status.MigrationState == nil ||
		domain.Spec.Metadata.KubeVirt.Migration == nil {
		return false
	}
	domainMigrationMetadata = domain.Spec.Metadata.KubeVirt.Migration

	if vmi.Status.MigrationState.MigrationUID == domainMigrationMetadata.UID &&
		domainMigrationMetadata.StartTimestamp != nil {
		return true
	}
	return false
}

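// vmUpdateHelperMigrationSource drives the source side of a live migration: it handles
// abort requests, sets up the source migration proxy and instructs virt-launcher to
// start the migration with the configured options.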
func (c *VirtualMachineController) vmUpdateHelperMigrationSource(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {

	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	if origVMI.Status.MigrationState.AbortRequested {
		err = c.handleMigrationAbort(origVMI, client)
		if err != nil {
			return err
		}
	} else {
		if isMigrationInProgress(origVMI, domain) {
			// we already started this migration, no need to rerun it
			log.DefaultLogger().Errorf("migration %s has already been started", origVMI.Status.MigrationState.MigrationUID)
			return nil
		}

		err = c.handleSourceMigrationProxy(origVMI)
		if err != nil {
			return fmt.Errorf("failed to handle migration proxy: %v", err)
		}

		migrationConfiguration := origVMI.Status.MigrationState.MigrationConfiguration
		if migrationConfiguration == nil {
			migrationConfiguration = c.clusterConfig.GetMigrationConfiguration()
		}

		options := &cmdclient.MigrationOptions{
			Bandwidth:               *migrationConfiguration.BandwidthPerMigration,
			ProgressTimeout:         *migrationConfiguration.ProgressTimeout,
			CompletionTimeoutPerGiB: *migrationConfiguration.CompletionTimeoutPerGiB,
			UnsafeMigration:         *migrationConfiguration.UnsafeMigrationOverride,
			AllowAutoConverge:       *migrationConfiguration.AllowAutoConverge,
			AllowPostCopy:           *migrationConfiguration.AllowPostCopy,
			AllowWorkloadDisruption: *migrationConfiguration.AllowWorkloadDisruption,
		}

		configureParallelMigrationThreads(options, origVMI)

		marshalledOptions, err := json.Marshal(options)
		if err != nil {
			log.Log.Object(origVMI).Warning("failed to marshal matched migration options")
		} else {
			log.Log.Object(origVMI).Infof("migration options matched for vmi %s: %s", origVMI.Name, string(marshalledOptions))
		}

		vmi := origVMI.DeepCopy()
		err = hostdisk.ReplacePVCByHostDisk(vmi)
		if err != nil {
			return err
		}

		err = client.MigrateVirtualMachine(vmi, options)
		if err != nil {
			return err
		}
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIMigrating)
	}
	return nil
}

func replaceMigratedVolumesStatus(vmi *v1.VirtualMachineInstance) {
	replaceVolsStatus := make(map[string]*v1.PersistentVolumeClaimInfo)
	for _, v := range vmi.Status.MigratedVolumes {
		replaceVolsStatus[v.SourcePVCInfo.ClaimName] = v.DestinationPVCInfo
	}
	for i, v := range vmi.Status.VolumeStatus {
		if v.PersistentVolumeClaimInfo == nil {
			continue
		}
		if status, ok := replaceVolsStatus[v.PersistentVolumeClaimInfo.ClaimName]; ok {
			vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo = status
		}
	}

}

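// vmUpdateHelperMigrationTarget prepares this node as a migration target: it mounts
// container and hotplug disks, configures networking and device ownership, syncs the
// target domain via virt-launcher and starts the target migration proxy.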
func (c *VirtualMachineController) vmUpdateHelperMigrationTarget(origVMI *v1.VirtualMachineInstance) error {
	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	vmi := origVMI.DeepCopy()

	if migrations.MigrationFailed(vmi) {
		// if the migration failed, signal the target pod it's okay to exit
		err = client.SignalTargetPodCleanup(vmi)
		if err != nil {
			return err
		}
		log.Log.Object(vmi).Infof("Signaled target pod for failed migration to clean up")
		// nothing left to do here if the migration failed.
		// Re-enqueue to trigger handler final cleanup
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
		return nil
	} else if migrations.IsMigrating(vmi) {
		// If the migration has already started,
		// then there's nothing left to prepare on the target side
		return nil
	}
	// The VolumeStatus is used to retrieve additional information for the volume handling.
	// For example, for a filesystem PVC, the information is used to create a correctly sized image.
	// In the case of migrated volumes, we need to replace the original volume information with the
	// destination volume properties.
	replaceMigratedVolumesStatus(vmi)
	err = hostdisk.ReplacePVCByHostDisk(vmi)
	if err != nil {
		return err
	}

	// give containerDisks some time to become ready before throwing errors on retries
	info := c.getLauncherClientInfo(vmi)
	if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
		if err != nil {
			return err
		}
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return nil
	}

	// Mount container disks
	err = c.containerDiskMounter.MountAndVerify(vmi)
	if err != nil {
		return err
	}

	// Mount hotplug disks
	if attachmentPodUID := vmi.Status.MigrationState.TargetAttachmentPodUID; attachmentPodUID != types.UID("") {
		cgroupManager, err := getCgroupManager(vmi)
		if err != nil {
			return err
		}
		if err := c.hotplugVolumeMounter.MountFromPod(vmi, attachmentPodUID, cgroupManager); err != nil {
			return fmt.Errorf("failed to mount hotplug volumes: %v", err)
		}
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForMigrationTarget(vmi), isolationRes.Pid()); err != nil {
		return fmt.Errorf("failed to configure vmi network for migration target: %w", err)
	}

	virtLauncherRootMount, err := isolationRes.MountRoot()
	if err != nil {
		return err
	}

	err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
	if err != nil {
		return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
	}
	if virtutil.IsAutoAttachVSOCK(vmi) {
		if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
			return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
		}
	}

	lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
	minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()

	// initialize disks images for empty PVC
	hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
	err = hostDiskCreator.Create(vmi)
	if err != nil {
		return fmt.Errorf("preparing host-disks failed: %v", err)
	}

	if virtutil.IsNonRootVMI(vmi) {
		if err := c.nonRootSetup(origVMI); err != nil {
			return err
		}
	}

	options := virtualMachineOptions(nil, 0, nil, c.capabilities, c.clusterConfig)
	options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())

	if err := client.SyncMigrationTarget(vmi, options); err != nil {
		return fmt.Errorf("syncing migration target failed: %v", err)
	}
	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), VMIMigrationTargetPrepared)

	err = c.handleTargetMigrationProxy(vmi)
	if err != nil {
		return fmt.Errorf("failed to handle post sync migration proxy: %v", err)
	}
	return nil
}

func (c *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstance) error {
	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}
	var Mask unix.CPUSet
	Mask.Zero()
	qemuprocess, err := res.GetQEMUProcess()
	if err != nil {
		return err
	}
	qemupid := qemuprocess.Pid()
	if qemupid == -1 {
		return nil
	}

	pitpid, err := res.KvmPitPid()
	if err != nil {
		return err
	}
	if pitpid == -1 {
		return nil
	}
	if vmi.IsRealtimeEnabled() {
		param := schedParam{priority: 2}
		err = schedSetScheduler(pitpid, schedFIFO, param)
		if err != nil {
			return fmt.Errorf("failed to set FIFO scheduling and priority 2 for thread %d: %w", pitpid, err)
		}
	}
	vcpus, err := getVCPUThreadIDs(qemupid)
	if err != nil {
		return err
	}
	vpid, ok := vcpus["0"]
	if !ok {
		return nil
	}
	vcpupid, err := strconv.Atoi(vpid)
	if err != nil {
		return err
	}
	err = unix.SchedGetaffinity(vcpupid, &Mask)
	if err != nil {
		return err
	}
	return unix.SchedSetaffinity(pitpid, &Mask)
}

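// configureHousekeepingCgroup creates a "housekeeping" cpuset child cgroup pinned to
// the domain's emulator CPU set and moves all non-vCPU threads of the VMI into it.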
func (c *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
	if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
		log.Log.Reason(err).Error("CreateChildCgroup")
		return err
	}

	key := controller.VirtualMachineInstanceKey(vmi)
	domain, domainExists, _, err := c.getDomainFromCache(key)
	if err != nil {
		return err
	}
	// bail out if domain does not exist
	if !domainExists {
		return nil
	}

	if domain.Spec.CPUTune == nil || domain.Spec.CPUTune.EmulatorPin == nil {
		return nil
	}

	hkcpus, err := hardware.ParseCPUSetLine(domain.Spec.CPUTune.EmulatorPin.CPUSet, 100)
	if err != nil {
		return err
	}

	log.Log.V(3).Object(vmi).Infof("housekeeping cpu: %v", hkcpus)

	err = cgroupManager.SetCpuSet("housekeeping", hkcpus)
	if err != nil {
		return err
	}

	tids, err := cgroupManager.GetCgroupThreads()
	if err != nil {
		return err
	}
	hktids := make([]int, 0, 10)

	for _, tid := range tids {
		proc, err := ps.FindProcess(tid)
		if err != nil {
			log.Log.Object(vmi).Errorf("Failure to find process: %s", err.Error())
			return err
		}
		if proc == nil {
			return fmt.Errorf("failed to find process with tid: %d", tid)
		}
		comm := proc.Executable()
		if strings.Contains(comm, "CPU ") && strings.Contains(comm, "KVM") {
			continue
		}
		hktids = append(hktids, tid)
	}

	log.Log.V(3).Object(vmi).Infof("hk thread ids: %v", hktids)
	for _, tid := range hktids {
		err = cgroupManager.AttachTID("cpuset", "housekeeping", tid)
		if err != nil {
			log.Log.Object(vmi).Errorf("Error attaching tid %d: %v", tid, err.Error())
			return err
		}
	}

	return nil
}

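// vmUpdateHelperDefault handles the regular (non-migration) sync path: it prepares
// host disks and cgroups, runs the state-specific start/run handling and then syncs
// the VirtualMachineInstance with virt-launcher.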
func (c *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMachineInstance, domainExists bool) error {
	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	vmi := origVMI.DeepCopy()
	preallocatedVolumes := c.getPreallocatedVolumes(vmi)

	err = hostdisk.ReplacePVCByHostDisk(vmi)
	if err != nil {
		return err
	}

	cgroupManager, err := getCgroupManager(vmi)
	if err != nil {
		return err
	}

	var errorTolerantFeaturesError []error
	readyToProceed, err := c.handleVMIState(vmi, cgroupManager, &errorTolerantFeaturesError)
	if err != nil {
		return err
	}

	if !readyToProceed {
		return nil
	}

	// Synchronize the VirtualMachineInstance state
	err = c.syncVirtualMachine(client, vmi, preallocatedVolumes)
	if err != nil {
		return err
	}

	// Post-sync housekeeping
	err = c.handleHousekeeping(vmi, cgroupManager, domainExists)
	if err != nil {
		return err
	}

	return errors.NewAggregate(errorTolerantFeaturesError)
}

// handleVMIState decides whether to call handleRunningVMI or handleStartingVMI based on the VMI's state.
func (c *VirtualMachineController) handleVMIState(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) (bool, error) {
	if vmi.IsRunning() {
		return true, c.handleRunningVMI(vmi, cgroupManager, errorTolerantFeaturesError)
	} else if !vmi.IsFinal() {
		return c.handleStartingVMI(vmi, cgroupManager)
	}
	return true, nil
}

// handleRunningVMI contains the logic specific to running VMs (hotplugging in the running state, metrics, network updates).
func (c *VirtualMachineController) handleRunningVMI(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) error {
	if err := c.hotplugSriovInterfaces(vmi); err != nil {
		log.Log.Object(vmi).Error(err.Error())
	}

	if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
		return err
	}

	if err := c.getMemoryDump(vmi); err != nil {
		return err
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.downwardMetricsManager.StartServer(vmi, isolationRes.Pid()); err != nil {
		return err
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForLiveUpdate(vmi), isolationRes.Pid()); err != nil {
		log.Log.Object(vmi).Error(err.Error())
		c.recorder.Event(vmi, k8sv1.EventTypeWarning, "NicHotplug", err.Error())
		*errorTolerantFeaturesError = append(*errorTolerantFeaturesError, err)
	}

	return nil
}

// handleStartingVMI contains the logic for starting VMs (container disks, initial network setup, device ownership).
func (c *VirtualMachineController) handleStartingVMI(
	vmi *v1.VirtualMachineInstance,
	cgroupManager cgroup.Manager,
) (bool, error) {
	// give containerDisks some time to become ready before throwing errors on retries
	info := c.getLauncherClientInfo(vmi)
	if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
		if err != nil {
			return false, err
		}
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return false, nil
	}

	var err error
	err = c.containerDiskMounter.MountAndVerify(vmi)
	if err != nil {
		return false, err
	}

	if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
		return false, err
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return false, fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForVMStartup(vmi), isolationRes.Pid()); err != nil {
		return false, fmt.Errorf("failed to configure vmi network: %w", err)
	}

	if err := c.setupDevicesOwnerships(vmi, isolationRes); err != nil {
		return false, err
	}

	if err := c.adjustResources(vmi); err != nil {
		return false, err
	}

	if c.shouldWaitForSEVAttestation(vmi) {
		return false, nil
	}

	return true, nil
}

func (c *VirtualMachineController) adjustResources(vmi *v1.VirtualMachineInstance) error {
	err := c.podIsolationDetector.AdjustResources(vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
	if err != nil {
		return fmt.Errorf("failed to adjust resources: %v", err)
	}
	return nil
}

func (c *VirtualMachineController) shouldWaitForSEVAttestation(vmi *v1.VirtualMachineInstance) bool {
	if util.IsSEVAttestationRequested(vmi) {
		sev := vmi.Spec.Domain.LaunchSecurity.SEV
		// Wait for the session parameters to be provided
		return sev.Session == "" || sev.DHCert == ""
	}
	return false
}

func (c *VirtualMachineController) setupDevicesOwnerships(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
	virtLauncherRootMount, err := isolationRes.MountRoot()
	if err != nil {
		return err
	}

	err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
	if err != nil {
		return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
	}

	if virtutil.IsAutoAttachVSOCK(vmi) {
		if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
			return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
		}
	}

	if err := c.configureHostDisks(vmi, isolationRes, virtLauncherRootMount); err != nil {
		return err
	}

	if err := c.configureSEVDeviceOwnership(vmi, isolationRes, virtLauncherRootMount); err != nil {
		return err
	}

	if virtutil.IsNonRootVMI(vmi) {
		if err := c.nonRootSetup(vmi); err != nil {
			return err
		}
	}

	if err := c.configureVirtioFS(vmi, isolationRes); err != nil {
		return err
	}

	return nil
}

func (c *VirtualMachineController) configureHostDisks(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
	lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
	minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()

	hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
	if err := hostDiskCreator.Create(vmi); err != nil {
		return fmt.Errorf("preparing host-disks failed: %v", err)
	}
	return nil
}

func (c *VirtualMachineController) configureSEVDeviceOwnership(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
	if virtutil.IsSEVVMI(vmi) {
		sevDevice, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", "sev"))
		if err != nil {
			return err
		}
		if err := diskutils.DefaultOwnershipManager.SetFileOwnership(sevDevice); err != nil {
			return fmt.Errorf("failed to set SEV device owner: %v", err)
		}
	}
	return nil
}

func (c *VirtualMachineController) configureVirtioFS(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
	for _, fs := range vmi.Spec.Domain.Devices.Filesystems {
		socketPath, err := isolation.SafeJoin(isolationRes, virtiofs.VirtioFSSocketPath(fs.Name))
		if err != nil {
			return err
		}
		if err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath); err != nil {
			return err
		}
	}
	return nil
}

func (c *VirtualMachineController) syncVirtualMachine(client cmdclient.LauncherClient, vmi *v1.VirtualMachineInstance, preallocatedVolumes []string) error {
	smbios := c.clusterConfig.GetSMBIOS()
	period := c.clusterConfig.GetMemBalloonStatsPeriod()

	options := virtualMachineOptions(smbios, period, preallocatedVolumes, c.capabilities, c.clusterConfig)
	options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())

	err := client.SyncVirtualMachine(vmi, options)
	if err != nil {
		if strings.Contains(err.Error(), "EFI OVMF rom missing") {
			return &virtLauncherCriticalSecurebootError{fmt.Sprintf("mismatch of Secure Boot setting and bootloaders: %v", err)}
		}
	}

	return err
}

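// handleHousekeeping performs post-sync housekeeping: dedicated-CPU and realtime
// tuning, the "domain defined" event for newly created domains, and unmounting of
// hotplug volumes that are no longer in use.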
func (c *VirtualMachineController) handleHousekeeping(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, domainExists bool) error {

	if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
		err := c.configureHousekeepingCgroup(vmi, cgroupManager)
		if err != nil {
			return err
		}
	}

	// Configure vcpu scheduler for realtime workloads and affine PIT thread for dedicated CPU
	if vmi.IsRealtimeEnabled() && !vmi.IsRunning() && !vmi.IsFinal() {
		log.Log.Object(vmi).Info("Configuring vcpus for real time workloads")
		if err := c.configureVCPUScheduler(vmi); err != nil {
			return err
		}
	}
	if vmi.IsCPUDedicated() && !vmi.IsRunning() && !vmi.IsFinal() {
		log.Log.V(3).Object(vmi).Info("Affining PIT thread")
		if err := c.affinePitThread(vmi); err != nil {
			return err
		}
	}
	if !domainExists {
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Created.String(), VMIDefined)
	}

	if vmi.IsRunning() {
		// Unmount any disks no longer mounted
		if err := c.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
			return err
		}
	}
	return nil
}

func (c *VirtualMachineController) getPreallocatedVolumes(vmi *v1.VirtualMachineInstance) []string {
	preallocatedVolumes := []string{}
	for _, volumeStatus := range vmi.Status.VolumeStatus {
		if volumeStatus.PersistentVolumeClaimInfo != nil && volumeStatus.PersistentVolumeClaimInfo.Preallocated {
			preallocatedVolumes = append(preallocatedVolumes, volumeStatus.Name)
		}
	}
	return preallocatedVolumes
}

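// hotplugSriovInterfaces requests a hot-plug of SR-IOV host devices whenever the SR-IOV
// interfaces attached to the domain do not yet match the interfaces already plugged by
// Multus, rate-limited per VMI through the executor pool.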
func (c *VirtualMachineController) hotplugSriovInterfaces(vmi *v1.VirtualMachineInstance) error {
	sriovSpecInterfaces := netvmispec.FilterSRIOVInterfaces(vmi.Spec.Domain.Devices.Interfaces)

	sriovSpecIfacesNames := netvmispec.IndexInterfaceSpecByName(sriovSpecInterfaces)
	attachedSriovStatusIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
		_, exist := sriovSpecIfacesNames[iface.Name]
		return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceDomain) &&
			netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
	})

	desiredSriovMultusPluggedIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
		_, exist := sriovSpecIfacesNames[iface.Name]
		return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
	})

	if len(desiredSriovMultusPluggedIfaces) == len(attachedSriovStatusIfaces) {
		c.sriovHotplugExecutorPool.Delete(vmi.UID)
		return nil
	}

	rateLimitedExecutor := c.sriovHotplugExecutorPool.LoadOrStore(vmi.UID)
	return rateLimitedExecutor.Exec(func() error {
		return c.hotplugSriovInterfacesCommand(vmi)
	})
}

func (c *VirtualMachineController) hotplugSriovInterfacesCommand(vmi *v1.VirtualMachineInstance) error {
	const errMsgPrefix = "failed to hot-plug SR-IOV interfaces"

	client, err := c.getVerifiedLauncherClient(vmi)
	if err != nil {
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
		c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), err.Error())
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	log.Log.V(3).Object(vmi).Info("sending hot-plug host-devices command")
	if err := client.HotplugHostDevices(vmi); err != nil {
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	return nil
}

func memoryDumpPath(volumeStatus v1.VolumeStatus) string {
	target := hotplugdisk.GetVolumeMountDir(volumeStatus.Name)
	dumpPath := filepath.Join(target, volumeStatus.MemoryDumpVolume.TargetFileName)
	return dumpPath
}

func (c *VirtualMachineController) getMemoryDump(vmi *v1.VirtualMachineInstance) error {
	const errMsgPrefix = "failed to get memory dump"

	for _, volumeStatus := range vmi.Status.VolumeStatus {
		if volumeStatus.MemoryDumpVolume == nil || volumeStatus.Phase != v1.MemoryDumpVolumeInProgress {
			continue
		}
		client, err := c.getVerifiedLauncherClient(vmi)
		if err != nil {
			return fmt.Errorf("%s: %v", errMsgPrefix, err)
		}

		log.Log.V(3).Object(vmi).Info("sending memory dump command")
		err = client.VirtualMachineMemoryDump(vmi, memoryDumpPath(volumeStatus))
		if err != nil {
			return fmt.Errorf("%s: %v", errMsgPrefix, err)
		}
	}

	return nil
}

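// processVmUpdate dispatches a VMI update to the migration-target, migration-source or
// default sync path, after making sure the launcher client is initialized and responsive.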
func (c *VirtualMachineController) processVmUpdate(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {

	isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)
	if err != nil {
		return err
	}
	if !isInitialized {
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return nil
	} else if isUnresponsive {
		return goerror.New("can not update a VirtualMachineInstance with unresponsive command server")
	}

	c.handlePostMigrationProxyCleanup(vmi)

	if c.isPreMigrationTarget(vmi) {
		return c.vmUpdateHelperMigrationTarget(vmi)
	} else if c.isMigrationSource(vmi) {
		return c.vmUpdateHelperMigrationSource(vmi, domain)
	} else {
		return c.vmUpdateHelperDefault(vmi, domain != nil)
	}
}

func (c *VirtualMachineController) setVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) error {
	phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
	if err != nil {
		return err
	}
	vmi.Status.Phase = phase
	return nil
}

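// calculateVmPhaseForStatusReason derives the VMI phase from the current domain state,
// or from the launcher client state when no domain exists.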
func (c *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) (v1.VirtualMachineInstancePhase, error) {

	if domain == nil {
		switch {
		case vmi.IsScheduled():
			isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)

			if err != nil {
				return vmi.Status.Phase, err
			}
			if !isInitialized {
				c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
				return vmi.Status.Phase, err
			} else if isUnresponsive {
				// virt-launcher is gone and VirtualMachineInstance never transitioned
				// from scheduled to Running.
				return v1.Failed, nil
			}
			return v1.Scheduled, nil
		case !vmi.IsRunning() && !vmi.IsFinal():
			return v1.Scheduled, nil
		case !vmi.IsFinal():
			// That is unexpected. We should not be able to delete a VirtualMachineInstance before we stop it.
			// However, if someone directly interacts with libvirt it is possible
			return v1.Failed, nil
		}
	} else {

		switch domain.Status.Status {
		case api.Shutoff, api.Crashed:
			switch domain.Status.Reason {
			case api.ReasonCrashed, api.ReasonPanicked:
				return v1.Failed, nil
			case api.ReasonDestroyed:
				// When ACPI is available, a graceful shutdown was attempted, so a destroyed
				// domain means it was destroyed after the grace period expired.
				// Without ACPI a destroyed domain is ok.
				if isACPIEnabled(vmi, domain) {
					return v1.Failed, nil
				}
				return v1.Succeeded, nil
			case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
				return v1.Succeeded, nil
			case api.ReasonMigrated:
				// if the domain migrated, we no longer know the phase.
				return vmi.Status.Phase, nil
			}
		case api.Running, api.Paused, api.Blocked, api.PMSuspended:
			return v1.Running, nil
		}
	}
	return vmi.Status.Phase, nil
}

func (c *VirtualMachineController) addFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) addDomainFunc(obj interface{}) {
        domain := obj.(*api.Domain)
        log.Log.Object(domain).Infof("Domain is in state %s reason %s", domain.Status.Status, domain.Status.Reason)
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteDomainFunc(obj interface{}) {
        domain, ok := obj.(*api.Domain)
        if !ok {
                tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                        log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
                        return
                }
                domain, ok = tombstone.Obj.(*api.Domain)
                if !ok {
                        log.Log.Reason(fmt.Errorf("tombstone contained object that is not a domain %#v", obj)).Error("Failed to process delete notification")
                        return
                }
        }
        log.Log.Object(domain).Info("Domain deleted")
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateDomainFunc(old, new interface{}) {
        newDomain := new.(*api.Domain)
        oldDomain := old.(*api.Domain)
        if oldDomain.Status.Status != newDomain.Status.Status || oldDomain.Status.Reason != newDomain.Status.Reason {
                log.Log.Object(newDomain).Infof("Domain is in state %s reason %s", newDomain.Status.Status, newDomain.Status.Reason)
        }

        if newDomain.ObjectMeta.DeletionTimestamp != nil {
                log.Log.Object(newDomain).Info("Domain is marked for deletion")
        }

        key, err := controller.KeyFunc(new)
        if err == nil {
                c.queue.Add(key)
        }
}

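// finalizeMigration completes a VMI migration: it applies any pending CPU and
// memory hotplug, clears the migrated-volumes status, and asks virt-launcher
// to finalize the migration with the per-interface binding migration options.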
func (c *VirtualMachineController) finalizeMigration(vmi *v1.VirtualMachineInstance) error {
        const errorMessage = "failed to finalize migration"

        client, err := c.getVerifiedLauncherClient(vmi)
        if err != nil {
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        if err := c.hotplugCPU(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to change vCPUs")
        }

        if err := c.hotplugMemory(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to update guest memory")
        }
        removeMigratedVolumes(vmi)

        options := &cmdv1.VirtualMachineOptions{}
        options.InterfaceMigration = domainspec.BindingMigrationByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
        if err := client.FinalizeVirtualMachineMigration(vmi, options); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        return nil
}

func vmiHasTerminationGracePeriod(vmi *v1.VirtualMachineInstance) bool {
        // if not set we use the default grace period
        return vmi.Spec.TerminationGracePeriodSeconds == nil ||
                (vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds != 0)
}

func domainHasGracePeriod(domain *api.Domain) bool {
        return domain != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds != 0
}

func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
        return (vmiHasTerminationGracePeriod(vmi) || (vmi.Spec.TerminationGracePeriodSeconds == nil && domainHasGracePeriod(domain))) &&
                domain != nil &&
                domain.Spec.Features != nil &&
                domain.Spec.Features.ACPI != nil
}

func (c *VirtualMachineController) isHostModelMigratable(vmi *v1.VirtualMachineInstance) error {
        if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == v1.CPUModeHostModel {
                if c.hostCpuModel == "" {
                        err := fmt.Errorf("the node \"%s\" does not allow migration with host-model", vmi.Status.NodeName)
                        log.Log.Object(vmi).Error(err.Error())
                        return err
                }
        }
        return nil
}

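// claimDeviceOwnership hands ownership of /dev/<deviceName> inside the
// virt-launcher root mount over to the launcher. A missing device node is
// tolerated when software emulation is allowed.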
func (c *VirtualMachineController) claimDeviceOwnership(virtLauncherRootMount *safepath.Path, deviceName string) error {
        softwareEmulation := c.clusterConfig.AllowEmulation()
        devicePath, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", deviceName))
        if err != nil {
                if softwareEmulation {
                        return nil
                }
                return err
        }

        return diskutils.DefaultOwnershipManager.SetFileOwnership(devicePath)
}

func (c *VirtualMachineController) reportDedicatedCPUSetForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        cgroupManager, err := getCgroupManager(vmi)
        if err != nil {
                return err
        }

        cpusetStr, err := cgroupManager.GetCpuSet()
        if err != nil {
                return err
        }

        cpuSet, err := hardware.ParseCPUSetLine(cpusetStr, 50000)
        if err != nil {
                return fmt.Errorf("failed to parse target VMI cpuset: %v", err)
        }

        vmi.Status.MigrationState.TargetCPUSet = cpuSet

        return nil
}

func (c *VirtualMachineController) reportTargetTopologyForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        options := virtualMachineOptions(nil, 0, nil, c.capabilities, c.clusterConfig)
        topology, err := json.Marshal(options.Topology)
        if err != nil {
                return err
        }
        vmi.Status.MigrationState.TargetNodeTopology = string(topology)
        return nil
}

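// handleMigrationAbort asks virt-launcher to cancel an in-flight migration
// unless an abort is already in progress. If the VMI is not actually
// migrating, the cancellation is skipped and the error is propagated;
// otherwise an aborting-migration event is recorded.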
func (c *VirtualMachineController) handleMigrationAbort(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        if vmi.Status.MigrationState.AbortStatus == v1.MigrationAbortInProgress {
                return nil
        }

        err := client.CancelVirtualMachineMigration(vmi)
        if err != nil && err.Error() == migrations.CancelMigrationFailedVmiNotMigratingErr {
                // If migration did not even start there is no need to cancel it
                log.Log.Object(vmi).Infof("skipping migration cancellation since vmi is not migrating")
                return err
        }

        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIAbortingMigration)
        return nil
}

func isIOError(shouldUpdate, domainExists bool, domain *api.Domain) bool {
        return shouldUpdate && domainExists && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedIOError
}

func (c *VirtualMachineController) updateMachineType(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil || vmi == nil {
                return
        }
        if domain.Spec.OS.Type.Machine != "" {
                vmi.Status.Machine = &v1.Machine{Type: domain.Spec.OS.Type.Machine}
        }
}

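// hotplugCPU syncs a pending vCPU topology change to virt-launcher. For VMIs
// with dedicated CPUs the requested number of vCPUs must not exceed the pod
// CPU limit propagated through the VirtualMachinePodCPULimitsLabel label; on
// success the current CPU topology in the VMI status is updated.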
func (c *VirtualMachineController) hotplugCPU(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIVCPUChangeConditionAndLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodCPULimitsLabel)
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
        }
        defer removeVMIVCPUChangeConditionAndLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceVCPUChange) {
                return nil
        }

        if vmi.IsCPUDedicated() {
                cpuLimitStr, ok := vmi.Labels[v1.VirtualMachinePodCPULimitsLabel]
                if !ok || len(cpuLimitStr) == 0 {
                        return fmt.Errorf("cannot read CPU limit from VMI label")
                }

                cpuLimit, err := strconv.Atoi(cpuLimitStr)
                if err != nil {
                        return fmt.Errorf("cannot parse CPU limit from VMI label: %v", err)
                }

                vcpus := hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU)
                if vcpus > int64(cpuLimit) {
                        return fmt.Errorf("number of requested VCPUS (%d) exceeds the limit (%d)", vcpus, cpuLimit)
                }
        }

        options := virtualMachineOptions(
                nil,
                0,
                nil,
                c.capabilities,
                c.clusterConfig)

        if err := client.SyncVirtualMachineCPUs(vmi, options); err != nil {
                return err
        }

        if vmi.Status.CurrentCPUTopology == nil {
                vmi.Status.CurrentCPUTopology = &v1.CPUTopology{}
        }

        vmi.Status.CurrentCPUTopology.Sockets = vmi.Spec.Domain.CPU.Sockets
        vmi.Status.CurrentCPUTopology.Cores = vmi.Spec.Domain.CPU.Cores
        vmi.Status.CurrentCPUTopology.Threads = vmi.Spec.Domain.CPU.Threads

        return nil
}

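// hotplugMemory syncs a pending guest memory change to virt-launcher. The new
// guest memory is only accepted when the compute container's memory request
// (propagated through the VirtualMachinePodMemoryRequestsLabel label) covers
// the VMI's memory request plus the computed overheads; as an illustration,
// a VMI requesting 4Gi with 300Mi of overhead needs a pod memory request of
// at least 4Gi + 300Mi, otherwise the hotplug is rejected.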
func (c *VirtualMachineController) hotplugMemory(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIMemoryChangeLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodMemoryRequestsLabel)
                delete(vmi.Labels, v1.MemoryHotplugOverheadRatioLabel)
        }
        defer removeVMIMemoryChangeLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceMemoryChange) {
                return nil
        }

        podMemReqStr := vmi.Labels[v1.VirtualMachinePodMemoryRequestsLabel]
        podMemReq, err := resource.ParseQuantity(podMemReqStr)
        if err != nil {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("cannot parse Memory requests from VMI label: %v", err)
        }

        overheadRatio := vmi.Labels[v1.MemoryHotplugOverheadRatioLabel]
        requiredMemory := services.GetMemoryOverhead(vmi, runtime.GOARCH, &overheadRatio)
        requiredMemory.Add(
                c.netBindingPluginMemoryCalculator.Calculate(vmi, c.clusterConfig.GetNetworkBindings()),
        )

        requiredMemory.Add(*vmi.Spec.Domain.Resources.Requests.Memory())

        if podMemReq.Cmp(requiredMemory) < 0 {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("amount of requested guest memory (%s) exceeds the launcher memory request (%s)", vmi.Spec.Domain.Memory.Guest.String(), podMemReqStr)
        }

        options := virtualMachineOptions(nil, 0, nil, c.capabilities, c.clusterConfig)

        if err := client.SyncVirtualMachineMemory(vmi, options); err != nil {
                // mark hotplug as failed
                vmiConditions.UpdateCondition(vmi, &v1.VirtualMachineInstanceCondition{
                        Type:    v1.VirtualMachineInstanceMemoryChange,
                        Status:  k8sv1.ConditionFalse,
                        Reason:  memoryHotplugFailedReason,
                        Message: "memory hotplug failed, the VM configuration is not supported",
                })
                return err
        }

        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
        vmi.Status.Memory.GuestRequested = vmi.Spec.Domain.Memory.Guest
        return nil
}

func removeMigratedVolumes(vmi *v1.VirtualMachineInstance) {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVolumesChange)
        vmi.Status.MigratedVolumes = nil
}

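// parseLibvirtQuantity converts a libvirt memory value and unit into a
// Kubernetes resource.Quantity. Decimal units (KB, MB, GB, TB) are powers of
// 1000, binary units (k/KiB, M/MiB, G/GiB, T/TiB) are powers of 1024, and an
// unknown unit yields nil. For example:
//
//      parseLibvirtQuantity(4, "MiB") // 4 * 1024 * 1024 bytes (4Mi)
//      parseLibvirtQuantity(4, "MB")  // 4 * 1000 * 1000 bytes (4M)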
func parseLibvirtQuantity(value int64, unit string) *resource.Quantity {
        switch unit {
        case "b", "bytes":
                return resource.NewQuantity(value, resource.BinarySI)
        case "KB":
                return resource.NewQuantity(value*1000, resource.DecimalSI)
        case "MB":
                return resource.NewQuantity(value*1000*1000, resource.DecimalSI)
        case "GB":
                return resource.NewQuantity(value*1000*1000*1000, resource.DecimalSI)
        case "TB":
                return resource.NewQuantity(value*1000*1000*1000*1000, resource.DecimalSI)
        case "k", "KiB":
                return resource.NewQuantity(value*1024, resource.BinarySI)
        case "M", "MiB":
                return resource.NewQuantity(value*1024*1024, resource.BinarySI)
        case "G", "GiB":
                return resource.NewQuantity(value*1024*1024*1024, resource.BinarySI)
        case "T", "TiB":
                return resource.NewQuantity(value*1024*1024*1024*1024, resource.BinarySI)
        }
        return nil
}

func (c *VirtualMachineController) updateMemoryInfo(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        if domain == nil || vmi == nil || domain.Spec.CurrentMemory == nil {
                return nil
        }
        if vmi.Status.Memory == nil {
                vmi.Status.Memory = &v1.MemoryStatus{}
        }
        currentGuest := parseLibvirtQuantity(int64(domain.Spec.CurrentMemory.Value), domain.Spec.CurrentMemory.Unit)
        vmi.Status.Memory.GuestCurrent = currentGuest
        return nil
}

func configureParallelMigrationThreads(options *cmdclient.MigrationOptions, vm *v1.VirtualMachineInstance) {
        // When the CPU is limited, there's a risk of the migration threads choking the CPU resources on the compute container.
        // For this reason, we will avoid configuring migration threads in such scenarios.
        if cpuLimit, cpuLimitExists := vm.Spec.Domain.Resources.Limits[k8sv1.ResourceCPU]; cpuLimitExists && !cpuLimit.IsZero() {
                return
        }

        options.ParallelMigrationThreads = pointer.P(parallelMultifdMigrationThreads)
}

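// isReadOnlyDisk reports whether the disk is a read-only CD-ROM; CD-ROMs are
// considered read-only unless ReadOnly is explicitly set to false.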
func isReadOnlyDisk(disk *v1.Disk) bool {
        isReadOnlyCDRom := disk.CDRom != nil && (disk.CDRom.ReadOnly == nil || *disk.CDRom.ReadOnly)

        return isReadOnlyCDRom
}