kubevirt / kubevirt · build c6a5b472-2abe-4080-be46-73e2b1a58f84

18 Feb 2025 04:31PM UTC · coverage: 71.643% (-0.004%) from 71.647%

Build triggered by a push (CI: prow, committer: web-flow).

Merge pull request #13935 from orelmisan/refactor-handler-net-setup
virt-handler: Decouple device ownership claim and network setup

6 of 25 new or added lines in 3 files covered (24.0%).
239 existing lines in 3 files are now uncovered.
62352 of 87032 relevant lines covered (71.64%).
0.8 hits per line.

Source File

/pkg/virt-handler/vm.go · 65.47% of lines covered
/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright 2017 Red Hat, Inc.
 *
 */

package virthandler

import (
        "bytes"
        "context"
        "encoding/json"
        goerror "errors"
        "fmt"
        "io"
        "net"
        "os"
        "path/filepath"
        "regexp"
        "runtime"
        "sort"
        "strconv"
        "strings"
        "time"

        "github.com/mitchellh/go-ps"
        "github.com/opencontainers/runc/libcontainer/cgroups"
        "golang.org/x/sys/unix"
        "libvirt.org/go/libvirtxml"

        k8sv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/equality"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/apimachinery/pkg/util/errors"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/record"
        "k8s.io/client-go/util/workqueue"

        v1 "kubevirt.io/api/core/v1"
        "kubevirt.io/client-go/kubecli"
        "kubevirt.io/client-go/log"

        "kubevirt.io/kubevirt/pkg/config"
        containerdisk "kubevirt.io/kubevirt/pkg/container-disk"
        "kubevirt.io/kubevirt/pkg/controller"
        diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
        "kubevirt.io/kubevirt/pkg/executor"
        cmdv1 "kubevirt.io/kubevirt/pkg/handler-launcher-com/cmd/v1"
        hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
        hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
        netcache "kubevirt.io/kubevirt/pkg/network/cache"
        "kubevirt.io/kubevirt/pkg/network/domainspec"
        neterrors "kubevirt.io/kubevirt/pkg/network/errors"
        netsetup "kubevirt.io/kubevirt/pkg/network/setup"
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
        "kubevirt.io/kubevirt/pkg/pointer"
        "kubevirt.io/kubevirt/pkg/safepath"
        "kubevirt.io/kubevirt/pkg/storage/reservation"
        pvctypes "kubevirt.io/kubevirt/pkg/storage/types"
        storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
        "kubevirt.io/kubevirt/pkg/util"
        virtutil "kubevirt.io/kubevirt/pkg/util"
        "kubevirt.io/kubevirt/pkg/util/hardware"
        "kubevirt.io/kubevirt/pkg/util/migrations"
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
        "kubevirt.io/kubevirt/pkg/virt-controller/services"
        "kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
        virtcache "kubevirt.io/kubevirt/pkg/virt-handler/cache"
        "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
        cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
        container_disk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
        device_manager "kubevirt.io/kubevirt/pkg/virt-handler/device-manager"
        "kubevirt.io/kubevirt/pkg/virt-handler/heartbeat"
        hotplug_volume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"
        "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
        migrationproxy "kubevirt.io/kubevirt/pkg/virt-handler/migration-proxy"
        "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
        "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
        "kubevirt.io/kubevirt/pkg/virtiofs"
)

type netconf interface {
        Setup(vmi *v1.VirtualMachineInstance, networks []v1.Network, launcherPid int) error
        Teardown(vmi *v1.VirtualMachineInstance) error
}

type netstat interface {
        UpdateStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
        Teardown(vmi *v1.VirtualMachineInstance)
        PodInterfaceVolatileDataIsCached(vmi *v1.VirtualMachineInstance, ifaceName string) bool
        CachePodInterfaceVolatileData(vmi *v1.VirtualMachineInstance, ifaceName string, data *netcache.PodIfaceCacheData)
}

type downwardMetricsManager interface {
        Run(stopCh chan struct{})
        StartServer(vmi *v1.VirtualMachineInstance, pid int) error
        StopServer(vmi *v1.VirtualMachineInstance)
}

const (
        failedDetectIsolationFmt              = "failed to detect isolation for launcher pod: %v"
        unableCreateVirtLauncherConnectionFmt = "unable to create virt-launcher client connection: %v"
        // This value was determined after consulting with libvirt developers and performing extensive testing.
        parallelMultifdMigrationThreads = uint(8)
)

const (
        //VolumeReadyReason is the reason set when the volume is ready.
        VolumeReadyReason = "VolumeReady"
        //VolumeUnMountedFromPodReason is the reason set when the volume is unmounted from the virtlauncher pod
        VolumeUnMountedFromPodReason = "VolumeUnMountedFromPod"
        //VolumeMountedToPodReason is the reason set when the volume is mounted to the virtlauncher pod
        VolumeMountedToPodReason = "VolumeMountedToPod"
        //VolumeUnplugged is the reason set when the volume is completely unplugged from the VMI
        VolumeUnplugged = "VolumeUnplugged"
        //VMIDefined is the reason set when a VMI is defined
        VMIDefined = "VirtualMachineInstance defined."
        //VMIStarted is the reason set when a VMI is started
        VMIStarted = "VirtualMachineInstance started."
        //VMIShutdown is the reason set when a VMI is shut down
        VMIShutdown = "The VirtualMachineInstance was shut down."
        //VMICrashed is the reason set when a VMI crashed
        VMICrashed = "The VirtualMachineInstance crashed."
        //VMIAbortingMigration is the reason set when migration is being aborted
        VMIAbortingMigration = "VirtualMachineInstance is aborting migration."
        //VMIMigrating is the reason set when the VMI is migrating
        VMIMigrating = "VirtualMachineInstance is migrating."
        //VMIMigrationTargetPrepared is the reason set when the migration target has been prepared
        VMIMigrationTargetPrepared = "VirtualMachineInstance Migration Target Prepared."
        //VMIStopping is the reason set when the VMI is stopping
        VMIStopping = "VirtualMachineInstance stopping"
        //VMIGracefulShutdown is the reason set when the VMI is gracefully shut down
        VMIGracefulShutdown = "Signaled Graceful Shutdown"
        //VMISignalDeletion is the reason set when the VMI has signaled deletion
        VMISignalDeletion = "Signaled Deletion"

        // memoryHotplugFailedReason is the reason set when the VM cannot hotplug memory
        memoryHotplugFailedReason = "Memory Hotplug Failed"
)

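// NOTE (comment added for clarity): getCgroupManager is a package-level variable rather
// than a plain function, presumably so tests can substitute a fake cgroup.Manager factory.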
var getCgroupManager = func(vmi *v1.VirtualMachineInstance) (cgroup.Manager, error) {
        return cgroup.NewManagerFromVM(vmi)
}

func NewController(
        recorder record.EventRecorder,
        clientset kubecli.KubevirtClient,
        host string,
        migrationIpAddress string,
        virtShareDir string,
        virtPrivateDir string,
        kubeletPodsDir string,
        vmiSourceInformer cache.SharedIndexInformer,
        vmiTargetInformer cache.SharedIndexInformer,
        domainInformer cache.SharedInformer,
        maxDevices int,
        clusterConfig *virtconfig.ClusterConfig,
        podIsolationDetector isolation.PodIsolationDetector,
        migrationProxy migrationproxy.ProxyManager,
        downwardMetricsManager downwardMetricsManager,
        capabilities *libvirtxml.Caps,
        hostCpuModel string,
        netConf netconf,
        netStat netstat,
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator,
) (*VirtualMachineController, error) {

        queue := workqueue.NewTypedRateLimitingQueueWithConfig[string](
                workqueue.DefaultTypedControllerRateLimiter[string](),
                workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-vm"},
        )

        containerDiskState := filepath.Join(virtPrivateDir, "container-disk-mount-state")
        if err := os.MkdirAll(containerDiskState, 0700); err != nil {
                return nil, err
        }

        hotplugState := filepath.Join(virtPrivateDir, "hotplug-volume-mount-state")
        if err := os.MkdirAll(hotplugState, 0700); err != nil {
                return nil, err
        }

        c := &VirtualMachineController{
                queue:                            queue,
                recorder:                         recorder,
                clientset:                        clientset,
                host:                             host,
                migrationIpAddress:               migrationIpAddress,
                virtShareDir:                     virtShareDir,
                vmiSourceStore:                   vmiSourceInformer.GetStore(),
                vmiTargetStore:                   vmiTargetInformer.GetStore(),
                domainStore:                      domainInformer.GetStore(),
                heartBeatInterval:                1 * time.Minute,
                migrationProxy:                   migrationProxy,
                podIsolationDetector:             podIsolationDetector,
                containerDiskMounter:             container_disk.NewMounter(podIsolationDetector, containerDiskState, clusterConfig),
                hotplugVolumeMounter:             hotplug_volume.NewVolumeMounter(hotplugState, kubeletPodsDir),
                clusterConfig:                    clusterConfig,
                virtLauncherFSRunDirPattern:      "/proc/%d/root/var/run",
                capabilities:                     capabilities,
                hostCpuModel:                     hostCpuModel,
                vmiExpectations:                  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
                sriovHotplugExecutorPool:         executor.NewRateLimitedExecutorPool(executor.NewExponentialLimitedBackoffCreator()),
                ioErrorRetryManager:              NewFailRetryManager("io-error-retry", 10*time.Second, 3*time.Minute, 30*time.Second),
                netConf:                          netConf,
                netStat:                          netStat,
                netBindingPluginMemoryCalculator: netBindingPluginMemoryCalculator,
        }

        c.hasSynced = func() bool {
                return domainInformer.HasSynced() && vmiSourceInformer.HasSynced() && vmiTargetInformer.HasSynced()
        }

        _, err := vmiSourceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
                AddFunc:    c.addFunc,
                DeleteFunc: c.deleteFunc,
                UpdateFunc: c.updateFunc,
        })
        if err != nil {
                return nil, err
        }

        _, err = vmiTargetInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
                AddFunc:    c.addFunc,
                DeleteFunc: c.deleteFunc,
                UpdateFunc: c.updateFunc,
        })
        if err != nil {
                return nil, err
        }

        _, err = domainInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
                AddFunc:    c.addDomainFunc,
                DeleteFunc: c.deleteDomainFunc,
                UpdateFunc: c.updateDomainFunc,
        })
        if err != nil {
                return nil, err
        }

        c.launcherClients = virtcache.LauncherClientInfoByVMI{}

        c.downwardMetricsManager = downwardMetricsManager

        c.domainNotifyPipes = make(map[string]string)

        permissions := "rw"
        if cgroups.IsCgroup2UnifiedMode() {
                // Need 'rwm' permissions otherwise ebpf filtering program attached by runc
                // will deny probing the device file with 'access' syscall. That in turn
                // will lead to virtqemud failure on VM startup.
                // This has been fixed upstream:
                //   https://github.com/opencontainers/runc/pull/2796
                // but the workaround is still needed to support previous versions without
                // the patch.
                permissions = "rwm"
        }

        c.deviceManagerController = device_manager.NewDeviceController(
                c.host,
                maxDevices,
                permissions,
                device_manager.PermanentHostDevicePlugins(maxDevices, permissions),
                clusterConfig,
                clientset.CoreV1())
        c.heartBeat = heartbeat.NewHeartBeat(clientset.CoreV1(), c.deviceManagerController, clusterConfig, host)

        return c, nil
}

type netBindingPluginMemoryCalculator interface {
        Calculate(vmi *v1.VirtualMachineInstance, registeredPlugins map[string]v1.InterfaceBindingPlugin) resource.Quantity
}

type VirtualMachineController struct {
        recorder                 record.EventRecorder
        clientset                kubecli.KubevirtClient
        host                     string
        migrationIpAddress       string
        virtShareDir             string
        virtPrivateDir           string
        queue                    workqueue.TypedRateLimitingInterface[string]
        vmiSourceStore           cache.Store
        vmiTargetStore           cache.Store
        domainStore              cache.Store
        launcherClients          virtcache.LauncherClientInfoByVMI
        heartBeatInterval        time.Duration
        deviceManagerController  *device_manager.DeviceController
        migrationProxy           migrationproxy.ProxyManager
        podIsolationDetector     isolation.PodIsolationDetector
        containerDiskMounter     container_disk.Mounter
        hotplugVolumeMounter     hotplug_volume.VolumeMounter
        clusterConfig            *virtconfig.ClusterConfig
        sriovHotplugExecutorPool *executor.RateLimitedExecutorPool
        downwardMetricsManager   downwardMetricsManager

        netConf                          netconf
        netStat                          netstat
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator

        domainNotifyPipes           map[string]string
        virtLauncherFSRunDirPattern string
        heartBeat                   *heartbeat.HeartBeat
        capabilities                *libvirtxml.Caps
        hostCpuModel                string
        vmiExpectations             *controller.UIDTrackingControllerExpectations
        ioErrorRetryManager         *FailRetryManager
        hasSynced                   func() bool
}

type virtLauncherCriticalSecurebootError struct {
        msg string
}

func (e *virtLauncherCriticalSecurebootError) Error() string { return e.msg }

type vmiIrrecoverableError struct {
        msg string
}

func (e *vmiIrrecoverableError) Error() string { return e.msg }

func formatIrrecoverableErrorMessage(domain *api.Domain) string {
        msg := "unknown reason"
        if domainPausedFailedPostCopy(domain) {
                msg = "VMI is irrecoverable due to failed post-copy migration"
        }
        return msg
}

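// Summary comment added for readability: handleDomainNotifyPipe proxies connections
// accepted on the per-VMI notify pipe listener (inside the launcher pod) to
// virt-handler's shared domain-notify.sock, copying data in both directions until
// either side closes or the stop channel fires.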
func handleDomainNotifyPipe(domainPipeStopChan chan struct{}, ln net.Listener, virtShareDir string, vmi *v1.VirtualMachineInstance) {

        fdChan := make(chan net.Conn, 100)

        // Close listener and exit when stop encountered
        go func() {
                <-domainPipeStopChan
                log.Log.Object(vmi).Infof("closing notify pipe listener for vmi")
                if err := ln.Close(); err != nil {
                        log.Log.Object(vmi).Infof("failed closing notify pipe listener for vmi: %v", err)
                }
        }()

        // Listen for new connections,
        go func(vmi *v1.VirtualMachineInstance, ln net.Listener, domainPipeStopChan chan struct{}) {
                for {
                        fd, err := ln.Accept()
                        if err != nil {
                                if goerror.Is(err, net.ErrClosed) {
                                        // As Accept blocks, closing it is our mechanism to exit this loop
                                        return
                                }
                                log.Log.Reason(err).Error("Domain pipe accept error encountered.")
                                // keep listening until stop invoked
                                time.Sleep(1 * time.Second)
                        } else {
                                fdChan <- fd
                        }
                }
        }(vmi, ln, domainPipeStopChan)

        // Process new connections
        // exit when stop encountered
        go func(vmi *v1.VirtualMachineInstance, fdChan chan net.Conn, domainPipeStopChan chan struct{}) {
                for {
                        select {
                        case <-domainPipeStopChan:
                                return
                        case fd := <-fdChan:
                                go func(vmi *v1.VirtualMachineInstance) {
                                        defer fd.Close()

                                        // pipe the VMI domain-notify.sock to the virt-handler domain-notify.sock
                                        // so virt-handler receives notifications from the VMI
                                        conn, err := net.Dial("unix", filepath.Join(virtShareDir, "domain-notify.sock"))
                                        if err != nil {
                                                log.Log.Reason(err).Error("error connecting to domain-notify.sock for proxy connection")
                                                return
                                        }
                                        defer conn.Close()

                                        log.Log.Object(vmi).Infof("Accepted new notify pipe connection for vmi")
                                        copyErr := make(chan error, 2)
                                        go func() {
                                                _, err := io.Copy(fd, conn)
                                                copyErr <- err
                                        }()
                                        go func() {
                                                _, err := io.Copy(conn, fd)
                                                copyErr <- err
                                        }()

                                        // wait until one of the copy routines exit then
                                        // let the fd close
                                        err = <-copyErr
                                        if err != nil {
                                                log.Log.Object(vmi).Infof("closing notify pipe connection for vmi with error: %v", err)
                                        } else {
                                                log.Log.Object(vmi).Infof("gracefully closed notify pipe connection for vmi")
                                        }

                                }(vmi)
                        }
                }
        }(vmi, fdChan, domainPipeStopChan)
}

func (c *VirtualMachineController) startDomainNotifyPipe(domainPipeStopChan chan struct{}, vmi *v1.VirtualMachineInstance) error {

        res, err := c.podIsolationDetector.Detect(vmi)
        if err != nil {
                return fmt.Errorf("failed to detect isolation for launcher pod when setting up notify pipe: %v", err)
        }

        // inject the domain-notify.sock into the VMI pod.
        root, err := res.MountRoot()
        if err != nil {
                return err
        }
        socketDir, err := root.AppendAndResolveWithRelativeRoot(c.virtShareDir)
        if err != nil {
                return err
        }

        listener, err := safepath.ListenUnixNoFollow(socketDir, "domain-notify-pipe.sock")
        if err != nil {
                log.Log.Reason(err).Error("failed to create unix socket for proxy service")
                return err
        }
        socketPath, err := safepath.JoinNoFollow(socketDir, "domain-notify-pipe.sock")
        if err != nil {
                return err
        }

        if util.IsNonRootVMI(vmi) {
                err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath)
                if err != nil {
                        log.Log.Reason(err).Error("unable to change ownership for domain notify")
                        return err
                }
        }

        handleDomainNotifyPipe(domainPipeStopChan, listener, c.virtShareDir, vmi)

        return nil
}

// Determines if a domain's grace period has expired during shutdown.
// If the grace period has started but not expired, timeLeft represents
// the time in seconds left until the period expires.
// If the grace period has not started, timeLeft will be set to -1.
func (c *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasExpired bool, timeLeft int64) {

        hasExpired = false
        timeLeft = 0

        if dom == nil {
                hasExpired = true
                return
        }

        startTime := int64(0)
        if dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp != nil {
                startTime = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp.UTC().Unix()
        }
        gracePeriod := dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds

        // If gracePeriod == 0, then there will be no startTime set, deletion
        // should occur immediately during shutdown.
        if gracePeriod == 0 {
                hasExpired = true
                return
        } else if startTime == 0 {
                // If gracePeriod > 0, then the shutdown signal needs to be sent
                // and the gracePeriod start time needs to be set.
                timeLeft = -1
                return
        }

        now := time.Now().UTC().Unix()
        diff := now - startTime

        if diff >= gracePeriod {
                hasExpired = true
                return
        }

        timeLeft = int64(gracePeriod - diff)
        if timeLeft < 1 {
                timeLeft = 1
        }
        return
}

func (c *VirtualMachineController) hasTargetDetectedReadyDomain(vmi *v1.VirtualMachineInstance) (bool, int64) {
        // give the target node 60 seconds to discover the libvirt domain via the domain informer
        // before allowing the VMI to be processed. This closes the gap between the
        // VMI's status getting updated to reflect the new source node, and the domain
        // informer firing the event to alert the source node of the new domain.
        migrationTargetDelayTimeout := 60

        if vmi.Status.MigrationState != nil &&
                vmi.Status.MigrationState.TargetNodeDomainDetected &&
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp != nil {

                return true, 0
        }

        nowUnix := time.Now().UTC().Unix()
        migrationEndUnix := vmi.Status.MigrationState.EndTimestamp.Time.UTC().Unix()

        diff := nowUnix - migrationEndUnix

        if diff > int64(migrationTargetDelayTimeout) {
                return false, 0
        }

        timeLeft := int64(migrationTargetDelayTimeout) - diff

        enqueueTime := timeLeft
        if enqueueTime < 5 {
                enqueueTime = 5
        }

        // re-enqueue the key to ensure it gets processed again within the right time.
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(enqueueTime)*time.Second)

        return false, timeLeft
}

// teardownNetwork performs network cache cleanup for a specific VMI.
func (c *VirtualMachineController) teardownNetwork(vmi *v1.VirtualMachineInstance) {
        if string(vmi.UID) == "" {
                return
        }
        if err := c.netConf.Teardown(vmi); err != nil {
                log.Log.Reason(err).Errorf("failed to delete VMI Network cache files: %s", err.Error())
        }
        c.netStat.Teardown(vmi)
}

func domainPausedFailedPostCopy(domain *api.Domain) bool {
        return domain != nil && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedPostcopyFailed
}

func domainMigrated(domain *api.Domain) bool {
        return domain != nil && domain.Status.Status == api.Shutoff && domain.Status.Reason == api.ReasonMigrated
}

func canUpdateToMounted(currentPhase v1.VolumePhase) bool {
        return currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending || currentPhase == v1.HotplugVolumeAttachedToNode
}

func canUpdateToUnmounted(currentPhase v1.VolumePhase) bool {
        return currentPhase == v1.VolumeReady || currentPhase == v1.HotplugVolumeMounted || currentPhase == v1.HotplugVolumeAttachedToNode
}

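// Summary comment added for readability: setMigrationProgressStatus copies the migration
// metadata reported on the libvirt domain (start/end timestamps, abort status,
// completed/failed flags, mode) into vmi.Status.MigrationState, but only on the
// migration source node and only when the migration UIDs match.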
func (c *VirtualMachineController) setMigrationProgressStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil ||
                domain.Spec.Metadata.KubeVirt.Migration == nil ||
                vmi.Status.MigrationState == nil ||
                !c.isMigrationSource(vmi) {
                return
        }

        migrationMetadata := domain.Spec.Metadata.KubeVirt.Migration
        if migrationMetadata.UID != vmi.Status.MigrationState.MigrationUID {
                return
        }

        if vmi.Status.MigrationState.EndTimestamp == nil && migrationMetadata.EndTimestamp != nil {
                if migrationMetadata.Failed {
                        vmi.Status.MigrationState.FailureReason = migrationMetadata.FailureReason
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("VirtualMachineInstance migration uid %s failed. reason:%s", string(migrationMetadata.UID), migrationMetadata.FailureReason))
                }
        }

        if vmi.Status.MigrationState.StartTimestamp == nil {
                vmi.Status.MigrationState.StartTimestamp = migrationMetadata.StartTimestamp
        }
        if vmi.Status.MigrationState.EndTimestamp == nil {
                vmi.Status.MigrationState.EndTimestamp = migrationMetadata.EndTimestamp
        }
        vmi.Status.MigrationState.AbortStatus = v1.MigrationAbortStatus(migrationMetadata.AbortStatus)
        vmi.Status.MigrationState.Completed = migrationMetadata.Completed
        vmi.Status.MigrationState.Failed = migrationMetadata.Failed
        vmi.Status.MigrationState.Mode = migrationMetadata.Mode
}

func (c *VirtualMachineController) migrationSourceUpdateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {

        vmi := origVMI.DeepCopy()
        oldStatus := vmi.DeepCopy().Status

        // if a migration happens very quickly, it's possible parts of the in
        // progress status weren't set. We need to make sure we set this even
        // if the migration has completed
        c.setMigrationProgressStatus(vmi, domain)

        // handle migrations differently than normal status updates.
        //
        // When a successful migration is detected, we must transfer ownership of the VMI
        // from the source node (this node) to the target node (node the domain was migrated to).
        //
        // Transfer ownership by...
        // 1. Marking vmi.Status.MigrationState as completed
        // 2. Update the vmi.Status.NodeName to reflect the target node's name
        // 3. Update the VMI's NodeNameLabel annotation to reflect the target node's name
        // 4. Clear the LauncherContainerImageVersion which virt-controller will detect
        //    and accurately based on the version used on the target pod
        //
        // After a migration, the VMI's phase is no longer owned by this node. Only the
        // MigrationState status field is eligible to be mutated.
        migrationHost := ""
        if vmi.Status.MigrationState != nil {
                migrationHost = vmi.Status.MigrationState.TargetNode
        }

        if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.EndTimestamp == nil {
                now := metav1.NewTime(time.Now())
                vmi.Status.MigrationState.EndTimestamp = &now
        }

        targetNodeDetectedDomain, timeLeft := c.hasTargetDetectedReadyDomain(vmi)
        // If we can't detect where the migration went to, then we have no
        // way of transferring ownership. The only option here is to move the
        // vmi to failed.  The cluster vmi controller will then tear down the
        // resulting pods.
        if migrationHost == "" {
                // migrated to unknown host.
                vmi.Status.Phase = v1.Failed
                vmi.Status.MigrationState.Completed = true
                vmi.Status.MigrationState.Failed = true

                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to unknown host."))
        } else if !targetNodeDetectedDomain {
                if timeLeft <= 0 {
                        vmi.Status.Phase = v1.Failed
                        vmi.Status.MigrationState.Completed = true
                        vmi.Status.MigrationState.Failed = true

                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance's domain was never observed on the target after the migration completed within the timeout period."))
                } else {
                        log.Log.Object(vmi).Info("Waiting on the target node to observe the migrated domain before performing the handoff")
                }
        } else if vmi.Status.MigrationState != nil {
                // this is the migration ACK.
                // At this point we know that the migration has completed and that
                // the target node has seen the domain event.
                vmi.Labels[v1.NodeNameLabel] = migrationHost
                delete(vmi.Labels, v1.OutdatedLauncherImageLabel)
                vmi.Status.LauncherContainerImageVersion = ""
                vmi.Status.NodeName = migrationHost
                // clean the evacuation node name since we have already migrated to a new node
                vmi.Status.EvacuationNodeName = ""
                vmi.Status.MigrationState.Completed = true
                // update the vmi migrationTransport to indicate that next migration should use unix URI
                // new workloads will set the migrationTransport on their creation, however, legacy workloads
                // can make the switch only after the first migration
                vmi.Status.MigrationTransport = v1.MigrationTransportUnix
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to node %s.", migrationHost))
                log.Log.Object(vmi).Infof("migration completed to node %s", migrationHost)
        }

        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
                key := controller.VirtualMachineInstanceKey(vmi)
                c.vmiExpectations.SetExpectations(key, 1, 0)
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
                if err != nil {
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
                        return err
                }
        }
        return nil
}

func domainIsActiveOnTarget(domain *api.Domain) bool {

        if domain == nil {
                return false
        }

        // It's possible for the domain to be active on the target node if the domain is
        // 1. Running
        // 2. User initiated Paused
        if domain.Status.Status == api.Running {
                return true
        } else if domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedUser {
                return true
        }
        return false

}

func (c *VirtualMachineController) migrationTargetUpdateVMIStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {

        vmiCopy := vmi.DeepCopy()

        if migrations.MigrationFailed(vmi) {
                // nothing left to report on the target node if the migration failed
                return nil
        }

        domainExists := domain != nil

        // Handle post migration
        if domainExists && vmi.Status.MigrationState != nil && !vmi.Status.MigrationState.TargetNodeDomainDetected {
                // record that we've seen the domain populated on the target's node
                log.Log.Object(vmi).Info("The target node received the migrated domain")
                vmiCopy.Status.MigrationState.TargetNodeDomainDetected = true

                // adjust QEMU process memlock limits in order to enable old virt-launcher pods to
                // perform hotplug host-devices on post migration.
                if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "Failed to update target node qemu memory limits during live migration")
                }

        }

        if domainExists &&
                domainIsActiveOnTarget(domain) &&
                vmi.Status.MigrationState != nil &&
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp == nil {

                // record the moment we detected the domain is running.
                // This is used as a trigger to help coordinate when CNI drivers
                // fail over the IP to the new pod.
                log.Log.Object(vmi).Info("The target node received the running migrated domain")
                now := metav1.Now()
                vmiCopy.Status.MigrationState.TargetNodeDomainReadyTimestamp = &now
                c.finalizeMigration(vmiCopy)
        }

        if !migrations.IsMigrating(vmi) {
                destSrcPortsMap := c.migrationProxy.GetTargetListenerPorts(string(vmi.UID))
                if len(destSrcPortsMap) == 0 {
                        msg := "target migration listener is not up for this vmi"
                        log.Log.Object(vmi).Error(msg)
                        return fmt.Errorf(msg)
                }

                hostAddress := ""
                // advertise the listener address to the source node
                if vmi.Status.MigrationState != nil {
                        hostAddress = vmi.Status.MigrationState.TargetNodeAddress
                }
                if hostAddress != c.migrationIpAddress {
                        portsList := make([]string, 0, len(destSrcPortsMap))

                        for k := range destSrcPortsMap {
                                portsList = append(portsList, k)
                        }
                        portsStrList := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(portsList)), ","), "[]")
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), fmt.Sprintf("Migration Target is listening at %s, on ports: %s", c.migrationIpAddress, portsStrList))
                        vmiCopy.Status.MigrationState.TargetNodeAddress = c.migrationIpAddress
                        vmiCopy.Status.MigrationState.TargetDirectMigrationNodePorts = destSrcPortsMap
                }

                // If the migrated VMI requires dedicated CPUs, report the new pod CPU set to the source node
                // via the VMI migration status in order to patch the domain pre migration
                if vmi.IsCPUDedicated() {
                        err := c.reportDedicatedCPUSetForMigratingVMI(vmiCopy)
                        if err != nil {
                                return err
                        }
                        err = c.reportTargetTopologyForMigratingVMI(vmiCopy)
                        if err != nil {
                                return err
                        }
                }
        }

        // update the VMI if necessary
        if !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) {
                key := controller.VirtualMachineInstanceKey(vmi)
                c.vmiExpectations.SetExpectations(key, 1, 0)
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmiCopy, metav1.UpdateOptions{})
                if err != nil {
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
                        return err
                }
        }

        return nil
}

func (c *VirtualMachineController) generateEventsForVolumeStatusChange(vmi *v1.VirtualMachineInstance, newStatusMap map[string]v1.VolumeStatus) {
        newStatusMapCopy := make(map[string]v1.VolumeStatus)
        for k, v := range newStatusMap {
                newStatusMapCopy[k] = v
        }
        for _, oldStatus := range vmi.Status.VolumeStatus {
                newStatus, ok := newStatusMap[oldStatus.Name]
                if !ok {
                        // status got removed
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, VolumeUnplugged, fmt.Sprintf("Volume %s has been unplugged", oldStatus.Name))
                        continue
                }
                if newStatus.Phase != oldStatus.Phase {
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, newStatus.Reason, newStatus.Message)
                }
                delete(newStatusMapCopy, newStatus.Name)
        }
        // Send events for any new statuses.
        for _, v := range newStatusMapCopy {
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v.Reason, v.Message)
        }
}

func (c *VirtualMachineController) updateHotplugVolumeStatus(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, specVolumeMap map[string]v1.Volume) (v1.VolumeStatus, bool) {
        needsRefresh := false
        if volumeStatus.Target == "" {
                needsRefresh = true
                mounted, err := c.hotplugVolumeMounter.IsMounted(vmi, volumeStatus.Name, volumeStatus.HotplugVolume.AttachPodUID)
                if err != nil {
                        log.Log.Object(vmi).Errorf("error occurred while checking if volume is mounted: %v", err)
                }
                if mounted {
                        if _, ok := specVolumeMap[volumeStatus.Name]; ok && canUpdateToMounted(volumeStatus.Phase) {
                                log.DefaultLogger().Infof("Marking volume %s as mounted in pod, it can now be attached", volumeStatus.Name)
                                // mounted, and still in spec, and in phase we can change, update status to mounted.
                                volumeStatus.Phase = v1.HotplugVolumeMounted
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been mounted in virt-launcher pod", volumeStatus.Name)
                                volumeStatus.Reason = VolumeMountedToPodReason
                        }
                } else {
                        // Not mounted, check if the volume is in the spec, if not update status
                        if _, ok := specVolumeMap[volumeStatus.Name]; !ok && canUpdateToUnmounted(volumeStatus.Phase) {
                                log.DefaultLogger().Infof("Marking volume %s as unmounted from pod, it can now be detached", volumeStatus.Name)
                                // Not mounted.
                                volumeStatus.Phase = v1.HotplugVolumeUnMounted
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been unmounted from virt-launcher pod", volumeStatus.Name)
                                volumeStatus.Reason = VolumeUnMountedFromPodReason
                        }
                }
        } else {
                // Successfully attached to VM.
                volumeStatus.Phase = v1.VolumeReady
                volumeStatus.Message = fmt.Sprintf("Successfully attach hotplugged volume %s to VM", volumeStatus.Name)
                volumeStatus.Reason = VolumeReadyReason
        }
        return volumeStatus, needsRefresh
}
861
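// Summary comment added for readability: needToComputeChecksums reports whether any
// containerDisk volume (or the kernel-boot container, if one is configured) still lacks
// a checksum in the VMI status, i.e. whether updateChecksumInfo has work to do.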
func needToComputeChecksums(vmi *v1.VirtualMachineInstance) bool {
1✔
862
        containerDisks := map[string]*v1.Volume{}
1✔
863
        for _, volume := range vmi.Spec.Volumes {
2✔
864
                if volume.VolumeSource.ContainerDisk != nil {
2✔
865
                        containerDisks[volume.Name] = &volume
1✔
866
                }
1✔
867
        }
868

869
        for i := range vmi.Status.VolumeStatus {
2✔
870
                _, isContainerDisk := containerDisks[vmi.Status.VolumeStatus[i].Name]
1✔
871
                if !isContainerDisk {
2✔
872
                        continue
1✔
873
                }
874

875
                if vmi.Status.VolumeStatus[i].ContainerDiskVolume == nil ||
1✔
876
                        vmi.Status.VolumeStatus[i].ContainerDiskVolume.Checksum == 0 {
2✔
877
                        return true
1✔
878
                }
1✔
879
        }
880

881
        if util.HasKernelBootContainerImage(vmi) {
1✔
882
                if vmi.Status.KernelBootStatus == nil {
×
883
                        return true
×
884
                }
×
885

886
                kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
×
887

×
888
                if kernelBootContainer.KernelPath != "" &&
×
889
                        (vmi.Status.KernelBootStatus.KernelInfo == nil ||
×
890
                                vmi.Status.KernelBootStatus.KernelInfo.Checksum == 0) {
×
891
                        return true
×
892

×
893
                }
×
894

895
                if kernelBootContainer.InitrdPath != "" &&
×
896
                        (vmi.Status.KernelBootStatus.InitrdInfo == nil ||
×
897
                                vmi.Status.KernelBootStatus.InitrdInfo.Checksum == 0) {
×
898
                        return true
×
899

×
900
                }
×
901
        }
902

903
        return false
1✔
904
}
905

906
func (c *VirtualMachineController) updateChecksumInfo(vmi *v1.VirtualMachineInstance, syncError error) error {
1✔
907

1✔
908
        if syncError != nil || vmi.DeletionTimestamp != nil || !needToComputeChecksums(vmi) {
2✔
909
                return nil
1✔
910
        }
1✔
911

912
        diskChecksums, err := c.containerDiskMounter.ComputeChecksums(vmi)
1✔
913
        if goerror.Is(err, container_disk.ErrDiskContainerGone) {
1✔
914
                log.Log.Errorf("cannot compute checksums as containerdisk/kernelboot containers seem to have been terminated")
×
915
                return nil
×
916
        }
×
917
        if err != nil {
1✔
918
                return err
×
919
        }
×
920

921
        // containerdisks
922
        for i := range vmi.Status.VolumeStatus {
2✔
923
                checksum, exists := diskChecksums.ContainerDiskChecksums[vmi.Status.VolumeStatus[i].Name]
1✔
924
                if !exists {
1✔
925
                        // not a containerdisk
×
926
                        continue
×
927
                }
928

929
                vmi.Status.VolumeStatus[i].ContainerDiskVolume = &v1.ContainerDiskInfo{
1✔
930
                        Checksum: checksum,
1✔
931
                }
1✔
932
        }
933

934
        // kernelboot
935
        if util.HasKernelBootContainerImage(vmi) {
2✔
936
                vmi.Status.KernelBootStatus = &v1.KernelBootStatus{}
1✔
937

1✔
938
                if diskChecksums.KernelBootChecksum.Kernel != nil {
2✔
939
                        vmi.Status.KernelBootStatus.KernelInfo = &v1.KernelInfo{
1✔
940
                                Checksum: *diskChecksums.KernelBootChecksum.Kernel,
1✔
941
                        }
1✔
942
                }
1✔
943

944
                if diskChecksums.KernelBootChecksum.Initrd != nil {
2✔
945
                        vmi.Status.KernelBootStatus.InitrdInfo = &v1.InitrdInfo{
1✔
946
                                Checksum: *diskChecksums.KernelBootChecksum.Initrd,
1✔
947
                        }
1✔
948
                }
1✔
949
        }
950

951
        return nil
1✔
952
}
953

954
func (c *VirtualMachineController) updateVolumeStatusesFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
1✔
955
        // used by unit test
1✔
956
        hasHotplug := false
1✔
957

1✔
958
        if domain == nil {
2✔
959
                return hasHotplug
1✔
960
        }
1✔
961

962
        if len(vmi.Status.VolumeStatus) > 0 {
2✔
963
                diskDeviceMap := make(map[string]string)
1✔
964
                for _, disk := range domain.Spec.Devices.Disks {
2✔
965
                        diskDeviceMap[disk.Alias.GetName()] = disk.Target.Device
1✔
966
                }
1✔
967
                specVolumeMap := make(map[string]v1.Volume)
1✔
968
                for _, volume := range vmi.Spec.Volumes {
2✔
969
                        specVolumeMap[volume.Name] = volume
1✔
970
                }
1✔
971
                newStatusMap := make(map[string]v1.VolumeStatus)
1✔
972
                newStatuses := make([]v1.VolumeStatus, 0)
1✔
973
                needsRefresh := false
1✔
974
                for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
975
                        tmpNeedsRefresh := false
1✔
976
                        if _, ok := diskDeviceMap[volumeStatus.Name]; ok {
2✔
977
                                volumeStatus.Target = diskDeviceMap[volumeStatus.Name]
1✔
978
                        }
1✔
979
                        if volumeStatus.HotplugVolume != nil {
2✔
980
                                hasHotplug = true
1✔
981
                                volumeStatus, tmpNeedsRefresh = c.updateHotplugVolumeStatus(vmi, volumeStatus, specVolumeMap)
1✔
982
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
983
                        }
1✔
984
                        if volumeStatus.MemoryDumpVolume != nil {
2✔
985
                                volumeStatus, tmpNeedsRefresh = c.updateMemoryDumpInfo(vmi, volumeStatus, domain)
1✔
986
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
987
                        }
1✔
988
                        newStatuses = append(newStatuses, volumeStatus)
1✔
989
                        newStatusMap[volumeStatus.Name] = volumeStatus
1✔
990
                }
991
                sort.SliceStable(newStatuses, func(i, j int) bool {
2✔
992
                        return strings.Compare(newStatuses[i].Name, newStatuses[j].Name) == -1
1✔
993
                })
1✔
994
                if needsRefresh {
2✔
995
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
1✔
996
                }
1✔
997
                c.generateEventsForVolumeStatusChange(vmi, newStatusMap)
1✔
998
                vmi.Status.VolumeStatus = newStatuses
1✔
999
        }
1000
        return hasHotplug
1✔
1001
}
1002

1003
func (c *VirtualMachineController) updateGuestInfoFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
1004

1✔
1005
        if domain == nil {
2✔
1006
                return
1✔
1007
        }
1✔
1008

1009
        if vmi.Status.GuestOSInfo.Name != domain.Status.OSInfo.Name {
2✔
1010
                vmi.Status.GuestOSInfo.Name = domain.Status.OSInfo.Name
1✔
1011
                vmi.Status.GuestOSInfo.Version = domain.Status.OSInfo.Version
1✔
1012
                vmi.Status.GuestOSInfo.KernelRelease = domain.Status.OSInfo.KernelRelease
1✔
1013
                vmi.Status.GuestOSInfo.PrettyName = domain.Status.OSInfo.PrettyName
1✔
1014
                vmi.Status.GuestOSInfo.VersionID = domain.Status.OSInfo.VersionId
1✔
1015
                vmi.Status.GuestOSInfo.KernelVersion = domain.Status.OSInfo.KernelVersion
1✔
1016
                vmi.Status.GuestOSInfo.Machine = domain.Status.OSInfo.Machine
1✔
1017
                vmi.Status.GuestOSInfo.ID = domain.Status.OSInfo.Id
1✔
1018
        }
1✔
1019
}
1020

1021
func (c *VirtualMachineController) updateAccessCredentialConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1022

1✔
1023
        if domain == nil || domain.Spec.Metadata.KubeVirt.AccessCredential == nil {
2✔
1024
                return
1✔
1025
        }
1✔
1026

1027
        message := domain.Spec.Metadata.KubeVirt.AccessCredential.Message
1✔
1028
        status := k8sv1.ConditionFalse
1✔
1029
        if domain.Spec.Metadata.KubeVirt.AccessCredential.Succeeded {
2✔
1030
                status = k8sv1.ConditionTrue
1✔
1031
        }
1✔
1032

1033
        add := false
1✔
1034
        condition := condManager.GetCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1035
        if condition == nil {
2✔
1036
                add = true
1✔
1037
        } else if condition.Status != status || condition.Message != message {
3✔
1038
                // if not as expected, remove, then add.
1✔
1039
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1040
                add = true
1✔
1041
        }
1✔
1042
        if add {
2✔
1043
                newCondition := v1.VirtualMachineInstanceCondition{
1✔
1044
                        Type:               v1.VirtualMachineInstanceAccessCredentialsSynchronized,
1✔
1045
                        LastTransitionTime: metav1.Now(),
1✔
1046
                        Status:             status,
1✔
1047
                        Message:            message,
1✔
1048
                }
1✔
1049
                vmi.Status.Conditions = append(vmi.Status.Conditions, newCondition)
1✔
1050
                if status == k8sv1.ConditionTrue {
2✔
1051
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.AccessCredentialsSyncSuccess.String(), message)
1✔
1052
                } else {
2✔
1053
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.AccessCredentialsSyncFailed.String(), message)
1✔
1054
                }
1✔
1055
        }
1056
}
1057

1058
func (c *VirtualMachineController) updateLiveMigrationConditions(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1059
        // Calculate whether the VM is migratable
1✔
1060
        liveMigrationCondition, isBlockMigration := c.calculateLiveMigrationCondition(vmi)
1✔
1061
        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceIsMigratable) {
2✔
1062
                vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
1✔
1063
                // Set VMI Migration Method
1✔
1064
                if isBlockMigration {
2✔
1065
                        vmi.Status.MigrationMethod = v1.BlockMigration
1✔
1066
                } else {
2✔
1067
                        vmi.Status.MigrationMethod = v1.LiveMigration
1✔
1068
                }
1✔
1069
        } else {
1✔
1070
                cond := condManager.GetCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
1✔
1071
                if !equality.Semantic.DeepEqual(cond, liveMigrationCondition) {
1✔
1072
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
×
1073
                        vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
×
1074
                }
×
1075
        }
1076
        storageLiveMigCond := c.calculateLiveStorageMigrationCondition(vmi)
1✔
1077
        condManager.UpdateCondition(vmi, storageLiveMigCond)
1✔
1078
        evictable := migrations.VMIMigratableOnEviction(c.clusterConfig, vmi)
1✔
1079
        if evictable && liveMigrationCondition.Status == k8sv1.ConditionFalse {
2✔
1080
                c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), "EvictionStrategy is set but vmi is not migratable; %s", liveMigrationCondition.Message)
1✔
1081
        }
1✔
1082
}
1083

1084
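// updateGuestAgentConditions manages the AgentConnected and UnsupportedAgent conditions
// based on the state of the guest-agent channel reported by the domain and the commands
// supported by the connected guest agent.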
func (c *VirtualMachineController) updateGuestAgentConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
1✔
1085

1✔
1086
        // Update the condition when GA is connected
1✔
1087
        channelConnected := false
1✔
1088
        if domain != nil {
2✔
1089
                for _, channel := range domain.Spec.Devices.Channels {
2✔
1090
                        if channel.Target != nil {
2✔
1091
                                log.Log.V(4).Infof("Channel: %s, %s", channel.Target.Name, channel.Target.State)
1✔
1092
                                if channel.Target.Name == "org.qemu.guest_agent.0" {
2✔
1093
                                        if channel.Target.State == "connected" {
2✔
1094
                                                channelConnected = true
1✔
1095
                                        }
1✔
1096
                                }
1097

1098
                        }
1099
                }
1100
        }
1101

1102
        switch {
1✔
1103
        case channelConnected && !condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected):
1✔
1104
                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
1105
                        Type:          v1.VirtualMachineInstanceAgentConnected,
1✔
1106
                        LastProbeTime: metav1.Now(),
1✔
1107
                        Status:        k8sv1.ConditionTrue,
1✔
1108
                }
1✔
1109
                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
1110
        case !channelConnected:
1✔
1111
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAgentConnected)
1✔
1112
        }
1113

1114
        if condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected) {
2✔
1115
                client, err := c.getLauncherClient(vmi)
1✔
1116
                if err != nil {
1✔
1117
                        return err
×
1118
                }
×
1119

1120
                guestInfo, err := client.GetGuestInfo()
1✔
1121
                if err != nil {
1✔
1122
                        return err
×
1123
                }
×
1124

1125
                var supported = false
1✔
1126
                var reason = ""
1✔
1127

1✔
1128
                // For current versions, virt-launcher's supported commands will always contain data.
1✔
1129
                // For backwards compatibility: during upgrade from a previous version of KubeVirt,
1✔
1130
                // virt-launcher might not provide any supported commands. If the list of supported
1✔
1131
                // commands is empty, fall back to previous behavior.
1✔
1132
                if len(guestInfo.SupportedCommands) > 0 {
1✔
1133
                        supported, reason = isGuestAgentSupported(vmi, guestInfo.SupportedCommands)
×
1134
                        log.Log.V(3).Object(vmi).Info(reason)
×
1135
                } else {
1✔
1136
                        for _, version := range c.clusterConfig.GetSupportedAgentVersions() {
2✔
1137
                                supported = supported || regexp.MustCompile(version).MatchString(guestInfo.GAVersion)
1✔
1138
                        }
1✔
1139
                        if !supported {
2✔
1140
                                reason = fmt.Sprintf("Guest agent version '%s' is not supported", guestInfo.GAVersion)
1✔
1141
                        }
1✔
1142
                }
1143

1144
                if !supported {
2✔
1145
                        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent) {
2✔
1146
                                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
1147
                                        Type:          v1.VirtualMachineInstanceUnsupportedAgent,
1✔
1148
                                        LastProbeTime: metav1.Now(),
1✔
1149
                                        Status:        k8sv1.ConditionTrue,
1✔
1150
                                        Reason:        reason,
1✔
1151
                                }
1✔
1152
                                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
1153
                        }
1✔
1154
                } else {
×
1155
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent)
×
1156
                }
×
1157

1158
        }
1159
        return nil
1✔
1160
}
1161

1162
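// updatePausedConditions adds or removes the Paused condition depending on whether the
// domain is currently reported as paused and for what reason.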
func (c *VirtualMachineController) updatePausedConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1163

1✔
1164
        // Update paused condition in case VMI was paused / unpaused
1✔
1165
        if domain != nil && domain.Status.Status == api.Paused {
2✔
1166
                if !condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
2✔
1167
                        reason := domain.Status.Reason
1✔
1168
                        if c.isVMIPausedDuringMigration(vmi) {
1✔
1169
                                reason = api.ReasonPausedMigration
×
1170
                        }
×
1171
                        calculatePausedCondition(vmi, reason)
1✔
1172
                }
1173
        } else if condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
1✔
1174
                log.Log.Object(vmi).V(3).Info("Removing paused condition")
×
1175
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstancePaused)
×
1176
        }
×
1177
}
1178

1179
func dumpTargetFile(vmiName, volName string) string {
1✔
1180
        targetFileName := fmt.Sprintf("%s-%s-%s.memory.dump", vmiName, volName, time.Now().Format("20060102-150405"))
1✔
1181
        return targetFileName
1✔
1182
}
1✔
1183

1184
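// updateMemoryDumpInfo advances the memory dump volume status through its phases
// (mounted -> in progress -> completed/failed) based on the memory dump metadata
// reported by the domain. It returns the updated volume status and whether the
// status needs to be refreshed.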
func (c *VirtualMachineController) updateMemoryDumpInfo(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, domain *api.Domain) (v1.VolumeStatus, bool) {
1✔
1185
        needsRefresh := false
1✔
1186
        switch volumeStatus.Phase {
1✔
1187
        case v1.HotplugVolumeMounted:
1✔
1188
                needsRefresh = true
1✔
1189
                log.Log.Object(vmi).V(3).Infof("Memory dump volume %s attached, marking it in progress", volumeStatus.Name)
1✔
1190
                volumeStatus.Phase = v1.MemoryDumpVolumeInProgress
1✔
1191
                volumeStatus.Message = fmt.Sprintf("Memory dump Volume %s is attached, getting memory dump", volumeStatus.Name)
1✔
1192
                volumeStatus.Reason = VolumeMountedToPodReason
1✔
1193
                volumeStatus.MemoryDumpVolume.TargetFileName = dumpTargetFile(vmi.Name, volumeStatus.Name)
1✔
1194
        case v1.MemoryDumpVolumeInProgress:
1✔
1195
                memoryDumpMetadata := domain.Spec.Metadata.KubeVirt.MemoryDump
1✔
1196
                if memoryDumpMetadata == nil || memoryDumpMetadata.FileName != volumeStatus.MemoryDumpVolume.TargetFileName {
2✔
1197
                        // memory dump wasn't triggered yet
1✔
1198
                        return volumeStatus, needsRefresh
1✔
1199
                }
1✔
1200
                needsRefresh = true
1✔
1201
                if memoryDumpMetadata.StartTimestamp != nil {
2✔
1202
                        volumeStatus.MemoryDumpVolume.StartTimestamp = memoryDumpMetadata.StartTimestamp
1✔
1203
                }
1✔
1204
                if memoryDumpMetadata.EndTimestamp != nil && memoryDumpMetadata.Failed {
2✔
1205
                        log.Log.Object(vmi).Errorf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
1206
                        volumeStatus.Message = fmt.Sprintf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
1207
                        volumeStatus.Phase = v1.MemoryDumpVolumeFailed
1✔
1208
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
1209
                } else if memoryDumpMetadata.Completed {
3✔
1210
                        log.Log.Object(vmi).V(3).Infof("Marking memory dump to volume %s has completed", volumeStatus.Name)
1✔
1211
                        volumeStatus.Phase = v1.MemoryDumpVolumeCompleted
1✔
1212
                        volumeStatus.Message = fmt.Sprintf("Memory dump to Volume %s has completed successfully", volumeStatus.Name)
1✔
1213
                        volumeStatus.Reason = VolumeReadyReason
1✔
1214
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
1215
                }
1✔
1216
        }
1217

1218
        return volumeStatus, needsRefresh
1✔
1219
}
1220

1221
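// updateFSFreezeStatus mirrors the domain's filesystem freeze status into the VMI
// status, clearing it once the guest filesystems are thawed.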
func (c *VirtualMachineController) updateFSFreezeStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
1222

1✔
1223
        if domain == nil || domain.Status.FSFreezeStatus.Status == "" {
2✔
1224
                return
1✔
1225
        }
1✔
1226

1227
        if domain.Status.FSFreezeStatus.Status == api.FSThawed {
2✔
1228
                vmi.Status.FSFreezeStatus = ""
1✔
1229
        } else {
2✔
1230
                vmi.Status.FSFreezeStatus = domain.Status.FSFreezeStatus.Status
1✔
1231
        }
1✔
1232

1233
}
1234

1235
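// IsoGuestVolumePath returns the path of the ISO backing the given config-style volume
// inside the launcher pod, or an empty string for unsupported volume types. For example,
// a cloudInitNoCloud volume resolves to
// /var/run/kubevirt-ephemeral-disks/cloud-init-data/<namespace>/<name>/noCloud.iso.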
func IsoGuestVolumePath(namespace, name string, volume *v1.Volume) string {
×
1236
        const basepath = "/var/run"
×
1237
        switch {
×
1238
        case volume.CloudInitNoCloud != nil:
×
1239
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "noCloud.iso")
×
1240
        case volume.CloudInitConfigDrive != nil:
×
1241
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "configdrive.iso")
×
1242
        case volume.ConfigMap != nil:
×
1243
                return config.GetConfigMapDiskPath(volume.Name)
×
1244
        case volume.DownwardAPI != nil:
×
1245
                return config.GetDownwardAPIDiskPath(volume.Name)
×
1246
        case volume.Secret != nil:
×
1247
                return config.GetSecretDiskPath(volume.Name)
×
1248
        case volume.ServiceAccount != nil:
×
1249
                return config.GetServiceAccountDiskPath()
×
1250
        case volume.Sysprep != nil:
×
1251
                return config.GetSysprepDiskPath(volume.Name)
×
1252
        default:
×
1253
                return ""
×
1254
        }
1255
}
1256

1257
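// updateIsoSizeStatus records the on-disk size of each config/cloud-init ISO in the
// VMI's volume status. It only applies to running VMIs whose active pod on this node
// can be resolved and isolated.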
func (c *VirtualMachineController) updateIsoSizeStatus(vmi *v1.VirtualMachineInstance) {
1✔
1258
        var podUID string
1✔
1259
        if vmi.Status.Phase != v1.Running {
2✔
1260
                return
1✔
1261
        }
1✔
1262

1263
        for k, v := range vmi.Status.ActivePods {
2✔
1264
                if v == vmi.Status.NodeName {
2✔
1265
                        podUID = string(k)
1✔
1266
                        break
1✔
1267
                }
1268
        }
1269
        if podUID == "" {
2✔
1270
                log.DefaultLogger().Warningf("failed to find pod UID for VMI %s", vmi.Name)
1✔
1271
                return
1✔
1272
        }
1✔
1273

1274
        volumes := make(map[string]v1.Volume)
1✔
1275
        for _, volume := range vmi.Spec.Volumes {
1✔
1276
                volumes[volume.Name] = volume
×
1277
        }
×
1278

1279
        for _, disk := range vmi.Spec.Domain.Devices.Disks {
1✔
1280
                volume, ok := volumes[disk.Name]
×
1281
                if !ok {
×
1282
                        log.DefaultLogger().Warningf("No matching volume with name %s found", disk.Name)
×
1283
                        continue
×
1284
                }
1285

1286
                volPath := IsoGuestVolumePath(vmi.Namespace, vmi.Name, &volume)
×
1287
                if volPath == "" {
×
1288
                        continue
×
1289
                }
1290

1291
                res, err := c.podIsolationDetector.Detect(vmi)
×
1292
                if err != nil {
×
1293
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
1294
                        continue
×
1295
                }
1296

1297
                rootPath, err := res.MountRoot()
×
1298
                if err != nil {
×
1299
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
1300
                        continue
×
1301
                }
1302

1303
                safeVolPath, err := rootPath.AppendAndResolveWithRelativeRoot(volPath)
×
1304
                if err != nil {
×
1305
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
1306
                        continue
×
1307
                }
1308
                fileInfo, err := safepath.StatAtNoFollow(safeVolPath)
×
1309
                if err != nil {
×
1310
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
1311
                        continue
×
1312
                }
1313

1314
                for i := range vmi.Status.VolumeStatus {
×
1315
                        if vmi.Status.VolumeStatus[i].Name == volume.Name {
×
1316
                                vmi.Status.VolumeStatus[i].Size = fileInfo.Size()
×
1317
                                continue
×
1318
                        }
1319
                }
1320
        }
1321
}
1322

1323
func (c *VirtualMachineController) updateSELinuxContext(vmi *v1.VirtualMachineInstance) error {
1✔
1324
        _, present, err := selinux.NewSELinux()
1✔
1325
        if err != nil {
2✔
1326
                return err
1✔
1327
        }
1✔
1328
        if present {
×
1329
                context, err := selinux.GetVirtLauncherContext(vmi)
×
1330
                if err != nil {
×
1331
                        return err
×
1332
                }
×
1333
                vmi.Status.SelinuxContext = context
×
1334
        } else {
×
1335
                vmi.Status.SelinuxContext = "none"
×
1336
        }
×
1337

1338
        return nil
×
1339
}
1340

1341
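// updateVMIStatusFromDomain refreshes the VMI status fields derived from the libvirt
// domain: ISO sizes, SELinux context, migration progress, guest info, volume statuses,
// filesystem freeze state, machine type, memory information and network status.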
func (c *VirtualMachineController) updateVMIStatusFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
1342
        c.updateIsoSizeStatus(vmi)
1✔
1343
        err := c.updateSELinuxContext(vmi)
1✔
1344
        if err != nil {
2✔
1345
                log.Log.Reason(err).Errorf("couldn't find the SELinux context for %s", vmi.Name)
1✔
1346
        }
1✔
1347
        c.setMigrationProgressStatus(vmi, domain)
1✔
1348
        c.updateGuestInfoFromDomain(vmi, domain)
1✔
1349
        c.updateVolumeStatusesFromDomain(vmi, domain)
1✔
1350
        c.updateFSFreezeStatus(vmi, domain)
1✔
1351
        c.updateMachineType(vmi, domain)
1✔
1352
        if err = c.updateMemoryInfo(vmi, domain); err != nil {
1✔
1353
                return err
×
1354
        }
×
1355
        err = c.netStat.UpdateStatus(vmi, domain)
1✔
1356
        return err
1✔
1357
}
1358

1359
func (c *VirtualMachineController) updateVMIConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
1✔
1360
        c.updateAccessCredentialConditions(vmi, domain, condManager)
1✔
1361
        c.updateLiveMigrationConditions(vmi, condManager)
1✔
1362
        err := c.updateGuestAgentConditions(vmi, domain, condManager)
1✔
1363
        if err != nil {
1✔
1364
                return err
×
1365
        }
×
1366
        c.updatePausedConditions(vmi, domain, condManager)
1✔
1367

1✔
1368
        return nil
1✔
1369
}
1370

1371
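// updateVMIStatus recalculates the VMI status (phase, conditions, checksums) from the
// domain and the sync error, and issues an update call only when the status changed.
// Final VMIs and VMIs owned by another node are skipped, and freshly migrated domains
// are handed off to the migration-source status update.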
func (c *VirtualMachineController) updateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain, syncError error) (err error) {
1✔
1372
        condManager := controller.NewVirtualMachineInstanceConditionManager()
1✔
1373

1✔
1374
        // Don't update the VirtualMachineInstance if it is already in a final state
1✔
1375
        if origVMI.IsFinal() {
2✔
1376
                return nil
1✔
1377
        } else if origVMI.Status.NodeName != "" && origVMI.Status.NodeName != c.host {
3✔
1378
                // Only update the VMI's phase if this node owns the VMI.
1✔
1379
                // not owned by this host, likely the result of a migration
1✔
1380
                return nil
1✔
1381
        } else if domainMigrated(domain) {
3✔
1382
                return c.migrationSourceUpdateVMIStatus(origVMI, domain)
1✔
1383
        }
1✔
1384

1385
        vmi := origVMI.DeepCopy()
1✔
1386
        oldStatus := *vmi.Status.DeepCopy()
1✔
1387

1✔
1388
        // Update VMI status fields based on what is reported on the domain
1✔
1389
        err = c.updateVMIStatusFromDomain(vmi, domain)
1✔
1390
        if err != nil {
1✔
1391
                return err
×
1392
        }
×
1393

1394
        // Calculate the new VirtualMachineInstance state based on what libvirt reported
1395
        err = c.setVmPhaseForStatusReason(domain, vmi)
1✔
1396
        if err != nil {
1✔
1397
                return err
×
1398
        }
×
1399

1400
        // Update conditions on VMI Status
1401
        err = c.updateVMIConditions(vmi, domain, condManager)
1✔
1402
        if err != nil {
1✔
1403
                return err
×
1404
        }
×
1405

1406
        // Store containerdisks and kernelboot checksums
1407
        if err := c.updateChecksumInfo(vmi, syncError); err != nil {
1✔
1408
                return err
×
1409
        }
×
1410

1411
        // Handle sync error
1412
        handleSyncError(vmi, condManager, syncError)
1✔
1413

1✔
1414
        controller.SetVMIPhaseTransitionTimestamp(origVMI, vmi)
1✔
1415

1✔
1416
        // Only issue vmi update if status has changed
1✔
1417
        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
2✔
1418
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
1419
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
1420
                _, err = c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
1✔
1421
                if err != nil {
2✔
1422
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
1✔
1423
                        return err
1✔
1424
                }
1✔
1425
        }
1426

1427
        // Record an event on the VMI when the VMI's phase changes
1428
        if oldStatus.Phase != vmi.Status.Phase {
2✔
1429
                c.recordPhaseChangeEvent(vmi)
1✔
1430
        }
1✔
1431

1432
        return nil
1✔
1433
}
1434

1435
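// handleSyncError marks the VMI as Failed for critical network, secure-boot and
// irrecoverable launcher errors, and records the sync failure as a condition.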
func handleSyncError(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager, syncError error) {
1✔
1436
        var criticalNetErr *neterrors.CriticalNetworkError
1✔
1437
        if goerror.As(syncError, &criticalNetErr) {
2✔
1438
                log.Log.Errorf("virt-launcher crashed due to a network error. Updating VMI %s status to Failed", vmi.Name)
1✔
1439
                vmi.Status.Phase = v1.Failed
1✔
1440
        }
1✔
1441
        if _, ok := syncError.(*virtLauncherCriticalSecurebootError); ok {
1✔
1442
                log.Log.Errorf("virt-launcher does not support the Secure Boot setting. Updating VMI %s status to Failed", vmi.Name)
×
1443
                vmi.Status.Phase = v1.Failed
×
1444
        }
×
1445

1446
        if _, ok := syncError.(*vmiIrrecoverableError); ok {
1✔
1447
                log.Log.Errorf("virt-launcher reached an irrecoverable error. Updating VMI %s status to Failed", vmi.Name)
×
1448
                vmi.Status.Phase = v1.Failed
×
1449
        }
×
1450
        condManager.CheckFailure(vmi, syncError, "Synchronizing with the Domain failed.")
1✔
1451
}
1452

1453
func (c *VirtualMachineController) recordPhaseChangeEvent(vmi *v1.VirtualMachineInstance) {
1✔
1454
        switch vmi.Status.Phase {
1✔
1455
        case v1.Running:
1✔
1456
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Started.String(), VMIStarted)
1✔
1457
        case v1.Succeeded:
×
1458
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Stopped.String(), VMIShutdown)
×
1459
        case v1.Failed:
1✔
1460
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Stopped.String(), VMICrashed)
1✔
1461
        }
1462
}
1463

1464
func calculatePausedCondition(vmi *v1.VirtualMachineInstance, reason api.StateChangeReason) {
1✔
1465
        now := metav1.NewTime(time.Now())
1✔
1466
        switch reason {
1✔
1467
        case api.ReasonPausedMigration:
×
1468
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
×
1469
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1470
                        Type:               v1.VirtualMachineInstancePaused,
×
1471
                        Status:             k8sv1.ConditionTrue,
×
1472
                        LastProbeTime:      now,
×
1473
                        LastTransitionTime: now,
×
1474
                        Reason:             "PausedByMigrationMonitor",
×
1475
                        Message:            "VMI was paused by the migration monitor",
×
1476
                })
×
1477
        case api.ReasonPausedUser:
1✔
1478
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
1✔
1479
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
1✔
1480
                        Type:               v1.VirtualMachineInstancePaused,
1✔
1481
                        Status:             k8sv1.ConditionTrue,
1✔
1482
                        LastProbeTime:      now,
1✔
1483
                        LastTransitionTime: now,
1✔
1484
                        Reason:             "PausedByUser",
1✔
1485
                        Message:            "VMI was paused by user",
1✔
1486
                })
1✔
1487
        case api.ReasonPausedIOError:
×
1488
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
×
1489
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1490
                        Type:               v1.VirtualMachineInstancePaused,
×
1491
                        Status:             k8sv1.ConditionTrue,
×
1492
                        LastProbeTime:      now,
×
1493
                        LastTransitionTime: now,
×
1494
                        Reason:             "PausedIOError",
×
1495
                        Message:            "VMI was paused, low-level IO error detected",
×
1496
                })
×
1497
        default:
×
1498
                log.Log.Object(vmi).V(3).Infof("Domain is paused for unknown reason, %s", reason)
×
1499
        }
1500
}
1501

1502
func newNonMigratableCondition(msg string, reason string) *v1.VirtualMachineInstanceCondition {
1✔
1503
        return &v1.VirtualMachineInstanceCondition{
1✔
1504
                Type:    v1.VirtualMachineInstanceIsMigratable,
1✔
1505
                Status:  k8sv1.ConditionFalse,
1✔
1506
                Message: msg,
1✔
1507
                Reason:  reason,
1✔
1508
        }
1✔
1509
}
1✔
1510

1511
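// calculateLiveMigrationCondition evaluates all live-migration blockers (disks,
// interfaces, CPU model, PCI host devices, SEV, persistent reservation, TSC frequency
// and hyperv passthrough) and returns the resulting IsMigratable condition together
// with a flag indicating whether a block migration would be required.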
func (c *VirtualMachineController) calculateLiveMigrationCondition(vmi *v1.VirtualMachineInstance) (*v1.VirtualMachineInstanceCondition, bool) {
1✔
1512
        isBlockMigration, err := c.checkVolumesForMigration(vmi)
1✔
1513
        if err != nil {
2✔
1514
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonDisksNotMigratable), isBlockMigration
1✔
1515
        }
1✔
1516

1517
        err = c.checkNetworkInterfacesForMigration(vmi)
1✔
1518
        if err != nil {
2✔
1519
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonInterfaceNotMigratable), isBlockMigration
1✔
1520
        }
1✔
1521

1522
        if err := c.isHostModelMigratable(vmi); err != nil {
1✔
1523
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonCPUModeNotMigratable), isBlockMigration
×
1524
        }
×
1525

1526
        if vmiContainsPCIHostDevice(vmi) {
2✔
1527
                return newNonMigratableCondition("VMI uses a PCI host devices", v1.VirtualMachineInstanceReasonHostDeviceNotMigratable), isBlockMigration
1✔
1528
        }
1✔
1529

1530
        if util.IsSEVVMI(vmi) {
2✔
1531
                return newNonMigratableCondition("VMI uses SEV", v1.VirtualMachineInstanceReasonSEVNotMigratable), isBlockMigration
1✔
1532
        }
1✔
1533

1534
        if reservation.HasVMIPersistentReservation(vmi) {
2✔
1535
                return newNonMigratableCondition("VMI uses SCSI persitent reservation", v1.VirtualMachineInstanceReasonPRNotMigratable), isBlockMigration
1✔
1536
        }
1✔
1537

1538
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
2✔
1539
                return newNonMigratableCondition(tscRequirement.Reason, v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable), isBlockMigration
1✔
1540
        }
1✔
1541

1542
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
2✔
1543
                return newNonMigratableCondition("VMI uses hyperv passthrough", v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable), isBlockMigration
1✔
1544
        }
1✔
1545

1546
        return &v1.VirtualMachineInstanceCondition{
1✔
1547
                Type:   v1.VirtualMachineInstanceIsMigratable,
1✔
1548
                Status: k8sv1.ConditionTrue,
1✔
1549
        }, isBlockMigration
1✔
1550
}
1551

1552
func vmiContainsPCIHostDevice(vmi *v1.VirtualMachineInstance) bool {
1✔
1553
        return len(vmi.Spec.Domain.Devices.HostDevices) > 0 || len(vmi.Spec.Domain.Devices.GPUs) > 0
1✔
1554
}
1✔
1555

1556
type multipleNonMigratableCondition struct {
1557
        reasons []string
1558
        msgs    []string
1559
}
1560

1561
func newMultipleNonMigratableCondition() *multipleNonMigratableCondition {
1✔
1562
        return &multipleNonMigratableCondition{}
1✔
1563
}
1✔
1564

1565
func (cond *multipleNonMigratableCondition) addNonMigratableCondition(reason, msg string) {
1✔
1566
        cond.reasons = append(cond.reasons, reason)
1✔
1567
        cond.msgs = append(cond.msgs, msg)
1✔
1568
}
1✔
1569

1570
func (cond *multipleNonMigratableCondition) String() string {
1✔
1571
        var buffer bytes.Buffer
1✔
1572
        for i, c := range cond.reasons {
2✔
1573
                if i > 0 {
1✔
1574
                        buffer.WriteString(", ")
×
1575
                }
×
1576
                buffer.WriteString(fmt.Sprintf("%s: %s", c, cond.msgs[i]))
1✔
1577
        }
1578
        return buffer.String()
1✔
1579
}
1580

1581
func (cond *multipleNonMigratableCondition) generateStorageLiveMigrationCondition() *v1.VirtualMachineInstanceCondition {
1✔
1582
        switch len(cond.reasons) {
1✔
1583
        case 0:
1✔
1584
                return &v1.VirtualMachineInstanceCondition{
1✔
1585
                        Type:   v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1586
                        Status: k8sv1.ConditionTrue,
1✔
1587
                }
1✔
1588
        default:
1✔
1589
                return &v1.VirtualMachineInstanceCondition{
1✔
1590
                        Type:    v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1591
                        Status:  k8sv1.ConditionFalse,
1✔
1592
                        Message: cond.String(),
1✔
1593
                        Reason:  v1.VirtualMachineInstanceReasonNotMigratable,
1✔
1594
                }
1✔
1595
        }
1596
}
1597

1598
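// calculateLiveStorageMigrationCondition aggregates every non-migratable reason that
// also applies to storage live migration into a single IsStorageLiveMigratable condition.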
func (c *VirtualMachineController) calculateLiveStorageMigrationCondition(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstanceCondition {
1✔
1599
        multiCond := newMultipleNonMigratableCondition()
1✔
1600

1✔
1601
        if err := c.checkNetworkInterfacesForMigration(vmi); err != nil {
2✔
1602
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonInterfaceNotMigratable, err.Error())
1✔
1603
        }
1✔
1604

1605
        if err := c.isHostModelMigratable(vmi); err != nil {
1✔
1606
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonCPUModeNotMigratable, err.Error())
×
1607
        }
×
1608

1609
        if vmiContainsPCIHostDevice(vmi) {
1✔
1610
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHostDeviceNotMigratable, "VMI uses a PCI host device")
×
1611
        }
×
1612

1613
        if util.IsSEVVMI(vmi) {
1✔
1614
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonSEVNotMigratable, "VMI uses SEV")
×
1615
        }
×
1616

1617
        if reservation.HasVMIPersistentReservation(vmi) {
1✔
1618
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonPRNotMigratable, "VMI uses SCSI persistent reservation")
×
1619
        }
×
1620

1621
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
1✔
1622
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable, tscRequirement.Reason)
×
1623
        }
×
1624

1625
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
1✔
1626
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable, "VMI uses hyperv passthrough")
×
1627
        }
×
1628

1629
        return multiCond.generateStorageLiveMigrationCondition()
1✔
1630
}
1631

1632
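// Run starts the device manager, downward metrics manager, heartbeat and worker
// goroutines, re-enqueues keys for stale domains that no longer have a VMI, and
// blocks until stopCh is closed.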
func (c *VirtualMachineController) Run(threadiness int, stopCh chan struct{}) {
×
1633
        defer c.queue.ShutDown()
×
1634
        log.Log.Info("Starting virt-handler controller.")
×
1635

×
1636
        go c.deviceManagerController.Run(stopCh)
×
1637

×
1638
        go c.downwardMetricsManager.Run(stopCh)
×
1639

×
1640
        cache.WaitForCacheSync(stopCh, c.hasSynced)
×
1641

×
1642
        // queue keys for previous Domains on the host that no longer exist
×
1643
        // in the cache. This ensures we perform local cleanup of deleted VMs.
×
1644
        for _, domain := range c.domainStore.List() {
×
1645
                d := domain.(*api.Domain)
×
1646
                vmiRef := v1.NewVMIReferenceWithUUID(
×
1647
                        d.ObjectMeta.Namespace,
×
1648
                        d.ObjectMeta.Name,
×
1649
                        d.Spec.Metadata.KubeVirt.UID)
×
1650

×
1651
                key := controller.VirtualMachineInstanceKey(vmiRef)
×
1652

×
1653
                _, exists, _ := c.vmiSourceStore.GetByKey(key)
×
1654
                if !exists {
×
1655
                        c.queue.Add(key)
×
1656
                }
×
1657
        }
1658

1659
        heartBeatDone := c.heartBeat.Run(c.heartBeatInterval, stopCh)
×
1660

×
1661
        go c.ioErrorRetryManager.Run(stopCh)
×
1662

×
1663
        // Start the actual work
×
1664
        for i := 0; i < threadiness; i++ {
×
1665
                go wait.Until(c.runWorker, time.Second, stopCh)
×
1666
        }
×
1667

1668
        <-heartBeatDone
×
1669
        <-stopCh
×
1670
        log.Log.Info("Stopping virt-handler controller.")
×
1671
}
1672

1673
func (c *VirtualMachineController) runWorker() {
×
1674
        for c.Execute() {
×
1675
        }
×
1676
}
1677

1678
func (c *VirtualMachineController) Execute() bool {
1✔
1679
        key, quit := c.queue.Get()
1✔
1680
        if quit {
1✔
1681
                return false
×
1682
        }
×
1683
        defer c.queue.Done(key)
1✔
1684
        if err := c.execute(key); err != nil {
2✔
1685
                log.Log.Reason(err).Infof("re-enqueuing VirtualMachineInstance %v", key)
1✔
1686
                c.queue.AddRateLimited(key)
1✔
1687
        } else {
2✔
1688
                log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
1✔
1689
                c.queue.Forget(key)
1✔
1690
        }
1✔
1691
        return true
1✔
1692
}
1693

1694
func (c *VirtualMachineController) getVMIFromCache(key string) (vmi *v1.VirtualMachineInstance, exists bool, err error) {
1✔
1695

1✔
1696
        // Fetch the latest Vm state from cache
1✔
1697
        obj, exists, err := c.vmiSourceStore.GetByKey(key)
1✔
1698
        if err != nil {
1✔
1699
                return nil, false, err
×
1700
        }
×
1701

1702
        if !exists {
2✔
1703
                obj, exists, err = c.vmiTargetStore.GetByKey(key)
1✔
1704
                if err != nil {
1✔
1705
                        return nil, false, err
×
1706
                }
×
1707
        }
1708

1709
        // Retrieve the VirtualMachineInstance
1710
        if !exists {
2✔
1711
                namespace, name, err := cache.SplitMetaNamespaceKey(key)
1✔
1712
                if err != nil {
2✔
1713
                        // TODO log and don't retry
1✔
1714
                        return nil, false, err
1✔
1715
                }
1✔
1716
                vmi = v1.NewVMIReferenceFromNameWithNS(namespace, name)
1✔
1717
        } else {
1✔
1718
                vmi = obj.(*v1.VirtualMachineInstance)
1✔
1719
        }
1✔
1720
        return vmi, exists, nil
1✔
1721
}
1722

1723
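// getDomainFromCache looks up the domain for the given key and treats a set
// DeletionTimestamp as "not existing", while still returning the cached UID.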
func (c *VirtualMachineController) getDomainFromCache(key string) (domain *api.Domain, exists bool, cachedUID types.UID, err error) {
1✔
1724

1✔
1725
        obj, exists, err := c.domainStore.GetByKey(key)
1✔
1726

1✔
1727
        if err != nil {
1✔
1728
                return nil, false, "", err
×
1729
        }
×
1730

1731
        if exists {
2✔
1732
                domain = obj.(*api.Domain)
1✔
1733
                cachedUID = domain.Spec.Metadata.KubeVirt.UID
1✔
1734

1✔
1735
                // We're using the DeletionTimestamp to signify that the
1✔
1736
                // Domain is deleted rather than sending the DELETE watch event.
1✔
1737
                if domain.ObjectMeta.DeletionTimestamp != nil {
1✔
1738
                        exists = false
×
1739
                        domain = nil
×
1740
                }
×
1741
        }
1742
        return domain, exists, cachedUID, nil
1✔
1743
}
1744

1745
func (c *VirtualMachineController) migrationOrphanedSourceNodeExecute(vmi *v1.VirtualMachineInstance, domainExists bool) error {
×
1746

×
1747
        if domainExists {
×
1748
                err := c.processVmDelete(vmi)
×
1749
                if err != nil {
×
1750
                        return err
×
1751
                }
×
1752
                // we can perform the cleanup immediately after
1753
                // the successful delete here because we don't have
1754
                // to report the deletion results on the VMI status
1755
                // in this case.
1756
                err = c.processVmCleanup(vmi)
×
1757
                if err != nil {
×
1758
                        return err
×
1759
                }
×
1760
        } else {
×
1761
                err := c.processVmCleanup(vmi)
×
1762
                if err != nil {
×
1763
                        return err
×
1764
                }
×
1765
        }
1766
        return nil
×
1767
}
1768

1769
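// migrationTargetExecute prepares this node as a migration target: it aborts and cleans
// up when the VMI is gone or final, cleans up stale launcher clients, and otherwise
// prepares the target pod and updates the VMI's migration target status.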
func (c *VirtualMachineController) migrationTargetExecute(vmi *v1.VirtualMachineInstance, vmiExists bool, domain *api.Domain) error {
1✔
1770

1✔
1771
        // set to true when preparation of migration target should be aborted.
1✔
1772
        shouldAbort := false
1✔
1773
        // set to true when VirtualMachineInstance migration target needs to be prepared
1✔
1774
        shouldUpdate := false
1✔
1775
        // set to true when the current migration target has exited and needs to be cleaned up.
1✔
1776
        shouldCleanUp := false
1✔
1777

1✔
1778
        if vmiExists && vmi.IsRunning() {
2✔
1779
                shouldUpdate = true
1✔
1780
        }
1✔
1781

1782
        if !vmiExists || vmi.DeletionTimestamp != nil {
2✔
1783
                shouldAbort = true
1✔
1784
        } else if vmi.IsFinal() {
2✔
1785
                shouldAbort = true
×
1786
        } else if c.hasStaleClientConnections(vmi) {
2✔
1787
                // if stale client exists, force cleanup.
1✔
1788
                // This can happen as a result of a previously
1✔
1789
                // failed attempt to migrate the vmi to this node.
1✔
1790
                shouldCleanUp = true
1✔
1791
        }
1✔
1792

1793
        domainExists := domain != nil
1✔
1794
        if shouldAbort {
2✔
1795
                if domainExists {
1✔
1796
                        err := c.processVmDelete(vmi)
×
1797
                        if err != nil {
×
1798
                                return err
×
1799
                        }
×
1800
                }
1801

1802
                err := c.processVmCleanup(vmi)
1✔
1803
                if err != nil {
1✔
1804
                        return err
×
1805
                }
×
1806
        } else if shouldCleanUp {
2✔
1807
                log.Log.Object(vmi).Infof("Stale client for migration target found. Cleaning up.")
1✔
1808

1✔
1809
                err := c.processVmCleanup(vmi)
1✔
1810
                if err != nil {
1✔
1811
                        return err
×
1812
                }
×
1813

1814
                // if we're still the migration target, we need to keep trying until the migration fails.
1815
                // it's possible we're simply waiting for another target pod to come online.
1816
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
1817

1818
        } else if shouldUpdate {
2✔
1819
                log.Log.Object(vmi).Info("Processing vmi migration target update")
1✔
1820

1✔
1821
                // prepare the POD for the migration
1✔
1822
                err := c.processVmUpdate(vmi, domain)
1✔
1823
                if err != nil {
1✔
1824
                        return err
×
1825
                }
×
1826

1827
                err = c.migrationTargetUpdateVMIStatus(vmi, domain)
1✔
1828
                if err != nil {
1✔
1829
                        return err
×
1830
                }
×
1831
        }
1832

1833
        return nil
1✔
1834
}
1835

1836
// Determine if gracefulShutdown has been triggered by virt-launcher
1837
func (c *VirtualMachineController) hasGracefulShutdownTrigger(domain *api.Domain) bool {
1✔
1838
        if domain == nil {
2✔
1839
                return false
1✔
1840
        }
1✔
1841
        gracePeriod := domain.Spec.Metadata.KubeVirt.GracePeriod
1✔
1842

1✔
1843
        return gracePeriod != nil &&
1✔
1844
                gracePeriod.MarkedForGracefulShutdown != nil &&
1✔
1845
                *gracePeriod.MarkedForGracefulShutdown
1✔
1846
}
1847

1848
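// defaultExecute is the main reconcile path for a VMI on this node. It derives the
// required action (shutdown, destroy, delete, cleanup, update or no-op) from the
// current VMI and domain state, executes it, and then updates the VMI status.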
func (c *VirtualMachineController) defaultExecute(key string,
1849
        vmi *v1.VirtualMachineInstance,
1850
        vmiExists bool,
1851
        domain *api.Domain,
1852
        domainExists bool) error {
1✔
1853

1✔
1854
        // set to true when domain needs to be shutdown.
1✔
1855
        shouldShutdown := false
1✔
1856
        // set to true when domain needs to be removed from libvirt.
1✔
1857
        shouldDelete := false
1✔
1858
        // optimization: set to true when processing an already deleted domain.
1✔
1859
        shouldCleanUp := false
1✔
1860
        // set to true when VirtualMachineInstance is active or about to become active.
1✔
1861
        shouldUpdate := false
1✔
1862
        // set to true to ensure that no updates to the current VirtualMachineInstance state will occur
1✔
1863
        forceIgnoreSync := false
1✔
1864
        // set to true when unrecoverable domain needs to be destroyed non-gracefully.
1✔
1865
        forceShutdownIrrecoverable := false
1✔
1866

1✔
1867
        log.Log.V(3).Infof("Processing event %v", key)
1✔
1868

1✔
1869
        if vmiExists && domainExists {
2✔
1870
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain status: %v, reason: %v", vmi.Status.Phase, domain.Status.Status, domain.Status.Reason)
1✔
1871
        } else if vmiExists {
3✔
1872
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain does not exist", vmi.Status.Phase)
1✔
1873
        } else if domainExists {
3✔
1874
                vmiRef := v1.NewVMIReferenceWithUUID(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name, domain.Spec.Metadata.KubeVirt.UID)
1✔
1875
                log.Log.Object(vmiRef).Infof("VMI does not exist | Domain status: %v, reason: %v", domain.Status.Status, domain.Status.Reason)
1✔
1876
        } else {
1✔
1877
                log.Log.Info("VMI does not exist | Domain does not exist")
×
1878
        }
×
1879

1880
        domainAlive := domainExists &&
1✔
1881
                domain.Status.Status != api.Shutoff &&
1✔
1882
                domain.Status.Status != api.Crashed &&
1✔
1883
                domain.Status.Status != ""
1✔
1884

1✔
1885
        domainMigrated := domainExists && domainMigrated(domain)
1✔
1886
        forceShutdownIrrecoverable = domainExists && domainPausedFailedPostCopy(domain)
1✔
1887

1✔
1888
        gracefulShutdown := c.hasGracefulShutdownTrigger(domain)
1✔
1889
        if gracefulShutdown && vmi.IsRunning() {
1✔
1890
                if domainAlive {
×
1891
                        log.Log.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
×
1892
                        shouldShutdown = true
×
1893
                } else {
×
1894
                        shouldDelete = true
×
1895
                }
×
1896
        }
1897

1898
        // Determine whether removal of the VirtualMachineInstance from the cache should result in deletion.
1899
        if !vmiExists {
2✔
1900
                switch {
1✔
1901
                case domainAlive:
1✔
1902
                        // The VirtualMachineInstance is deleted on the cluster, and domain is alive,
1✔
1903
                        // then shut down the domain.
1✔
1904
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
1✔
1905
                        shouldShutdown = true
1✔
1906
                case domainExists:
1✔
1907
                        // The VirtualMachineInstance is deleted on the cluster, and domain is not alive
1✔
1908
                        // then delete the domain.
1✔
1909
                        log.Log.Object(vmi).V(3).Info("Deleting domain for deleted VirtualMachineInstance object.")
1✔
1910
                        shouldDelete = true
1✔
1911
                default:
×
1912
                        // If neither the domain nor the vmi object exist locally,
×
1913
                        // then ensure any remaining local ephemeral data is cleaned up.
×
1914
                        shouldCleanUp = true
×
1915
                }
1916
        }
1917

1918
        // Determine if VirtualMachineInstance is being deleted.
1919
        if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
2✔
1920
                switch {
1✔
1921
                case domainAlive:
×
1922
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
×
1923
                        shouldShutdown = true
×
1924
                case domainExists:
×
1925
                        log.Log.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
×
1926
                        shouldDelete = true
×
1927
                default:
1✔
1928
                        if vmi.IsFinal() {
1✔
1929
                                shouldCleanUp = true
×
1930
                        }
×
1931
                }
1932
        }
1933

1934
        // Determine if domain needs to be deleted as a result of VirtualMachineInstance
1935
        // shutting down naturally (guest internal invoked shutdown)
1936
        if domainExists && vmiExists && vmi.IsFinal() {
1✔
1937
                log.Log.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
×
1938
                shouldDelete = true
×
1939
        } else if !domainExists && vmiExists && vmi.IsFinal() {
2✔
1940
                log.Log.Object(vmi).V(3).Info("Cleaning up local data for finalized vmi.")
1✔
1941
                shouldCleanUp = true
1✔
1942
        }
1✔
1943

1944
        // Determine if an active (or about to be active) VirtualMachineInstance should be updated.
1945
        if vmiExists && !vmi.IsFinal() {
2✔
1946
                // requiring the phase of the domain and VirtualMachineInstance to be in sync is an
1✔
1947
                // optimization that prevents unnecessary re-processing VMIs during the start flow.
1✔
1948
                phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
1✔
1949
                if err != nil {
1✔
1950
                        return err
×
1951
                }
×
1952
                if vmi.Status.Phase == phase {
2✔
1953
                        shouldUpdate = true
1✔
1954
                }
1✔
1955

1956
                if shouldDelay, delay := c.ioErrorRetryManager.ShouldDelay(string(vmi.UID), func() bool {
2✔
1957
                        return isIOError(shouldUpdate, domainExists, domain)
1✔
1958
                }); shouldDelay {
1✔
1959
                        shouldUpdate = false
×
1960
                        log.Log.Object(vmi).Infof("Delay vm update for %f seconds", delay.Seconds())
×
1961
                        c.queue.AddAfter(key, delay)
×
1962
                }
×
1963
        }
1964

1965
        // NOTE: This must be the last check that occurs before checking the sync booleans!
1966
        //
1967
        // Special logic for domains migrated from a source node.
1968
        // Don't delete/destroy domain until the handoff occurs.
1969
        if domainMigrated {
2✔
1970
                // only allow the sync to occur on the domain once we've done
1✔
1971
                // the node handoff. Otherwise we potentially lose the fact that
1✔
1972
                // the domain migrated because we'll attempt to delete the locally
1✔
1973
                // shut off domain during the sync.
1✔
1974
                if vmiExists &&
1✔
1975
                        !vmi.IsFinal() &&
1✔
1976
                        vmi.DeletionTimestamp == nil &&
1✔
1977
                        vmi.Status.NodeName != "" &&
1✔
1978
                        vmi.Status.NodeName == c.host {
2✔
1979

1✔
1980
                        // If the domain migrated but the VMI still thinks this node
1✔
1981
                        // is the host, force ignore the sync until the VMI's status
1✔
1982
                        // is updated to reflect the node the domain migrated to.
1✔
1983
                        forceIgnoreSync = true
1✔
1984
                }
1✔
1985
        }
1986

1987
        var syncErr error
1✔
1988

1✔
1989
        // Process the VirtualMachineInstance update in this order.
1✔
1990
        // * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
1✔
1991
        // * Cleanup of already shutdown and Deleted VMIs
1✔
1992
        // * Update due to spec change and initial start flow.
1✔
1993
        switch {
1✔
1994
        case forceIgnoreSync:
1✔
1995
                log.Log.Object(vmi).V(3).Info("No update processing required: forced ignore")
1✔
1996
        case shouldShutdown:
1✔
1997
                log.Log.Object(vmi).V(3).Info("Processing shutdown.")
1✔
1998
                syncErr = c.processVmShutdown(vmi, domain)
1✔
1999
        case forceShutdownIrrecoverable:
×
2000
                msg := formatIrrecoverableErrorMessage(domain)
×
2001
                log.Log.Object(vmi).V(3).Infof("Processing a destruction of an irrecoverable domain - %s.", msg)
×
2002
                syncErr = c.processVmDestroy(vmi, domain)
×
2003
                if syncErr == nil {
×
2004
                        syncErr = &vmiIrrecoverableError{msg}
×
2005
                }
×
2006
        case shouldDelete:
1✔
2007
                log.Log.Object(vmi).V(3).Info("Processing deletion.")
1✔
2008
                syncErr = c.processVmDelete(vmi)
1✔
2009
        case shouldCleanUp:
1✔
2010
                log.Log.Object(vmi).V(3).Info("Processing local ephemeral data cleanup for shutdown domain.")
1✔
2011
                syncErr = c.processVmCleanup(vmi)
1✔
2012
        case shouldUpdate:
1✔
2013
                log.Log.Object(vmi).V(3).Info("Processing vmi update")
1✔
2014
                syncErr = c.processVmUpdate(vmi, domain)
1✔
2015
        default:
1✔
2016
                log.Log.Object(vmi).V(3).Info("No update processing required")
1✔
2017
        }
2018

2019
        if syncErr != nil && !vmi.IsFinal() {
2✔
2020
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.SyncFailed.String(), syncErr.Error())
1✔
2021

1✔
2022
                // `syncErr` will be propagated anyway, and it will be logged in `re-enqueueing`
1✔
2023
                // so there is no need to log it twice in hot path without increased verbosity.
1✔
2024
                log.Log.Object(vmi).Reason(syncErr).Error("Synchronizing the VirtualMachineInstance failed.")
1✔
2025
        }
1✔
2026

2027
        // Update the VirtualMachineInstance status, if the VirtualMachineInstance exists
2028
        if vmiExists {
2✔
2029
                if err := c.updateVMIStatus(vmi, domain, syncErr); err != nil {
2✔
2030
                        log.Log.Object(vmi).Reason(err).Error("Updating the VirtualMachineInstance status failed.")
1✔
2031
                        return err
1✔
2032
                }
1✔
2033
        }
2034

2035
        if syncErr != nil {
2✔
2036
                return syncErr
1✔
2037
        }
1✔
2038

2039
        log.Log.Object(vmi).V(3).Info("Synchronization loop succeeded.")
1✔
2040
        return nil
1✔
2041

2042
}
2043

2044
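// execute resolves the VMI and domain for a queue key, recovers the VMI UID from the
// domain or ghost record cache when needed, handles stale domains left over from a
// previous VMI with the same name/namespace, and dispatches to the migration-target,
// orphaned-source or default reconcile path.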
func (c *VirtualMachineController) execute(key string) error {
1✔
2045
        vmi, vmiExists, err := c.getVMIFromCache(key)
1✔
2046
        if err != nil {
2✔
2047
                return err
1✔
2048
        }
1✔
2049

2050
        if !vmiExists {
2✔
2051
                c.vmiExpectations.DeleteExpectations(key)
1✔
2052
        } else if !c.vmiExpectations.SatisfiedExpectations(key) {
2✔
2053
                return nil
×
2054
        }
×
2055

2056
        domain, domainExists, domainCachedUID, err := c.getDomainFromCache(key)
1✔
2057
        if err != nil {
1✔
2058
                return err
×
2059
        }
×
2060

2061
        if !vmiExists && string(domainCachedUID) != "" {
2✔
2062
                // it's possible to discover the UID from cache even if the domain
1✔
2063
                // doesn't technically exist anymore
1✔
2064
                vmi.UID = domainCachedUID
1✔
2065
                log.Log.Object(vmi).Infof("Using cached UID for vmi found in domain cache")
1✔
2066
        }
1✔
2067

2068
        // As a last effort, if the UID still can't be determined attempt
2069
        // to retrieve it from the ghost record
2070
        if string(vmi.UID) == "" {
2✔
2071
                uid := virtcache.LastKnownUIDFromGhostRecordCache(key)
1✔
2072
                if uid != "" {
1✔
2073
                        log.Log.Object(vmi).V(3).Infof("ghost record cache provided %s as UID", uid)
×
2074
                        vmi.UID = uid
×
2075
                }
×
2076
        }
2077

2078
        if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
2✔
2079
                oldVMI := v1.NewVMIReferenceFromNameWithNS(vmi.Namespace, vmi.Name)
1✔
2080
                oldVMI.UID = domain.Spec.Metadata.KubeVirt.UID
1✔
2081
                expired, initialized, err := c.isLauncherClientUnresponsive(oldVMI)
1✔
2082
                if err != nil {
1✔
2083
                        return err
×
2084
                }
×
2085
                // If we found an outdated domain which is also not alive anymore, clean up
2086
                if !initialized {
1✔
2087
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
×
2088
                        return nil
×
2089
                } else if expired {
1✔
2090
                        log.Log.Object(oldVMI).Infof("Detected stale vmi %s that still needs cleanup before new vmi %s with identical name/namespace can be processed", oldVMI.UID, vmi.UID)
×
2091
                        err = c.processVmCleanup(oldVMI)
×
2092
                        if err != nil {
×
2093
                                return err
×
2094
                        }
×
2095
                        // Make sure we re-enqueue the key to ensure this new VMI is processed
2096
                        // after the stale domain is removed
2097
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*5)
×
2098
                }
2099

2100
                return nil
1✔
2101
        }
2102

2103
        // Take different execution paths depending on the state of the migration and the
2104
        // node this is executed on.
2105

2106
        if vmiExists && c.isPreMigrationTarget(vmi) {
2✔
2107
                // 1. PRE-MIGRATION TARGET PREPARATION PATH
1✔
2108
                //
1✔
2109
                // If this node is the target of the vmi's migration, take
1✔
2110
                // a different execute path. The target execute path prepares
1✔
2111
                // the local environment for the migration, but does not
1✔
2112
                // start the VMI
1✔
2113
                return c.migrationTargetExecute(vmi, vmiExists, domain)
1✔
2114
        } else if vmiExists && c.isOrphanedMigrationSource(vmi) {
2✔
2115
                // 3. POST-MIGRATION SOURCE CLEANUP
×
2116
                //
×
2117
                // After a migration, the migrated domain still exists in the old
×
2118
                // source's domain cache. Ensure that any node that isn't currently
×
2119
                // the target or owner of the VMI handles deleting the domain locally.
×
2120
                return c.migrationOrphanedSourceNodeExecute(vmi, domainExists)
×
2121
        }
×
2122
        return c.defaultExecute(key,
1✔
2123
                vmi,
1✔
2124
                vmiExists,
1✔
2125
                domain,
1✔
2126
                domainExists)
1✔
2127

2128
}
2129

2130
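// processVmCleanup tears down all local per-VMI state: migration proxies, downward
// metrics, container disk and hotplug volume mounts, networking, the launcher client
// and finally the cached domain object.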
func (c *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstance) error {
1✔
2131

1✔
2132
        vmiId := string(vmi.UID)
1✔
2133

1✔
2134
        log.Log.Object(vmi).Infof("Performing final local cleanup for vmi with uid %s", vmiId)
1✔
2135

1✔
2136
        c.migrationProxy.StopTargetListener(vmiId)
1✔
2137
        c.migrationProxy.StopSourceListener(vmiId)
1✔
2138

1✔
2139
        c.downwardMetricsManager.StopServer(vmi)
1✔
2140

1✔
2141
        // Unmount container disks and clean up remaining files
1✔
2142
        if err := c.containerDiskMounter.Unmount(vmi); err != nil {
1✔
2143
                return err
×
2144
        }
×
2145

2146
        // UnmountAll does the cleanup on the "best effort" basis: it is
2147
        // safe to pass a nil cgroupManager.
2148
        cgroupManager, _ := getCgroupManager(vmi)
1✔
2149
        if err := c.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
1✔
2150
                return err
×
2151
        }
×
2152

2153
        c.teardownNetwork(vmi)
1✔
2154

1✔
2155
        c.sriovHotplugExecutorPool.Delete(vmi.UID)
1✔
2156

1✔
2157
        // Watch dog file and command client must be the last things removed here
1✔
2158
        if err := c.closeLauncherClient(vmi); err != nil {
1✔
2159
                return err
×
2160
        }
×
2161

2162
        // Remove the domain from cache in the event that we're performing
2163
        // a final cleanup and never received the "DELETE" event. This is
2164
        // possible if the VMI pod goes away before we receive the final domain
2165
        // "DELETE"
2166
        domain := api.NewDomainReferenceFromName(vmi.Namespace, vmi.Name)
1✔
2167
        log.Log.Object(domain).Infof("Removing domain from cache during final cleanup")
1✔
2168
        return c.domainStore.Delete(domain)
1✔
2169
}
2170

2171
func (c *VirtualMachineController) closeLauncherClient(vmi *v1.VirtualMachineInstance) error {

	// UID is required in order to close socket
	if string(vmi.GetUID()) == "" {
		return nil
	}

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists && clientInfo.Client != nil {
		clientInfo.Client.Close()
		close(clientInfo.DomainPipeStopChan)
	}

	virtcache.DeleteGhostRecord(vmi.Namespace, vmi.Name)
	c.launcherClients.Delete(vmi.UID)
	return nil
}

// used by unit tests to add mock clients
func (c *VirtualMachineController) addLauncherClient(vmUID types.UID, info *virtcache.LauncherClientInfo) error {
	c.launcherClients.Store(vmUID, info)
	return nil
}

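// isLauncherClientUnresponsive reports whether the virt-launcher command
// socket for the VMI is unresponsive, and whether the client is considered
// initialized. A missing socket is tolerated for up to three minutes while
// the launcher pod directory is still present on the host.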
func (c *VirtualMachineController) isLauncherClientUnresponsive(vmi *v1.VirtualMachineInstance) (unresponsive bool, initialized bool, err error) {
	var socketFile string

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists {
		if clientInfo.Ready {
			// use cached socket if we previously established a connection
			socketFile = clientInfo.SocketFile
		} else {
			socketFile, err = cmdclient.FindSocketOnHost(vmi)
			if err != nil {
				// socket does not exist, but let's see if the pod is still there
				if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
					// no pod means that waiting for it to initialize makes no sense
					return true, true, nil
				}
				// pod is still there, if there is no socket let's wait for it to become ready
				if clientInfo.NotInitializedSince.Before(time.Now().Add(-3 * time.Minute)) {
					return true, true, nil
				}
				return false, false, nil
			}
			clientInfo.Ready = true
			clientInfo.SocketFile = socketFile
		}
	} else {
		clientInfo := &virtcache.LauncherClientInfo{
			NotInitializedSince: time.Now(),
			Ready:               false,
		}
		c.launcherClients.Store(vmi.UID, clientInfo)
		// attempt to find the socket if the established connection doesn't currently exist.
		socketFile, err = cmdclient.FindSocketOnHost(vmi)
		// no socket file, no VMI, so it's unresponsive
		if err != nil {
			// socket does not exist, but let's see if the pod is still there
			if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
				// no pod means that waiting for it to initialize makes no sense
				return true, true, nil
			}
			return false, false, nil
		}
		clientInfo.Ready = true
		clientInfo.SocketFile = socketFile
	}
	return cmdclient.IsSocketUnresponsive(socketFile), true, nil
}

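// getLauncherClient returns a command client for the VMI's virt-launcher,
// reusing a cached connection when one is available. On first use it locates
// the launcher socket, records a ghost record, dials the socket and starts
// the domain notify pipe before caching the client info.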
func (c *VirtualMachineController) getLauncherClient(vmi *v1.VirtualMachineInstance) (cmdclient.LauncherClient, error) {
	var err error

	clientInfo, exists := c.launcherClients.Load(vmi.UID)
	if exists && clientInfo.Client != nil {
		return clientInfo.Client, nil
	}

	socketFile, err := cmdclient.FindSocketOnHost(vmi)
	if err != nil {
		return nil, err
	}

	err = virtcache.AddGhostRecord(vmi.Namespace, vmi.Name, socketFile, vmi.UID)
	if err != nil {
		return nil, err
	}

	client, err := cmdclient.NewClient(socketFile)
	if err != nil {
		return nil, err
	}

	domainPipeStopChan := make(chan struct{})
	// we pipe in the domain socket into the VMI's filesystem
	err = c.startDomainNotifyPipe(domainPipeStopChan, vmi)
	if err != nil {
		client.Close()
		close(domainPipeStopChan)
		return nil, err
	}

	c.launcherClients.Store(vmi.UID, &virtcache.LauncherClientInfo{
		Client:              client,
		SocketFile:          socketFile,
		DomainPipeStopChan:  domainPipeStopChan,
		NotInitializedSince: time.Now(),
		Ready:               true,
	})

	return client, nil
}

func (c *VirtualMachineController) processVmDestroy(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
	tryGracefully := false
	return c.helperVmShutdown(vmi, domain, tryGracefully)
}

func (c *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
	tryGracefully := true
	return c.helperVmShutdown(vmi, domain, tryGracefully)
}

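// helperVmShutdown stops the VMI through virt-launcher. When tryGracefully is
// set and the domain still has an unexpired grace period, a graceful shutdown
// is requested; otherwise the domain is killed immediately.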
func (c *VirtualMachineController) helperVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, tryGracefully bool) error {

	// Only attempt to shutdown/destroy if we still have a connection established with the pod.
	client, err := c.getVerifiedLauncherClient(vmi)
	if err != nil {
		return err
	}

	if domainHasGracePeriod(domain) && tryGracefully {
		if expired, timeLeft := c.hasGracePeriodExpired(domain); !expired {
			return c.handleVMIShutdown(vmi, domain, client, timeLeft)
		}
		log.Log.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
	} else {
		log.Log.Object(vmi).Infof("Graceful shutdown not set, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
	}

	err = client.KillVirtualMachine(vmi)
	if err != nil && !cmdclient.IsDisconnected(err) {
		// Only report err if it wasn't the result of a disconnect.
		//
		// Both virt-launcher and virt-handler are trying to destroy
		// the VirtualMachineInstance at the same time. It's possible the client may get
		// disconnected during the kill request, which shouldn't be
		// considered an error.
		return err
	}

	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMIStopping)

	return nil
}

func (c *VirtualMachineController) handleVMIShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, client cmdclient.LauncherClient, timeLeft int64) error {
	if domain.Status.Status != api.Shutdown {
		return c.shutdownVMI(vmi, client, timeLeft)
	}
	log.Log.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
	return nil
}

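// shutdownVMI signals a graceful shutdown to the guest and re-enqueues the
// VMI so the signal is repeated every few seconds until the grace period
// expires; a guest that is still booting may otherwise miss the first ACPI
// event.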
func (c *VirtualMachineController) shutdownVMI(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient, timeLeft int64) error {
	err := client.ShutdownVirtualMachine(vmi)
	if err != nil && !cmdclient.IsDisconnected(err) {
		// Only report err if it wasn't the result of a disconnect.
		//
		// Both virt-launcher and virt-handler are trying to destroy
		// the VirtualMachineInstance at the same time. It's possible the client may get
		// disconnected during the kill request, which shouldn't be
		// considered an error.
		return err
	}

	log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())

	// Make sure that we don't hot-loop in case we send the first domain notification
	if timeLeft == -1 {
		timeLeft = 5
		if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
			timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
		}
	}
	// In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
	// That's important since a booting OS can miss ACPI signals
	if timeLeft > 5 {
		timeLeft = 5
	}

	// pending graceful shutdown.
	c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), VMIGracefulShutdown)
	return nil
}

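// processVmDelete signals deletion of the domain to virt-launcher if a
// connection to the pod can still be established; a torn-down pod implies the
// VirtualMachineInstance is already gone.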
func (c *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance) error {

	// Only attempt to shutdown/destroy if we still have a connection established with the pod.
	client, err := c.getVerifiedLauncherClient(vmi)

	// If the pod has been torn down, we know the VirtualMachineInstance is down.
	if err == nil {

		log.Log.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())

		// pending deletion.
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMISignalDeletion)

		err = client.DeleteDomain(vmi)
		if err != nil && !cmdclient.IsDisconnected(err) {
			// Only report err if it wasn't the result of a disconnect.
			//
			// Both virt-launcher and virt-handler are trying to destroy
			// the VirtualMachineInstance at the same time. It's possible the client may get
			// disconnected during the kill request, which shouldn't be
			// considered an error.
			return err
		}
	}

	return nil

}

func (c *VirtualMachineController) hasStaleClientConnections(vmi *v1.VirtualMachineInstance) bool {
	_, err := c.getVerifiedLauncherClient(vmi)
	if err == nil {
		// current client connection is good.
		return false
	}

	// no connection, but ghost file exists.
	if virtcache.HasGhostRecord(vmi.Namespace, vmi.Name) {
		return true
	}

	return false

}

func (c *VirtualMachineController) getVerifiedLauncherClient(vmi *v1.VirtualMachineInstance) (client cmdclient.LauncherClient, err error) {
	client, err = c.getLauncherClient(vmi)
	if err != nil {
		return
	}

	// Verify connectivity.
	// It's possible the pod has already been torn down along with the VirtualMachineInstance.
	err = client.Ping()
	return
}

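// isOrphanedMigrationSource reports whether the VMI's node-name label already
// points at a different node, meaning this node only holds leftover state
// from before the migration and should clean it up.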
func (c *VirtualMachineController) isOrphanedMigrationSource(vmi *v1.VirtualMachineInstance) bool {
	nodeName, ok := vmi.Labels[v1.NodeNameLabel]

	if ok && nodeName != "" && nodeName != c.host {
		return true
	}

	return false
}

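// isPreMigrationTarget reports whether this node is the designated migration
// target for the VMI while the VMI is still running elsewhere, i.e. the
// target environment still has to be prepared.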
func (c *VirtualMachineController) isPreMigrationTarget(vmi *v1.VirtualMachineInstance) bool {

	migrationTargetNodeName, ok := vmi.Labels[v1.MigrationTargetNodeNameLabel]

	if ok &&
		migrationTargetNodeName != "" &&
		migrationTargetNodeName != vmi.Status.NodeName &&
		migrationTargetNodeName == c.host {
		return true
	}

	return false
}

func (c *VirtualMachineController) checkNetworkInterfacesForMigration(vmi *v1.VirtualMachineInstance) error {
	return netvmispec.VerifyVMIMigratable(vmi, c.clusterConfig.GetNetworkBindings())
}

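// checkVolumesForMigration decides whether a live migration has to copy disk
// content (block migration) and returns an error for volume layouts that rule
// out live migration entirely, such as non-shared PVCs or host disks.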
func (c *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachineInstance) (blockMigrate bool, err error) {
	volumeStatusMap := make(map[string]v1.VolumeStatus)

	for _, volumeStatus := range vmi.Status.VolumeStatus {
		volumeStatusMap[volumeStatus.Name] = volumeStatus
	}

	if len(vmi.Status.MigratedVolumes) > 0 {
		blockMigrate = true
	}

	filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)

	// Check if all VMI volumes can be shared between the source and the destination
	// of a live migration. blockMigrate will be returned as false, only if all volumes
	// are shared and the VMI has no local disks.
	// Some combinations of disks make the VMI unsuitable for live migration.
	// A relevant error will be returned in this case.
	for _, volume := range vmi.Spec.Volumes {
		volSrc := volume.VolumeSource
		if volSrc.PersistentVolumeClaim != nil || volSrc.DataVolume != nil {

			var claimName string
			if volSrc.PersistentVolumeClaim != nil {
				claimName = volSrc.PersistentVolumeClaim.ClaimName
			} else {
				claimName = volSrc.DataVolume.Name
			}

			volumeStatus, ok := volumeStatusMap[volume.Name]

			if !ok || volumeStatus.PersistentVolumeClaimInfo == nil {
				return true, fmt.Errorf("cannot migrate VMI: Unable to determine if PVC %v is shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
			} else if !pvctypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !pvctypes.IsMigratedVolume(volumeStatus.Name, vmi) {
				return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
			}

		} else if volSrc.HostDisk != nil {
			shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
			if !shared {
				return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
			}
		} else {
			if _, ok := filesystems[volume.Name]; ok {
				log.Log.Object(vmi).Infof("Volume %s is shared with virtiofs, allow live migration", volume.Name)
				continue
			}

			isVolumeUsedByReadOnlyDisk := false
			for _, disk := range vmi.Spec.Domain.Devices.Disks {
				if isReadOnlyDisk(&disk) && disk.Name == volume.Name {
					isVolumeUsedByReadOnlyDisk = true
					break
				}
			}

			if isVolumeUsedByReadOnlyDisk {
				continue
			}

			if vmi.Status.MigrationMethod == "" || vmi.Status.MigrationMethod == v1.LiveMigration {
				log.Log.Object(vmi).Infof("migration is block migration because of %s volume", volume.Name)
			}
			blockMigrate = true
		}
	}
	return
}

func (c *VirtualMachineController) isVMIPausedDuringMigration(vmi *v1.VirtualMachineInstance) bool {
	return vmi.Status.MigrationState != nil &&
		vmi.Status.MigrationState.Mode == v1.MigrationPaused &&
		!vmi.Status.MigrationState.Completed
}

func (c *VirtualMachineController) isMigrationSource(vmi *v1.VirtualMachineInstance) bool {

	if vmi.Status.MigrationState != nil &&
		vmi.Status.MigrationState.SourceNode == c.host &&
		vmi.Status.MigrationState.TargetNodeAddress != "" &&
		!vmi.Status.MigrationState.Completed {

		return true
	}
	return false

}

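// handleTargetMigrationProxy starts the migration target proxy listeners that
// forward incoming migration traffic to the libvirt and migration unix
// sockets inside the target virt-launcher pod.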
func (c *VirtualMachineController) handleTargetMigrationProxy(vmi *v1.VirtualMachineInstance) error {
	// handle starting/stopping target migration proxy
	migrationTargetSockets := []string{}
	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}

	// Get the libvirt connection socket file on the destination pod.
	socketFile := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "libvirt/virtqemud-sock"), res.Pid())
	// the migration-proxy is no longer shared via host mount, so we
	// pass in the virt-launcher's baseDir to reach the unix sockets.
	baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
	migrationTargetSockets = append(migrationTargetSockets, socketFile)

	migrationPortsRange := migrationproxy.GetMigrationPortsList(vmi.IsBlockMigration())
	for _, port := range migrationPortsRange {
		key := migrationproxy.ConstructProxyKey(string(vmi.UID), port)
		// a proxy between the target direct qemu channel and the connector in the destination pod
		destSocketFile := migrationproxy.SourceUnixFile(baseDir, key)
		migrationTargetSockets = append(migrationTargetSockets, destSocketFile)
	}
	err = c.migrationProxy.StartTargetListener(string(vmi.UID), migrationTargetSockets)
	if err != nil {
		return err
	}
	return nil
}

func (c *VirtualMachineController) handlePostMigrationProxyCleanup(vmi *v1.VirtualMachineInstance) {
	if vmi.Status.MigrationState == nil || vmi.Status.MigrationState.Completed || vmi.Status.MigrationState.Failed {
		c.migrationProxy.StopTargetListener(string(vmi.UID))
		c.migrationProxy.StopSourceListener(string(vmi.UID))
	}
}

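// handleSourceMigrationProxy wires up the source side of a migration: it
// stops any target listener on this node and starts a source listener that
// proxies the local virt-launcher sockets to the target node's migration
// ports.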
func (c *VirtualMachineController) handleSourceMigrationProxy(vmi *v1.VirtualMachineInstance) error {

	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}
	// the migration-proxy is no longer shared via host mount, so we
	// pass in the virt-launcher's baseDir to reach the unix sockets.
	baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
	c.migrationProxy.StopTargetListener(string(vmi.UID))
	if vmi.Status.MigrationState.TargetDirectMigrationNodePorts == nil {
		msg := "No migration proxy has been created for this vmi"
		return fmt.Errorf("%s", msg)
	}
	err = c.migrationProxy.StartSourceListener(
		string(vmi.UID),
		vmi.Status.MigrationState.TargetNodeAddress,
		vmi.Status.MigrationState.TargetDirectMigrationNodePorts,
		baseDir,
	)
	if err != nil {
		return err
	}

	return nil
}

func (c *VirtualMachineController) getLauncherClientInfo(vmi *v1.VirtualMachineInstance) *virtcache.LauncherClientInfo {
	launcherInfo, exists := c.launcherClients.Load(vmi.UID)
	if !exists {
		return nil
	}
	return launcherInfo
}

func isMigrationInProgress(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
	var domainMigrationMetadata *api.MigrationMetadata

	if domain == nil ||
		vmi.Status.MigrationState == nil ||
		domain.Spec.Metadata.KubeVirt.Migration == nil {
		return false
	}
	domainMigrationMetadata = domain.Spec.Metadata.KubeVirt.Migration

	if vmi.Status.MigrationState.MigrationUID == domainMigrationMetadata.UID &&
		domainMigrationMetadata.StartTimestamp != nil {
		return true
	}
	return false
}

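// vmUpdateHelperMigrationSource drives the source side of a live migration:
// it honors abort requests, sets up the source migration proxy, assembles the
// migration options from the cluster configuration and tells virt-launcher to
// start migrating the domain.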
func (c *VirtualMachineController) vmUpdateHelperMigrationSource(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {

	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	if origVMI.Status.MigrationState.AbortRequested {
		err = c.handleMigrationAbort(origVMI, client)
		if err != nil {
			return err
		}
	} else {
		if isMigrationInProgress(origVMI, domain) {
			// we already started this migration, no need to rerun this
			log.DefaultLogger().Errorf("migration %s has already been started", origVMI.Status.MigrationState.MigrationUID)
			return nil
		}

		err = c.handleSourceMigrationProxy(origVMI)
		if err != nil {
			return fmt.Errorf("failed to handle migration proxy: %v", err)
		}

		migrationConfiguration := origVMI.Status.MigrationState.MigrationConfiguration
		if migrationConfiguration == nil {
			migrationConfiguration = c.clusterConfig.GetMigrationConfiguration()
		}

		options := &cmdclient.MigrationOptions{
			Bandwidth:               *migrationConfiguration.BandwidthPerMigration,
			ProgressTimeout:         *migrationConfiguration.ProgressTimeout,
			CompletionTimeoutPerGiB: *migrationConfiguration.CompletionTimeoutPerGiB,
			UnsafeMigration:         *migrationConfiguration.UnsafeMigrationOverride,
			AllowAutoConverge:       *migrationConfiguration.AllowAutoConverge,
			AllowPostCopy:           *migrationConfiguration.AllowPostCopy,
			AllowWorkloadDisruption: *migrationConfiguration.AllowWorkloadDisruption,
		}

		configureParallelMigrationThreads(options, origVMI)

		marshalledOptions, err := json.Marshal(options)
		if err != nil {
			log.Log.Object(origVMI).Warning("failed to marshal matched migration options")
		} else {
			log.Log.Object(origVMI).Infof("migration options matched for vmi %s: %s", origVMI.Name, string(marshalledOptions))
		}

		vmi := origVMI.DeepCopy()
		err = hostdisk.ReplacePVCByHostDisk(vmi)
		if err != nil {
			return err
		}

		err = client.MigrateVirtualMachine(vmi, options)
		if err != nil {
			return err
		}
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIMigrating)
	}
	return nil
}

func replaceMigratedVolumesStatus(vmi *v1.VirtualMachineInstance) {
	replaceVolsStatus := make(map[string]*v1.PersistentVolumeClaimInfo)
	for _, v := range vmi.Status.MigratedVolumes {
		replaceVolsStatus[v.SourcePVCInfo.ClaimName] = v.DestinationPVCInfo
	}
	for i, v := range vmi.Status.VolumeStatus {
		if v.PersistentVolumeClaimInfo == nil {
			continue
		}
		if status, ok := replaceVolsStatus[v.PersistentVolumeClaimInfo.ClaimName]; ok {
			vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo = status
		}
	}

}

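// vmUpdateHelperMigrationTarget prepares this node to receive a migrating
// VMI: it verifies and mounts container disks, mounts hotplug volumes, sets
// up networking and device ownership in the target pod, and finally asks
// virt-launcher to sync the migration target before starting the target
// proxy.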
func (c *VirtualMachineController) vmUpdateHelperMigrationTarget(origVMI *v1.VirtualMachineInstance) error {
	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	vmi := origVMI.DeepCopy()

	if migrations.MigrationFailed(vmi) {
		// if the migration failed, signal the target pod it's okay to exit
		err = client.SignalTargetPodCleanup(vmi)
		if err != nil {
			return err
		}
		log.Log.Object(vmi).Infof("Signaled target pod for failed migration to clean up")
		// nothing left to do here if the migration failed.
		// Re-enqueue to trigger handler final cleanup
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
		return nil
	} else if migrations.IsMigrating(vmi) {
		// If the migration has already started,
		// then there's nothing left to prepare on the target side
		return nil
	}
	// The VolumeStatus is used to retrieve additional information for the volume handling.
	// For example, for filesystem PVCs, the information is used to create a correctly sized image.
	// In the case of migrated volumes, we need to replace the original volume information with the
	// destination volume properties.
	replaceMigratedVolumesStatus(vmi)
	err = hostdisk.ReplacePVCByHostDisk(vmi)
	if err != nil {
		return err
	}

	// give containerDisks some time to become ready before throwing errors on retries
	info := c.getLauncherClientInfo(vmi)
	if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
		if err != nil {
			return err
		}
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return nil
	}

	// Verify container disks checksum
	err = container_disk.VerifyChecksums(c.containerDiskMounter, vmi)
	switch {
	case goerror.Is(err, container_disk.ErrChecksumMissing):
		// wait for checksum to be computed by the source virt-handler
		return err
	case goerror.Is(err, container_disk.ErrChecksumMismatch):
		log.Log.Object(vmi).Infof("Containerdisk checksum mismatch, terminating target pod: %s", err)
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, "ContainerDiskFailedChecksum", "Aborting migration as the source and target containerdisks/kernelboot do not match")
		return client.SignalTargetPodCleanup(vmi)
	case err != nil:
		return err
	}

	// Mount container disks
	disksInfo, err := c.containerDiskMounter.MountAndVerify(vmi)
	if err != nil {
		return err
	}

	// Mount hotplug disks
	if attachmentPodUID := vmi.Status.MigrationState.TargetAttachmentPodUID; attachmentPodUID != types.UID("") {
		cgroupManager, err := getCgroupManager(vmi)
		if err != nil {
			return err
		}
		if err := c.hotplugVolumeMounter.MountFromPod(vmi, attachmentPodUID, cgroupManager); err != nil {
			return fmt.Errorf("failed to mount hotplug volumes: %v", err)
		}
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForMigrationTarget(vmi), isolationRes.Pid()); err != nil {
		return fmt.Errorf("failed to configure vmi network for migration target: %w", err)
	}

	virtLauncherRootMount, err := isolationRes.MountRoot()
	if err != nil {
		return err
	}

	err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
	if err != nil {
		return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
	}
	if virtutil.IsAutoAttachVSOCK(vmi) {
		if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
			return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
		}
	}

	lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
	minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()

	// initialize disks images for empty PVC
	hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
	err = hostDiskCreator.Create(vmi)
	if err != nil {
		return fmt.Errorf("preparing host-disks failed: %v", err)
	}

	if virtutil.IsNonRootVMI(vmi) {
		if err := c.nonRootSetup(origVMI); err != nil {
			return err
		}
	}

	options := virtualMachineOptions(nil, 0, nil, c.capabilities, disksInfo, c.clusterConfig)
	options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())

	if err := client.SyncMigrationTarget(vmi, options); err != nil {
		return fmt.Errorf("syncing migration target failed: %v", err)
	}
	c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), VMIMigrationTargetPrepared)

	err = c.handleTargetMigrationProxy(vmi)
	if err != nil {
		return fmt.Errorf("failed to handle post sync migration proxy: %v", err)
	}
	return nil
}

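// affinePitThread pins the KVM PIT thread to the same CPU set as vCPU 0 and,
// for realtime VMIs, raises it to FIFO scheduling with priority 2 so the
// timer thread is not starved by other host work.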
func (c *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstance) error {
	res, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return err
	}
	var Mask unix.CPUSet
	Mask.Zero()
	qemuprocess, err := res.GetQEMUProcess()
	if err != nil {
		return err
	}
	qemupid := qemuprocess.Pid()
	if qemupid == -1 {
		return nil
	}

	pitpid, err := res.KvmPitPid()
	if err != nil {
		return err
	}
	if pitpid == -1 {
		return nil
	}
	if vmi.IsRealtimeEnabled() {
		param := schedParam{priority: 2}
		err = schedSetScheduler(pitpid, schedFIFO, param)
		if err != nil {
			return fmt.Errorf("failed to set FIFO scheduling and priority 2 for thread %d: %w", pitpid, err)
		}
	}
	vcpus, err := getVCPUThreadIDs(qemupid)
	if err != nil {
		return err
	}
	vpid, ok := vcpus["0"]
	if !ok {
		return nil
	}
	vcpupid, err := strconv.Atoi(vpid)
	if err != nil {
		return err
	}
	err = unix.SchedGetaffinity(vcpupid, &Mask)
	if err != nil {
		return err
	}
	return unix.SchedSetaffinity(pitpid, &Mask)
}

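// configureHousekeepingCgroup creates a "housekeeping" cpuset child cgroup
// pinned to the emulator CPU set and moves the launcher's non-vCPU threads
// into it, keeping housekeeping work off the dedicated vCPUs.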
func (c *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
	if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
		log.Log.Reason(err).Error("CreateChildCgroup ")
		return err
	}

	key := controller.VirtualMachineInstanceKey(vmi)
	domain, domainExists, _, err := c.getDomainFromCache(key)
	if err != nil {
		return err
	}
	// bail out if domain does not exist
	if !domainExists {
		return nil
	}

	if domain.Spec.CPUTune == nil || domain.Spec.CPUTune.EmulatorPin == nil {
		return nil
	}

	hkcpus, err := hardware.ParseCPUSetLine(domain.Spec.CPUTune.EmulatorPin.CPUSet, 100)
	if err != nil {
		return err
	}

	log.Log.V(3).Object(vmi).Infof("housekeeping cpu: %v", hkcpus)

	err = cgroupManager.SetCpuSet("housekeeping", hkcpus)
	if err != nil {
		return err
	}

	tids, err := cgroupManager.GetCgroupThreads()
	if err != nil {
		return err
	}
	hktids := make([]int, 0, 10)

	for _, tid := range tids {
		proc, err := ps.FindProcess(tid)
		if err != nil {
			log.Log.Object(vmi).Errorf("Failure to find process: %s", err.Error())
			return err
		}
		if proc == nil {
			return fmt.Errorf("failed to find process with tid: %d", tid)
		}
		comm := proc.Executable()
		if strings.Contains(comm, "CPU ") && strings.Contains(comm, "KVM") {
			continue
		}
		hktids = append(hktids, tid)
	}

	log.Log.V(3).Object(vmi).Infof("hk thread ids: %v", hktids)
	for _, tid := range hktids {
		err = cgroupManager.AttachTID("cpuset", "housekeeping", tid)
		if err != nil {
			log.Log.Object(vmi).Errorf("Error attaching tid %d: %v", tid, err.Error())
			return err
		}
	}

	return nil
}

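// vmUpdateHelperDefault is the non-migration sync path: it prepares host
// disks, handles the starting or running VMI state, synchronizes the domain
// with virt-launcher and finishes with post-sync housekeeping such as cgroup
// and realtime tuning.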
func (c *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMachineInstance, domainExists bool) error {
	client, err := c.getLauncherClient(origVMI)
	if err != nil {
		return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
	}

	vmi := origVMI.DeepCopy()
	preallocatedVolumes := c.getPreallocatedVolumes(vmi)

	err = hostdisk.ReplacePVCByHostDisk(vmi)
	if err != nil {
		return err
	}

	cgroupManager, err := getCgroupManager(vmi)
	if err != nil {
		return err
	}

	var errorTolerantFeaturesError []error
	disksInfo := map[string]*containerdisk.DiskInfo{}
	readyToProceed, err := c.handleVMIState(vmi, cgroupManager, &disksInfo, &errorTolerantFeaturesError)
	if err != nil {
		return err
	}

	if !readyToProceed {
		return nil
	}

	// Synchronize the VirtualMachineInstance state
	err = c.syncVirtualMachine(client, vmi, preallocatedVolumes, disksInfo)
	if err != nil {
		return err
	}

	// Post-sync housekeeping
	err = c.handleHousekeeping(vmi, cgroupManager, domainExists)
	if err != nil {
		return err
	}

	return errors.NewAggregate(errorTolerantFeaturesError)
}

// handleVMIState decides whether to call handleRunningVMI or handleStartingVMI based on the VMI's state.
func (c *VirtualMachineController) handleVMIState(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, disksInfo *map[string]*containerdisk.DiskInfo, errorTolerantFeaturesError *[]error) (bool, error) {
	if vmi.IsRunning() {
		return true, c.handleRunningVMI(vmi, cgroupManager, errorTolerantFeaturesError)
	} else if !vmi.IsFinal() {
		return c.handleStartingVMI(vmi, cgroupManager, disksInfo)
	}
	return true, nil
}

// handleRunningVMI contains the logic specifically for running VMs (hotplugging in running state, metrics, network updates)
func (c *VirtualMachineController) handleRunningVMI(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) error {
	if err := c.hotplugSriovInterfaces(vmi); err != nil {
		log.Log.Object(vmi).Error(err.Error())
	}

	if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
		return err
	}

	if err := c.getMemoryDump(vmi); err != nil {
		return err
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.downwardMetricsManager.StartServer(vmi, isolationRes.Pid()); err != nil {
		return err
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForLiveUpdate(vmi), isolationRes.Pid()); err != nil {
		log.Log.Object(vmi).Error(err.Error())
		c.recorder.Event(vmi, k8sv1.EventTypeWarning, "NicHotplug", err.Error())
		*errorTolerantFeaturesError = append(*errorTolerantFeaturesError, err)
	}

	return nil
}

// handleStartingVMI contains the logic for starting VMs (container disks, initial network setup, device ownership).
func (c *VirtualMachineController) handleStartingVMI(
	vmi *v1.VirtualMachineInstance,
	cgroupManager cgroup.Manager,
	disksInfo *map[string]*containerdisk.DiskInfo,
) (bool, error) {
	// give containerDisks some time to become ready before throwing errors on retries
	info := c.getLauncherClientInfo(vmi)
	if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
		if err != nil {
			return false, err
		}
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return false, nil
	}

	var err error
	*disksInfo, err = c.containerDiskMounter.MountAndVerify(vmi)
	if err != nil {
		return false, err
	}

	if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
		return false, err
	}

	isolationRes, err := c.podIsolationDetector.Detect(vmi)
	if err != nil {
		return false, fmt.Errorf(failedDetectIsolationFmt, err)
	}

	if err := c.netConf.Setup(vmi, netsetup.FilterNetsForVMStartup(vmi), isolationRes.Pid()); err != nil {
		return false, fmt.Errorf("failed to configure vmi network: %w", err)
	}

	if err := c.setupDevicesOwnerships(vmi, isolationRes); err != nil {
		return false, err
	}

	if err := c.adjustResources(vmi); err != nil {
		return false, err
	}

	if err := c.waitForSEVAttestation(vmi); err != nil {
		return false, err
	}

	return true, nil
}

func (c *VirtualMachineController) adjustResources(vmi *v1.VirtualMachineInstance) error {
	err := c.podIsolationDetector.AdjustResources(vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
	if err != nil {
		return fmt.Errorf("failed to adjust resources: %v", err)
	}
	return nil
}

func (c *VirtualMachineController) waitForSEVAttestation(vmi *v1.VirtualMachineInstance) error {
	if util.IsSEVAttestationRequested(vmi) {
		sev := vmi.Spec.Domain.LaunchSecurity.SEV
		if sev.Session == "" || sev.DHCert == "" {
			// Wait for the session parameters to be provided
			return nil
		}
	}
	return nil
}

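// setupDevicesOwnerships hands the device nodes and sockets needed by the
// launcher (/dev/kvm, optional vhost-vsock and SEV devices, host disks and
// virtiofs sockets) over to the virt-launcher user.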
func (c *VirtualMachineController) setupDevicesOwnerships(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
	virtLauncherRootMount, err := isolationRes.MountRoot()
	if err != nil {
		return err
	}

	err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
	if err != nil {
		return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
	}

	if virtutil.IsAutoAttachVSOCK(vmi) {
		if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
			return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
		}
	}

	if err := c.configureHostDisks(vmi, isolationRes, virtLauncherRootMount); err != nil {
		return err
	}

	if err := c.configureSEVDeviceOwnership(vmi, isolationRes, virtLauncherRootMount); err != nil {
		return err
	}

	if virtutil.IsNonRootVMI(vmi) {
		if err := c.nonRootSetup(vmi); err != nil {
			return err
		}
	}

	if err := c.configureVirtioFS(vmi, isolationRes); err != nil {
		return err
	}

	return nil
}

func (c *VirtualMachineController) configureHostDisks(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
	lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
	minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()

	hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
	if err := hostDiskCreator.Create(vmi); err != nil {
		return fmt.Errorf("preparing host-disks failed: %v", err)
	}
	return nil
}

func (c *VirtualMachineController) configureSEVDeviceOwnership(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
	if virtutil.IsSEVVMI(vmi) {
		sevDevice, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", "sev"))
		if err != nil {
			return err
		}
		if err := diskutils.DefaultOwnershipManager.SetFileOwnership(sevDevice); err != nil {
			return fmt.Errorf("failed to set SEV device owner: %v", err)
		}
	}
	return nil
}

func (c *VirtualMachineController) configureVirtioFS(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
	for _, fs := range vmi.Spec.Domain.Devices.Filesystems {
		socketPath, err := isolation.SafeJoin(isolationRes, virtiofs.VirtioFSSocketPath(fs.Name))
		if err != nil {
			return err
		}
		if err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath); err != nil {
			return err
		}
	}
	return nil
}

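// syncVirtualMachine sends the final sync command to virt-launcher with the
// fully assembled virtual machine options; a missing EFI OVMF rom is
// surfaced as a critical secure-boot mismatch error.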
func (c *VirtualMachineController) syncVirtualMachine(client cmdclient.LauncherClient, vmi *v1.VirtualMachineInstance, preallocatedVolumes []string, disksInfo map[string]*containerdisk.DiskInfo) error {
	smbios := c.clusterConfig.GetSMBIOS()
	period := c.clusterConfig.GetMemBalloonStatsPeriod()

	options := virtualMachineOptions(smbios, period, preallocatedVolumes, c.capabilities, disksInfo, c.clusterConfig)
	options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())

	err := client.SyncVirtualMachine(vmi, options)
	if err != nil {
		if strings.Contains(err.Error(), "EFI OVMF rom missing") {
			return &virtLauncherCriticalSecurebootError{fmt.Sprintf("mismatch of Secure Boot setting and bootloaders: %v", err)}
		}
	}

	return err
}

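// handleHousekeeping performs the post-sync adjustments for the VMI:
// housekeeping cgroup setup for isolated emulator threads, realtime vCPU
// scheduling, PIT thread affinity, the Created event on first definition and
// unmounting of hotplug volumes that are no longer in use.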
func (c *VirtualMachineController) handleHousekeeping(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, domainExists bool) error {

	if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
		err := c.configureHousekeepingCgroup(vmi, cgroupManager)
		if err != nil {
			return err
		}
	}

	// Configure vcpu scheduler for realtime workloads and affine PIT thread for dedicated CPU
	if vmi.IsRealtimeEnabled() && !vmi.IsRunning() && !vmi.IsFinal() {
		log.Log.Object(vmi).Info("Configuring vcpus for real time workloads")
		if err := c.configureVCPUScheduler(vmi); err != nil {
			return err
		}
	}
	if vmi.IsCPUDedicated() && !vmi.IsRunning() && !vmi.IsFinal() {
		log.Log.V(3).Object(vmi).Info("Affining PIT thread")
		if err := c.affinePitThread(vmi); err != nil {
			return err
		}
	}
	if !domainExists {
		c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Created.String(), VMIDefined)
	}

	if vmi.IsRunning() {
		// Unmount any disks no longer mounted
		if err := c.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
			return err
		}
	}
	return nil
}

func (c *VirtualMachineController) getPreallocatedVolumes(vmi *v1.VirtualMachineInstance) []string {
	preallocatedVolumes := []string{}
	for _, volumeStatus := range vmi.Status.VolumeStatus {
		if volumeStatus.PersistentVolumeClaimInfo != nil && volumeStatus.PersistentVolumeClaimInfo.Preallocated {
			preallocatedVolumes = append(preallocatedVolumes, volumeStatus.Name)
		}
	}
	return preallocatedVolumes
}

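// hotplugSriovInterfaces compares the SR-IOV interfaces requested in the spec
// with the ones already attached to the domain and, if some are still
// missing, triggers a rate-limited host-device hotplug through virt-launcher.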
func (c *VirtualMachineController) hotplugSriovInterfaces(vmi *v1.VirtualMachineInstance) error {
	sriovSpecInterfaces := netvmispec.FilterSRIOVInterfaces(vmi.Spec.Domain.Devices.Interfaces)

	sriovSpecIfacesNames := netvmispec.IndexInterfaceSpecByName(sriovSpecInterfaces)
	attachedSriovStatusIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
		_, exist := sriovSpecIfacesNames[iface.Name]
		return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceDomain) &&
			netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
	})

	desiredSriovMultusPluggedIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
		_, exist := sriovSpecIfacesNames[iface.Name]
		return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
	})

	if len(desiredSriovMultusPluggedIfaces) == len(attachedSriovStatusIfaces) {
		c.sriovHotplugExecutorPool.Delete(vmi.UID)
		return nil
	}

	rateLimitedExecutor := c.sriovHotplugExecutorPool.LoadOrStore(vmi.UID)
	return rateLimitedExecutor.Exec(func() error {
		return c.hotplugSriovInterfacesCommand(vmi)
	})
}

func (c *VirtualMachineController) hotplugSriovInterfacesCommand(vmi *v1.VirtualMachineInstance) error {
	const errMsgPrefix = "failed to hot-plug SR-IOV interfaces"

	client, err := c.getVerifiedLauncherClient(vmi)
	if err != nil {
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
		c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), err.Error())
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	log.Log.V(3).Object(vmi).Info("sending hot-plug host-devices command")
	if err := client.HotplugHostDevices(vmi); err != nil {
		return fmt.Errorf("%s: %v", errMsgPrefix, err)
	}

	return nil
}

func memoryDumpPath(volumeStatus v1.VolumeStatus) string {
	target := hotplugdisk.GetVolumeMountDir(volumeStatus.Name)
	dumpPath := filepath.Join(target, volumeStatus.MemoryDumpVolume.TargetFileName)
	return dumpPath
}

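// getMemoryDump asks virt-launcher to dump guest memory into every
// memory-dump volume that is currently in the InProgress phase.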
func (c *VirtualMachineController) getMemoryDump(vmi *v1.VirtualMachineInstance) error {
	const errMsgPrefix = "failed to get memory dump"

	for _, volumeStatus := range vmi.Status.VolumeStatus {
		if volumeStatus.MemoryDumpVolume == nil || volumeStatus.Phase != v1.MemoryDumpVolumeInProgress {
			continue
		}
		client, err := c.getVerifiedLauncherClient(vmi)
		if err != nil {
			return fmt.Errorf("%s: %v", errMsgPrefix, err)
		}

		log.Log.V(3).Object(vmi).Info("sending memory dump command")
		err = client.VirtualMachineMemoryDump(vmi, memoryDumpPath(volumeStatus))
		if err != nil {
			return fmt.Errorf("%s: %v", errMsgPrefix, err)
		}
	}

	return nil
}

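// processVmUpdate dispatches a sync request to the right helper depending on
// whether this node is the migration target, the migration source or the
// regular owner of the VMI, after checking that virt-launcher is reachable.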
func (c *VirtualMachineController) processVmUpdate(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {

	isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)
	if err != nil {
		return err
	}
	if !isInitialized {
		c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
		return nil
	} else if isUnresponsive {
		return goerror.New("Can not update a VirtualMachineInstance with unresponsive command server.")
	}

	c.handlePostMigrationProxyCleanup(vmi)

	if c.isPreMigrationTarget(vmi) {
		return c.vmUpdateHelperMigrationTarget(vmi)
	} else if c.isMigrationSource(vmi) {
		return c.vmUpdateHelperMigrationSource(vmi, domain)
	} else {
		return c.vmUpdateHelperDefault(vmi, domain != nil)
	}
}

func (c *VirtualMachineController) setVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) error {
        phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
        if err != nil {
                return err
        }
        vmi.Status.Phase = phase
        return nil
}

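// calculateVmPhaseForStatusReason derives the VMI phase from the state of the
// libvirt domain when one exists or, when no domain exists, from the current
// VMI phase and the responsiveness of the launcher client.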
func (c *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) (v1.VirtualMachineInstancePhase, error) {

        if domain == nil {
                switch {
                case vmi.IsScheduled():
                        isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)

                        if err != nil {
                                return vmi.Status.Phase, err
                        }
                        if !isInitialized {
                                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
                                return vmi.Status.Phase, err
                        } else if isUnresponsive {
                                // virt-launcher is gone and the VirtualMachineInstance never
                                // transitioned from Scheduled to Running.
                                return v1.Failed, nil
                        }
                        return v1.Scheduled, nil
                case !vmi.IsRunning() && !vmi.IsFinal():
                        return v1.Scheduled, nil
                case !vmi.IsFinal():
                        // This is unexpected: a VirtualMachineInstance should not be deletable before it is stopped.
                        // However, it is possible if someone interacts with libvirt directly.
                        return v1.Failed, nil
                }
        } else {

                switch domain.Status.Status {
                case api.Shutoff, api.Crashed:
                        switch domain.Status.Reason {
                        case api.ReasonCrashed, api.ReasonPanicked:
                                return v1.Failed, nil
                        case api.ReasonDestroyed:
                                // When ACPI is available, a graceful shutdown was attempted first, so
                                // ReasonDestroyed means the domain was forcibly destroyed after the
                                // grace period expired. Without ACPI, a destroyed domain is expected.
                                if isACPIEnabled(vmi, domain) {
                                        return v1.Failed, nil
                                }
                                return v1.Succeeded, nil
                        case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
                                return v1.Succeeded, nil
                        case api.ReasonMigrated:
                                // If the domain migrated, we no longer know the phase.
                                return vmi.Status.Phase, nil
                        }
                case api.Running, api.Paused, api.Blocked, api.PMSuspended:
                        return v1.Running, nil
                }
        }
        return vmi.Status.Phase, nil
}

func (c *VirtualMachineController) addFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) addDomainFunc(obj interface{}) {
        domain := obj.(*api.Domain)
        log.Log.Object(domain).Infof("Domain is in state %s reason %s", domain.Status.Status, domain.Status.Reason)
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) deleteDomainFunc(obj interface{}) {
        domain, ok := obj.(*api.Domain)
        if !ok {
                tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                        log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
                        return
                }
                domain, ok = tombstone.Obj.(*api.Domain)
                if !ok {
                        log.Log.Reason(fmt.Errorf("tombstone contained object that is not a domain %#v", obj)).Error("Failed to process delete notification")
                        return
                }
        }
        log.Log.Object(domain).Info("Domain deleted")
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}
func (c *VirtualMachineController) updateDomainFunc(old, new interface{}) {
        newDomain := new.(*api.Domain)
        oldDomain := old.(*api.Domain)
        if oldDomain.Status.Status != newDomain.Status.Status || oldDomain.Status.Reason != newDomain.Status.Reason {
                log.Log.Object(newDomain).Infof("Domain is in state %s reason %s", newDomain.Status.Status, newDomain.Status.Reason)
        }

        if newDomain.ObjectMeta.DeletionTimestamp != nil {
                log.Log.Object(newDomain).Info("Domain is marked for deletion")
        }

        key, err := controller.KeyFunc(new)
        if err == nil {
                c.queue.Add(key)
        }
}

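// finalizeMigration applies any pending CPU and memory hotplug, clears the
// migrated-volume state and asks virt-launcher to finalize the migration,
// recording warning events when a hotplug step fails.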
func (c *VirtualMachineController) finalizeMigration(vmi *v1.VirtualMachineInstance) error {
        const errorMessage = "failed to finalize migration"

        client, err := c.getVerifiedLauncherClient(vmi)
        if err != nil {
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        if err := c.hotplugCPU(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to change vCPUs")
        }

        if err := c.hotplugMemory(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to update guest memory")
        }
        removeMigratedVolumes(vmi)

        options := &cmdv1.VirtualMachineOptions{}
        options.InterfaceMigration = domainspec.BindingMigrationByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
        if err := client.FinalizeVirtualMachineMigration(vmi, options); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        return nil
}

func vmiHasTerminationGracePeriod(vmi *v1.VirtualMachineInstance) bool {
        // if not set we use the default grace period
        return vmi.Spec.TerminationGracePeriodSeconds == nil ||
                (vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds != 0)
}

func domainHasGracePeriod(domain *api.Domain) bool {
        return domain != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds != 0
}

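// isACPIEnabled reports whether the domain exposes the ACPI feature and a
// termination grace period applies (from the VMI spec or, when unset, from the
// domain metadata), so a graceful shutdown can be attempted before the domain
// is destroyed.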
func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
        return (vmiHasTerminationGracePeriod(vmi) || (vmi.Spec.TerminationGracePeriodSeconds == nil && domainHasGracePeriod(domain))) &&
                domain != nil &&
                domain.Spec.Features != nil &&
                domain.Spec.Features.ACPI != nil
}

func (c *VirtualMachineController) isHostModelMigratable(vmi *v1.VirtualMachineInstance) error {
        if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == v1.CPUModeHostModel {
                if c.hostCpuModel == "" {
                        err := fmt.Errorf("the node \"%s\" does not allow migration with host-model", vmi.Status.NodeName)
                        log.Log.Object(vmi).Error(err.Error())
                        return err
                }
        }
        return nil
}

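// claimDeviceOwnership sets the expected file ownership on the named device
// node under /dev inside the virt-launcher root mount. A missing device node
// is tolerated when software emulation is allowed.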
func (c *VirtualMachineController) claimDeviceOwnership(virtLauncherRootMount *safepath.Path, deviceName string) error {
        softwareEmulation := c.clusterConfig.AllowEmulation()
        devicePath, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", deviceName))
        if err != nil {
                if softwareEmulation {
                        return nil
                }
                return err
        }

        return diskutils.DefaultOwnershipManager.SetFileOwnership(devicePath)
}

func (c *VirtualMachineController) reportDedicatedCPUSetForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        cgroupManager, err := getCgroupManager(vmi)
        if err != nil {
                return err
        }

        cpusetStr, err := cgroupManager.GetCpuSet()
        if err != nil {
                return err
        }

        cpuSet, err := hardware.ParseCPUSetLine(cpusetStr, 50000)
        if err != nil {
                return fmt.Errorf("failed to parse target VMI cpuset: %v", err)
        }

        vmi.Status.MigrationState.TargetCPUSet = cpuSet

        return nil
}

func (c *VirtualMachineController) reportTargetTopologyForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        options := virtualMachineOptions(nil, 0, nil, c.capabilities, map[string]*containerdisk.DiskInfo{}, c.clusterConfig)
        topology, err := json.Marshal(options.Topology)
        if err != nil {
                return err
        }
        vmi.Status.MigrationState.TargetNodeTopology = string(topology)
        return nil
}

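// handleMigrationAbort asks virt-launcher to cancel an in-flight migration
// unless an abort is already in progress, and records an event once the
// cancellation has been requested.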
func (c *VirtualMachineController) handleMigrationAbort(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        if vmi.Status.MigrationState.AbortStatus == v1.MigrationAbortInProgress {
                return nil
        }

        err := client.CancelVirtualMachineMigration(vmi)
        if err != nil && err.Error() == migrations.CancelMigrationFailedVmiNotMigratingErr {
                // If the migration did not even start, there is no need to cancel it.
                log.Log.Object(vmi).Infof("skipping migration cancellation since vmi is not migrating")
                return err
        }

        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIAbortingMigration)
        return nil
}

func isIOError(shouldUpdate, domainExists bool, domain *api.Domain) bool {
        return shouldUpdate && domainExists && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedIOError
}

func (c *VirtualMachineController) updateMachineType(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil || vmi == nil {
                return
        }
        if domain.Spec.OS.Type.Machine != "" {
                vmi.Status.Machine = &v1.Machine{Type: domain.Spec.OS.Type.Machine}
        }
}

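// hotplugCPU synchronizes the VMI's requested vCPU topology with the running
// domain. For VMIs with dedicated CPUs it first validates that the requested
// vCPU count fits within the pod CPU limit recorded in the
// VirtualMachinePodCPULimitsLabel, and on success it updates the reported
// current CPU topology.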
func (c *VirtualMachineController) hotplugCPU(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIVCPUChangeConditionAndLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodCPULimitsLabel)
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
        }
        defer removeVMIVCPUChangeConditionAndLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceVCPUChange) {
                return nil
        }

        if vmi.IsCPUDedicated() {
                cpuLimitStr, ok := vmi.Labels[v1.VirtualMachinePodCPULimitsLabel]
                if !ok || len(cpuLimitStr) == 0 {
                        return fmt.Errorf("cannot read CPU limit from VMI label")
                }

                cpuLimit, err := strconv.Atoi(cpuLimitStr)
                if err != nil {
                        return fmt.Errorf("cannot parse CPU limit from VMI label: %v", err)
                }

                vcpus := hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU)
                if vcpus > int64(cpuLimit) {
                        return fmt.Errorf("number of requested vCPUs (%d) exceeds the limit (%d)", vcpus, cpuLimit)
                }
        }

        options := virtualMachineOptions(
                nil,
                0,
                nil,
                c.capabilities,
                nil,
                c.clusterConfig)

        if err := client.SyncVirtualMachineCPUs(vmi, options); err != nil {
                return err
        }

        if vmi.Status.CurrentCPUTopology == nil {
                vmi.Status.CurrentCPUTopology = &v1.CPUTopology{}
        }

        vmi.Status.CurrentCPUTopology.Sockets = vmi.Spec.Domain.CPU.Sockets
        vmi.Status.CurrentCPUTopology.Cores = vmi.Spec.Domain.CPU.Cores
        vmi.Status.CurrentCPUTopology.Threads = vmi.Spec.Domain.CPU.Threads

        return nil
}

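// hotplugMemory synchronizes hotplugged guest memory with the running domain.
// It verifies that the launcher pod's memory request (taken from the
// VirtualMachinePodMemoryRequestsLabel) covers the VMI memory request plus the
// computed overhead before issuing the sync, and marks the MemoryChange
// condition as failed if the sync is rejected.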
func (c *VirtualMachineController) hotplugMemory(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIMemoryChangeLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodMemoryRequestsLabel)
                delete(vmi.Labels, v1.MemoryHotplugOverheadRatioLabel)
        }
        defer removeVMIMemoryChangeLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceMemoryChange) {
                return nil
        }

        podMemReqStr := vmi.Labels[v1.VirtualMachinePodMemoryRequestsLabel]
        podMemReq, err := resource.ParseQuantity(podMemReqStr)
        if err != nil {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("cannot parse Memory requests from VMI label: %v", err)
        }

        overheadRatio := vmi.Labels[v1.MemoryHotplugOverheadRatioLabel]
        requiredMemory := services.GetMemoryOverhead(vmi, runtime.GOARCH, &overheadRatio)
        requiredMemory.Add(
                c.netBindingPluginMemoryCalculator.Calculate(vmi, c.clusterConfig.GetNetworkBindings()),
        )

        requiredMemory.Add(*vmi.Spec.Domain.Resources.Requests.Memory())

        if podMemReq.Cmp(requiredMemory) < 0 {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("amount of requested guest memory (%s) exceeds the launcher memory request (%s)", vmi.Spec.Domain.Memory.Guest.String(), podMemReqStr)
        }

        options := virtualMachineOptions(nil, 0, nil, c.capabilities, nil, c.clusterConfig)

        if err := client.SyncVirtualMachineMemory(vmi, options); err != nil {
                // mark hotplug as failed
                vmiConditions.UpdateCondition(vmi, &v1.VirtualMachineInstanceCondition{
                        Type:    v1.VirtualMachineInstanceMemoryChange,
                        Status:  k8sv1.ConditionFalse,
                        Reason:  memoryHotplugFailedReason,
                        Message: "memory hotplug failed, the VM configuration is not supported",
                })
                return err
        }

        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
        vmi.Status.Memory.GuestRequested = vmi.Spec.Domain.Memory.Guest
        return nil
}

func removeMigratedVolumes(vmi *v1.VirtualMachineInstance) {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVolumesChange)
        vmi.Status.MigratedVolumes = nil
}

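// parseLibvirtQuantity converts a libvirt memory value with its unit (decimal
// units such as "KB"/"MB" or binary units such as "KiB"/"MiB") into a
// Kubernetes resource.Quantity; unknown units yield nil. For example,
// parseLibvirtQuantity(4, "MiB") is equivalent to
// resource.NewQuantity(4*1024*1024, resource.BinarySI).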
func parseLibvirtQuantity(value int64, unit string) *resource.Quantity {
        switch unit {
        case "b", "bytes":
                return resource.NewQuantity(value, resource.BinarySI)
        case "KB":
                return resource.NewQuantity(value*1000, resource.DecimalSI)
        case "MB":
                return resource.NewQuantity(value*1000*1000, resource.DecimalSI)
        case "GB":
                return resource.NewQuantity(value*1000*1000*1000, resource.DecimalSI)
        case "TB":
                return resource.NewQuantity(value*1000*1000*1000*1000, resource.DecimalSI)
        case "k", "KiB":
                return resource.NewQuantity(value*1024, resource.BinarySI)
        case "M", "MiB":
                return resource.NewQuantity(value*1024*1024, resource.BinarySI)
        case "G", "GiB":
                return resource.NewQuantity(value*1024*1024*1024, resource.BinarySI)
        case "T", "TiB":
                return resource.NewQuantity(value*1024*1024*1024*1024, resource.BinarySI)
        }
        return nil
}

func (c *VirtualMachineController) updateMemoryInfo(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        if domain == nil || vmi == nil || domain.Spec.CurrentMemory == nil {
                return nil
        }
        if vmi.Status.Memory == nil {
                vmi.Status.Memory = &v1.MemoryStatus{}
        }
        currentGuest := parseLibvirtQuantity(int64(domain.Spec.CurrentMemory.Value), domain.Spec.CurrentMemory.Unit)
        vmi.Status.Memory.GuestCurrent = currentGuest
        return nil
}

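// configureParallelMigrationThreads sets the number of parallel (multifd)
// migration threads unless the VMI has a non-zero CPU limit, in which case
// the extra threads could starve the compute container of CPU.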
func configureParallelMigrationThreads(options *cmdclient.MigrationOptions, vm *v1.VirtualMachineInstance) {
        // When the CPU is limited, the migration threads risk choking the CPU resources of the compute container,
        // so we avoid configuring parallel migration threads in that case.
        if cpuLimit, cpuLimitExists := vm.Spec.Domain.Resources.Limits[k8sv1.ResourceCPU]; cpuLimitExists && !cpuLimit.IsZero() {
                return
        }

        options.ParallelMigrationThreads = pointer.P(parallelMultifdMigrationThreads)
}

func isReadOnlyDisk(disk *v1.Disk) bool {
        isReadOnlyCDRom := disk.CDRom != nil && (disk.CDRom.ReadOnly == nil || *disk.CDRom.ReadOnly)

        return isReadOnlyCDRom
}