
kubevirt / kubevirt / b4b49d5a-93d3-4792-b0a5-720fc2abbc8c

18 Feb 2025 01:55AM UTC coverage: 71.73% (+0.01%) from 71.718%

Build b4b49d5a-93d3-4792-b0a5-720fc2abbc8c (push, prow, web-flow)

Merge pull request #13690 from dasionov/add_supported_machine_types_to_node_labeller

bug-fix: add machine type to `NodeSelector` to prevent breaking changes on unsupported nodes

41 of 43 new or added lines in 4 files covered (95.35%).
3 existing lines in 2 files now uncovered.
62300 of 86854 relevant lines covered (71.73%).
0.8 hits per line

Source File

/pkg/virt-handler/vm.go (65.21% covered)

1
/*
2
 * This file is part of the KubeVirt project
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 *
16
 * Copyright 2017 Red Hat, Inc.
17
 *
18
 */
19

20
package virthandler
21

22
import (
23
        "bytes"
24
        "context"
25
        "encoding/json"
26
        goerror "errors"
27
        "fmt"
28
        "io"
29
        "net"
30
        "os"
31
        "path/filepath"
32
        "regexp"
33
        "runtime"
34
        "sort"
35
        "strconv"
36
        "strings"
37
        "time"
38

39
        "github.com/mitchellh/go-ps"
40
        "github.com/opencontainers/runc/libcontainer/cgroups"
41
        "golang.org/x/sys/unix"
42
        "libvirt.org/go/libvirtxml"
43

44
        k8sv1 "k8s.io/api/core/v1"
45
        "k8s.io/apimachinery/pkg/api/equality"
46
        "k8s.io/apimachinery/pkg/api/resource"
47
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
48
        "k8s.io/apimachinery/pkg/types"
49
        "k8s.io/apimachinery/pkg/util/errors"
50
        "k8s.io/apimachinery/pkg/util/wait"
51
        "k8s.io/client-go/tools/cache"
52
        "k8s.io/client-go/tools/record"
53
        "k8s.io/client-go/util/workqueue"
54

55
        v1 "kubevirt.io/api/core/v1"
56
        "kubevirt.io/client-go/kubecli"
57
        "kubevirt.io/client-go/log"
58

59
        "kubevirt.io/kubevirt/pkg/config"
60
        containerdisk "kubevirt.io/kubevirt/pkg/container-disk"
61
        "kubevirt.io/kubevirt/pkg/controller"
62
        diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
63
        "kubevirt.io/kubevirt/pkg/executor"
64
        cmdv1 "kubevirt.io/kubevirt/pkg/handler-launcher-com/cmd/v1"
65
        hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
66
        hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
67
        netcache "kubevirt.io/kubevirt/pkg/network/cache"
68
        "kubevirt.io/kubevirt/pkg/network/domainspec"
69
        neterrors "kubevirt.io/kubevirt/pkg/network/errors"
70
        netsetup "kubevirt.io/kubevirt/pkg/network/setup"
71
        netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
72
        "kubevirt.io/kubevirt/pkg/pointer"
73
        "kubevirt.io/kubevirt/pkg/safepath"
74
        "kubevirt.io/kubevirt/pkg/storage/reservation"
75
        pvctypes "kubevirt.io/kubevirt/pkg/storage/types"
76
        storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
77
        "kubevirt.io/kubevirt/pkg/util"
78
        virtutil "kubevirt.io/kubevirt/pkg/util"
79
        "kubevirt.io/kubevirt/pkg/util/hardware"
80
        "kubevirt.io/kubevirt/pkg/util/migrations"
81
        virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
82
        "kubevirt.io/kubevirt/pkg/virt-controller/services"
83
        "kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
84
        virtcache "kubevirt.io/kubevirt/pkg/virt-handler/cache"
85
        "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
86
        cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
87
        container_disk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
88
        device_manager "kubevirt.io/kubevirt/pkg/virt-handler/device-manager"
89
        "kubevirt.io/kubevirt/pkg/virt-handler/heartbeat"
90
        hotplug_volume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"
91
        "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
92
        migrationproxy "kubevirt.io/kubevirt/pkg/virt-handler/migration-proxy"
93
        "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
94
        "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
95
        "kubevirt.io/kubevirt/pkg/virtiofs"
96
)
97

98
type netconf interface {
99
        Setup(vmi *v1.VirtualMachineInstance, networks []v1.Network, launcherPid int, preSetup func() error) error
100
        Teardown(vmi *v1.VirtualMachineInstance) error
101
}
102

103
type netstat interface {
104
        UpdateStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
105
        Teardown(vmi *v1.VirtualMachineInstance)
106
        PodInterfaceVolatileDataIsCached(vmi *v1.VirtualMachineInstance, ifaceName string) bool
107
        CachePodInterfaceVolatileData(vmi *v1.VirtualMachineInstance, ifaceName string, data *netcache.PodIfaceCacheData)
108
}
109

110
type downwardMetricsManager interface {
111
        Run(stopCh chan struct{})
112
        StartServer(vmi *v1.VirtualMachineInstance, pid int) error
113
        StopServer(vmi *v1.VirtualMachineInstance)
114
}
115

116
const (
117
        failedDetectIsolationFmt              = "failed to detect isolation for launcher pod: %v"
118
        unableCreateVirtLauncherConnectionFmt = "unable to create virt-launcher client connection: %v"
119
        // This value was determined after consulting with libvirt developers and performing extensive testing.
120
        parallelMultifdMigrationThreads = uint(8)
121
)
122

123
const (
124
        //VolumeReadyReason is the reason set when the volume is ready.
125
        VolumeReadyReason = "VolumeReady"
126
        //VolumeUnMountedFromPodReason is the reason set when the volume is unmounted from the virtlauncher pod
127
        VolumeUnMountedFromPodReason = "VolumeUnMountedFromPod"
128
        //VolumeMountedToPodReason is the reason set when the volume is mounted to the virtlauncher pod
129
        VolumeMountedToPodReason = "VolumeMountedToPod"
130
        //VolumeUnplugged is the reason set when the volume is completely unplugged from the VMI
131
        VolumeUnplugged = "VolumeUnplugged"
132
        //VMIDefined is the reason set when a VMI is defined
133
        VMIDefined = "VirtualMachineInstance defined."
134
        //VMIStarted is the reason set when a VMI is started
135
        VMIStarted = "VirtualMachineInstance started."
136
        //VMIShutdown is the reason set when a VMI is shutdown
137
        VMIShutdown = "The VirtualMachineInstance was shut down."
138
        //VMICrashed is the reason set when a VMI crashed
139
        VMICrashed = "The VirtualMachineInstance crashed."
140
        //VMIAbortingMigration is the reason set when migration is being aborted
141
        VMIAbortingMigration = "VirtualMachineInstance is aborting migration."
142
        //VMIMigrating in the reason set when the VMI is migrating
143
        VMIMigrating = "VirtualMachineInstance is migrating."
144
        //VMIMigrationTargetPrepared is the reason set when the migration target has been prepared
145
        VMIMigrationTargetPrepared = "VirtualMachineInstance Migration Target Prepared."
146
        //VMIStopping is the reason set when the VMI is stopping
147
        VMIStopping = "VirtualMachineInstance stopping"
148
        //VMIGracefulShutdown is the reason set when the VMI is gracefully shut down
149
        VMIGracefulShutdown = "Signaled Graceful Shutdown"
150
        //VMISignalDeletion is the reason set when the VMI has signal deletion
151
        VMISignalDeletion = "Signaled Deletion"
152

153
        // MemoryHotplugFailedReason is the reason set when the VM cannot hotplug memory
154
        memoryHotplugFailedReason = "Memory Hotplug Failed"
155
)
156

157
var getCgroupManager = func(vmi *v1.VirtualMachineInstance) (cgroup.Manager, error) {
×
158
        return cgroup.NewManagerFromVM(vmi)
×
159
}
×
160

161
func NewController(
162
        recorder record.EventRecorder,
163
        clientset kubecli.KubevirtClient,
164
        host string,
165
        migrationIpAddress string,
166
        virtShareDir string,
167
        virtPrivateDir string,
168
        kubeletPodsDir string,
169
        vmiSourceInformer cache.SharedIndexInformer,
170
        vmiTargetInformer cache.SharedIndexInformer,
171
        domainInformer cache.SharedInformer,
172
        maxDevices int,
173
        clusterConfig *virtconfig.ClusterConfig,
174
        podIsolationDetector isolation.PodIsolationDetector,
175
        migrationProxy migrationproxy.ProxyManager,
176
        downwardMetricsManager downwardMetricsManager,
177
        capabilities *libvirtxml.Caps,
178
        hostCpuModel string,
179
        netConf netconf,
180
        netStat netstat,
181
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator,
182
) (*VirtualMachineController, error) {
1✔
183

1✔
184
        queue := workqueue.NewTypedRateLimitingQueueWithConfig[string](
1✔
185
                workqueue.DefaultTypedControllerRateLimiter[string](),
1✔
186
                workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-handler-vm"},
1✔
187
        )
1✔
188

1✔
189
        containerDiskState := filepath.Join(virtPrivateDir, "container-disk-mount-state")
1✔
190
        if err := os.MkdirAll(containerDiskState, 0700); err != nil {
1✔
191
                return nil, err
×
192
        }
×
193

194
        hotplugState := filepath.Join(virtPrivateDir, "hotplug-volume-mount-state")
1✔
195
        if err := os.MkdirAll(hotplugState, 0700); err != nil {
1✔
196
                return nil, err
×
197
        }
×
198

199
        c := &VirtualMachineController{
1✔
200
                queue:                            queue,
1✔
201
                recorder:                         recorder,
1✔
202
                clientset:                        clientset,
1✔
203
                host:                             host,
1✔
204
                migrationIpAddress:               migrationIpAddress,
1✔
205
                virtShareDir:                     virtShareDir,
1✔
206
                vmiSourceStore:                   vmiSourceInformer.GetStore(),
1✔
207
                vmiTargetStore:                   vmiTargetInformer.GetStore(),
1✔
208
                domainStore:                      domainInformer.GetStore(),
1✔
209
                heartBeatInterval:                1 * time.Minute,
1✔
210
                migrationProxy:                   migrationProxy,
1✔
211
                podIsolationDetector:             podIsolationDetector,
1✔
212
                containerDiskMounter:             container_disk.NewMounter(podIsolationDetector, containerDiskState, clusterConfig),
1✔
213
                hotplugVolumeMounter:             hotplug_volume.NewVolumeMounter(hotplugState, kubeletPodsDir),
1✔
214
                clusterConfig:                    clusterConfig,
1✔
215
                virtLauncherFSRunDirPattern:      "/proc/%d/root/var/run",
1✔
216
                capabilities:                     capabilities,
1✔
217
                hostCpuModel:                     hostCpuModel,
1✔
218
                vmiExpectations:                  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
1✔
219
                sriovHotplugExecutorPool:         executor.NewRateLimitedExecutorPool(executor.NewExponentialLimitedBackoffCreator()),
1✔
220
                ioErrorRetryManager:              NewFailRetryManager("io-error-retry", 10*time.Second, 3*time.Minute, 30*time.Second),
1✔
221
                netConf:                          netConf,
1✔
222
                netStat:                          netStat,
1✔
223
                netBindingPluginMemoryCalculator: netBindingPluginMemoryCalculator,
1✔
224
        }
1✔
225

1✔
226
        c.hasSynced = func() bool {
1✔
227
                return domainInformer.HasSynced() && vmiSourceInformer.HasSynced() && vmiTargetInformer.HasSynced()
×
228
        }
×
229

230
        _, err := vmiSourceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
231
                AddFunc:    c.addFunc,
1✔
232
                DeleteFunc: c.deleteFunc,
1✔
233
                UpdateFunc: c.updateFunc,
1✔
234
        })
1✔
235
        if err != nil {
1✔
236
                return nil, err
×
237
        }
×
238

239
        _, err = vmiTargetInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
240
                AddFunc:    c.addFunc,
1✔
241
                DeleteFunc: c.deleteFunc,
1✔
242
                UpdateFunc: c.updateFunc,
1✔
243
        })
1✔
244
        if err != nil {
1✔
245
                return nil, err
×
246
        }
×
247

248
        _, err = domainInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
1✔
249
                AddFunc:    c.addDomainFunc,
1✔
250
                DeleteFunc: c.deleteDomainFunc,
1✔
251
                UpdateFunc: c.updateDomainFunc,
1✔
252
        })
1✔
253
        if err != nil {
1✔
254
                return nil, err
×
255
        }
×
256

257
        c.launcherClients = virtcache.LauncherClientInfoByVMI{}
1✔
258

1✔
259
        c.downwardMetricsManager = downwardMetricsManager
1✔
260

1✔
261
        c.domainNotifyPipes = make(map[string]string)
1✔
262

1✔
263
        permissions := "rw"
1✔
264
        if cgroups.IsCgroup2UnifiedMode() {
1✔
265
                // Need 'rwm' permissions otherwise ebpf filtering program attached by runc
×
266
                // will deny probing the device file with 'access' syscall. That in turn
×
267
                // will lead to virtqemud failure on VM startup.
×
268
                // This has been fixed upstream:
×
269
                //   https://github.com/opencontainers/runc/pull/2796
×
270
                // but the workaround is still needed to support previous versions without
×
271
                // the patch.
×
272
                permissions = "rwm"
×
273
        }
×
274

275
        c.deviceManagerController = device_manager.NewDeviceController(
1✔
276
                c.host,
1✔
277
                maxDevices,
1✔
278
                permissions,
1✔
279
                device_manager.PermanentHostDevicePlugins(maxDevices, permissions),
1✔
280
                clusterConfig,
1✔
281
                clientset.CoreV1())
1✔
282
        c.heartBeat = heartbeat.NewHeartBeat(clientset.CoreV1(), c.deviceManagerController, clusterConfig, host)
1✔
283

1✔
284
        return c, nil
1✔
285
}
286

287
type netBindingPluginMemoryCalculator interface {
288
        Calculate(vmi *v1.VirtualMachineInstance, registeredPlugins map[string]v1.InterfaceBindingPlugin) resource.Quantity
289
}
290

291
type VirtualMachineController struct {
292
        recorder                 record.EventRecorder
293
        clientset                kubecli.KubevirtClient
294
        host                     string
295
        migrationIpAddress       string
296
        virtShareDir             string
297
        virtPrivateDir           string
298
        queue                    workqueue.TypedRateLimitingInterface[string]
299
        vmiSourceStore           cache.Store
300
        vmiTargetStore           cache.Store
301
        domainStore              cache.Store
302
        launcherClients          virtcache.LauncherClientInfoByVMI
303
        heartBeatInterval        time.Duration
304
        deviceManagerController  *device_manager.DeviceController
305
        migrationProxy           migrationproxy.ProxyManager
306
        podIsolationDetector     isolation.PodIsolationDetector
307
        containerDiskMounter     container_disk.Mounter
308
        hotplugVolumeMounter     hotplug_volume.VolumeMounter
309
        clusterConfig            *virtconfig.ClusterConfig
310
        sriovHotplugExecutorPool *executor.RateLimitedExecutorPool
311
        downwardMetricsManager   downwardMetricsManager
312

313
        netConf                          netconf
314
        netStat                          netstat
315
        netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator
316

317
        domainNotifyPipes           map[string]string
318
        virtLauncherFSRunDirPattern string
319
        heartBeat                   *heartbeat.HeartBeat
320
        capabilities                *libvirtxml.Caps
321
        hostCpuModel                string
322
        vmiExpectations             *controller.UIDTrackingControllerExpectations
323
        ioErrorRetryManager         *FailRetryManager
324
        hasSynced                   func() bool
325
}
326

327
type virtLauncherCriticalSecurebootError struct {
328
        msg string
329
}
330

331
func (e *virtLauncherCriticalSecurebootError) Error() string { return e.msg }
×
332

333
type vmiIrrecoverableError struct {
334
        msg string
335
}
336

337
func (e *vmiIrrecoverableError) Error() string { return e.msg }
×
338

339
func formatIrrecoverableErrorMessage(domain *api.Domain) string {
×
340
        msg := "unknown reason"
×
341
        if domainPausedFailedPostCopy(domain) {
×
342
                msg = "VMI is irrecoverable due to failed post-copy migration"
×
343
        }
×
344
        return msg
×
345
}
346

347
func handleDomainNotifyPipe(domainPipeStopChan chan struct{}, ln net.Listener, virtShareDir string, vmi *v1.VirtualMachineInstance) {
1✔
348

1✔
349
        fdChan := make(chan net.Conn, 100)
1✔
350

1✔
351
        // Close listener and exit when stop encountered
1✔
352
        go func() {
2✔
353
                <-domainPipeStopChan
1✔
354
                log.Log.Object(vmi).Infof("closing notify pipe listener for vmi")
1✔
355
                if err := ln.Close(); err != nil {
1✔
356
                        log.Log.Object(vmi).Infof("failed closing notify pipe listener for vmi: %v", err)
×
357
                }
×
358
        }()
359

360
        // Listen for new connections,
361
        go func(vmi *v1.VirtualMachineInstance, ln net.Listener, domainPipeStopChan chan struct{}) {
2✔
362
                for {
2✔
363
                        fd, err := ln.Accept()
1✔
364
                        if err != nil {
2✔
365
                                if goerror.Is(err, net.ErrClosed) {
2✔
366
                                        // As Accept blocks, closing it is our mechanism to exit this loop
1✔
367
                                        return
1✔
368
                                }
1✔
369
                                log.Log.Reason(err).Error("Domain pipe accept error encountered.")
×
370
                                // keep listening until stop invoked
×
371
                                time.Sleep(1 * time.Second)
×
372
                        } else {
1✔
373
                                fdChan <- fd
1✔
374
                        }
1✔
375
                }
376
        }(vmi, ln, domainPipeStopChan)
377

378
        // Process new connections
379
        // exit when stop encountered
380
        go func(vmi *v1.VirtualMachineInstance, fdChan chan net.Conn, domainPipeStopChan chan struct{}) {
2✔
381
                for {
2✔
382
                        select {
1✔
383
                        case <-domainPipeStopChan:
1✔
384
                                return
1✔
385
                        case fd := <-fdChan:
1✔
386
                                go func(vmi *v1.VirtualMachineInstance) {
2✔
387
                                        defer fd.Close()
1✔
388

1✔
389
                                        // pipe the VMI domain-notify.sock to the virt-handler domain-notify.sock
1✔
390
                                        // so virt-handler receives notifications from the VMI
1✔
391
                                        conn, err := net.Dial("unix", filepath.Join(virtShareDir, "domain-notify.sock"))
1✔
392
                                        if err != nil {
2✔
393
                                                log.Log.Reason(err).Error("error connecting to domain-notify.sock for proxy connection")
1✔
394
                                                return
1✔
395
                                        }
1✔
396
                                        defer conn.Close()
1✔
397

1✔
398
                                        log.Log.Object(vmi).Infof("Accepted new notify pipe connection for vmi")
1✔
399
                                        copyErr := make(chan error, 2)
1✔
400
                                        go func() {
2✔
401
                                                _, err := io.Copy(fd, conn)
1✔
402
                                                copyErr <- err
1✔
403
                                        }()
1✔
404
                                        go func() {
2✔
405
                                                _, err := io.Copy(conn, fd)
1✔
406
                                                copyErr <- err
1✔
407
                                        }()
1✔
408

409
                                        // wait until one of the copy routines exit then
410
                                        // let the fd close
411
                                        err = <-copyErr
1✔
412
                                        if err != nil {
2✔
413
                                                log.Log.Object(vmi).Infof("closing notify pipe connection for vmi with error: %v", err)
1✔
414
                                        } else {
1✔
UNCOV
415
                                                log.Log.Object(vmi).Infof("gracefully closed notify pipe connection for vmi")
×
UNCOV
416
                                        }
×
417

418
                                }(vmi)
419
                        }
420
                }
421
        }(vmi, fdChan, domainPipeStopChan)
422
}
423

424
func (c *VirtualMachineController) startDomainNotifyPipe(domainPipeStopChan chan struct{}, vmi *v1.VirtualMachineInstance) error {
×
425

×
426
        res, err := c.podIsolationDetector.Detect(vmi)
×
427
        if err != nil {
×
428
                return fmt.Errorf("failed to detect isolation for launcher pod when setting up notify pipe: %v", err)
×
429
        }
×
430

431
        // inject the domain-notify.sock into the VMI pod.
432
        root, err := res.MountRoot()
×
433
        if err != nil {
×
434
                return err
×
435
        }
×
436
        socketDir, err := root.AppendAndResolveWithRelativeRoot(c.virtShareDir)
×
437
        if err != nil {
×
438
                return err
×
439
        }
×
440

441
        listener, err := safepath.ListenUnixNoFollow(socketDir, "domain-notify-pipe.sock")
×
442
        if err != nil {
×
443
                log.Log.Reason(err).Error("failed to create unix socket for proxy service")
×
444
                return err
×
445
        }
×
446
        socketPath, err := safepath.JoinNoFollow(socketDir, "domain-notify-pipe.sock")
×
447
        if err != nil {
×
448
                return err
×
449
        }
×
450

451
        if util.IsNonRootVMI(vmi) {
×
452
                err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath)
×
453
                if err != nil {
×
454
                        log.Log.Reason(err).Error("unable to change ownership for domain notify")
×
455
                        return err
×
456
                }
×
457
        }
458

459
        handleDomainNotifyPipe(domainPipeStopChan, listener, c.virtShareDir, vmi)
×
460

×
461
        return nil
×
462
}
463

464
// Determines if a domain's grace period has expired during shutdown.
465
// If the grace period has started but not expired, timeLeft represents
466
// the time in seconds left until the period expires.
467
// If the grace period has not started, timeLeft will be set to -1.
468
func (c *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasExpired bool, timeLeft int64) {
1✔
469

1✔
470
        hasExpired = false
1✔
471
        timeLeft = 0
1✔
472

1✔
473
        if dom == nil {
1✔
474
                hasExpired = true
×
475
                return
×
476
        }
×
477

478
        startTime := int64(0)
1✔
479
        if dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp != nil {
2✔
480
                startTime = dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp.UTC().Unix()
1✔
481
        }
1✔
482
        gracePeriod := dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds
1✔
483

1✔
484
        // If gracePeriod == 0, then there will be no startTime set, deletion
1✔
485
        // should occur immediately during shutdown.
1✔
486
        if gracePeriod == 0 {
1✔
487
                hasExpired = true
×
488
                return
×
489
        } else if startTime == 0 {
2✔
490
                // If gracePeriod > 0, then the shutdown signal needs to be sent
1✔
491
                // and the gracePeriod start time needs to be set.
1✔
492
                timeLeft = -1
1✔
493
                return
1✔
494
        }
1✔
495

496
        now := time.Now().UTC().Unix()
1✔
497
        diff := now - startTime
1✔
498

1✔
499
        if diff >= gracePeriod {
2✔
500
                hasExpired = true
1✔
501
                return
1✔
502
        }
1✔
503

504
        timeLeft = int64(gracePeriod - diff)
×
505
        if timeLeft < 1 {
×
506
                timeLeft = 1
×
507
        }
×
508
        return
×
509
}
510

511
func (c *VirtualMachineController) hasTargetDetectedReadyDomain(vmi *v1.VirtualMachineInstance) (bool, int64) {
1✔
512
        // give the target node 60 seconds to discover the libvirt domain via the domain informer
1✔
513
        // before allowing the VMI to be processed. This closes the gap between the
1✔
514
        // VMI's status getting updated to reflect the new source node, and the domain
1✔
515
        // informer firing the event to alert the source node of the new domain.
1✔
516
        migrationTargetDelayTimeout := 60
1✔
517

1✔
518
        if vmi.Status.MigrationState != nil &&
1✔
519
                vmi.Status.MigrationState.TargetNodeDomainDetected &&
1✔
520
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp != nil {
2✔
521

1✔
522
                return true, 0
1✔
523
        }
1✔
524

525
        nowUnix := time.Now().UTC().Unix()
×
526
        migrationEndUnix := vmi.Status.MigrationState.EndTimestamp.Time.UTC().Unix()
×
527

×
528
        diff := nowUnix - migrationEndUnix
×
529

×
530
        if diff > int64(migrationTargetDelayTimeout) {
×
531
                return false, 0
×
532
        }
×
533

534
        timeLeft := int64(migrationTargetDelayTimeout) - diff
×
535

×
536
        enqueueTime := timeLeft
×
537
        if enqueueTime < 5 {
×
538
                enqueueTime = 5
×
539
        }
×
540

541
        // re-enqueue the key to ensure it gets processed again within the right time.
542
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(enqueueTime)*time.Second)
×
543

×
544
        return false, timeLeft
×
545
}
546

547
// teardownNetwork performs network cache cleanup for a specific VMI.
548
func (c *VirtualMachineController) teardownNetwork(vmi *v1.VirtualMachineInstance) {
1✔
549
        if string(vmi.UID) == "" {
2✔
550
                return
1✔
551
        }
1✔
552
        if err := c.netConf.Teardown(vmi); err != nil {
1✔
553
                log.Log.Reason(err).Errorf("failed to delete VMI Network cache files: %s", err.Error())
×
554
        }
×
555
        c.netStat.Teardown(vmi)
1✔
556
}
557

558
func (c *VirtualMachineController) setupNetwork(vmi *v1.VirtualMachineInstance, networks []v1.Network) error {
1✔
559
        if len(networks) == 0 {
2✔
560
                return nil
1✔
561
        }
1✔
562

563
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
1✔
564
        if err != nil {
1✔
565
                return fmt.Errorf(failedDetectIsolationFmt, err)
×
566
        }
×
567
        rootMount, err := isolationRes.MountRoot()
1✔
568
        if err != nil {
1✔
569
                return err
×
570
        }
×
571

572
        return c.netConf.Setup(vmi, networks, isolationRes.Pid(), func() error {
1✔
573
                if virtutil.WantVirtioNetDevice(vmi) {
×
574
                        if err := c.claimDeviceOwnership(rootMount, "vhost-net"); err != nil {
×
575
                                return neterrors.CreateCriticalNetworkError(fmt.Errorf("failed to set up vhost-net device, %s", err))
×
576
                        }
×
577
                }
578
                if virtutil.NeedTunDevice(vmi) {
×
579
                        if err := c.claimDeviceOwnership(rootMount, "/net/tun"); err != nil {
×
580
                                return neterrors.CreateCriticalNetworkError(fmt.Errorf("failed to set up tun device, %s", err))
×
581
                        }
×
582
                }
583
                return nil
×
584
        })
585
}
586

587
func domainPausedFailedPostCopy(domain *api.Domain) bool {
1✔
588
        return domain != nil && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedPostcopyFailed
1✔
589
}
1✔
590

591
func domainMigrated(domain *api.Domain) bool {
1✔
592
        return domain != nil && domain.Status.Status == api.Shutoff && domain.Status.Reason == api.ReasonMigrated
1✔
593
}
1✔
594

595
func canUpdateToMounted(currentPhase v1.VolumePhase) bool {
1✔
596
        return currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending || currentPhase == v1.HotplugVolumeAttachedToNode
1✔
597
}
1✔
598

599
func canUpdateToUnmounted(currentPhase v1.VolumePhase) bool {
1✔
600
        return currentPhase == v1.VolumeReady || currentPhase == v1.HotplugVolumeMounted || currentPhase == v1.HotplugVolumeAttachedToNode
1✔
601
}
1✔
602

603
func (c *VirtualMachineController) setMigrationProgressStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
604
        if domain == nil ||
1✔
605
                domain.Spec.Metadata.KubeVirt.Migration == nil ||
1✔
606
                vmi.Status.MigrationState == nil ||
1✔
607
                !c.isMigrationSource(vmi) {
2✔
608
                return
1✔
609
        }
1✔
610

611
        migrationMetadata := domain.Spec.Metadata.KubeVirt.Migration
1✔
612
        if migrationMetadata.UID != vmi.Status.MigrationState.MigrationUID {
1✔
613
                return
×
614
        }
×
615

616
        if vmi.Status.MigrationState.EndTimestamp == nil && migrationMetadata.EndTimestamp != nil {
2✔
617
                if migrationMetadata.Failed {
1✔
618
                        vmi.Status.MigrationState.FailureReason = migrationMetadata.FailureReason
×
619
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("VirtualMachineInstance migration uid %s failed. reason:%s", string(migrationMetadata.UID), migrationMetadata.FailureReason))
×
620
                }
×
621
        }
622

623
        if vmi.Status.MigrationState.StartTimestamp == nil {
2✔
624
                vmi.Status.MigrationState.StartTimestamp = migrationMetadata.StartTimestamp
1✔
625
        }
1✔
626
        if vmi.Status.MigrationState.EndTimestamp == nil {
2✔
627
                vmi.Status.MigrationState.EndTimestamp = migrationMetadata.EndTimestamp
1✔
628
        }
1✔
629
        vmi.Status.MigrationState.AbortStatus = v1.MigrationAbortStatus(migrationMetadata.AbortStatus)
1✔
630
        vmi.Status.MigrationState.Completed = migrationMetadata.Completed
1✔
631
        vmi.Status.MigrationState.Failed = migrationMetadata.Failed
1✔
632
        vmi.Status.MigrationState.Mode = migrationMetadata.Mode
1✔
633
}
634

635
func (c *VirtualMachineController) migrationSourceUpdateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
636

1✔
637
        vmi := origVMI.DeepCopy()
1✔
638
        oldStatus := vmi.DeepCopy().Status
1✔
639

1✔
640
        // if a migration happens very quickly, it's possible parts of the in
1✔
641
        // progress status wasn't set. We need to make sure we set this even
1✔
642
        // if the migration has completed
1✔
643
        c.setMigrationProgressStatus(vmi, domain)
1✔
644

1✔
645
        // handle migrations differently than normal status updates.
1✔
646
        //
1✔
647
        // When a successful migration is detected, we must transfer ownership of the VMI
1✔
648
        // from the source node (this node) to the target node (node the domain was migrated to).
1✔
649
        //
1✔
650
        // Transfer ownership by...
1✔
651
        // 1. Marking vmi.Status.MigrationState as completed
1✔
652
        // 2. Update the vmi.Status.NodeName to reflect the target node's name
1✔
653
        // 3. Update the VMI's NodeNameLabel annotation to reflect the target node's name
1✔
654
        // 4. Clear the LauncherContainerImageVersion which virt-controller will detect
1✔
655
        //    and accurately based on the version used on the target pod
1✔
656
        //
1✔
657
        // After a migration, the VMI's phase is no longer owned by this node. Only the
1✔
658
        // MigrationState status field is eligible to be mutated.
1✔
659
        migrationHost := ""
1✔
660
        if vmi.Status.MigrationState != nil {
2✔
661
                migrationHost = vmi.Status.MigrationState.TargetNode
1✔
662
        }
1✔
663

664
        if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.EndTimestamp == nil {
1✔
665
                now := metav1.NewTime(time.Now())
×
666
                vmi.Status.MigrationState.EndTimestamp = &now
×
667
        }
×
668

669
        targetNodeDetectedDomain, timeLeft := c.hasTargetDetectedReadyDomain(vmi)
1✔
670
        // If we can't detect where the migration went to, then we have no
1✔
671
        // way of transferring ownership. The only option here is to move the
1✔
672
        // vmi to failed.  The cluster vmi controller will then tear down the
1✔
673
        // resulting pods.
1✔
674
        if migrationHost == "" {
1✔
675
                // migrated to unknown host.
×
676
                vmi.Status.Phase = v1.Failed
×
677
                vmi.Status.MigrationState.Completed = true
×
678
                vmi.Status.MigrationState.Failed = true
×
679

×
680
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to unknown host."))
×
681
        } else if !targetNodeDetectedDomain {
1✔
682
                if timeLeft <= 0 {
×
683
                        vmi.Status.Phase = v1.Failed
×
684
                        vmi.Status.MigrationState.Completed = true
×
685
                        vmi.Status.MigrationState.Failed = true
×
686

×
687
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance's domain was never observed on the target after the migration completed within the timeout period."))
×
688
                } else {
×
689
                        log.Log.Object(vmi).Info("Waiting on the target node to observe the migrated domain before performing the handoff")
×
690
                }
×
691
        } else if vmi.Status.MigrationState != nil {
2✔
692
                // this is the migration ACK.
1✔
693
                // At this point we know that the migration has completed and that
1✔
694
                // the target node has seen the domain event.
1✔
695
                vmi.Labels[v1.NodeNameLabel] = migrationHost
1✔
696
                delete(vmi.Labels, v1.OutdatedLauncherImageLabel)
1✔
697
                vmi.Status.LauncherContainerImageVersion = ""
1✔
698
                vmi.Status.NodeName = migrationHost
1✔
699
                // clean the evacuation node name since have already migrated to a new node
1✔
700
                vmi.Status.EvacuationNodeName = ""
1✔
701
                vmi.Status.MigrationState.Completed = true
1✔
702
                // update the vmi migrationTransport to indicate that next migration should use unix URI
1✔
703
                // new workloads will set the migrationTransport on their creation, however, legacy workloads
1✔
704
                // can make the switch only after the first migration
1✔
705
                vmi.Status.MigrationTransport = v1.MigrationTransportUnix
1✔
706
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrated.String(), fmt.Sprintf("The VirtualMachineInstance migrated to node %s.", migrationHost))
1✔
707
                log.Log.Object(vmi).Infof("migration completed to node %s", migrationHost)
1✔
708
        }
1✔
709

710
        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
2✔
711
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
712
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
713
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
1✔
714
                if err != nil {
1✔
715
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
×
716
                        return err
×
717
                }
×
718
        }
719
        return nil
1✔
720
}
721

722
func domainIsActiveOnTarget(domain *api.Domain) bool {
1✔
723

1✔
724
        if domain == nil {
1✔
725
                return false
×
726
        }
×
727

728
        // It's possible for the domain to be active on the target node if the domain is
729
        // 1. Running
730
        // 2. User initiated Paused
731
        if domain.Status.Status == api.Running {
2✔
732
                return true
1✔
733
        } else if domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedUser {
1✔
734
                return true
×
735
        }
×
736
        return false
×
737

738
}
739

740
func (c *VirtualMachineController) migrationTargetUpdateVMIStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
741

1✔
742
        vmiCopy := vmi.DeepCopy()
1✔
743

1✔
744
        if migrations.MigrationFailed(vmi) {
2✔
745
                // nothing left to report on the target node if the migration failed
1✔
746
                return nil
1✔
747
        }
1✔
748

749
        domainExists := domain != nil
1✔
750

1✔
751
        // Handle post migration
1✔
752
        if domainExists && vmi.Status.MigrationState != nil && !vmi.Status.MigrationState.TargetNodeDomainDetected {
2✔
753
                // record that we've see the domain populated on the target's node
1✔
754
                log.Log.Object(vmi).Info("The target node received the migrated domain")
1✔
755
                vmiCopy.Status.MigrationState.TargetNodeDomainDetected = true
1✔
756

1✔
757
                // adjust QEMU process memlock limits in order to enable old virt-launcher pod's to
1✔
758
                // perform hotplug host-devices on post migration.
1✔
759
                if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
1✔
760
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "Failed to update target node qemu memory limits during live migration")
×
761
                }
×
762

763
        }
764

765
        if domainExists &&
1✔
766
                domainIsActiveOnTarget(domain) &&
1✔
767
                vmi.Status.MigrationState != nil &&
1✔
768
                vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp == nil {
2✔
769

1✔
770
                // record the moment we detected the domain is running.
1✔
771
                // This is used as a trigger to help coordinate when CNI drivers
1✔
772
                // fail over the IP to the new pod.
1✔
773
                log.Log.Object(vmi).Info("The target node received the running migrated domain")
1✔
774
                now := metav1.Now()
1✔
775
                vmiCopy.Status.MigrationState.TargetNodeDomainReadyTimestamp = &now
1✔
776
                c.finalizeMigration(vmiCopy)
1✔
777
        }
1✔
778

779
        if !migrations.IsMigrating(vmi) {
2✔
780
                destSrcPortsMap := c.migrationProxy.GetTargetListenerPorts(string(vmi.UID))
1✔
781
                if len(destSrcPortsMap) == 0 {
1✔
782
                        msg := "target migration listener is not up for this vmi"
×
783
                        log.Log.Object(vmi).Error(msg)
×
784
                        return fmt.Errorf(msg)
×
785
                }
×
786

787
                hostAddress := ""
1✔
788
                // advertise the listener address to the source node
1✔
789
                if vmi.Status.MigrationState != nil {
2✔
790
                        hostAddress = vmi.Status.MigrationState.TargetNodeAddress
1✔
791
                }
1✔
792
                if hostAddress != c.migrationIpAddress {
2✔
793
                        portsList := make([]string, 0, len(destSrcPortsMap))
1✔
794

1✔
795
                        for k := range destSrcPortsMap {
2✔
796
                                portsList = append(portsList, k)
1✔
797
                        }
1✔
798
                        portsStrList := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(portsList)), ","), "[]")
1✔
799
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), fmt.Sprintf("Migration Target is listening at %s, on ports: %s", c.migrationIpAddress, portsStrList))
1✔
800
                        vmiCopy.Status.MigrationState.TargetNodeAddress = c.migrationIpAddress
1✔
801
                        vmiCopy.Status.MigrationState.TargetDirectMigrationNodePorts = destSrcPortsMap
1✔
802
                }
803

804
                // If the migrated VMI requires dedicated CPUs, report the new pod CPU set to the source node
805
                // via the VMI migration status in order to patch the domain pre migration
806
                if vmi.IsCPUDedicated() {
1✔
807
                        err := c.reportDedicatedCPUSetForMigratingVMI(vmiCopy)
×
808
                        if err != nil {
×
809
                                return err
×
810
                        }
×
811
                        err = c.reportTargetTopologyForMigratingVMI(vmiCopy)
×
812
                        if err != nil {
×
813
                                return err
×
814
                        }
×
815
                }
816
        }
817

818
        // update the VMI if necessary
819
        if !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) {
2✔
820
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
821
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
822
                _, err := c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmiCopy, metav1.UpdateOptions{})
1✔
823
                if err != nil {
1✔
824
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
×
825
                        return err
×
826
                }
×
827
        }
828

829
        return nil
1✔
830
}
831

832
func (c *VirtualMachineController) generateEventsForVolumeStatusChange(vmi *v1.VirtualMachineInstance, newStatusMap map[string]v1.VolumeStatus) {
1✔
833
        newStatusMapCopy := make(map[string]v1.VolumeStatus)
1✔
834
        for k, v := range newStatusMap {
2✔
835
                newStatusMapCopy[k] = v
1✔
836
        }
1✔
837
        for _, oldStatus := range vmi.Status.VolumeStatus {
2✔
838
                newStatus, ok := newStatusMap[oldStatus.Name]
1✔
839
                if !ok {
1✔
840
                        // status got removed
×
841
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, VolumeUnplugged, fmt.Sprintf("Volume %s has been unplugged", oldStatus.Name))
×
842
                        continue
×
843
                }
844
                if newStatus.Phase != oldStatus.Phase {
2✔
845
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, newStatus.Reason, newStatus.Message)
1✔
846
                }
1✔
847
                delete(newStatusMapCopy, newStatus.Name)
1✔
848
        }
849
        // Send events for any new statuses.
850
        for _, v := range newStatusMapCopy {
2✔
851
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v.Reason, v.Message)
1✔
852
        }
1✔
853
}
854

855
func (c *VirtualMachineController) updateHotplugVolumeStatus(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, specVolumeMap map[string]v1.Volume) (v1.VolumeStatus, bool) {
1✔
856
        needsRefresh := false
1✔
857
        if volumeStatus.Target == "" {
2✔
858
                needsRefresh = true
1✔
859
                mounted, err := c.hotplugVolumeMounter.IsMounted(vmi, volumeStatus.Name, volumeStatus.HotplugVolume.AttachPodUID)
1✔
860
                if err != nil {
1✔
861
                        log.Log.Object(vmi).Errorf("error occurred while checking if volume is mounted: %v", err)
×
862
                }
×
863
                if mounted {
2✔
864
                        if _, ok := specVolumeMap[volumeStatus.Name]; ok && canUpdateToMounted(volumeStatus.Phase) {
2✔
865
                                log.DefaultLogger().Infof("Marking volume %s as mounted in pod, it can now be attached", volumeStatus.Name)
1✔
866
                                // mounted, and still in spec, and in phase we can change, update status to mounted.
1✔
867
                                volumeStatus.Phase = v1.HotplugVolumeMounted
1✔
868
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been mounted in virt-launcher pod", volumeStatus.Name)
1✔
869
                                volumeStatus.Reason = VolumeMountedToPodReason
1✔
870
                        }
1✔
871
                } else {
1✔
872
                        // Not mounted, check if the volume is in the spec, if not update status
1✔
873
                        if _, ok := specVolumeMap[volumeStatus.Name]; !ok && canUpdateToUnmounted(volumeStatus.Phase) {
2✔
874
                                log.DefaultLogger().Infof("Marking volume %s as unmounted from pod, it can now be detached", volumeStatus.Name)
1✔
875
                                // Not mounted.
1✔
876
                                volumeStatus.Phase = v1.HotplugVolumeUnMounted
1✔
877
                                volumeStatus.Message = fmt.Sprintf("Volume %s has been unmounted from virt-launcher pod", volumeStatus.Name)
1✔
878
                                volumeStatus.Reason = VolumeUnMountedFromPodReason
1✔
879
                        }
1✔
880
                }
881
        } else {
1✔
882
                // Successfully attached to VM.
1✔
883
                volumeStatus.Phase = v1.VolumeReady
1✔
884
                volumeStatus.Message = fmt.Sprintf("Successfully attach hotplugged volume %s to VM", volumeStatus.Name)
1✔
885
                volumeStatus.Reason = VolumeReadyReason
1✔
886
        }
1✔
887
        return volumeStatus, needsRefresh
1✔
888
}
889

890
func needToComputeChecksums(vmi *v1.VirtualMachineInstance) bool {
1✔
891
        containerDisks := map[string]*v1.Volume{}
1✔
892
        for _, volume := range vmi.Spec.Volumes {
2✔
893
                if volume.VolumeSource.ContainerDisk != nil {
2✔
894
                        containerDisks[volume.Name] = &volume
1✔
895
                }
1✔
896
        }
897

898
        for i := range vmi.Status.VolumeStatus {
2✔
899
                _, isContainerDisk := containerDisks[vmi.Status.VolumeStatus[i].Name]
1✔
900
                if !isContainerDisk {
2✔
901
                        continue
1✔
902
                }
903

904
                if vmi.Status.VolumeStatus[i].ContainerDiskVolume == nil ||
1✔
905
                        vmi.Status.VolumeStatus[i].ContainerDiskVolume.Checksum == 0 {
2✔
906
                        return true
1✔
907
                }
1✔
908
        }
909

910
        if util.HasKernelBootContainerImage(vmi) {
1✔
911
                if vmi.Status.KernelBootStatus == nil {
×
912
                        return true
×
913
                }
×
914

915
                kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
×
916

×
917
                if kernelBootContainer.KernelPath != "" &&
×
918
                        (vmi.Status.KernelBootStatus.KernelInfo == nil ||
×
919
                                vmi.Status.KernelBootStatus.KernelInfo.Checksum == 0) {
×
920
                        return true
×
921

×
922
                }
×
923

924
                if kernelBootContainer.InitrdPath != "" &&
×
925
                        (vmi.Status.KernelBootStatus.InitrdInfo == nil ||
×
926
                                vmi.Status.KernelBootStatus.InitrdInfo.Checksum == 0) {
×
927
                        return true
×
928

×
929
                }
×
930
        }
931

932
        return false
1✔
933
}
934

935
func (c *VirtualMachineController) updateChecksumInfo(vmi *v1.VirtualMachineInstance, syncError error) error {
1✔
936

1✔
937
        if syncError != nil || vmi.DeletionTimestamp != nil || !needToComputeChecksums(vmi) {
2✔
938
                return nil
1✔
939
        }
1✔
940

941
        diskChecksums, err := c.containerDiskMounter.ComputeChecksums(vmi)
1✔
942
        if goerror.Is(err, container_disk.ErrDiskContainerGone) {
1✔
943
                log.Log.Errorf("cannot compute checksums as containerdisk/kernelboot containers seem to have been terminated")
×
944
                return nil
×
945
        }
×
946
        if err != nil {
1✔
947
                return err
×
948
        }
×
949

950
        // containerdisks
951
        for i := range vmi.Status.VolumeStatus {
2✔
952
                checksum, exists := diskChecksums.ContainerDiskChecksums[vmi.Status.VolumeStatus[i].Name]
1✔
953
                if !exists {
1✔
954
                        // not a containerdisk
×
955
                        continue
×
956
                }
957

958
                vmi.Status.VolumeStatus[i].ContainerDiskVolume = &v1.ContainerDiskInfo{
1✔
959
                        Checksum: checksum,
1✔
960
                }
1✔
961
        }
962

963
        // kernelboot
964
        if util.HasKernelBootContainerImage(vmi) {
2✔
965
                vmi.Status.KernelBootStatus = &v1.KernelBootStatus{}
1✔
966

1✔
967
                if diskChecksums.KernelBootChecksum.Kernel != nil {
2✔
968
                        vmi.Status.KernelBootStatus.KernelInfo = &v1.KernelInfo{
1✔
969
                                Checksum: *diskChecksums.KernelBootChecksum.Kernel,
1✔
970
                        }
1✔
971
                }
1✔
972

973
                if diskChecksums.KernelBootChecksum.Initrd != nil {
2✔
974
                        vmi.Status.KernelBootStatus.InitrdInfo = &v1.InitrdInfo{
1✔
975
                                Checksum: *diskChecksums.KernelBootChecksum.Initrd,
1✔
976
                        }
1✔
977
                }
1✔
978
        }
979

980
        return nil
1✔
981
}
982

983
func (c *VirtualMachineController) updateVolumeStatusesFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
1✔
984
        // used by unit test
1✔
985
        hasHotplug := false
1✔
986

1✔
987
        if domain == nil {
2✔
988
                return hasHotplug
1✔
989
        }
1✔
990

991
        if len(vmi.Status.VolumeStatus) > 0 {
2✔
992
                diskDeviceMap := make(map[string]string)
1✔
993
                for _, disk := range domain.Spec.Devices.Disks {
2✔
994
                        diskDeviceMap[disk.Alias.GetName()] = disk.Target.Device
1✔
995
                }
1✔
996
                specVolumeMap := make(map[string]v1.Volume)
1✔
997
                for _, volume := range vmi.Spec.Volumes {
2✔
998
                        specVolumeMap[volume.Name] = volume
1✔
999
                }
1✔
1000
                newStatusMap := make(map[string]v1.VolumeStatus)
1✔
1001
                newStatuses := make([]v1.VolumeStatus, 0)
1✔
1002
                needsRefresh := false
1✔
1003
                for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
1004
                        tmpNeedsRefresh := false
1✔
1005
                        if _, ok := diskDeviceMap[volumeStatus.Name]; ok {
2✔
1006
                                volumeStatus.Target = diskDeviceMap[volumeStatus.Name]
1✔
1007
                        }
1✔
1008
                        if volumeStatus.HotplugVolume != nil {
2✔
1009
                                hasHotplug = true
1✔
1010
                                volumeStatus, tmpNeedsRefresh = c.updateHotplugVolumeStatus(vmi, volumeStatus, specVolumeMap)
1✔
1011
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
1012
                        }
1✔
1013
                        if volumeStatus.MemoryDumpVolume != nil {
2✔
1014
                                volumeStatus, tmpNeedsRefresh = c.updateMemoryDumpInfo(vmi, volumeStatus, domain)
1✔
1015
                                needsRefresh = needsRefresh || tmpNeedsRefresh
1✔
1016
                        }
1✔
1017
                        newStatuses = append(newStatuses, volumeStatus)
1✔
1018
                        newStatusMap[volumeStatus.Name] = volumeStatus
1✔
1019
                }
1020
                sort.SliceStable(newStatuses, func(i, j int) bool {
2✔
1021
                        return strings.Compare(newStatuses[i].Name, newStatuses[j].Name) == -1
1✔
1022
                })
1✔
1023
                if needsRefresh {
2✔
1024
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
1✔
1025
                }
1✔
1026
                c.generateEventsForVolumeStatusChange(vmi, newStatusMap)
1✔
1027
                vmi.Status.VolumeStatus = newStatuses
1✔
1028
        }
1029
        return hasHotplug
1✔
1030
}
1031

1032
func (c *VirtualMachineController) updateGuestInfoFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
1033

1✔
1034
        if domain == nil {
2✔
1035
                return
1✔
1036
        }
1✔
1037

1038
        if vmi.Status.GuestOSInfo.Name != domain.Status.OSInfo.Name {
2✔
1039
                vmi.Status.GuestOSInfo.Name = domain.Status.OSInfo.Name
1✔
1040
                vmi.Status.GuestOSInfo.Version = domain.Status.OSInfo.Version
1✔
1041
                vmi.Status.GuestOSInfo.KernelRelease = domain.Status.OSInfo.KernelRelease
1✔
1042
                vmi.Status.GuestOSInfo.PrettyName = domain.Status.OSInfo.PrettyName
1✔
1043
                vmi.Status.GuestOSInfo.VersionID = domain.Status.OSInfo.VersionId
1✔
1044
                vmi.Status.GuestOSInfo.KernelVersion = domain.Status.OSInfo.KernelVersion
1✔
1045
                vmi.Status.GuestOSInfo.Machine = domain.Status.OSInfo.Machine
1✔
1046
                vmi.Status.GuestOSInfo.ID = domain.Status.OSInfo.Id
1✔
1047
        }
1✔
1048
}
1049

1050
func (c *VirtualMachineController) updateAccessCredentialConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1051

1✔
1052
        if domain == nil || domain.Spec.Metadata.KubeVirt.AccessCredential == nil {
2✔
1053
                return
1✔
1054
        }
1✔
1055

1056
        message := domain.Spec.Metadata.KubeVirt.AccessCredential.Message
1✔
1057
        status := k8sv1.ConditionFalse
1✔
1058
        if domain.Spec.Metadata.KubeVirt.AccessCredential.Succeeded {
2✔
1059
                status = k8sv1.ConditionTrue
1✔
1060
        }
1✔
1061

1062
        add := false
1✔
1063
        condition := condManager.GetCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1064
        if condition == nil {
2✔
1065
                add = true
1✔
1066
        } else if condition.Status != status || condition.Message != message {
3✔
1067
                // if not as expected, remove, then add.
1✔
1068
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAccessCredentialsSynchronized)
1✔
1069
                add = true
1✔
1070
        }
1✔
1071
        if add {
2✔
1072
                newCondition := v1.VirtualMachineInstanceCondition{
1✔
1073
                        Type:               v1.VirtualMachineInstanceAccessCredentialsSynchronized,
1✔
1074
                        LastTransitionTime: metav1.Now(),
1✔
1075
                        Status:             status,
1✔
1076
                        Message:            message,
1✔
1077
                }
1✔
1078
                vmi.Status.Conditions = append(vmi.Status.Conditions, newCondition)
1✔
1079
                if status == k8sv1.ConditionTrue {
2✔
1080
                        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.AccessCredentialsSyncSuccess.String(), message)
1✔
1081
                } else {
2✔
1082
                        c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.AccessCredentialsSyncFailed.String(), message)
1✔
1083
                }
1✔
1084
        }
1085
}
1086

1087
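// updateLiveMigrationConditions recalculates the IsMigratable and storage
// live-migration conditions and warns when an eviction strategy is set on a
// VMI that is currently not migratable.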
func (c *VirtualMachineController) updateLiveMigrationConditions(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1088
        // Calculate whether the VM is migratable
1✔
1089
        liveMigrationCondition, isBlockMigration := c.calculateLiveMigrationCondition(vmi)
1✔
1090
        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceIsMigratable) {
2✔
1091
                vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
1✔
1092
                // Set VMI Migration Method
1✔
1093
                if isBlockMigration {
2✔
1094
                        vmi.Status.MigrationMethod = v1.BlockMigration
1✔
1095
                } else {
2✔
1096
                        vmi.Status.MigrationMethod = v1.LiveMigration
1✔
1097
                }
1✔
1098
        } else {
1✔
1099
                cond := condManager.GetCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
1✔
1100
                if !equality.Semantic.DeepEqual(cond, liveMigrationCondition) {
1✔
1101
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceIsMigratable)
×
1102
                        vmi.Status.Conditions = append(vmi.Status.Conditions, *liveMigrationCondition)
×
1103
                }
×
1104
        }
1105
        storageLiveMigCond := c.calculateLiveStorageMigrationCondition(vmi)
1✔
1106
        condManager.UpdateCondition(vmi, storageLiveMigCond)
1✔
1107
        evictable := migrations.VMIMigratableOnEviction(c.clusterConfig, vmi)
1✔
1108
        if evictable && liveMigrationCondition.Status == k8sv1.ConditionFalse {
2✔
1109
                c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, v1.Migrated.String(), "EvictionStrategy is set but vmi is not migratable; %s", liveMigrationCondition.Message)
1✔
1110
        }
1✔
1111
}
1112

1113
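// updateGuestAgentConditions tracks whether the qemu guest agent channel is connected
// and whether the connected agent is supported, updating the AgentConnected and
// UnsupportedAgent conditions accordingly.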
func (c *VirtualMachineController) updateGuestAgentConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
1✔
1114

1✔
1115
        // Update the condition when GA is connected
1✔
1116
        channelConnected := false
1✔
1117
        if domain != nil {
2✔
1118
                for _, channel := range domain.Spec.Devices.Channels {
2✔
1119
                        if channel.Target != nil {
2✔
1120
                                log.Log.V(4).Infof("Channel: %s, %s", channel.Target.Name, channel.Target.State)
1✔
1121
                                if channel.Target.Name == "org.qemu.guest_agent.0" {
2✔
1122
                                        if channel.Target.State == "connected" {
2✔
1123
                                                channelConnected = true
1✔
1124
                                        }
1✔
1125
                                }
1126

1127
                        }
1128
                }
1129
        }
1130

1131
        switch {
1✔
1132
        case channelConnected && !condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected):
1✔
1133
                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
1134
                        Type:          v1.VirtualMachineInstanceAgentConnected,
1✔
1135
                        LastProbeTime: metav1.Now(),
1✔
1136
                        Status:        k8sv1.ConditionTrue,
1✔
1137
                }
1✔
1138
                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
1139
        case !channelConnected:
1✔
1140
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceAgentConnected)
1✔
1141
        }
1142

1143
        if condManager.HasCondition(vmi, v1.VirtualMachineInstanceAgentConnected) {
2✔
1144
                client, err := c.getLauncherClient(vmi)
1✔
1145
                if err != nil {
1✔
1146
                        return err
×
1147
                }
×
1148

1149
                guestInfo, err := client.GetGuestInfo()
1✔
1150
                if err != nil {
1✔
1151
                        return err
×
1152
                }
×
1153

1154
                var supported = false
1✔
1155
                var reason = ""
1✔
1156

1✔
1157
                // For current versions, virt-launcher's supported commands will always contain data.
1✔
1158
                // For backwards compatibility: during upgrade from a previous version of KubeVirt,
1✔
1159
                // virt-launcher might not provide any supported commands. If the list of supported
1✔
1160
                // commands is empty, fall back to previous behavior.
1✔
1161
                if len(guestInfo.SupportedCommands) > 0 {
1✔
1162
                        supported, reason = isGuestAgentSupported(vmi, guestInfo.SupportedCommands)
×
1163
                        log.Log.V(3).Object(vmi).Info(reason)
×
1164
                } else {
1✔
1165
                        for _, version := range c.clusterConfig.GetSupportedAgentVersions() {
2✔
1166
                                supported = supported || regexp.MustCompile(version).MatchString(guestInfo.GAVersion)
1✔
1167
                        }
1✔
1168
                        if !supported {
2✔
1169
                                reason = fmt.Sprintf("Guest agent version '%s' is not supported", guestInfo.GAVersion)
1✔
1170
                        }
1✔
1171
                }
1172

1173
                if !supported {
2✔
1174
                        if !condManager.HasCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent) {
2✔
1175
                                agentCondition := v1.VirtualMachineInstanceCondition{
1✔
1176
                                        Type:          v1.VirtualMachineInstanceUnsupportedAgent,
1✔
1177
                                        LastProbeTime: metav1.Now(),
1✔
1178
                                        Status:        k8sv1.ConditionTrue,
1✔
1179
                                        Reason:        reason,
1✔
1180
                                }
1✔
1181
                                vmi.Status.Conditions = append(vmi.Status.Conditions, agentCondition)
1✔
1182
                        }
1✔
1183
                } else {
×
1184
                        condManager.RemoveCondition(vmi, v1.VirtualMachineInstanceUnsupportedAgent)
×
1185
                }
×
1186

1187
        }
1188
        return nil
1✔
1189
}
1190

1191
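// updatePausedConditions adds or removes the Paused condition based on the
// domain state reported by libvirt.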
func (c *VirtualMachineController) updatePausedConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) {
1✔
1192

1✔
1193
        // Update paused condition in case VMI was paused / unpaused
1✔
1194
        if domain != nil && domain.Status.Status == api.Paused {
2✔
1195
                if !condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
2✔
1196
                        reason := domain.Status.Reason
1✔
1197
                        if c.isVMIPausedDuringMigration(vmi) {
1✔
1198
                                reason = api.ReasonPausedMigration
×
1199
                        }
×
1200
                        calculatePausedCondition(vmi, reason)
1✔
1201
                }
1202
        } else if condManager.HasCondition(vmi, v1.VirtualMachineInstancePaused) {
1✔
1203
                log.Log.Object(vmi).V(3).Info("Removing paused condition")
×
1204
                condManager.RemoveCondition(vmi, v1.VirtualMachineInstancePaused)
×
1205
        }
×
1206
}
1207

1208
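// dumpTargetFile builds a timestamped file name for a memory dump of the given
// VMI and volume.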
func dumpTargetFile(vmiName, volName string) string {
1✔
1209
        targetFileName := fmt.Sprintf("%s-%s-%s.memory.dump", vmiName, volName, time.Now().Format("20060102-150405"))
1✔
1210
        return targetFileName
1✔
1211
}
1✔
1212

1213
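// updateMemoryDumpInfo advances the phase of a memory dump volume based on the
// memory dump metadata reported by the domain, returning the updated status and
// whether another refresh is needed.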
func (c *VirtualMachineController) updateMemoryDumpInfo(vmi *v1.VirtualMachineInstance, volumeStatus v1.VolumeStatus, domain *api.Domain) (v1.VolumeStatus, bool) {
1✔
1214
        needsRefresh := false
1✔
1215
        switch volumeStatus.Phase {
1✔
1216
        case v1.HotplugVolumeMounted:
1✔
1217
                needsRefresh = true
1✔
1218
                log.Log.Object(vmi).V(3).Infof("Memory dump volume %s attached, marking it in progress", volumeStatus.Name)
1✔
1219
                volumeStatus.Phase = v1.MemoryDumpVolumeInProgress
1✔
1220
                volumeStatus.Message = fmt.Sprintf("Memory dump Volume %s is attached, getting memory dump", volumeStatus.Name)
1✔
1221
                volumeStatus.Reason = VolumeMountedToPodReason
1✔
1222
                volumeStatus.MemoryDumpVolume.TargetFileName = dumpTargetFile(vmi.Name, volumeStatus.Name)
1✔
1223
        case v1.MemoryDumpVolumeInProgress:
1✔
1224
                memoryDumpMetadata := domain.Spec.Metadata.KubeVirt.MemoryDump
1✔
1225
                if memoryDumpMetadata == nil || memoryDumpMetadata.FileName != volumeStatus.MemoryDumpVolume.TargetFileName {
2✔
1226
                        // memory dump wasn't triggered yet
1✔
1227
                        return volumeStatus, needsRefresh
1✔
1228
                }
1✔
1229
                needsRefresh = true
1✔
1230
                if memoryDumpMetadata.StartTimestamp != nil {
2✔
1231
                        volumeStatus.MemoryDumpVolume.StartTimestamp = memoryDumpMetadata.StartTimestamp
1✔
1232
                }
1✔
1233
                if memoryDumpMetadata.EndTimestamp != nil && memoryDumpMetadata.Failed {
2✔
1234
                        log.Log.Object(vmi).Errorf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
1235
                        volumeStatus.Message = fmt.Sprintf("Memory dump to pvc %s failed: %v", volumeStatus.Name, memoryDumpMetadata.FailureReason)
1✔
1236
                        volumeStatus.Phase = v1.MemoryDumpVolumeFailed
1✔
1237
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
1238
                } else if memoryDumpMetadata.Completed {
3✔
1239
                        log.Log.Object(vmi).V(3).Infof("Marking memory dump to volume %s as completed", volumeStatus.Name)
1✔
1240
                        volumeStatus.Phase = v1.MemoryDumpVolumeCompleted
1✔
1241
                        volumeStatus.Message = fmt.Sprintf("Memory dump to Volume %s has completed successfully", volumeStatus.Name)
1✔
1242
                        volumeStatus.Reason = VolumeReadyReason
1✔
1243
                        volumeStatus.MemoryDumpVolume.EndTimestamp = memoryDumpMetadata.EndTimestamp
1✔
1244
                }
1✔
1245
        }
1246

1247
        return volumeStatus, needsRefresh
1✔
1248
}
1249

1250
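// updateFSFreezeStatus mirrors the domain's filesystem freeze status into the
// VMI status, clearing it when the guest filesystems are thawed.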
func (c *VirtualMachineController) updateFSFreezeStatus(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
1✔
1251

1✔
1252
        if domain == nil || domain.Status.FSFreezeStatus.Status == "" {
2✔
1253
                return
1✔
1254
        }
1✔
1255

1256
        if domain.Status.FSFreezeStatus.Status == api.FSThawed {
2✔
1257
                vmi.Status.FSFreezeStatus = ""
1✔
1258
        } else {
2✔
1259
                vmi.Status.FSFreezeStatus = domain.Status.FSFreezeStatus.Status
1✔
1260
        }
1✔
1261

1262
}
1263

1264
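// IsoGuestVolumePath returns the path of the generated ISO backing a config-style
// volume (cloud-init, ConfigMap, DownwardAPI, Secret, ServiceAccount, Sysprep),
// or an empty string for other volume types.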
func IsoGuestVolumePath(namespace, name string, volume *v1.Volume) string {
×
1265
        const basepath = "/var/run"
×
1266
        switch {
×
1267
        case volume.CloudInitNoCloud != nil:
×
1268
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "noCloud.iso")
×
1269
        case volume.CloudInitConfigDrive != nil:
×
1270
                return filepath.Join(basepath, "kubevirt-ephemeral-disks", "cloud-init-data", namespace, name, "configdrive.iso")
×
1271
        case volume.ConfigMap != nil:
×
1272
                return config.GetConfigMapDiskPath(volume.Name)
×
1273
        case volume.DownwardAPI != nil:
×
1274
                return config.GetDownwardAPIDiskPath(volume.Name)
×
1275
        case volume.Secret != nil:
×
1276
                return config.GetSecretDiskPath(volume.Name)
×
1277
        case volume.ServiceAccount != nil:
×
1278
                return config.GetServiceAccountDiskPath()
×
1279
        case volume.Sysprep != nil:
×
1280
                return config.GetSysprepDiskPath(volume.Name)
×
1281
        default:
×
1282
                return ""
×
1283
        }
1284
}
1285

1286
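// updateIsoSizeStatus records the on-disk size of generated ISO volumes in the
// VMI's volume status while the VMI is running, resolving the files inside the
// virt-launcher pod's mount namespace.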
func (c *VirtualMachineController) updateIsoSizeStatus(vmi *v1.VirtualMachineInstance) {
1✔
1287
        var podUID string
1✔
1288
        if vmi.Status.Phase != v1.Running {
2✔
1289
                return
1✔
1290
        }
1✔
1291

1292
        for k, v := range vmi.Status.ActivePods {
2✔
1293
                if v == vmi.Status.NodeName {
2✔
1294
                        podUID = string(k)
1✔
1295
                        break
1✔
1296
                }
1297
        }
1298
        if podUID == "" {
2✔
1299
                log.DefaultLogger().Warningf("failed to find pod UID for VMI %s", vmi.Name)
1✔
1300
                return
1✔
1301
        }
1✔
1302

1303
        volumes := make(map[string]v1.Volume)
1✔
1304
        for _, volume := range vmi.Spec.Volumes {
1✔
1305
                volumes[volume.Name] = volume
×
1306
        }
×
1307

1308
        for _, disk := range vmi.Spec.Domain.Devices.Disks {
1✔
1309
                volume, ok := volumes[disk.Name]
×
1310
                if !ok {
×
1311
                        log.DefaultLogger().Warningf("No matching volume with name %s found", disk.Name)
×
1312
                        continue
×
1313
                }
1314

1315
                volPath := IsoGuestVolumePath(vmi.Namespace, vmi.Name, &volume)
×
1316
                if volPath == "" {
×
1317
                        continue
×
1318
                }
1319

1320
                res, err := c.podIsolationDetector.Detect(vmi)
×
1321
                if err != nil {
×
1322
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
1323
                        continue
×
1324
                }
1325

1326
                rootPath, err := res.MountRoot()
×
1327
                if err != nil {
×
1328
                        log.DefaultLogger().Reason(err).Warningf("failed to detect VMI %s", vmi.Name)
×
1329
                        continue
×
1330
                }
1331

1332
                safeVolPath, err := rootPath.AppendAndResolveWithRelativeRoot(volPath)
×
1333
                if err != nil {
×
1334
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
1335
                        continue
×
1336
                }
1337
                fileInfo, err := safepath.StatAtNoFollow(safeVolPath)
×
1338
                if err != nil {
×
1339
                        log.DefaultLogger().Warningf("failed to determine file size for volume %s", volPath)
×
1340
                        continue
×
1341
                }
1342

1343
                for i := range vmi.Status.VolumeStatus {
×
1344
                        if vmi.Status.VolumeStatus[i].Name == volume.Name {
×
1345
                                vmi.Status.VolumeStatus[i].Size = fileInfo.Size()
×
1346
                                continue
×
1347
                        }
1348
                }
1349
        }
1350
}
1351

1352
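// updateSELinuxContext stores the SELinux context of the virt-launcher process in
// the VMI status, or "none" when SELinux is not present on the node.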
func (c *VirtualMachineController) updateSELinuxContext(vmi *v1.VirtualMachineInstance) error {
1✔
1353
        _, present, err := selinux.NewSELinux()
1✔
1354
        if err != nil {
2✔
1355
                return err
1✔
1356
        }
1✔
1357
        if present {
×
1358
                context, err := selinux.GetVirtLauncherContext(vmi)
×
1359
                if err != nil {
×
1360
                        return err
×
1361
                }
×
1362
                vmi.Status.SelinuxContext = context
×
1363
        } else {
×
1364
                vmi.Status.SelinuxContext = "none"
×
1365
        }
×
1366

1367
        return nil
×
1368
}
1369

1370
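// updateVMIStatusFromDomain refreshes the status fields derived from the libvirt
// domain: ISO sizes, SELinux context, migration progress, guest info, volume and
// freeze statuses, machine type, memory info and network status.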
func (c *VirtualMachineController) updateVMIStatusFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
1371
        c.updateIsoSizeStatus(vmi)
1✔
1372
        err := c.updateSELinuxContext(vmi)
1✔
1373
        if err != nil {
2✔
1374
                log.Log.Reason(err).Errorf("couldn't find the SELinux context for %s", vmi.Name)
1✔
1375
        }
1✔
1376
        c.setMigrationProgressStatus(vmi, domain)
1✔
1377
        c.updateGuestInfoFromDomain(vmi, domain)
1✔
1378
        c.updateVolumeStatusesFromDomain(vmi, domain)
1✔
1379
        c.updateFSFreezeStatus(vmi, domain)
1✔
1380
        c.updateMachineType(vmi, domain)
1✔
1381
        if err = c.updateMemoryInfo(vmi, domain); err != nil {
1✔
1382
                return err
×
1383
        }
×
1384
        err = c.netStat.UpdateStatus(vmi, domain)
1✔
1385
        return err
1✔
1386
}
1387

1388
func (c *VirtualMachineController) updateVMIConditions(vmi *v1.VirtualMachineInstance, domain *api.Domain, condManager *controller.VirtualMachineInstanceConditionManager) error {
1✔
1389
        c.updateAccessCredentialConditions(vmi, domain, condManager)
1✔
1390
        c.updateLiveMigrationConditions(vmi, condManager)
1✔
1391
        err := c.updateGuestAgentConditions(vmi, domain, condManager)
1✔
1392
        if err != nil {
1✔
1393
                return err
×
1394
        }
×
1395
        c.updatePausedConditions(vmi, domain, condManager)
1✔
1396

1✔
1397
        return nil
1✔
1398
}
1399

1400
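// updateVMIStatus reconciles the VMI status with the observed domain state and the
// last sync error, issuing an update call only when the status actually changed and
// recording an event when the phase transitions.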
func (c *VirtualMachineController) updateVMIStatus(origVMI *v1.VirtualMachineInstance, domain *api.Domain, syncError error) (err error) {
1✔
1401
        condManager := controller.NewVirtualMachineInstanceConditionManager()
1✔
1402

1✔
1403
        // Don't update the VirtualMachineInstance if it is already in a final state
1✔
1404
        if origVMI.IsFinal() {
2✔
1405
                return nil
1✔
1406
        } else if origVMI.Status.NodeName != "" && origVMI.Status.NodeName != c.host {
3✔
1407
                // Only update the VMI's phase if this node owns the VMI.
1✔
1408
                // not owned by this host, likely the result of a migration
1✔
1409
                return nil
1✔
1410
        } else if domainMigrated(domain) {
3✔
1411
                return c.migrationSourceUpdateVMIStatus(origVMI, domain)
1✔
1412
        }
1✔
1413

1414
        vmi := origVMI.DeepCopy()
1✔
1415
        oldStatus := *vmi.Status.DeepCopy()
1✔
1416

1✔
1417
        // Update VMI status fields based on what is reported on the domain
1✔
1418
        err = c.updateVMIStatusFromDomain(vmi, domain)
1✔
1419
        if err != nil {
1✔
1420
                return err
×
1421
        }
×
1422

1423
        // Calculate the new VirtualMachineInstance state based on what libvirt reported
1424
        err = c.setVmPhaseForStatusReason(domain, vmi)
1✔
1425
        if err != nil {
1✔
1426
                return err
×
1427
        }
×
1428

1429
        // Update conditions on VMI Status
1430
        err = c.updateVMIConditions(vmi, domain, condManager)
1✔
1431
        if err != nil {
1✔
1432
                return err
×
1433
        }
×
1434

1435
        // Store containerdisks and kernelboot checksums
1436
        if err := c.updateChecksumInfo(vmi, syncError); err != nil {
1✔
1437
                return err
×
1438
        }
×
1439

1440
        // Handle sync error
1441
        handleSyncError(vmi, condManager, syncError)
1✔
1442

1✔
1443
        controller.SetVMIPhaseTransitionTimestamp(origVMI, vmi)
1✔
1444

1✔
1445
        // Only issue vmi update if status has changed
1✔
1446
        if !equality.Semantic.DeepEqual(oldStatus, vmi.Status) {
2✔
1447
                key := controller.VirtualMachineInstanceKey(vmi)
1✔
1448
                c.vmiExpectations.SetExpectations(key, 1, 0)
1✔
1449
                _, err = c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, metav1.UpdateOptions{})
1✔
1450
                if err != nil {
2✔
1451
                        c.vmiExpectations.LowerExpectations(key, 1, 0)
1✔
1452
                        return err
1✔
1453
                }
1✔
1454
        }
1455

1456
        // Record an event on the VMI when the VMI's phase changes
1457
        if oldStatus.Phase != vmi.Status.Phase {
2✔
1458
                c.recordPhaseChangeEvent(vmi)
1✔
1459
        }
1✔
1460

1461
        return nil
1✔
1462
}
1463

1464
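// handleSyncError marks the VMI as Failed for critical network, secure boot and
// irrecoverable errors, and records a synchronization failure condition.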
func handleSyncError(vmi *v1.VirtualMachineInstance, condManager *controller.VirtualMachineInstanceConditionManager, syncError error) {
1✔
1465
        var criticalNetErr *neterrors.CriticalNetworkError
1✔
1466
        if goerror.As(syncError, &criticalNetErr) {
2✔
1467
                log.Log.Errorf("virt-launcher crashed due to a network error. Updating VMI %s status to Failed", vmi.Name)
1✔
1468
                vmi.Status.Phase = v1.Failed
1✔
1469
        }
1✔
1470
        if _, ok := syncError.(*virtLauncherCriticalSecurebootError); ok {
1✔
1471
                log.Log.Errorf("virt-launcher does not support the Secure Boot setting. Updating VMI %s status to Failed", vmi.Name)
×
1472
                vmi.Status.Phase = v1.Failed
×
1473
        }
×
1474

1475
        if _, ok := syncError.(*vmiIrrecoverableError); ok {
1✔
1476
                log.Log.Errorf("virt-launcher reached an irrecoverable error. Updating VMI %s status to Failed", vmi.Name)
×
1477
                vmi.Status.Phase = v1.Failed
×
1478
        }
×
1479
        condManager.CheckFailure(vmi, syncError, "Synchronizing with the Domain failed.")
1✔
1480
}
1481

1482
func (c *VirtualMachineController) recordPhaseChangeEvent(vmi *v1.VirtualMachineInstance) {
1✔
1483
        switch vmi.Status.Phase {
1✔
1484
        case v1.Running:
1✔
1485
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Started.String(), VMIStarted)
1✔
1486
        case v1.Succeeded:
×
1487
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Stopped.String(), VMIShutdown)
×
1488
        case v1.Failed:
1✔
1489
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.Stopped.String(), VMICrashed)
1✔
1490
        }
1491
}
1492

1493
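// calculatePausedCondition appends a Paused condition whose reason and message
// reflect why the domain was paused (migration monitor, user request or IO error).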
func calculatePausedCondition(vmi *v1.VirtualMachineInstance, reason api.StateChangeReason) {
1✔
1494
        now := metav1.NewTime(time.Now())
1✔
1495
        switch reason {
1✔
1496
        case api.ReasonPausedMigration:
×
1497
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
×
1498
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1499
                        Type:               v1.VirtualMachineInstancePaused,
×
1500
                        Status:             k8sv1.ConditionTrue,
×
1501
                        LastProbeTime:      now,
×
1502
                        LastTransitionTime: now,
×
1503
                        Reason:             "PausedByMigrationMonitor",
×
1504
                        Message:            "VMI was paused by the migration monitor",
×
1505
                })
×
1506
        case api.ReasonPausedUser:
1✔
1507
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
1✔
1508
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
1✔
1509
                        Type:               v1.VirtualMachineInstancePaused,
1✔
1510
                        Status:             k8sv1.ConditionTrue,
1✔
1511
                        LastProbeTime:      now,
1✔
1512
                        LastTransitionTime: now,
1✔
1513
                        Reason:             "PausedByUser",
1✔
1514
                        Message:            "VMI was paused by user",
1✔
1515
                })
1✔
1516
        case api.ReasonPausedIOError:
×
1517
                log.Log.Object(vmi).V(3).Info("Adding paused condition")
×
1518
                vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
×
1519
                        Type:               v1.VirtualMachineInstancePaused,
×
1520
                        Status:             k8sv1.ConditionTrue,
×
1521
                        LastProbeTime:      now,
×
1522
                        LastTransitionTime: now,
×
1523
                        Reason:             "PausedIOError",
×
1524
                        Message:            "VMI was paused, low-level IO error detected",
×
1525
                })
×
1526
        default:
×
1527
                log.Log.Object(vmi).V(3).Infof("Domain is paused for unknown reason, %s", reason)
×
1528
        }
1529
}
1530

1531
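// newNonMigratableCondition builds a false IsMigratable condition with the given
// message and reason.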
func newNonMigratableCondition(msg string, reason string) *v1.VirtualMachineInstanceCondition {
1✔
1532
        return &v1.VirtualMachineInstanceCondition{
1✔
1533
                Type:    v1.VirtualMachineInstanceIsMigratable,
1✔
1534
                Status:  k8sv1.ConditionFalse,
1✔
1535
                Message: msg,
1✔
1536
                Reason:  reason,
1✔
1537
        }
1✔
1538
}
1✔
1539

1540
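// calculateLiveMigrationCondition evaluates the blockers for live migration (disks,
// interfaces, CPU model, host devices, SEV, persistent reservations, TSC frequency,
// hyperv passthrough) and returns the resulting IsMigratable condition together
// with whether a block migration would be required.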
func (c *VirtualMachineController) calculateLiveMigrationCondition(vmi *v1.VirtualMachineInstance) (*v1.VirtualMachineInstanceCondition, bool) {
1✔
1541
        isBlockMigration, err := c.checkVolumesForMigration(vmi)
1✔
1542
        if err != nil {
2✔
1543
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonDisksNotMigratable), isBlockMigration
1✔
1544
        }
1✔
1545

1546
        err = c.checkNetworkInterfacesForMigration(vmi)
1✔
1547
        if err != nil {
2✔
1548
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonInterfaceNotMigratable), isBlockMigration
1✔
1549
        }
1✔
1550

1551
        if err := c.isHostModelMigratable(vmi); err != nil {
1✔
1552
                return newNonMigratableCondition(err.Error(), v1.VirtualMachineInstanceReasonCPUModeNotMigratable), isBlockMigration
×
1553
        }
×
1554

1555
        if vmiContainsPCIHostDevice(vmi) {
2✔
1556
                return newNonMigratableCondition("VMI uses a PCI host device", v1.VirtualMachineInstanceReasonHostDeviceNotMigratable), isBlockMigration
1✔
1557
        }
1✔
1558

1559
        if util.IsSEVVMI(vmi) {
2✔
1560
                return newNonMigratableCondition("VMI uses SEV", v1.VirtualMachineInstanceReasonSEVNotMigratable), isBlockMigration
1✔
1561
        }
1✔
1562

1563
        if reservation.HasVMIPersistentReservation(vmi) {
2✔
1564
                return newNonMigratableCondition("VMI uses SCSI persistent reservation", v1.VirtualMachineInstanceReasonPRNotMigratable), isBlockMigration
1✔
1565
        }
1✔
1566

1567
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
2✔
1568
                return newNonMigratableCondition(tscRequirement.Reason, v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable), isBlockMigration
1✔
1569
        }
1✔
1570

1571
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
2✔
1572
                return newNonMigratableCondition("VMI uses hyperv passthrough", v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable), isBlockMigration
1✔
1573
        }
1✔
1574

1575
        return &v1.VirtualMachineInstanceCondition{
1✔
1576
                Type:   v1.VirtualMachineInstanceIsMigratable,
1✔
1577
                Status: k8sv1.ConditionTrue,
1✔
1578
        }, isBlockMigration
1✔
1579
}
1580

1581
func vmiContainsPCIHostDevice(vmi *v1.VirtualMachineInstance) bool {
1✔
1582
        return len(vmi.Spec.Domain.Devices.HostDevices) > 0 || len(vmi.Spec.Domain.Devices.GPUs) > 0
1✔
1583
}
1✔
1584

1585
type multipleNonMigratableCondition struct {
1586
        reasons []string
1587
        msgs    []string
1588
}
1589

1590
func newMultipleNonMigratableCondition() *multipleNonMigratableCondition {
1✔
1591
        return &multipleNonMigratableCondition{}
1✔
1592
}
1✔
1593

1594
func (cond *multipleNonMigratableCondition) addNonMigratableCondition(reason, msg string) {
1✔
1595
        cond.reasons = append(cond.reasons, reason)
1✔
1596
        cond.msgs = append(cond.msgs, msg)
1✔
1597
}
1✔
1598

1599
func (cond *multipleNonMigratableCondition) String() string {
1✔
1600
        var buffer bytes.Buffer
1✔
1601
        for i, c := range cond.reasons {
2✔
1602
                if i > 0 {
1✔
1603
                        buffer.WriteString(", ")
×
1604
                }
×
1605
                buffer.WriteString(fmt.Sprintf("%s: %s", c, cond.msgs[i]))
1✔
1606
        }
1607
        return buffer.String()
1✔
1608
}
1609

1610
func (cond *multipleNonMigratableCondition) generateStorageLiveMigrationCondition() *v1.VirtualMachineInstanceCondition {
1✔
1611
        switch len(cond.reasons) {
1✔
1612
        case 0:
1✔
1613
                return &v1.VirtualMachineInstanceCondition{
1✔
1614
                        Type:   v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1615
                        Status: k8sv1.ConditionTrue,
1✔
1616
                }
1✔
1617
        default:
1✔
1618
                return &v1.VirtualMachineInstanceCondition{
1✔
1619
                        Type:    v1.VirtualMachineInstanceIsStorageLiveMigratable,
1✔
1620
                        Status:  k8sv1.ConditionFalse,
1✔
1621
                        Message: cond.String(),
1✔
1622
                        Reason:  v1.VirtualMachineInstanceReasonNotMigratable,
1✔
1623
                }
1✔
1624
        }
1625
}
1626

1627
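// calculateLiveStorageMigrationCondition aggregates every non-migratable reason that
// applies to storage live migration into a single condition.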
func (c *VirtualMachineController) calculateLiveStorageMigrationCondition(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstanceCondition {
1✔
1628
        multiCond := newMultipleNonMigratableCondition()
1✔
1629

1✔
1630
        if err := c.checkNetworkInterfacesForMigration(vmi); err != nil {
2✔
1631
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonInterfaceNotMigratable, err.Error())
1✔
1632
        }
1✔
1633

1634
        if err := c.isHostModelMigratable(vmi); err != nil {
1✔
1635
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonCPUModeNotMigratable, err.Error())
×
1636
        }
×
1637

1638
        if vmiContainsPCIHostDevice(vmi) {
1✔
1639
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHostDeviceNotMigratable, "VMI uses a PCI host device")
×
1640
        }
×
1641

1642
        if util.IsSEVVMI(vmi) {
1✔
1643
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonSEVNotMigratable, "VMI uses SEV")
×
1644
        }
×
1645

1646
        if reservation.HasVMIPersistentReservation(vmi) {
1✔
1647
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonPRNotMigratable, "VMI uses SCSI persistent reservation")
×
1648
        }
×
1649

1650
        if tscRequirement := topology.GetTscFrequencyRequirement(vmi); !topology.AreTSCFrequencyTopologyHintsDefined(vmi) && tscRequirement.Type == topology.RequiredForMigration {
1✔
1651
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonNoTSCFrequencyMigratable, tscRequirement.Reason)
×
1652
        }
×
1653

1654
        if vmiFeatures := vmi.Spec.Domain.Features; vmiFeatures != nil && vmiFeatures.HypervPassthrough != nil && *vmiFeatures.HypervPassthrough.Enabled {
1✔
1655
                multiCond.addNonMigratableCondition(v1.VirtualMachineInstanceReasonHypervPassthroughNotMigratable, "VMI uses hyperv passthrough")
×
1656
        }
×
1657

1658
        return multiCond.generateStorageLiveMigrationCondition()
1✔
1659
}
1660

1661
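// Run starts the controller's helpers, re-queues domains that no longer have a
// matching VMI so they get cleaned up locally, and then runs the given number of
// workers until stopCh is closed.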
func (c *VirtualMachineController) Run(threadiness int, stopCh chan struct{}) {
×
1662
        defer c.queue.ShutDown()
×
1663
        log.Log.Info("Starting virt-handler controller.")
×
1664

×
1665
        go c.deviceManagerController.Run(stopCh)
×
1666

×
1667
        go c.downwardMetricsManager.Run(stopCh)
×
1668

×
1669
        cache.WaitForCacheSync(stopCh, c.hasSynced)
×
1670

×
1671
        // queue keys for previous Domains on the host that no longer exist
×
1672
        // in the cache. This ensures we perform local cleanup of deleted VMs.
×
1673
        for _, domain := range c.domainStore.List() {
×
1674
                d := domain.(*api.Domain)
×
1675
                vmiRef := v1.NewVMIReferenceWithUUID(
×
1676
                        d.ObjectMeta.Namespace,
×
1677
                        d.ObjectMeta.Name,
×
1678
                        d.Spec.Metadata.KubeVirt.UID)
×
1679

×
1680
                key := controller.VirtualMachineInstanceKey(vmiRef)
×
1681

×
1682
                _, exists, _ := c.vmiSourceStore.GetByKey(key)
×
1683
                if !exists {
×
1684
                        c.queue.Add(key)
×
1685
                }
×
1686
        }
1687

1688
        heartBeatDone := c.heartBeat.Run(c.heartBeatInterval, stopCh)
×
1689

×
1690
        go c.ioErrorRetryManager.Run(stopCh)
×
1691

×
1692
        // Start the actual work
×
1693
        for i := 0; i < threadiness; i++ {
×
1694
                go wait.Until(c.runWorker, time.Second, stopCh)
×
1695
        }
×
1696

1697
        <-heartBeatDone
×
1698
        <-stopCh
×
1699
        log.Log.Info("Stopping virt-handler controller.")
×
1700
}
1701

1702
func (c *VirtualMachineController) runWorker() {
×
1703
        for c.Execute() {
×
1704
        }
×
1705
}
1706

1707
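// Execute processes a single work-queue item, re-enqueuing it with rate limiting
// when reconciliation fails.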
func (c *VirtualMachineController) Execute() bool {
1✔
1708
        key, quit := c.queue.Get()
1✔
1709
        if quit {
1✔
1710
                return false
×
1711
        }
×
1712
        defer c.queue.Done(key)
1✔
1713
        if err := c.execute(key); err != nil {
2✔
1714
                log.Log.Reason(err).Infof("re-enqueuing VirtualMachineInstance %v", key)
1✔
1715
                c.queue.AddRateLimited(key)
1✔
1716
        } else {
2✔
1717
                log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
1✔
1718
                c.queue.Forget(key)
1✔
1719
        }
1✔
1720
        return true
1✔
1721
}
1722

1723
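// getVMIFromCache looks the VMI up in the source and target informer stores and
// falls back to a bare reference built from the key when it no longer exists.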
func (c *VirtualMachineController) getVMIFromCache(key string) (vmi *v1.VirtualMachineInstance, exists bool, err error) {
1✔
1724

1✔
1725
        // Fetch the latest Vm state from cache
1✔
1726
        obj, exists, err := c.vmiSourceStore.GetByKey(key)
1✔
1727
        if err != nil {
1✔
1728
                return nil, false, err
×
1729
        }
×
1730

1731
        if !exists {
2✔
1732
                obj, exists, err = c.vmiTargetStore.GetByKey(key)
1✔
1733
                if err != nil {
1✔
1734
                        return nil, false, err
×
1735
                }
×
1736
        }
1737

1738
        // Retrieve the VirtualMachineInstance
1739
        if !exists {
2✔
1740
                namespace, name, err := cache.SplitMetaNamespaceKey(key)
1✔
1741
                if err != nil {
2✔
1742
                        // TODO log and don't retry
1✔
1743
                        return nil, false, err
1✔
1744
                }
1✔
1745
                vmi = v1.NewVMIReferenceFromNameWithNS(namespace, name)
1✔
1746
        } else {
1✔
1747
                vmi = obj.(*v1.VirtualMachineInstance)
1✔
1748
        }
1✔
1749
        return vmi, exists, nil
1✔
1750
}
1751

1752
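// getDomainFromCache returns the cached domain for the key along with its recorded
// VMI UID; a domain carrying a deletion timestamp is treated as absent.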
func (c *VirtualMachineController) getDomainFromCache(key string) (domain *api.Domain, exists bool, cachedUID types.UID, err error) {
1✔
1753

1✔
1754
        obj, exists, err := c.domainStore.GetByKey(key)
1✔
1755

1✔
1756
        if err != nil {
1✔
1757
                return nil, false, "", err
×
1758
        }
×
1759

1760
        if exists {
2✔
1761
                domain = obj.(*api.Domain)
1✔
1762
                cachedUID = domain.Spec.Metadata.KubeVirt.UID
1✔
1763

1✔
1764
                // We're using the DeletionTimestamp to signify that the
1✔
1765
                // Domain is deleted rather than sending the DELETE watch event.
1✔
1766
                if domain.ObjectMeta.DeletionTimestamp != nil {
1✔
1767
                        exists = false
×
1768
                        domain = nil
×
1769
                }
×
1770
        }
1771
        return domain, exists, cachedUID, nil
1✔
1772
}
1773

1774
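// migrationOrphanedSourceNodeExecute deletes and cleans up a domain that was left
// behind on this node after the VMI migrated away.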
func (c *VirtualMachineController) migrationOrphanedSourceNodeExecute(vmi *v1.VirtualMachineInstance, domainExists bool) error {
×
1775

×
1776
        if domainExists {
×
1777
                err := c.processVmDelete(vmi)
×
1778
                if err != nil {
×
1779
                        return err
×
1780
                }
×
1781
                // we can perform the cleanup immediately after
1782
                // the successful delete here because we don't have
1783
                // to report the deletion results on the VMI status
1784
                // in this case.
1785
                err = c.processVmCleanup(vmi)
×
1786
                if err != nil {
×
1787
                        return err
×
1788
                }
×
1789
        } else {
×
1790
                err := c.processVmCleanup(vmi)
×
1791
                if err != nil {
×
1792
                        return err
×
1793
                }
×
1794
        }
1795
        return nil
×
1796
}
1797

1798
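// migrationTargetExecute prepares this node to receive an incoming migration,
// cleaning up stale clients from previously failed attempts and aborting the
// preparation when the VMI is gone, final, or being deleted.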
func (c *VirtualMachineController) migrationTargetExecute(vmi *v1.VirtualMachineInstance, vmiExists bool, domain *api.Domain) error {
1✔
1799

1✔
1800
        // set to true when preparation of migration target should be aborted.
1✔
1801
        shouldAbort := false
1✔
1802
        // set to true when VirtualMachineInstance migration target needs to be prepared
1✔
1803
        shouldUpdate := false
1✔
1804
        // set to true when the current migration target has exited and needs to be cleaned up.
1✔
1805
        shouldCleanUp := false
1✔
1806

1✔
1807
        if vmiExists && vmi.IsRunning() {
2✔
1808
                shouldUpdate = true
1✔
1809
        }
1✔
1810

1811
        if !vmiExists || vmi.DeletionTimestamp != nil {
2✔
1812
                shouldAbort = true
1✔
1813
        } else if vmi.IsFinal() {
2✔
1814
                shouldAbort = true
×
1815
        } else if c.hasStaleClientConnections(vmi) {
2✔
1816
                // if stale client exists, force cleanup.
1✔
1817
                // This can happen as a result of a previously
1✔
1818
                // failed attempt to migrate the vmi to this node.
1✔
1819
                shouldCleanUp = true
1✔
1820
        }
1✔
1821

1822
        domainExists := domain != nil
1✔
1823
        if shouldAbort {
2✔
1824
                if domainExists {
1✔
1825
                        err := c.processVmDelete(vmi)
×
1826
                        if err != nil {
×
1827
                                return err
×
1828
                        }
×
1829
                }
1830

1831
                err := c.processVmCleanup(vmi)
1✔
1832
                if err != nil {
1✔
1833
                        return err
×
1834
                }
×
1835
        } else if shouldCleanUp {
2✔
1836
                log.Log.Object(vmi).Infof("Stale client for migration target found. Cleaning up.")
1✔
1837

1✔
1838
                err := c.processVmCleanup(vmi)
1✔
1839
                if err != nil {
1✔
1840
                        return err
×
1841
                }
×
1842

1843
                // if we're still the migration target, we need to keep trying until the migration fails.
1844
                // it's possible we're simply waiting for another target pod to come online.
1845
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
1846

1847
        } else if shouldUpdate {
2✔
1848
                log.Log.Object(vmi).Info("Processing vmi migration target update")
1✔
1849

1✔
1850
                // prepare the POD for the migration
1✔
1851
                err := c.processVmUpdate(vmi, domain)
1✔
1852
                if err != nil {
1✔
1853
                        return err
×
1854
                }
×
1855

1856
                err = c.migrationTargetUpdateVMIStatus(vmi, domain)
1✔
1857
                if err != nil {
1✔
1858
                        return err
×
1859
                }
×
1860
        }
1861

1862
        return nil
1✔
1863
}
1864

1865
// Determine if gracefulShutdown has been triggered by virt-launcher
1866
func (c *VirtualMachineController) hasGracefulShutdownTrigger(domain *api.Domain) bool {
1✔
1867
        if domain == nil {
2✔
1868
                return false
1✔
1869
        }
1✔
1870
        gracePeriod := domain.Spec.Metadata.KubeVirt.GracePeriod
1✔
1871

1✔
1872
        return gracePeriod != nil &&
1✔
1873
                gracePeriod.MarkedForGracefulShutdown != nil &&
1✔
1874
                *gracePeriod.MarkedForGracefulShutdown
1✔
1875
}
1876

1877
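// defaultExecute is the main reconciliation path: it decides whether the domain
// should be shut down, deleted, cleaned up or updated, performs that action, and
// then synchronizes the VMI status.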
func (c *VirtualMachineController) defaultExecute(key string,
1878
        vmi *v1.VirtualMachineInstance,
1879
        vmiExists bool,
1880
        domain *api.Domain,
1881
        domainExists bool) error {
1✔
1882

1✔
1883
        // set to true when domain needs to be shutdown.
1✔
1884
        shouldShutdown := false
1✔
1885
        // set to true when domain needs to be removed from libvirt.
1✔
1886
        shouldDelete := false
1✔
1887
        // optimization: set to true when processing an already deleted domain.
1✔
1888
        shouldCleanUp := false
1✔
1889
        // set to true when VirtualMachineInstance is active or about to become active.
1✔
1890
        shouldUpdate := false
1✔
1891
        // set to true to ensure that no updates to the current VirtualMachineInstance state will occur
1✔
1892
        forceIgnoreSync := false
1✔
1893
        // set to true when unrecoverable domain needs to be destroyed non-gracefully.
1✔
1894
        forceShutdownIrrecoverable := false
1✔
1895

1✔
1896
        log.Log.V(3).Infof("Processing event %v", key)
1✔
1897

1✔
1898
        if vmiExists && domainExists {
2✔
1899
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain status: %v, reason: %v", vmi.Status.Phase, domain.Status.Status, domain.Status.Reason)
1✔
1900
        } else if vmiExists {
3✔
1901
                log.Log.Object(vmi).Infof("VMI is in phase: %v | Domain does not exist", vmi.Status.Phase)
1✔
1902
        } else if domainExists {
3✔
1903
                vmiRef := v1.NewVMIReferenceWithUUID(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name, domain.Spec.Metadata.KubeVirt.UID)
1✔
1904
                log.Log.Object(vmiRef).Infof("VMI does not exist | Domain status: %v, reason: %v", domain.Status.Status, domain.Status.Reason)
1✔
1905
        } else {
1✔
1906
                log.Log.Info("VMI does not exist | Domain does not exist")
×
1907
        }
×
1908

1909
        domainAlive := domainExists &&
1✔
1910
                domain.Status.Status != api.Shutoff &&
1✔
1911
                domain.Status.Status != api.Crashed &&
1✔
1912
                domain.Status.Status != ""
1✔
1913

1✔
1914
        domainMigrated := domainExists && domainMigrated(domain)
1✔
1915
        forceShutdownIrrecoverable = domainExists && domainPausedFailedPostCopy(domain)
1✔
1916

1✔
1917
        gracefulShutdown := c.hasGracefulShutdownTrigger(domain)
1✔
1918
        if gracefulShutdown && vmi.IsRunning() {
1✔
1919
                if domainAlive {
×
1920
                        log.Log.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
×
1921
                        shouldShutdown = true
×
1922
                } else {
×
1923
                        shouldDelete = true
×
1924
                }
×
1925
        }
1926

1927
        // Determine removal of VirtualMachineInstance from cache should result in deletion.
1928
        if !vmiExists {
2✔
1929
                switch {
1✔
1930
                case domainAlive:
1✔
1931
                        // The VirtualMachineInstance is deleted on the cluster, and domain is alive,
1✔
1932
                        // then shut down the domain.
1✔
1933
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
1✔
1934
                        shouldShutdown = true
1✔
1935
                case domainExists:
1✔
1936
                        // The VirtualMachineInstance is deleted on the cluster, and domain is not alive
1✔
1937
                        // then delete the domain.
1✔
1938
                        log.Log.Object(vmi).V(3).Info("Deleting domain for deleted VirtualMachineInstance object.")
1✔
1939
                        shouldDelete = true
1✔
1940
                default:
×
1941
                        // If neither the domain nor the vmi object exist locally,
×
1942
                        // then ensure any remaining local ephemeral data is cleaned up.
×
1943
                        shouldCleanUp = true
×
1944
                }
1945
        }
1946

1947
        // Determine if VirtualMachineInstance is being deleted.
1948
        if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
2✔
1949
                switch {
1✔
1950
                case domainAlive:
×
1951
                        log.Log.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
×
1952
                        shouldShutdown = true
×
1953
                case domainExists:
×
1954
                        log.Log.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
×
1955
                        shouldDelete = true
×
1956
                default:
1✔
1957
                        if vmi.IsFinal() {
1✔
1958
                                shouldCleanUp = true
×
1959
                        }
×
1960
                }
1961
        }
1962

1963
        // Determine if domain needs to be deleted as a result of VirtualMachineInstance
1964
        // shutting down naturally (guest internal invoked shutdown)
1965
        if domainExists && vmiExists && vmi.IsFinal() {
1✔
1966
                log.Log.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
×
1967
                shouldDelete = true
×
1968
        } else if !domainExists && vmiExists && vmi.IsFinal() {
2✔
1969
                log.Log.Object(vmi).V(3).Info("Cleaning up local data for finalized vmi.")
1✔
1970
                shouldCleanUp = true
1✔
1971
        }
1✔
1972

1973
        // Determine if an active (or about to be active) VirtualMachineInstance should be updated.
1974
        if vmiExists && !vmi.IsFinal() {
2✔
1975
                // requiring the phase of the domain and VirtualMachineInstance to be in sync is an
1✔
1976
                // optimization that prevents unnecessary re-processing VMIs during the start flow.
1✔
1977
                phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
1✔
1978
                if err != nil {
1✔
1979
                        return err
×
1980
                }
×
1981
                if vmi.Status.Phase == phase {
2✔
1982
                        shouldUpdate = true
1✔
1983
                }
1✔
1984

1985
                if shouldDelay, delay := c.ioErrorRetryManager.ShouldDelay(string(vmi.UID), func() bool {
2✔
1986
                        return isIOError(shouldUpdate, domainExists, domain)
1✔
1987
                }); shouldDelay {
1✔
1988
                        shouldUpdate = false
×
1989
                        log.Log.Object(vmi).Infof("Delay vm update for %f seconds", delay.Seconds())
×
1990
                        c.queue.AddAfter(key, delay)
×
1991
                }
×
1992
        }
1993

1994
        // NOTE: This must be the last check that occurs before checking the sync booleans!
1995
        //
1996
        // Special logic for domains migrated from a source node.
1997
        // Don't delete/destroy domain until the handoff occurs.
1998
        if domainMigrated {
2✔
1999
                // only allow the sync to occur on the domain once we've done
1✔
2000
                // the node handoff. Otherwise we potentially lose the fact that
1✔
2001
                // the domain migrated because we'll attempt to delete the locally
1✔
2002
                // shut off domain during the sync.
1✔
2003
                if vmiExists &&
1✔
2004
                        !vmi.IsFinal() &&
1✔
2005
                        vmi.DeletionTimestamp == nil &&
1✔
2006
                        vmi.Status.NodeName != "" &&
1✔
2007
                        vmi.Status.NodeName == c.host {
2✔
2008

1✔
2009
                        // If the domain migrated but the VMI still thinks this node
1✔
2010
                        // is the host, force ignore the sync until the VMI's status
1✔
2011
                        // is updated to reflect the node the domain migrated to.
1✔
2012
                        forceIgnoreSync = true
1✔
2013
                }
1✔
2014
        }
2015

2016
        var syncErr error
1✔
2017

1✔
2018
        // Process the VirtualMachineInstance update in this order.
1✔
2019
        // * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
1✔
2020
        // * Cleanup of already shutdown and Deleted VMIs
1✔
2021
        // * Update due to spec change and initial start flow.
1✔
2022
        switch {
1✔
2023
        case forceIgnoreSync:
1✔
2024
                log.Log.Object(vmi).V(3).Info("No update processing required: forced ignore")
1✔
2025
        case shouldShutdown:
1✔
2026
                log.Log.Object(vmi).V(3).Info("Processing shutdown.")
1✔
2027
                syncErr = c.processVmShutdown(vmi, domain)
1✔
2028
        case forceShutdownIrrecoverable:
×
2029
                msg := formatIrrecoverableErrorMessage(domain)
×
2030
                log.Log.Object(vmi).V(3).Infof("Processing a destruction of an irrecoverable domain - %s.", msg)
×
2031
                syncErr = c.processVmDestroy(vmi, domain)
×
2032
                if syncErr == nil {
×
2033
                        syncErr = &vmiIrrecoverableError{msg}
×
2034
                }
×
2035
        case shouldDelete:
1✔
2036
                log.Log.Object(vmi).V(3).Info("Processing deletion.")
1✔
2037
                syncErr = c.processVmDelete(vmi)
1✔
2038
        case shouldCleanUp:
1✔
2039
                log.Log.Object(vmi).V(3).Info("Processing local ephemeral data cleanup for shutdown domain.")
1✔
2040
                syncErr = c.processVmCleanup(vmi)
1✔
2041
        case shouldUpdate:
1✔
2042
                log.Log.Object(vmi).V(3).Info("Processing vmi update")
1✔
2043
                syncErr = c.processVmUpdate(vmi, domain)
1✔
2044
        default:
1✔
2045
                log.Log.Object(vmi).V(3).Info("No update processing required")
1✔
2046
        }
2047

2048
        if syncErr != nil && !vmi.IsFinal() {
2✔
2049
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, v1.SyncFailed.String(), syncErr.Error())
1✔
2050

1✔
2051
                // `syncErr` will be propagated anyway, and it will be logged in `re-enqueueing`
1✔
2052
                // so there is no need to log it twice in hot path without increased verbosity.
1✔
2053
                log.Log.Object(vmi).Reason(syncErr).Error("Synchronizing the VirtualMachineInstance failed.")
1✔
2054
        }
1✔
2055

2056
        // Update the VirtualMachineInstance status, if the VirtualMachineInstance exists
2057
        if vmiExists {
2✔
2058
                if err := c.updateVMIStatus(vmi, domain, syncErr); err != nil {
2✔
2059
                        log.Log.Object(vmi).Reason(err).Error("Updating the VirtualMachineInstance status failed.")
1✔
2060
                        return err
1✔
2061
                }
1✔
2062
        }
2063

2064
        if syncErr != nil {
2✔
2065
                return syncErr
1✔
2066
        }
1✔
2067

2068
        log.Log.Object(vmi).V(3).Info("Synchronization loop succeeded.")
1✔
2069
        return nil
1✔
2070

2071
}
2072

2073
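// execute loads the VMI and domain for the given key, resolves the VMI UID from the
// domain and ghost-record caches if necessary, and dispatches to the migration-target,
// orphaned-source or default reconciliation path.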
func (c *VirtualMachineController) execute(key string) error {
1✔
2074
        vmi, vmiExists, err := c.getVMIFromCache(key)
1✔
2075
        if err != nil {
2✔
2076
                return err
1✔
2077
        }
1✔
2078

2079
        if !vmiExists {
2✔
2080
                c.vmiExpectations.DeleteExpectations(key)
1✔
2081
        } else if !c.vmiExpectations.SatisfiedExpectations(key) {
2✔
2082
                return nil
×
2083
        }
×
2084

2085
        domain, domainExists, domainCachedUID, err := c.getDomainFromCache(key)
1✔
2086
        if err != nil {
1✔
2087
                return err
×
2088
        }
×
2089

2090
        if !vmiExists && string(domainCachedUID) != "" {
2✔
2091
                // it's possible to discover the UID from cache even if the domain
1✔
2092
                // doesn't technically exist anymore
1✔
2093
                vmi.UID = domainCachedUID
1✔
2094
                log.Log.Object(vmi).Infof("Using cached UID for vmi found in domain cache")
1✔
2095
        }
1✔
2096

2097
        // As a last effort, if the UID still can't be determined attempt
2098
        // to retrieve it from the ghost record
2099
        if string(vmi.UID) == "" {
2✔
2100
                uid := virtcache.LastKnownUIDFromGhostRecordCache(key)
1✔
2101
                if uid != "" {
1✔
2102
                        log.Log.Object(vmi).V(3).Infof("ghost record cache provided %s as UID", uid)
×
2103
                        vmi.UID = uid
×
2104
                }
×
2105
        }
2106

2107
        if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
2✔
2108
                oldVMI := v1.NewVMIReferenceFromNameWithNS(vmi.Namespace, vmi.Name)
1✔
2109
                oldVMI.UID = domain.Spec.Metadata.KubeVirt.UID
1✔
2110
                expired, initialized, err := c.isLauncherClientUnresponsive(oldVMI)
1✔
2111
                if err != nil {
1✔
2112
                        return err
×
2113
                }
×
2114
                // If we found an outdated domain which is also not alive anymore, clean up
2115
                if !initialized {
1✔
2116
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
×
2117
                        return nil
×
2118
                } else if expired {
1✔
2119
                        log.Log.Object(oldVMI).Infof("Detected stale vmi %s that still needs cleanup before new vmi %s with identical name/namespace can be processed", oldVMI.UID, vmi.UID)
×
2120
                        err = c.processVmCleanup(oldVMI)
×
2121
                        if err != nil {
×
2122
                                return err
×
2123
                        }
×
2124
                        // Make sure we re-enqueue the key to ensure this new VMI is processed
2125
                        // after the stale domain is removed
2126
                        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*5)
×
2127
                }
2128

2129
                return nil
1✔
2130
        }
2131

2132
        // Take different execution paths depending on the state of the migration and the
2133
        // node this is executed on.
2134

2135
        if vmiExists && c.isPreMigrationTarget(vmi) {
2✔
2136
                // 1. PRE-MIGRATION TARGET PREPARATION PATH
1✔
2137
                //
1✔
2138
                // If this node is the target of the vmi's migration, take
1✔
2139
                // a different execute path. The target execute path prepares
1✔
2140
                // the local environment for the migration, but does not
1✔
2141
                // start the VMI
1✔
2142
                return c.migrationTargetExecute(vmi, vmiExists, domain)
1✔
2143
        } else if vmiExists && c.isOrphanedMigrationSource(vmi) {
2✔
2144
                // 3. POST-MIGRATION SOURCE CLEANUP
×
2145
                //
×
2146
                // After a migration, the migrated domain still exists in the old
×
2147
                // source's domain cache. Ensure that any node that isn't currently
×
2148
                // the target or owner of the VMI handles deleting the domain locally.
×
2149
                return c.migrationOrphanedSourceNodeExecute(vmi, domainExists)
×
2150
        }
×
2151
        return c.defaultExecute(key,
1✔
2152
                vmi,
1✔
2153
                vmiExists,
1✔
2154
                domain,
1✔
2155
                domainExists)
1✔
2156

2157
}
2158

2159
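// processVmCleanup tears down all local, per-VMI resources: migration proxies, the
// downward metrics server, container disk and hotplug volume mounts, networking,
// the launcher client and the cached domain object.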
func (c *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstance) error {
1✔
2160

1✔
2161
        vmiId := string(vmi.UID)
1✔
2162

1✔
2163
        log.Log.Object(vmi).Infof("Performing final local cleanup for vmi with uid %s", vmiId)
1✔
2164

1✔
2165
        c.migrationProxy.StopTargetListener(vmiId)
1✔
2166
        c.migrationProxy.StopSourceListener(vmiId)
1✔
2167

1✔
2168
        c.downwardMetricsManager.StopServer(vmi)
1✔
2169

1✔
2170
        // Unmount container disks and clean up remaining files
1✔
2171
        if err := c.containerDiskMounter.Unmount(vmi); err != nil {
1✔
2172
                return err
×
2173
        }
×
2174

2175
        // UnmountAll does the cleanup on the "best effort" basis: it is
2176
        // safe to pass a nil cgroupManager.
2177
        cgroupManager, _ := getCgroupManager(vmi)
1✔
2178
        if err := c.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
1✔
2179
                return err
×
2180
        }
×
2181

2182
        c.teardownNetwork(vmi)
1✔
2183

1✔
2184
        c.sriovHotplugExecutorPool.Delete(vmi.UID)
1✔
2185

1✔
2186
        // Watch dog file and command client must be the last things removed here
1✔
2187
        if err := c.closeLauncherClient(vmi); err != nil {
1✔
2188
                return err
×
2189
        }
×
2190

2191
        // Remove the domain from cache in the event that we're performing
2192
        // a final cleanup and never received the "DELETE" event. This is
2193
        // possible if the VMI pod goes away before we receive the final domain
2194
        // "DELETE"
2195
        domain := api.NewDomainReferenceFromName(vmi.Namespace, vmi.Name)
1✔
2196
        log.Log.Object(domain).Infof("Removing domain from cache during final cleanup")
1✔
2197
        return c.domainStore.Delete(domain)
1✔
2198
}
2199

2200
func (c *VirtualMachineController) closeLauncherClient(vmi *v1.VirtualMachineInstance) error {
1✔
2201

1✔
2202
        // UID is required in order to close socket
1✔
2203
        if string(vmi.GetUID()) == "" {
2✔
2204
                return nil
1✔
2205
        }
1✔
2206

2207
        clientInfo, exists := c.launcherClients.Load(vmi.UID)
1✔
2208
        if exists && clientInfo.Client != nil {
2✔
2209
                clientInfo.Client.Close()
1✔
2210
                close(clientInfo.DomainPipeStopChan)
1✔
2211
        }
1✔
2212

2213
        virtcache.DeleteGhostRecord(vmi.Namespace, vmi.Name)
1✔
2214
        c.launcherClients.Delete(vmi.UID)
1✔
2215
        return nil
1✔
2216
}
2217

2218
// used by unit tests to add mock clients
2219
func (c *VirtualMachineController) addLauncherClient(vmUID types.UID, info *virtcache.LauncherClientInfo) error {
1✔
2220
        c.launcherClients.Store(vmUID, info)
1✔
2221
        return nil
1✔
2222
}
1✔
2223

2224
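// isLauncherClientUnresponsive reports whether the virt-launcher command socket for the given
// VMI is unresponsive. The second return value indicates whether the client has finished
// initializing; callers re-enqueue the VMI while initialization is still pending.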
func (c *VirtualMachineController) isLauncherClientUnresponsive(vmi *v1.VirtualMachineInstance) (unresponsive bool, initialized bool, err error) {
1✔
2225
        var socketFile string
1✔
2226

1✔
2227
        clientInfo, exists := c.launcherClients.Load(vmi.UID)
1✔
2228
        if exists {
2✔
2229
                if clientInfo.Ready {
2✔
2230
                        // use cached socket if we previously established a connection
1✔
2231
                        socketFile = clientInfo.SocketFile
1✔
2232
                } else {
2✔
2233
                        socketFile, err = cmdclient.FindSocketOnHost(vmi)
1✔
2234
                        if err != nil {
2✔
2235
                                // socket does not exist, but let's see if the pod is still there
1✔
2236
                                if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
1✔
2237
                                        // no pod means that waiting for it to initialize makes no sense
×
2238
                                        return true, true, nil
×
2239
                                }
×
2240
                                // pod is still there, if there is no socket let's wait for it to become ready
2241
                                if clientInfo.NotInitializedSince.Before(time.Now().Add(-3 * time.Minute)) {
2✔
2242
                                        return true, true, nil
1✔
2243
                                }
1✔
2244
                                return false, false, nil
1✔
2245
                        }
2246
                        clientInfo.Ready = true
×
2247
                        clientInfo.SocketFile = socketFile
×
2248
                }
2249
        } else {
×
2250
                clientInfo := &virtcache.LauncherClientInfo{
×
2251
                        NotInitializedSince: time.Now(),
×
2252
                        Ready:               false,
×
2253
                }
×
2254
                c.launcherClients.Store(vmi.UID, clientInfo)
×
2255
                // attempt to find the socket if the established connection doesn't currently exist.
×
2256
                socketFile, err = cmdclient.FindSocketOnHost(vmi)
×
2257
                // no socket file, no VMI, so it's unresponsive
×
2258
                if err != nil {
×
2259
                        // socket does not exist, but let's see if the pod is still there
×
2260
                        if _, err = cmdclient.FindPodDirOnHost(vmi); err != nil {
×
2261
                                // no pod means that waiting for it to initialize makes no sense
×
2262
                                return true, true, nil
×
2263
                        }
×
2264
                        return false, false, nil
×
2265
                }
2266
                clientInfo.Ready = true
×
2267
                clientInfo.SocketFile = socketFile
×
2268
        }
2269
        return cmdclient.IsSocketUnresponsive(socketFile), true, nil
1✔
2270
}
2271

2272
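// getLauncherClient returns a cached cmd client for the VMI's virt-launcher, or establishes a
// new connection: it locates the launcher socket, records a ghost record, starts the domain
// notify pipe and caches the resulting client info.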
func (c *VirtualMachineController) getLauncherClient(vmi *v1.VirtualMachineInstance) (cmdclient.LauncherClient, error) {
1✔
2273
        var err error
1✔
2274

1✔
2275
        clientInfo, exists := c.launcherClients.Load(vmi.UID)
1✔
2276
        if exists && clientInfo.Client != nil {
2✔
2277
                return clientInfo.Client, nil
1✔
2278
        }
1✔
2279

2280
        socketFile, err := cmdclient.FindSocketOnHost(vmi)
×
2281
        if err != nil {
×
2282
                return nil, err
×
2283
        }
×
2284

2285
        err = virtcache.AddGhostRecord(vmi.Namespace, vmi.Name, socketFile, vmi.UID)
×
2286
        if err != nil {
×
2287
                return nil, err
×
2288
        }
×
2289

2290
        client, err := cmdclient.NewClient(socketFile)
×
2291
        if err != nil {
×
2292
                return nil, err
×
2293
        }
×
2294

2295
        domainPipeStopChan := make(chan struct{})
×
2296
        // we pipe the domain notify socket into the VMI's filesystem
×
2297
        err = c.startDomainNotifyPipe(domainPipeStopChan, vmi)
×
2298
        if err != nil {
×
2299
                client.Close()
×
2300
                close(domainPipeStopChan)
×
2301
                return nil, err
×
2302
        }
×
2303

2304
        c.launcherClients.Store(vmi.UID, &virtcache.LauncherClientInfo{
×
2305
                Client:              client,
×
2306
                SocketFile:          socketFile,
×
2307
                DomainPipeStopChan:  domainPipeStopChan,
×
2308
                NotInitializedSince: time.Now(),
×
2309
                Ready:               true,
×
2310
        })
×
2311

×
2312
        return client, nil
×
2313
}
2314

2315
func (c *VirtualMachineController) processVmDestroy(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
×
2316
        tryGracefully := false
×
2317
        return c.helperVmShutdown(vmi, domain, tryGracefully)
×
2318
}
×
2319

2320
func (c *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
2321
        tryGracefully := true
1✔
2322
        return c.helperVmShutdown(vmi, domain, tryGracefully)
1✔
2323
}
1✔
2324

2325
func (c *VirtualMachineController) helperVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, tryGracefully bool) error {
1✔
2326

1✔
2327
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
1✔
2328
        client, err := c.getVerifiedLauncherClient(vmi)
1✔
2329
        if err != nil {
1✔
2330
                return err
×
2331
        }
×
2332

2333
        if domainHasGracePeriod(domain) && tryGracefully {
2✔
2334
                if expired, timeLeft := c.hasGracePeriodExpired(domain); !expired {
2✔
2335
                        return c.handleVMIShutdown(vmi, domain, client, timeLeft)
1✔
2336
                }
1✔
2337
                log.Log.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
1✔
2338
        } else {
1✔
2339
                log.Log.Object(vmi).Infof("Graceful shutdown not set, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
1✔
2340
        }
1✔
2341

2342
        err = client.KillVirtualMachine(vmi)
1✔
2343
        if err != nil && !cmdclient.IsDisconnected(err) {
1✔
2344
                // Only report err if it wasn't the result of a disconnect.
×
2345
                //
×
2346
                // Both virt-launcher and virt-handler are trying to destroy
×
2347
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
2348
                // disconnected during the kill request, which shouldn't be
×
2349
                // considered an error.
×
2350
                return err
×
2351
        }
×
2352

2353
        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMIStopping)
1✔
2354

1✔
2355
        return nil
1✔
2356
}
2357

2358
func (c *VirtualMachineController) handleVMIShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain, client cmdclient.LauncherClient, timeLeft int64) error {
1✔
2359
        if domain.Status.Status != api.Shutdown {
2✔
2360
                return c.shutdownVMI(vmi, client, timeLeft)
1✔
2361
        }
1✔
2362
        log.Log.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
×
2363
        return nil
×
2364
}
2365

2366
func (c *VirtualMachineController) shutdownVMI(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient, timeLeft int64) error {
1✔
2367
        err := client.ShutdownVirtualMachine(vmi)
1✔
2368
        if err != nil && !cmdclient.IsDisconnected(err) {
1✔
2369
                // Only report err if it wasn't the result of a disconnect.
×
2370
                //
×
2371
                // Both virt-launcher and virt-handler are trying to destroy
×
2372
                // the VirtualMachineInstance at the same time. It's possible the client may get
×
2373
                // disconnected during the kill request, which shouldn't be
×
2374
                // considered an error.
×
2375
                return err
×
2376
        }
×
2377

2378
        log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
1✔
2379

1✔
2380
        // Make sure that we don't hot-loop in case we send the first domain notification
1✔
2381
        if timeLeft == -1 {
2✔
2382
                timeLeft = 5
1✔
2383
                if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
1✔
2384
                        timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
×
2385
                }
×
2386
        }
2387
        // In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
2388
        // That's important since a booting OS can miss ACPI signals
2389
        if timeLeft > 5 {
1✔
2390
                timeLeft = 5
×
2391
        }
×
2392

2393
        // pending graceful shutdown.
2394
        c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Duration(timeLeft)*time.Second)
1✔
2395
        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), VMIGracefulShutdown)
1✔
2396
        return nil
1✔
2397
}
2398

2399
func (c *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance) error {
1✔
2400

1✔
2401
        // Only attempt to shutdown/destroy if we still have a connection established with the pod.
1✔
2402
        client, err := c.getVerifiedLauncherClient(vmi)
1✔
2403

1✔
2404
        // If the pod has been torn down, we know the VirtualMachineInstance is down.
1✔
2405
        if err == nil {
2✔
2406

1✔
2407
                log.Log.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())
1✔
2408

1✔
2409
                // pending deletion.
1✔
2410
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), VMISignalDeletion)
1✔
2411

1✔
2412
                err = client.DeleteDomain(vmi)
1✔
2413
                if err != nil && !cmdclient.IsDisconnected(err) {
1✔
2414
                        // Only report err if it wasn't the result of a disconnect.
×
2415
                        //
×
2416
                        // Both virt-launcher and virt-handler are trying to destroy
×
2417
                        // the VirtualMachineInstance at the same time. It's possible the client may get
×
2418
                        // disconnected during the kill request, which shouldn't be
×
2419
                        // considered an error.
×
2420
                        return err
×
2421
                }
×
2422
        }
2423

2424
        return nil
1✔
2425

2426
}
2427

2428
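// hasStaleClientConnections reports whether a ghost record still exists for the VMI even though
// no working launcher client connection can be established anymore.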
func (c *VirtualMachineController) hasStaleClientConnections(vmi *v1.VirtualMachineInstance) bool {
1✔
2429
        _, err := c.getVerifiedLauncherClient(vmi)
1✔
2430
        if err == nil {
2✔
2431
                // current client connection is good.
1✔
2432
                return false
1✔
2433
        }
1✔
2434

2435
        // no connection, but ghost file exists.
2436
        if virtcache.HasGhostRecord(vmi.Namespace, vmi.Name) {
2✔
2437
                return true
1✔
2438
        }
1✔
2439

2440
        return false
×
2441

2442
}
2443

2444
func (c *VirtualMachineController) getVerifiedLauncherClient(vmi *v1.VirtualMachineInstance) (client cmdclient.LauncherClient, err error) {
1✔
2445
        client, err = c.getLauncherClient(vmi)
1✔
2446
        if err != nil {
1✔
2447
                return
×
2448
        }
×
2449

2450
        // Verify connectivity.
2451
        // It's possible the pod has already been torn down along with the VirtualMachineInstance.
2452
        err = client.Ping()
1✔
2453
        return
1✔
2454
}
2455

2456
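// isOrphanedMigrationSource reports whether the VMI's node-name label points to another node,
// meaning any domain still cached on this host is a leftover from a completed migration and
// should be cleaned up locally.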
func (c *VirtualMachineController) isOrphanedMigrationSource(vmi *v1.VirtualMachineInstance) bool {
1✔
2457
        nodeName, ok := vmi.Labels[v1.NodeNameLabel]
1✔
2458

1✔
2459
        if ok && nodeName != "" && nodeName != c.host {
1✔
2460
                return true
×
2461
        }
×
2462

2463
        return false
1✔
2464
}
2465

2466
func (c *VirtualMachineController) isPreMigrationTarget(vmi *v1.VirtualMachineInstance) bool {
1✔
2467

1✔
2468
        migrationTargetNodeName, ok := vmi.Labels[v1.MigrationTargetNodeNameLabel]
1✔
2469

1✔
2470
        if ok &&
1✔
2471
                migrationTargetNodeName != "" &&
1✔
2472
                migrationTargetNodeName != vmi.Status.NodeName &&
1✔
2473
                migrationTargetNodeName == c.host {
2✔
2474
                return true
1✔
2475
        }
1✔
2476

2477
        return false
1✔
2478
}
2479

2480
func (c *VirtualMachineController) checkNetworkInterfacesForMigration(vmi *v1.VirtualMachineInstance) error {
1✔
2481
        return netvmispec.VerifyVMIMigratable(vmi, c.clusterConfig.GetNetworkBindings())
1✔
2482
}
1✔
2483

2484
func (c *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachineInstance) (blockMigrate bool, err error) {
1✔
2485
        volumeStatusMap := make(map[string]v1.VolumeStatus)
1✔
2486

1✔
2487
        for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
2488
                volumeStatusMap[volumeStatus.Name] = volumeStatus
1✔
2489
        }
1✔
2490

2491
        if len(vmi.Status.MigratedVolumes) > 0 {
1✔
2492
                blockMigrate = true
×
2493
        }
×
2494

2495
        filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)
1✔
2496

1✔
2497
        // Check if all VMI volumes can be shared between the source and the destination
1✔
2498
        // of a live migration. blockMigrate will be returned as false only if all volumes
1✔
2499
        // are shared and the VMI has no local disks
1✔
2500
        // Some combinations of disks make the VMI unsuitable for live migration.
1✔
2501
        // A relevant error will be returned in this case.
1✔
2502
        for _, volume := range vmi.Spec.Volumes {
2✔
2503
                volSrc := volume.VolumeSource
1✔
2504
                if volSrc.PersistentVolumeClaim != nil || volSrc.DataVolume != nil {
2✔
2505

1✔
2506
                        var claimName string
1✔
2507
                        if volSrc.PersistentVolumeClaim != nil {
2✔
2508
                                claimName = volSrc.PersistentVolumeClaim.ClaimName
1✔
2509
                        } else {
2✔
2510
                                claimName = volSrc.DataVolume.Name
1✔
2511
                        }
1✔
2512

2513
                        volumeStatus, ok := volumeStatusMap[volume.Name]
1✔
2514

1✔
2515
                        if !ok || volumeStatus.PersistentVolumeClaimInfo == nil {
1✔
2516
                                return true, fmt.Errorf("cannot migrate VMI: Unable to determine if PVC %v is shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
×
2517
                        } else if !pvctypes.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes) && !pvctypes.IsMigratedVolume(volumeStatus.Name, vmi) {
2✔
2518
                                return true, fmt.Errorf("cannot migrate VMI: PVC %v is not shared, live migration requires that all PVCs must be shared (using ReadWriteMany access mode)", claimName)
1✔
2519
                        }
1✔
2520

2521
                } else if volSrc.HostDisk != nil {
2✔
2522
                        shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
1✔
2523
                        if !shared {
2✔
2524
                                return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
1✔
2525
                        }
1✔
2526
                } else {
1✔
2527
                        if _, ok := filesystems[volume.Name]; ok {
2✔
2528
                                log.Log.Object(vmi).Infof("Volume %s is shared with virtiofs, allow live migration", volume.Name)
1✔
2529
                                continue
1✔
2530
                        }
2531

2532
                        isVolumeUsedByReadOnlyDisk := false
1✔
2533
                        for _, disk := range vmi.Spec.Domain.Devices.Disks {
2✔
2534
                                if isReadOnlyDisk(&disk) && disk.Name == volume.Name {
2✔
2535
                                        isVolumeUsedByReadOnlyDisk = true
1✔
2536
                                        break
1✔
2537
                                }
2538
                        }
2539

2540
                        if isVolumeUsedByReadOnlyDisk {
2✔
2541
                                continue
1✔
2542
                        }
2543

2544
                        if vmi.Status.MigrationMethod == "" || vmi.Status.MigrationMethod == v1.LiveMigration {
2✔
2545
                                log.Log.Object(vmi).Infof("migration is block migration because of %s volume", volume.Name)
1✔
2546
                        }
1✔
2547
                        blockMigrate = true
1✔
2548
                }
2549
        }
2550
        return
1✔
2551
}
2552

2553
func (c *VirtualMachineController) isVMIPausedDuringMigration(vmi *v1.VirtualMachineInstance) bool {
1✔
2554
        return vmi.Status.MigrationState != nil &&
1✔
2555
                vmi.Status.MigrationState.Mode == v1.MigrationPaused &&
1✔
2556
                !vmi.Status.MigrationState.Completed
1✔
2557
}
1✔
2558

2559
func (c *VirtualMachineController) isMigrationSource(vmi *v1.VirtualMachineInstance) bool {
1✔
2560

1✔
2561
        if vmi.Status.MigrationState != nil &&
1✔
2562
                vmi.Status.MigrationState.SourceNode == c.host &&
1✔
2563
                vmi.Status.MigrationState.TargetNodeAddress != "" &&
1✔
2564
                !vmi.Status.MigrationState.Completed {
2✔
2565

1✔
2566
                return true
1✔
2567
        }
1✔
2568
        return false
1✔
2569

2570
}
2571

2572
func (c *VirtualMachineController) handleTargetMigrationProxy(vmi *v1.VirtualMachineInstance) error {
1✔
2573
        // handle starting/stopping target migration proxy
1✔
2574
        migrationTargetSockets := []string{}
1✔
2575
        res, err := c.podIsolationDetector.Detect(vmi)
1✔
2576
        if err != nil {
1✔
2577
                return err
×
2578
        }
×
2579

2580
        // Get the libvirt connection socket file on the destination pod.
2581
        socketFile := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "libvirt/virtqemud-sock"), res.Pid())
1✔
2582
        // the migration-proxy is no longer shared via host mount, so we
1✔
2583
        // pass in the virt-launcher's baseDir to reach the unix sockets.
1✔
2584
        baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
1✔
2585
        migrationTargetSockets = append(migrationTargetSockets, socketFile)
1✔
2586

1✔
2587
        migrationPortsRange := migrationproxy.GetMigrationPortsList(vmi.IsBlockMigration())
1✔
2588
        for _, port := range migrationPortsRange {
2✔
2589
                key := migrationproxy.ConstructProxyKey(string(vmi.UID), port)
1✔
2590
                // a proxy between the target direct qemu channel and the connector in the destination pod
1✔
2591
                destSocketFile := migrationproxy.SourceUnixFile(baseDir, key)
1✔
2592
                migrationTargetSockets = append(migrationTargetSockets, destSocketFile)
1✔
2593
        }
1✔
2594
        err = c.migrationProxy.StartTargetListener(string(vmi.UID), migrationTargetSockets)
1✔
2595
        if err != nil {
1✔
2596
                return err
×
2597
        }
×
2598
        return nil
1✔
2599
}
2600

2601
func (c *VirtualMachineController) handlePostMigrationProxyCleanup(vmi *v1.VirtualMachineInstance) {
1✔
2602
        if vmi.Status.MigrationState == nil || vmi.Status.MigrationState.Completed || vmi.Status.MigrationState.Failed {
2✔
2603
                c.migrationProxy.StopTargetListener(string(vmi.UID))
1✔
2604
                c.migrationProxy.StopSourceListener(string(vmi.UID))
1✔
2605
        }
1✔
2606
}
2607

2608
func (c *VirtualMachineController) handleSourceMigrationProxy(vmi *v1.VirtualMachineInstance) error {
1✔
2609

1✔
2610
        res, err := c.podIsolationDetector.Detect(vmi)
1✔
2611
        if err != nil {
1✔
2612
                return err
×
2613
        }
×
2614
        // the migration-proxy is no longer shared via host mount, so we
2615
        // pass in the virt-launcher's baseDir to reach the unix sockets.
2616
        baseDir := fmt.Sprintf(filepath.Join(c.virtLauncherFSRunDirPattern, "kubevirt"), res.Pid())
1✔
2617
        c.migrationProxy.StopTargetListener(string(vmi.UID))
1✔
2618
        if vmi.Status.MigrationState.TargetDirectMigrationNodePorts == nil {
1✔
2619
                msg := "No migration proxy has been created for this vmi"
×
2620
                return fmt.Errorf("%s", msg)
×
2621
        }
×
2622
        err = c.migrationProxy.StartSourceListener(
1✔
2623
                string(vmi.UID),
1✔
2624
                vmi.Status.MigrationState.TargetNodeAddress,
1✔
2625
                vmi.Status.MigrationState.TargetDirectMigrationNodePorts,
1✔
2626
                baseDir,
1✔
2627
        )
1✔
2628
        if err != nil {
1✔
2629
                return err
×
2630
        }
×
2631

2632
        return nil
1✔
2633
}
2634

2635
func (c *VirtualMachineController) getLauncherClientInfo(vmi *v1.VirtualMachineInstance) *virtcache.LauncherClientInfo {
1✔
2636
        launcherInfo, exists := c.launcherClients.Load(vmi.UID)
1✔
2637
        if !exists {
1✔
2638
                return nil
×
2639
        }
×
2640
        return launcherInfo
1✔
2641
}
2642

2643
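// isMigrationInProgress reports whether the migration recorded in the VMI status has already
// been started inside the domain, based on the migration metadata written by virt-launcher.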
func isMigrationInProgress(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
1✔
2644
        var domainMigrationMetadata *api.MigrationMetadata
1✔
2645

1✔
2646
        if domain == nil ||
1✔
2647
                vmi.Status.MigrationState == nil ||
1✔
2648
                domain.Spec.Metadata.KubeVirt.Migration == nil {
2✔
2649
                return false
1✔
2650
        }
1✔
2651
        domainMigrationMetadata = domain.Spec.Metadata.KubeVirt.Migration
1✔
2652

1✔
2653
        if vmi.Status.MigrationState.MigrationUID == domainMigrationMetadata.UID &&
1✔
2654
                domainMigrationMetadata.StartTimestamp != nil {
2✔
2655
                return true
1✔
2656
        }
1✔
2657
        return false
×
2658
}
2659

2660
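// vmUpdateHelperMigrationSource drives the source side of a migration: it handles abort
// requests, starts the source migration proxy and instructs virt-launcher to migrate the
// domain with the effective migration options.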
func (c *VirtualMachineController) vmUpdateHelperMigrationSource(origVMI *v1.VirtualMachineInstance, domain *api.Domain) error {
1✔
2661

1✔
2662
        client, err := c.getLauncherClient(origVMI)
1✔
2663
        if err != nil {
1✔
2664
                return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
×
2665
        }
×
2666

2667
        if origVMI.Status.MigrationState.AbortRequested {
2✔
2668
                err = c.handleMigrationAbort(origVMI, client)
1✔
2669
                if err != nil {
1✔
2670
                        return err
×
2671
                }
×
2672
        } else {
1✔
2673
                if isMigrationInProgress(origVMI, domain) {
2✔
2674
                        // we already started this migration, no need to rerun this
1✔
2675
                        log.DefaultLogger().Errorf("migration %s has already been started", origVMI.Status.MigrationState.MigrationUID)
1✔
2676
                        return nil
1✔
2677
                }
1✔
2678

2679
                err = c.handleSourceMigrationProxy(origVMI)
1✔
2680
                if err != nil {
1✔
2681
                        return fmt.Errorf("failed to handle migration proxy: %v", err)
×
2682
                }
×
2683

2684
                migrationConfiguration := origVMI.Status.MigrationState.MigrationConfiguration
1✔
2685
                if migrationConfiguration == nil {
2✔
2686
                        migrationConfiguration = c.clusterConfig.GetMigrationConfiguration()
1✔
2687
                }
1✔
2688

2689
                options := &cmdclient.MigrationOptions{
1✔
2690
                        Bandwidth:               *migrationConfiguration.BandwidthPerMigration,
1✔
2691
                        ProgressTimeout:         *migrationConfiguration.ProgressTimeout,
1✔
2692
                        CompletionTimeoutPerGiB: *migrationConfiguration.CompletionTimeoutPerGiB,
1✔
2693
                        UnsafeMigration:         *migrationConfiguration.UnsafeMigrationOverride,
1✔
2694
                        AllowAutoConverge:       *migrationConfiguration.AllowAutoConverge,
1✔
2695
                        AllowPostCopy:           *migrationConfiguration.AllowPostCopy,
1✔
2696
                        AllowWorkloadDisruption: *migrationConfiguration.AllowWorkloadDisruption,
1✔
2697
                }
1✔
2698

1✔
2699
                configureParallelMigrationThreads(options, origVMI)
1✔
2700

1✔
2701
                marshalledOptions, err := json.Marshal(options)
1✔
2702
                if err != nil {
1✔
2703
                        log.Log.Object(origVMI).Warning("failed to marshall matched migration options")
×
2704
                } else {
1✔
2705
                        log.Log.Object(origVMI).Infof("migration options matched for vmi %s: %s", origVMI.Name, string(marshalledOptions))
1✔
2706
                }
1✔
2707

2708
                vmi := origVMI.DeepCopy()
1✔
2709
                err = hostdisk.ReplacePVCByHostDisk(vmi)
1✔
2710
                if err != nil {
1✔
2711
                        return err
×
2712
                }
×
2713

2714
                err = client.MigrateVirtualMachine(vmi, options)
1✔
2715
                if err != nil {
1✔
2716
                        return err
×
2717
                }
×
2718
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIMigrating)
1✔
2719
        }
2720
        return nil
1✔
2721
}
2722

2723
func replaceMigratedVolumesStatus(vmi *v1.VirtualMachineInstance) {
1✔
2724
        replaceVolsStatus := make(map[string]*v1.PersistentVolumeClaimInfo)
1✔
2725
        for _, v := range vmi.Status.MigratedVolumes {
1✔
2726
                replaceVolsStatus[v.SourcePVCInfo.ClaimName] = v.DestinationPVCInfo
×
2727
        }
×
2728
        for i, v := range vmi.Status.VolumeStatus {
1✔
2729
                if v.PersistentVolumeClaimInfo == nil {
×
2730
                        continue
×
2731
                }
2732
                if status, ok := replaceVolsStatus[v.PersistentVolumeClaimInfo.ClaimName]; ok {
×
2733
                        vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo = status
×
2734
                }
×
2735
        }
2736

2737
}
2738

2739
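// vmUpdateHelperMigrationTarget prepares this node as the migration target: it signals the
// target pod to clean up after a failed migration; otherwise it mounts container and hotplug
// disks, configures networking and device ownership, syncs the migration target and starts the
// target migration proxy.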
func (c *VirtualMachineController) vmUpdateHelperMigrationTarget(origVMI *v1.VirtualMachineInstance) error {
1✔
2740
        client, err := c.getLauncherClient(origVMI)
1✔
2741
        if err != nil {
1✔
2742
                return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
×
2743
        }
×
2744

2745
        vmi := origVMI.DeepCopy()
1✔
2746

1✔
2747
        if migrations.MigrationFailed(vmi) {
2✔
2748
                // if the migration failed, signal the target pod it's okay to exit
1✔
2749
                err = client.SignalTargetPodCleanup(vmi)
1✔
2750
                if err != nil {
1✔
2751
                        return err
×
2752
                }
×
2753
                log.Log.Object(vmi).Infof("Signaled target pod for failed migration to clean up")
1✔
2754
                // nothing left to do here if the migration failed.
1✔
2755
                // Re-enqueue to trigger handler final cleanup
1✔
2756
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
1✔
2757
                return nil
1✔
2758
        } else if migrations.IsMigrating(vmi) {
2✔
2759
                // If the migration has already started,
1✔
2760
                // then there's nothing left to prepare on the target side
1✔
2761
                return nil
1✔
2762
        }
1✔
2763
        // The VolumeStatus is used to retrieve additional information for the volume handling.
2764
        // For example, for a filesystem PVC, the information is used to create a correctly sized image.
2765
        // In the case of migrated volumes, we need to replace the original volume information with the
2766
        // destination volume properties.
2767
        replaceMigratedVolumesStatus(vmi)
1✔
2768
        err = hostdisk.ReplacePVCByHostDisk(vmi)
1✔
2769
        if err != nil {
1✔
2770
                return err
×
2771
        }
×
2772

2773
        // give containerDisks some time to become ready before throwing errors on retries
2774
        info := c.getLauncherClientInfo(vmi)
1✔
2775
        if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
1✔
2776
                if err != nil {
×
2777
                        return err
×
2778
                }
×
2779
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
×
2780
                return nil
×
2781
        }
2782

2783
        // Verify container disks checksum
2784
        err = container_disk.VerifyChecksums(c.containerDiskMounter, vmi)
1✔
2785
        switch {
1✔
2786
        case goerror.Is(err, container_disk.ErrChecksumMissing):
×
2787
                // wait for checksum to be computed by the source virt-handler
×
2788
                return err
×
2789
        case goerror.Is(err, container_disk.ErrChecksumMismatch):
×
2790
                log.Log.Object(vmi).Infof("Containerdisk checksum mismatch, terminating target pod: %s", err)
×
2791
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, "ContainerDiskFailedChecksum", "Aborting migration as the source and target containerdisks/kernelboot do not match")
×
2792
                return client.SignalTargetPodCleanup(vmi)
×
2793
        case err != nil:
×
2794
                return err
×
2795
        }
2796

2797
        // Mount container disks
2798
        disksInfo, err := c.containerDiskMounter.MountAndVerify(vmi)
1✔
2799
        if err != nil {
1✔
2800
                return err
×
2801
        }
×
2802

2803
        // Mount hotplug disks
2804
        if attachmentPodUID := vmi.Status.MigrationState.TargetAttachmentPodUID; attachmentPodUID != types.UID("") {
1✔
2805
                cgroupManager, err := getCgroupManager(vmi)
×
2806
                if err != nil {
×
2807
                        return err
×
2808
                }
×
2809
                if err := c.hotplugVolumeMounter.MountFromPod(vmi, attachmentPodUID, cgroupManager); err != nil {
×
2810
                        return fmt.Errorf("failed to mount hotplug volumes: %v", err)
×
2811
                }
×
2812
        }
2813

2814
        // configure network inside virt-launcher compute container
2815
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForMigrationTarget(vmi)); err != nil {
1✔
2816
                return fmt.Errorf("failed to configure vmi network for migration target: %w", err)
×
2817
        }
×
2818

2819
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
1✔
2820
        if err != nil {
1✔
2821
                return fmt.Errorf(failedDetectIsolationFmt, err)
×
2822
        }
×
2823
        virtLauncherRootMount, err := isolationRes.MountRoot()
1✔
2824
        if err != nil {
1✔
2825
                return err
×
2826
        }
×
2827

2828
        err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
1✔
2829
        if err != nil {
1✔
2830
                return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
×
2831
        }
×
2832
        if virtutil.IsAutoAttachVSOCK(vmi) {
1✔
2833
                if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
×
2834
                        return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
×
2835
                }
×
2836
        }
2837

2838
        lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
1✔
2839
        minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()
1✔
2840

1✔
2841
        // initialize disks images for empty PVC
1✔
2842
        hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
1✔
2843
        err = hostDiskCreator.Create(vmi)
1✔
2844
        if err != nil {
1✔
2845
                return fmt.Errorf("preparing host-disks failed: %v", err)
×
2846
        }
×
2847

2848
        if virtutil.IsNonRootVMI(vmi) {
1✔
2849
                if err := c.nonRootSetup(origVMI); err != nil {
×
2850
                        return err
×
2851
                }
×
2852
        }
2853

2854
        options := virtualMachineOptions(nil, 0, nil, c.capabilities, disksInfo, c.clusterConfig)
1✔
2855
        options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
1✔
2856

1✔
2857
        if err := client.SyncMigrationTarget(vmi, options); err != nil {
1✔
2858
                return fmt.Errorf("syncing migration target failed: %v", err)
×
2859
        }
×
2860
        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.PreparingTarget.String(), VMIMigrationTargetPrepared)
1✔
2861

1✔
2862
        err = c.handleTargetMigrationProxy(vmi)
1✔
2863
        if err != nil {
1✔
2864
                return fmt.Errorf("failed to handle post sync migration proxy: %v", err)
×
2865
        }
×
2866
        return nil
1✔
2867
}
2868

2869
func (c *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstance) error {
×
2870
        res, err := c.podIsolationDetector.Detect(vmi)
×
2871
        if err != nil {
×
2872
                return err
×
2873
        }
×
2874
        var Mask unix.CPUSet
×
2875
        Mask.Zero()
×
2876
        qemuprocess, err := res.GetQEMUProcess()
×
2877
        if err != nil {
×
2878
                return err
×
2879
        }
×
2880
        qemupid := qemuprocess.Pid()
×
2881
        if qemupid == -1 {
×
2882
                return nil
×
2883
        }
×
2884

2885
        pitpid, err := res.KvmPitPid()
×
2886
        if err != nil {
×
2887
                return err
×
2888
        }
×
2889
        if pitpid == -1 {
×
2890
                return nil
×
2891
        }
×
2892
        if vmi.IsRealtimeEnabled() {
×
2893
                param := schedParam{priority: 2}
×
2894
                err = schedSetScheduler(pitpid, schedFIFO, param)
×
2895
                if err != nil {
×
2896
                        return fmt.Errorf("failed to set FIFO scheduling and priority 2 for thread %d: %w", pitpid, err)
×
2897
                }
×
2898
        }
2899
        vcpus, err := getVCPUThreadIDs(qemupid)
×
2900
        if err != nil {
×
2901
                return err
×
2902
        }
×
2903
        vpid, ok := vcpus["0"]
×
2904
        if !ok {
×
2905
                return nil
×
2906
        }
×
2907
        vcpupid, err := strconv.Atoi(vpid)
×
2908
        if err != nil {
×
2909
                return err
×
2910
        }
×
2911
        err = unix.SchedGetaffinity(vcpupid, &Mask)
×
2912
        if err != nil {
×
2913
                return err
×
2914
        }
×
2915
        return unix.SchedSetaffinity(pitpid, &Mask)
×
2916
}
2917

2918
func (c *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
×
2919
        if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
×
2920
                log.Log.Reason(err).Error("CreateChildCgroup ")
×
2921
                return err
×
2922
        }
×
2923

2924
        key := controller.VirtualMachineInstanceKey(vmi)
×
2925
        domain, domainExists, _, err := c.getDomainFromCache(key)
×
2926
        if err != nil {
×
2927
                return err
×
2928
        }
×
2929
        // bail out if domain does not exist
2930
        if !domainExists {
×
2931
                return nil
×
2932
        }
×
2933

2934
        if domain.Spec.CPUTune == nil || domain.Spec.CPUTune.EmulatorPin == nil {
×
2935
                return nil
×
2936
        }
×
2937

2938
        hkcpus, err := hardware.ParseCPUSetLine(domain.Spec.CPUTune.EmulatorPin.CPUSet, 100)
×
2939
        if err != nil {
×
2940
                return err
×
2941
        }
×
2942

2943
        log.Log.V(3).Object(vmi).Infof("housekeeping cpu: %v", hkcpus)
×
2944

×
2945
        err = cgroupManager.SetCpuSet("housekeeping", hkcpus)
×
2946
        if err != nil {
×
2947
                return err
×
2948
        }
×
2949

2950
        tids, err := cgroupManager.GetCgroupThreads()
×
2951
        if err != nil {
×
2952
                return err
×
2953
        }
×
2954
        hktids := make([]int, 0, 10)
×
2955

×
2956
        for _, tid := range tids {
×
2957
                proc, err := ps.FindProcess(tid)
×
2958
                if err != nil {
×
2959
                        log.Log.Object(vmi).Errorf("Failure to find process: %s", err.Error())
×
2960
                        return err
×
2961
                }
×
2962
                if proc == nil {
×
2963
                        return fmt.Errorf("failed to find process with tid: %d", tid)
×
2964
                }
×
2965
                comm := proc.Executable()
×
2966
                if strings.Contains(comm, "CPU ") && strings.Contains(comm, "KVM") {
×
2967
                        continue
×
2968
                }
2969
                hktids = append(hktids, tid)
×
2970
        }
2971

2972
        log.Log.V(3).Object(vmi).Infof("hk thread ids: %v", hktids)
×
2973
        for _, tid := range hktids {
×
2974
                err = cgroupManager.AttachTID("cpuset", "housekeeping", tid)
×
2975
                if err != nil {
×
2976
                        log.Log.Object(vmi).Errorf("Error attaching tid %d: %v", tid, err.Error())
×
2977
                        return err
×
2978
                }
×
2979
        }
2980

2981
        return nil
×
2982
}
2983

2984
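// vmUpdateHelperDefault handles the non-migration update path: it prepares disks, networking
// and devices depending on whether the VMI is starting or already running, syncs the domain
// with virt-launcher and then runs post-sync housekeeping.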
func (c *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMachineInstance, domainExists bool) error {
1✔
2985
        client, err := c.getLauncherClient(origVMI)
1✔
2986
        if err != nil {
1✔
2987
                return fmt.Errorf(unableCreateVirtLauncherConnectionFmt, err)
×
2988
        }
×
2989

2990
        vmi := origVMI.DeepCopy()
1✔
2991
        preallocatedVolumes := c.getPreallocatedVolumes(vmi)
1✔
2992

1✔
2993
        err = hostdisk.ReplacePVCByHostDisk(vmi)
1✔
2994
        if err != nil {
1✔
2995
                return err
×
2996
        }
×
2997

2998
        cgroupManager, err := getCgroupManager(vmi)
1✔
2999
        if err != nil {
1✔
3000
                return err
×
3001
        }
×
3002

3003
        var errorTolerantFeaturesError []error
1✔
3004
        disksInfo := map[string]*containerdisk.DiskInfo{}
1✔
3005
        readyToProceed, err := c.handleVMIState(vmi, cgroupManager, &disksInfo, &errorTolerantFeaturesError)
1✔
3006
        if err != nil {
2✔
3007
                return err
1✔
3008
        }
1✔
3009

3010
        if !readyToProceed {
2✔
3011
                return nil
1✔
3012
        }
1✔
3013

3014
        // Synchronize the VirtualMachineInstance state
3015
        err = c.syncVirtualMachine(client, vmi, preallocatedVolumes, disksInfo)
1✔
3016
        if err != nil {
1✔
3017
                return err
×
3018
        }
×
3019

3020
        // Post-sync housekeeping
3021
        err = c.handleHousekeeping(vmi, cgroupManager, domainExists)
1✔
3022
        if err != nil {
1✔
3023
                return err
×
3024
        }
×
3025

3026
        return errors.NewAggregate(errorTolerantFeaturesError)
1✔
3027
}
3028

3029
// handleVMIState decides whether to call handleRunningVMI or handleStartingVMI based on the VMI's state.
3030
func (c *VirtualMachineController) handleVMIState(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, disksInfo *map[string]*containerdisk.DiskInfo, errorTolerantFeaturesError *[]error) (bool, error) {
1✔
3031
        if vmi.IsRunning() {
2✔
3032
                return true, c.handleRunningVMI(vmi, cgroupManager, errorTolerantFeaturesError)
1✔
3033
        } else if !vmi.IsFinal() {
3✔
3034
                return c.handleStartingVMI(vmi, cgroupManager, disksInfo)
1✔
3035
        }
1✔
3036
        return true, nil
×
3037
}
3038

3039
// handleRunningVMI contains the logic specifically for running VMs (hotplugging in running state, metrics, network updates)
3040
func (c *VirtualMachineController) handleRunningVMI(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, errorTolerantFeaturesError *[]error) error {
1✔
3041
        if err := c.hotplugSriovInterfaces(vmi); err != nil {
1✔
3042
                log.Log.Object(vmi).Error(err.Error())
×
3043
        }
×
3044

3045
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
2✔
3046
                return err
1✔
3047
        }
1✔
3048

3049
        if err := c.getMemoryDump(vmi); err != nil {
1✔
3050
                return err
×
3051
        }
×
3052

3053
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
1✔
3054
        if err != nil {
1✔
3055
                return fmt.Errorf(failedDetectIsolationFmt, err)
×
3056
        }
×
3057

3058
        if err := c.downwardMetricsManager.StartServer(vmi, isolationRes.Pid()); err != nil {
1✔
3059
                return err
×
3060
        }
×
3061

3062
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForLiveUpdate(vmi)); err != nil {
1✔
3063
                log.Log.Object(vmi).Error(err.Error())
×
3064
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, "NicHotplug", err.Error())
×
3065
                *errorTolerantFeaturesError = append(*errorTolerantFeaturesError, err)
×
3066
        }
×
3067

3068
        return nil
1✔
3069
}
3070

3071
// handleStartingVMI contains the logic for starting VMs (container disks, initial network setup, device ownership).
3072
func (c *VirtualMachineController) handleStartingVMI(
3073
        vmi *v1.VirtualMachineInstance,
3074
        cgroupManager cgroup.Manager,
3075
        disksInfo *map[string]*containerdisk.DiskInfo,
3076
) (bool, error) {
1✔
3077
        // give containerDisks some time to become ready before throwing errors on retries
1✔
3078
        info := c.getLauncherClientInfo(vmi)
1✔
3079
        if ready, err := c.containerDiskMounter.ContainerDisksReady(vmi, info.NotInitializedSince); !ready {
2✔
3080
                if err != nil {
2✔
3081
                        return false, err
1✔
3082
                }
1✔
3083
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
1✔
3084
                return false, nil
1✔
3085
        }
3086

3087
        var err error
1✔
3088
        *disksInfo, err = c.containerDiskMounter.MountAndVerify(vmi)
1✔
3089
        if err != nil {
2✔
3090
                return false, err
1✔
3091
        }
1✔
3092

3093
        if err := c.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
1✔
3094
                return false, err
×
3095
        }
×
3096

3097
        if err := c.setupNetwork(vmi, netsetup.FilterNetsForVMStartup(vmi)); err != nil {
2✔
3098
                return false, fmt.Errorf("failed to configure vmi network: %w", err)
1✔
3099
        }
1✔
3100

3101
        isolationRes, err := c.podIsolationDetector.Detect(vmi)
1✔
3102
        if err != nil {
1✔
3103
                return false, fmt.Errorf(failedDetectIsolationFmt, err)
×
3104
        }
×
3105

3106
        if err := c.setupDevicesOwnerships(vmi, isolationRes); err != nil {
1✔
3107
                return false, err
×
3108
        }
×
3109

3110
        if err := c.adjustResources(vmi); err != nil {
1✔
3111
                return false, err
×
3112
        }
×
3113

3114
        if err := c.waitForSEVAttestation(vmi); err != nil {
1✔
3115
                return false, err
×
3116
        }
×
3117

3118
        return true, nil
1✔
3119
}
3120

3121
func (c *VirtualMachineController) adjustResources(vmi *v1.VirtualMachineInstance) error {
1✔
3122
        err := c.podIsolationDetector.AdjustResources(vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
1✔
3123
        if err != nil {
1✔
3124
                return fmt.Errorf("failed to adjust resources: %v", err)
×
3125
        }
×
3126
        return nil
1✔
3127
}
3128

3129
func (c *VirtualMachineController) waitForSEVAttestation(vmi *v1.VirtualMachineInstance) error {
1✔
3130
        if util.IsSEVAttestationRequested(vmi) {
1✔
3131
                sev := vmi.Spec.Domain.LaunchSecurity.SEV
×
3132
                if sev.Session == "" || sev.DHCert == "" {
×
3133
                        // Wait for the session parameters to be provided
×
3134
                        return nil
×
3135
                }
×
3136
        }
3137
        return nil
1✔
3138
}
3139

3140
func (c *VirtualMachineController) setupDevicesOwnerships(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
1✔
3141
        virtLauncherRootMount, err := isolationRes.MountRoot()
1✔
3142
        if err != nil {
1✔
3143
                return err
×
3144
        }
×
3145

3146
        err = c.claimDeviceOwnership(virtLauncherRootMount, "kvm")
1✔
3147
        if err != nil {
1✔
3148
                return fmt.Errorf("failed to set up file ownership for /dev/kvm: %v", err)
×
3149
        }
×
3150

3151
        if virtutil.IsAutoAttachVSOCK(vmi) {
1✔
3152
                if err := c.claimDeviceOwnership(virtLauncherRootMount, "vhost-vsock"); err != nil {
×
3153
                        return fmt.Errorf("failed to set up file ownership for /dev/vhost-vsock: %v", err)
×
3154
                }
×
3155
        }
3156

3157
        if err := c.configureHostDisks(vmi, isolationRes, virtLauncherRootMount); err != nil {
1✔
3158
                return err
×
3159
        }
×
3160

3161
        if err := c.configureSEVDeviceOwnership(vmi, isolationRes, virtLauncherRootMount); err != nil {
1✔
3162
                return err
×
3163
        }
×
3164

3165
        if virtutil.IsNonRootVMI(vmi) {
1✔
3166
                if err := c.nonRootSetup(vmi); err != nil {
×
3167
                        return err
×
3168
                }
×
3169
        }
3170

3171
        if err := c.configureVirtioFS(vmi, isolationRes); err != nil {
1✔
3172
                return err
×
3173
        }
×
3174

3175
        return nil
1✔
3176
}
3177

3178
func (c *VirtualMachineController) configureHostDisks(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
1✔
3179
        lessPVCSpaceToleration := c.clusterConfig.GetLessPVCSpaceToleration()
1✔
3180
        minimumPVCReserveBytes := c.clusterConfig.GetMinimumReservePVCBytes()
1✔
3181

1✔
3182
        hostDiskCreator := hostdisk.NewHostDiskCreator(c.recorder, lessPVCSpaceToleration, minimumPVCReserveBytes, virtLauncherRootMount)
1✔
3183
        if err := hostDiskCreator.Create(vmi); err != nil {
1✔
3184
                return fmt.Errorf("preparing host-disks failed: %v", err)
×
3185
        }
×
3186
        return nil
1✔
3187
}
3188

3189
func (c *VirtualMachineController) configureSEVDeviceOwnership(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult, virtLauncherRootMount *safepath.Path) error {
1✔
3190
        if virtutil.IsSEVVMI(vmi) {
1✔
3191
                sevDevice, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", "sev"))
×
3192
                if err != nil {
×
3193
                        return err
×
3194
                }
×
3195
                if err := diskutils.DefaultOwnershipManager.SetFileOwnership(sevDevice); err != nil {
×
3196
                        return fmt.Errorf("failed to set SEV device owner: %v", err)
×
3197
                }
×
3198
        }
3199
        return nil
1✔
3200
}
3201

3202
func (c *VirtualMachineController) configureVirtioFS(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) error {
1✔
3203
        for _, fs := range vmi.Spec.Domain.Devices.Filesystems {
1✔
3204
                socketPath, err := isolation.SafeJoin(isolationRes, virtiofs.VirtioFSSocketPath(fs.Name))
×
3205
                if err != nil {
×
3206
                        return err
×
3207
                }
×
3208
                if err := diskutils.DefaultOwnershipManager.SetFileOwnership(socketPath); err != nil {
×
3209
                        return err
×
3210
                }
×
3211
        }
3212
        return nil
1✔
3213
}
3214

3215
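// syncVirtualMachine sends the sync command to virt-launcher with the assembled virtual machine
// options; an EFI/Secure Boot bootloader mismatch is surfaced as a critical launcher error.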
func (c *VirtualMachineController) syncVirtualMachine(client cmdclient.LauncherClient, vmi *v1.VirtualMachineInstance, preallocatedVolumes []string, disksInfo map[string]*containerdisk.DiskInfo) error {
1✔
3216
        smbios := c.clusterConfig.GetSMBIOS()
1✔
3217
        period := c.clusterConfig.GetMemBalloonStatsPeriod()
1✔
3218

1✔
3219
        options := virtualMachineOptions(smbios, period, preallocatedVolumes, c.capabilities, disksInfo, c.clusterConfig)
1✔
3220
        options.InterfaceDomainAttachment = domainspec.DomainAttachmentByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
1✔
3221

1✔
3222
        err := client.SyncVirtualMachine(vmi, options)
1✔
3223
        if err != nil {
1✔
3224
                if strings.Contains(err.Error(), "EFI OVMF rom missing") {
×
3225
                        return &virtLauncherCriticalSecurebootError{fmt.Sprintf("mismatch of Secure Boot setting and bootloaders: %v", err)}
×
3226
                }
×
3227
        }
3228

3229
        return err
1✔
3230
}
3231

3232
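// handleHousekeeping performs post-sync tasks: housekeeping cgroup setup for dedicated CPUs with
// an isolated emulator thread, realtime vcpu scheduling, PIT thread affinity, the "Created"
// event on first sync and unmounting of hotplug disks that are no longer needed.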
func (c *VirtualMachineController) handleHousekeeping(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager, domainExists bool) error {
1✔
3233

1✔
3234
        if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
1✔
3235
                err := c.configureHousekeepingCgroup(vmi, cgroupManager)
×
3236
                if err != nil {
×
3237
                        return err
×
3238
                }
×
3239
        }
3240

3241
        // Configure vcpu scheduler for realtime workloads and affine PIT thread for dedicated CPU
3242
        if vmi.IsRealtimeEnabled() && !vmi.IsRunning() && !vmi.IsFinal() {
1✔
3243
                log.Log.Object(vmi).Info("Configuring vcpus for real time workloads")
×
3244
                if err := c.configureVCPUScheduler(vmi); err != nil {
×
3245
                        return err
×
3246
                }
×
3247
        }
3248
        if vmi.IsCPUDedicated() && !vmi.IsRunning() && !vmi.IsFinal() {
1✔
3249
                log.Log.V(3).Object(vmi).Info("Affining PIT thread")
×
3250
                if err := c.affinePitThread(vmi); err != nil {
×
3251
                        return err
×
3252
                }
×
3253
        }
3254
        if !domainExists {
2✔
3255
                c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Created.String(), VMIDefined)
1✔
3256
        }
1✔
3257

3258
        if vmi.IsRunning() {
2✔
3259
                // Umount any disks no longer mounted
1✔
3260
                if err := c.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
1✔
3261
                        return err
×
3262
                }
×
3263
        }
3264
        return nil
1✔
3265
}
3266

3267
func (c *VirtualMachineController) getPreallocatedVolumes(vmi *v1.VirtualMachineInstance) []string {
1✔
3268
        preallocatedVolumes := []string{}
1✔
3269
        for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
3270
                if volumeStatus.PersistentVolumeClaimInfo != nil && volumeStatus.PersistentVolumeClaimInfo.Preallocated {
1✔
3271
                        preallocatedVolumes = append(preallocatedVolumes, volumeStatus.Name)
×
3272
                }
×
3273
        }
3274
        return preallocatedVolumes
1✔
3275
}
3276

3277
func (c *VirtualMachineController) hotplugSriovInterfaces(vmi *v1.VirtualMachineInstance) error {
1✔
3278
        sriovSpecInterfaces := netvmispec.FilterSRIOVInterfaces(vmi.Spec.Domain.Devices.Interfaces)
1✔
3279

1✔
3280
        sriovSpecIfacesNames := netvmispec.IndexInterfaceSpecByName(sriovSpecInterfaces)
1✔
3281
        attachedSriovStatusIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
1✔
3282
                _, exist := sriovSpecIfacesNames[iface.Name]
×
3283
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceDomain) &&
×
3284
                        netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
3285
        })
×
3286

3287
        desiredSriovMultusPluggedIfaces := netvmispec.IndexInterfaceStatusByName(vmi.Status.Interfaces, func(iface v1.VirtualMachineInstanceNetworkInterface) bool {
1✔
3288
                _, exist := sriovSpecIfacesNames[iface.Name]
×
3289
                return exist && netvmispec.ContainsInfoSource(iface.InfoSource, netvmispec.InfoSourceMultusStatus)
×
3290
        })
×
3291

3292
        if len(desiredSriovMultusPluggedIfaces) == len(attachedSriovStatusIfaces) {
2✔
3293
                c.sriovHotplugExecutorPool.Delete(vmi.UID)
1✔
3294
                return nil
1✔
3295
        }
1✔
3296

3297
        rateLimitedExecutor := c.sriovHotplugExecutorPool.LoadOrStore(vmi.UID)
×
3298
        return rateLimitedExecutor.Exec(func() error {
×
3299
                return c.hotplugSriovInterfacesCommand(vmi)
×
3300
        })
×
3301
}
3302

3303
func (c *VirtualMachineController) hotplugSriovInterfacesCommand(vmi *v1.VirtualMachineInstance) error {
×
3304
        const errMsgPrefix = "failed to hot-plug SR-IOV interfaces"
×
3305

×
3306
        client, err := c.getVerifiedLauncherClient(vmi)
×
3307
        if err != nil {
×
3308
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
3309
        }
×
3310

3311
        if err := isolation.AdjustQemuProcessMemoryLimits(c.podIsolationDetector, vmi, c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio); err != nil {
×
3312
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), err.Error())
×
3313
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
3314
        }
×
3315

3316
        log.Log.V(3).Object(vmi).Info("sending hot-plug host-devices command")
×
3317
        if err := client.HotplugHostDevices(vmi); err != nil {
×
3318
                return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
3319
        }
×
3320

3321
        return nil
×
3322
}
3323

3324
func memoryDumpPath(volumeStatus v1.VolumeStatus) string {
×
3325
        target := hotplugdisk.GetVolumeMountDir(volumeStatus.Name)
×
3326
        dumpPath := filepath.Join(target, volumeStatus.MemoryDumpVolume.TargetFileName)
×
3327
        return dumpPath
×
3328
}
×
3329

3330
func (c *VirtualMachineController) getMemoryDump(vmi *v1.VirtualMachineInstance) error {
1✔
3331
        const errMsgPrefix = "failed to getting memory dump"
1✔
3332

1✔
3333
        for _, volumeStatus := range vmi.Status.VolumeStatus {
2✔
3334
                if volumeStatus.MemoryDumpVolume == nil || volumeStatus.Phase != v1.MemoryDumpVolumeInProgress {
2✔
3335
                        continue
1✔
3336
                }
3337
                client, err := c.getVerifiedLauncherClient(vmi)
×
3338
                if err != nil {
×
3339
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
3340
                }
×
3341

3342
                log.Log.V(3).Object(vmi).Info("sending memory dump command")
×
3343
                err = client.VirtualMachineMemoryDump(vmi, memoryDumpPath(volumeStatus))
×
3344
                if err != nil {
×
3345
                        return fmt.Errorf("%s: %v", errMsgPrefix, err)
×
3346
                }
×
3347
        }
3348

3349
        return nil
1✔
3350
}
3351

3352
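// Note on getMemoryDump: for every volume whose phase is MemoryDumpVolumeInProgress, the
// handler asks virt-launcher to write the guest memory dump into that volume's hotplug mount
// directory, under the file name recorded in MemoryDumpVolume.TargetFileName (see
// memoryDumpPath above).
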
func (c *VirtualMachineController) processVmUpdate(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)
        if err != nil {
                return err
        }
        if !isInitialized {
                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
                return nil
        } else if isUnresponsive {
                return goerror.New("cannot update a VirtualMachineInstance with an unresponsive command server")
        }

        c.handlePostMigrationProxyCleanup(vmi)

        if c.isPreMigrationTarget(vmi) {
                return c.vmUpdateHelperMigrationTarget(vmi)
        } else if c.isMigrationSource(vmi) {
                return c.vmUpdateHelperMigrationSource(vmi, domain)
        } else {
                return c.vmUpdateHelperDefault(vmi, domain != nil)
        }
}

func (c *VirtualMachineController) setVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) error {
        phase, err := c.calculateVmPhaseForStatusReason(domain, vmi)
        if err != nil {
                return err
        }
        vmi.Status.Phase = phase
        return nil
}

func (c *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.Domain, vmi *v1.VirtualMachineInstance) (v1.VirtualMachineInstancePhase, error) {
        if domain == nil {
                switch {
                case vmi.IsScheduled():
                        isUnresponsive, isInitialized, err := c.isLauncherClientUnresponsive(vmi)

                        if err != nil {
                                return vmi.Status.Phase, err
                        }
                        if !isInitialized {
                                c.queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second*1)
                                return vmi.Status.Phase, err
                        } else if isUnresponsive {
                                // virt-launcher is gone and the VirtualMachineInstance never transitioned
                                // from Scheduled to Running.
                                return v1.Failed, nil
                        }
                        return v1.Scheduled, nil
                case !vmi.IsRunning() && !vmi.IsFinal():
                        return v1.Scheduled, nil
                case !vmi.IsFinal():
                        // This is unexpected. We should not be able to delete a VirtualMachineInstance before we stop it.
                        // However, it is possible if someone interacts with libvirt directly.
                        return v1.Failed, nil
                }
        } else {
                switch domain.Status.Status {
                case api.Shutoff, api.Crashed:
                        switch domain.Status.Reason {
                        case api.ReasonCrashed, api.ReasonPanicked:
                                return v1.Failed, nil
                        case api.ReasonDestroyed:
                                // When ACPI is available, a graceful shutdown of the domain was attempted,
                                // and destroyed means the domain was destroyed after the grace period expired.
                                // Without ACPI a destroyed domain is ok.
                                if isACPIEnabled(vmi, domain) {
                                        return v1.Failed, nil
                                }
                                return v1.Succeeded, nil
                        case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
                                return v1.Succeeded, nil
                        case api.ReasonMigrated:
                                // if the domain migrated, we no longer know the phase.
                                return vmi.Status.Phase, nil
                        }
                case api.Running, api.Paused, api.Blocked, api.PMSuspended:
                        return v1.Running, nil
                }
        }
        return vmi.Status.Phase, nil
}

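// Note on calculateVmPhaseForStatusReason: roughly, when no domain exists a scheduled VMI
// whose virt-launcher stopped responding becomes Failed, otherwise it stays Scheduled. With
// a domain present, Shutoff/Crashed maps to Failed for crash or panic reasons, to Succeeded
// for a regular shutdown, and for ReasonDestroyed the outcome depends on isACPIEnabled (see
// below); ReasonMigrated keeps the current phase, and any running/paused/blocked/suspended
// domain state reports Running.
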
func (c *VirtualMachineController) addFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) deleteFunc(obj interface{}) {
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) updateFunc(_, new interface{}) {
        key, err := controller.KeyFunc(new)
        if err == nil {
                c.vmiExpectations.LowerExpectations(key, 1, 0)
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) addDomainFunc(obj interface{}) {
        domain := obj.(*api.Domain)
        log.Log.Object(domain).Infof("Domain is in state %s reason %s", domain.Status.Status, domain.Status.Reason)
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) deleteDomainFunc(obj interface{}) {
        domain, ok := obj.(*api.Domain)
        if !ok {
                tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                        log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
                        return
                }
                domain, ok = tombstone.Obj.(*api.Domain)
                if !ok {
                        log.Log.Reason(fmt.Errorf("tombstone contained object that is not a domain %#v", obj)).Error("Failed to process delete notification")
                        return
                }
        }
        log.Log.Object(domain).Info("Domain deleted")
        key, err := controller.KeyFunc(obj)
        if err == nil {
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) updateDomainFunc(old, new interface{}) {
        newDomain := new.(*api.Domain)
        oldDomain := old.(*api.Domain)
        if oldDomain.Status.Status != newDomain.Status.Status || oldDomain.Status.Reason != newDomain.Status.Reason {
                log.Log.Object(newDomain).Infof("Domain is in state %s reason %s", newDomain.Status.Status, newDomain.Status.Reason)
        }

        if newDomain.ObjectMeta.DeletionTimestamp != nil {
                log.Log.Object(newDomain).Info("Domain is marked for deletion")
        }

        key, err := controller.KeyFunc(new)
        if err == nil {
                c.queue.Add(key)
        }
}

func (c *VirtualMachineController) finalizeMigration(vmi *v1.VirtualMachineInstance) error {
        const errorMessage = "failed to finalize migration"

        client, err := c.getVerifiedLauncherClient(vmi)
        if err != nil {
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        if err := c.hotplugCPU(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to change vCPUs")
        }

        if err := c.hotplugMemory(vmi, client); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                c.recorder.Event(vmi, k8sv1.EventTypeWarning, err.Error(), "failed to update guest memory")
        }
        removeMigratedVolumes(vmi)

        options := &cmdv1.VirtualMachineOptions{}
        options.InterfaceMigration = domainspec.BindingMigrationByInterfaceName(vmi.Spec.Domain.Devices.Interfaces, c.clusterConfig.GetNetworkBindings())
        if err := client.FinalizeVirtualMachineMigration(vmi, options); err != nil {
                log.Log.Object(vmi).Reason(err).Error(errorMessage)
                return fmt.Errorf("%s: %v", errorMessage, err)
        }

        return nil
}

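// Note on finalizeMigration: on the migration target the handler first applies any pending
// vCPU and guest-memory hotplug, clears the migrated-volume bookkeeping, and then asks
// virt-launcher to finalize the migration with the per-interface binding migration options.
// Hotplug failures are surfaced as warning events but do not abort finalization.
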
func vmiHasTerminationGracePeriod(vmi *v1.VirtualMachineInstance) bool {
        // if not set we use the default grace period
        return vmi.Spec.TerminationGracePeriodSeconds == nil ||
                (vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds != 0)
}

func domainHasGracePeriod(domain *api.Domain) bool {
        return domain != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod != nil &&
                domain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds != 0
}

func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
        return (vmiHasTerminationGracePeriod(vmi) || (vmi.Spec.TerminationGracePeriodSeconds == nil && domainHasGracePeriod(domain))) &&
                domain != nil &&
                domain.Spec.Features != nil &&
                domain.Spec.Features.ACPI != nil
}

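// Note on the grace-period helpers above: isACPIEnabled is what calculateVmPhaseForStatusReason
// consults for ReasonDestroyed. If a graceful ACPI shutdown was expected (a non-zero termination
// grace period on the VMI, or one recorded on the domain) and the domain was nevertheless
// destroyed, the VMI is treated as Failed; without ACPI, a destroyed domain counts as Succeeded.
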
func (c *VirtualMachineController) isHostModelMigratable(vmi *v1.VirtualMachineInstance) error {
        if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == v1.CPUModeHostModel {
                if c.hostCpuModel == "" {
                        err := fmt.Errorf("the node \"%s\" does not allow migration with host-model", vmi.Status.NodeName)
                        log.Log.Object(vmi).Errorf(err.Error())
                        return err
                }
        }
        return nil
}

func (c *VirtualMachineController) claimDeviceOwnership(virtLauncherRootMount *safepath.Path, deviceName string) error {
        softwareEmulation := c.clusterConfig.AllowEmulation()
        devicePath, err := safepath.JoinNoFollow(virtLauncherRootMount, filepath.Join("dev", deviceName))
        if err != nil {
                if softwareEmulation {
                        return nil
                }
                return err
        }

        return diskutils.DefaultOwnershipManager.SetFileOwnership(devicePath)
}

func (c *VirtualMachineController) reportDedicatedCPUSetForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        cgroupManager, err := getCgroupManager(vmi)
        if err != nil {
                return err
        }

        cpusetStr, err := cgroupManager.GetCpuSet()
        if err != nil {
                return err
        }

        cpuSet, err := hardware.ParseCPUSetLine(cpusetStr, 50000)
        if err != nil {
                return fmt.Errorf("failed to parse target VMI cpuset: %v", err)
        }

        vmi.Status.MigrationState.TargetCPUSet = cpuSet

        return nil
}

func (c *VirtualMachineController) reportTargetTopologyForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
        options := virtualMachineOptions(nil, 0, nil, c.capabilities, map[string]*containerdisk.DiskInfo{}, c.clusterConfig)
        topology, err := json.Marshal(options.Topology)
        if err != nil {
                return err
        }
        vmi.Status.MigrationState.TargetNodeTopology = string(topology)
        return nil
}

func (c *VirtualMachineController) handleMigrationAbort(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        if vmi.Status.MigrationState.AbortStatus == v1.MigrationAbortInProgress {
                return nil
        }

        err := client.CancelVirtualMachineMigration(vmi)
        if err != nil && err.Error() == migrations.CancelMigrationFailedVmiNotMigratingErr {
                // If migration did not even start there is no need to cancel it
                log.Log.Object(vmi).Infof("skipping migration cancellation since vmi is not migrating")
                return err
        }

        c.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Migrating.String(), VMIAbortingMigration)
        return nil
}

func isIOError(shouldUpdate, domainExists bool, domain *api.Domain) bool {
        return shouldUpdate && domainExists && domain.Status.Status == api.Paused && domain.Status.Reason == api.ReasonPausedIOError
}

func (c *VirtualMachineController) updateMachineType(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
        if domain == nil || vmi == nil {
                return
        }
        if domain.Spec.OS.Type.Machine != "" {
                vmi.Status.Machine = &v1.Machine{Type: domain.Spec.OS.Type.Machine}
        }
}

func (c *VirtualMachineController) hotplugCPU(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIVCPUChangeConditionAndLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodCPULimitsLabel)
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
        }
        defer removeVMIVCPUChangeConditionAndLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceVCPUChange) {
                return nil
        }

        if vmi.IsCPUDedicated() {
                cpuLimitStr, ok := vmi.Labels[v1.VirtualMachinePodCPULimitsLabel]
                if !ok || len(cpuLimitStr) == 0 {
                        return fmt.Errorf("cannot read the CPU limit from the VMI label")
                }

                cpuLimit, err := strconv.Atoi(cpuLimitStr)
                if err != nil {
                        return fmt.Errorf("cannot parse the CPU limit from the VMI label: %v", err)
                }

                vcpus := hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU)
                if vcpus > int64(cpuLimit) {
                        return fmt.Errorf("number of requested VCPUS (%d) exceeds the limit (%d)", vcpus, cpuLimit)
                }
        }

        options := virtualMachineOptions(
                nil,
                0,
                nil,
                c.capabilities,
                nil,
                c.clusterConfig)

        if err := client.SyncVirtualMachineCPUs(vmi, options); err != nil {
                return err
        }

        if vmi.Status.CurrentCPUTopology == nil {
                vmi.Status.CurrentCPUTopology = &v1.CPUTopology{}
        }

        vmi.Status.CurrentCPUTopology.Sockets = vmi.Spec.Domain.CPU.Sockets
        vmi.Status.CurrentCPUTopology.Cores = vmi.Spec.Domain.CPU.Cores
        vmi.Status.CurrentCPUTopology.Threads = vmi.Spec.Domain.CPU.Threads

        return nil
}

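// Note on hotplugCPU: for guests with dedicated CPUs, the requested vCPU count (derived from
// the CPU topology, effectively sockets x cores x threads) must not exceed the pod CPU limit
// recorded in the VirtualMachinePodCPULimitsLabel. Only then is the vCPU sync sent to
// virt-launcher and Status.CurrentCPUTopology refreshed from the spec.
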
func (c *VirtualMachineController) hotplugMemory(vmi *v1.VirtualMachineInstance, client cmdclient.LauncherClient) error {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()

        removeVMIMemoryChangeLabel := func() {
                delete(vmi.Labels, v1.VirtualMachinePodMemoryRequestsLabel)
                delete(vmi.Labels, v1.MemoryHotplugOverheadRatioLabel)
        }
        defer removeVMIMemoryChangeLabel()

        if !vmiConditions.HasCondition(vmi, v1.VirtualMachineInstanceMemoryChange) {
                return nil
        }

        podMemReqStr := vmi.Labels[v1.VirtualMachinePodMemoryRequestsLabel]
        podMemReq, err := resource.ParseQuantity(podMemReqStr)
        if err != nil {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("cannot parse Memory requests from VMI label: %v", err)
        }

        overheadRatio := vmi.Labels[v1.MemoryHotplugOverheadRatioLabel]
        requiredMemory := services.GetMemoryOverhead(vmi, runtime.GOARCH, &overheadRatio)
        requiredMemory.Add(
                c.netBindingPluginMemoryCalculator.Calculate(vmi, c.clusterConfig.GetNetworkBindings()),
        )

        requiredMemory.Add(*vmi.Spec.Domain.Resources.Requests.Memory())

        if podMemReq.Cmp(requiredMemory) < 0 {
                vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
                return fmt.Errorf("amount of requested guest memory (%s) exceeds the launcher memory request (%s)", vmi.Spec.Domain.Memory.Guest.String(), podMemReqStr)
        }

        options := virtualMachineOptions(nil, 0, nil, c.capabilities, nil, c.clusterConfig)

        if err := client.SyncVirtualMachineMemory(vmi, options); err != nil {
                // mark hotplug as failed
                vmiConditions.UpdateCondition(vmi, &v1.VirtualMachineInstanceCondition{
                        Type:    v1.VirtualMachineInstanceMemoryChange,
                        Status:  k8sv1.ConditionFalse,
                        Reason:  memoryHotplugFailedReason,
                        Message: "memory hotplug failed, the VM configuration is not supported",
                })
                return err
        }

        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
        vmi.Status.Memory.GuestRequested = vmi.Spec.Domain.Memory.Guest
        return nil
}

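// Note on hotplugMemory: the hotplug is only forwarded to virt-launcher when the pod-level
// memory request (VirtualMachinePodMemoryRequestsLabel) still covers the new requirement,
// roughly: the VMI's memory request plus the computed memory overhead plus any memory
// reserved by network binding plugins. If the pod request is too small, the MemoryChange
// condition is removed and an error is returned instead.
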
func removeMigratedVolumes(vmi *v1.VirtualMachineInstance) {
        vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
        vmiConditions.RemoveCondition(vmi, v1.VirtualMachineInstanceVolumesChange)
        vmi.Status.MigratedVolumes = nil
}

func parseLibvirtQuantity(value int64, unit string) *resource.Quantity {
        switch unit {
        case "b", "bytes":
                return resource.NewQuantity(value, resource.BinarySI)
        case "KB":
                return resource.NewQuantity(value*1000, resource.DecimalSI)
        case "MB":
                return resource.NewQuantity(value*1000*1000, resource.DecimalSI)
        case "GB":
                return resource.NewQuantity(value*1000*1000*1000, resource.DecimalSI)
        case "TB":
                return resource.NewQuantity(value*1000*1000*1000*1000, resource.DecimalSI)
        case "k", "KiB":
                return resource.NewQuantity(value*1024, resource.BinarySI)
        case "M", "MiB":
                return resource.NewQuantity(value*1024*1024, resource.BinarySI)
        case "G", "GiB":
                return resource.NewQuantity(value*1024*1024*1024, resource.BinarySI)
        case "T", "TiB":
                return resource.NewQuantity(value*1024*1024*1024*1024, resource.BinarySI)
        }
        return nil
}

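// Illustrative examples for parseLibvirtQuantity (hypothetical values, not from the upstream
// file): libvirt distinguishes decimal and binary units, and unknown units yield nil.
//
//	parseLibvirtQuantity(4, "MB")        // 4*1000*1000 bytes, i.e. "4M"
//	parseLibvirtQuantity(4, "MiB")       // 4*1024*1024 bytes, i.e. "4Mi"
//	parseLibvirtQuantity(2097152, "KiB") // 2097152*1024 bytes, i.e. "2Gi"
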
func (c *VirtualMachineController) updateMemoryInfo(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
        if domain == nil || vmi == nil || domain.Spec.CurrentMemory == nil {
                return nil
        }
        if vmi.Status.Memory == nil {
                vmi.Status.Memory = &v1.MemoryStatus{}
        }
        currentGuest := parseLibvirtQuantity(int64(domain.Spec.CurrentMemory.Value), domain.Spec.CurrentMemory.Unit)
        vmi.Status.Memory.GuestCurrent = currentGuest
        return nil
}

func configureParallelMigrationThreads(options *cmdclient.MigrationOptions, vm *v1.VirtualMachineInstance) {
        // When the CPU is limited, there's a risk of the migration threads choking the CPU resources on the compute container.
        // For this reason, we will avoid configuring migration threads in such scenarios.
        if cpuLimit, cpuLimitExists := vm.Spec.Domain.Resources.Limits[k8sv1.ResourceCPU]; cpuLimitExists && !cpuLimit.IsZero() {
                return
        }

        options.ParallelMigrationThreads = pointer.P(parallelMultifdMigrationThreads)
}

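// Note on configureParallelMigrationThreads: parallel (multifd) migration threads are only
// requested when the VMI has no CPU limit; with a limit in place the extra migration threads
// could compete with the guest for the compute container's CPU share, so the option is simply
// left unset. The thread count itself comes from the parallelMultifdMigrationThreads constant.
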
func isReadOnlyDisk(disk *v1.Disk) bool {
        isReadOnlyCDRom := disk.CDRom != nil && (disk.CDRom.ReadOnly == nil || *disk.CDRom.ReadOnly)

        return isReadOnlyCDRom
}