kubevirt / hyperconverged-cluster-operator / 16339590247

17 Jul 2025 08:01AM UTC coverage: 75.103% (-0.02%) from 75.124%

Pull Request #3601: network,passt: Deploy Passt required objects
Merge bcff6906b into cb8d4dd66 (github / web-flow)

391 of 521 new or added lines in 10 files covered. (75.05%)
97 existing lines in 2 files now uncovered.
6908 of 9198 relevant lines covered (75.1%)
1.77 hits per line

Source File: /controllers/hyperconverged/hyperconverged_controller.go (79.48% covered)
1
package hyperconverged
2

3
import (
4
        "cmp"
5
        "context"
6
        "fmt"
7
        "os"
8
        "reflect"
9
        "slices"
10
        "time"
11

12
        "github.com/blang/semver/v4"
13
        jsonpatch "github.com/evanphx/json-patch/v5"
14
        "github.com/go-logr/logr"
15
        netattdefv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
16
        openshiftconfigv1 "github.com/openshift/api/config/v1"
17
        consolev1 "github.com/openshift/api/console/v1"
18
        imagev1 "github.com/openshift/api/image/v1"
19
        routev1 "github.com/openshift/api/route/v1"
20
        securityv1 "github.com/openshift/api/security/v1"
21
        operatorhandler "github.com/operator-framework/operator-lib/handler"
22
        monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
23
        appsv1 "k8s.io/api/apps/v1"
24
        corev1 "k8s.io/api/core/v1"
25
        rbacv1 "k8s.io/api/rbac/v1"
26
        schedulingv1 "k8s.io/api/scheduling/v1"
27
        apierrors "k8s.io/apimachinery/pkg/api/errors"
28
        apimetav1 "k8s.io/apimachinery/pkg/api/meta"
29
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
30
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
31
        "k8s.io/apimachinery/pkg/runtime"
32
        "k8s.io/apimachinery/pkg/runtime/schema"
33
        "k8s.io/apimachinery/pkg/types"
34
        "k8s.io/utils/ptr"
35
        "sigs.k8s.io/controller-runtime/pkg/client"
36
        "sigs.k8s.io/controller-runtime/pkg/controller"
37
        "sigs.k8s.io/controller-runtime/pkg/event"
38
        "sigs.k8s.io/controller-runtime/pkg/handler"
39
        logf "sigs.k8s.io/controller-runtime/pkg/log"
40
        "sigs.k8s.io/controller-runtime/pkg/manager"
41
        "sigs.k8s.io/controller-runtime/pkg/predicate"
42
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
43
        "sigs.k8s.io/controller-runtime/pkg/source"
44

45
        networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1"
46
        kubevirtcorev1 "kubevirt.io/api/core/v1"
47
        aaqv1alpha1 "kubevirt.io/application-aware-quota/staging/src/kubevirt.io/application-aware-quota-api/pkg/apis/core/v1alpha1"
48
        cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
49
        sspv1beta3 "kubevirt.io/ssp-operator/api/v1beta3"
50

51
        hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1"
52
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/alerts"
53
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/common"
54
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/operandhandler"
55
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/reqresolver"
56
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/monitoring/hyperconverged/metrics"
57
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/nodeinfo"
58
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/upgradepatch"
59
        hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
60
        "github.com/kubevirt/hyperconverged-cluster-operator/version"
61
)
62

63
var (
64
        log = logf.Log.WithName("controller_hyperconverged")
65
)
66

67
const (
68
        // We cannot set owner reference of cluster-wide resources to namespaced HyperConverged object. Therefore,
69
        // use finalizers to manage the cleanup.
70
        FinalizerName = "kubevirt.io/hyperconverged"
71

72
        // OpenshiftNamespace is for resources that belong in the openshift namespace
73

74
        reconcileInit               = "Init"
75
        reconcileInitMessage        = "Initializing HyperConverged cluster"
76
        reconcileCompleted          = "ReconcileCompleted"
77
        reconcileCompletedMessage   = "Reconcile completed successfully"
78
        invalidRequestReason        = "InvalidRequest"
79
        invalidRequestMessageFormat = "Request does not match expected name (%v) and namespace (%v)"
80
        commonDegradedReason        = "HCODegraded"
81
        commonProgressingReason     = "HCOProgressing"
82
        taintedConfigurationReason  = "UnsupportedFeatureAnnotation"
83
        taintedConfigurationMessage = "Unsupported feature was activated via an HCO annotation"
84
        systemHealthStatusHealthy   = "healthy"
85
        systemHealthStatusWarning   = "warning"
86
        systemHealthStatusError     = "error"
87

88
        hcoVersionName = "operator"
89

90
        requestedStatusKey = "requested status"
91

92
        requeueAfter = time.Millisecond * 100
93
)
94

95
// JSONPatchAnnotationNames - annotations used to patch operand CRs with unsupported/unofficial/hidden features.
96
// The presence of any of these annotations raises the hcov1beta1.ConditionTaintedConfiguration condition.
97
var JSONPatchAnnotationNames = []string{
98
        common.JSONPatchKVAnnotationName,
99
        common.JSONPatchCDIAnnotationName,
100
        common.JSONPatchCNAOAnnotationName,
101
        common.JSONPatchSSPAnnotationName,
102
}
103

104
// RegisterReconciler creates a new HyperConverged Reconciler and registers it into manager.
105
func RegisterReconciler(mgr manager.Manager,
106
        ci hcoutil.ClusterInfo,
107
        upgradeableCond hcoutil.Condition,
108
        ingressEventCh <-chan event.GenericEvent,
109
        nodeEventChannel <-chan event.GenericEvent) error {
×
110

×
111
        return add(mgr, newReconciler(mgr, ci, upgradeableCond), ci, ingressEventCh, nodeEventChannel)
×
112
}
×
113

114
// newReconciler returns a new reconcile.Reconciler
115
func newReconciler(mgr manager.Manager, ci hcoutil.ClusterInfo, upgradeableCond hcoutil.Condition) reconcile.Reconciler {
×
116

×
117
        ownVersion := cmp.Or(os.Getenv(hcoutil.HcoKvIoVersionName), version.Version)
×
118

×
119
        r := &ReconcileHyperConverged{
×
120
                client:               mgr.GetClient(),
×
121
                scheme:               mgr.GetScheme(),
×
122
                operandHandler:       operandhandler.NewOperandHandler(mgr.GetClient(), mgr.GetScheme(), ci, hcoutil.GetEventEmitter()),
×
123
                upgradeMode:          false,
×
124
                ownVersion:           ownVersion,
×
125
                eventEmitter:         hcoutil.GetEventEmitter(),
×
126
                firstLoop:            true,
×
127
                upgradeableCondition: upgradeableCond,
×
128
        }
×
129

×
130
        if ci.IsMonitoringAvailable() {
×
131
                r.monitoringReconciler = alerts.NewMonitoringReconciler(ci, r.client, hcoutil.GetEventEmitter(), r.scheme)
×
132
        }
×
133

134
        return r
×
135
}
136

137
// newCRDremover returns a new CRDRemover
138
func add(mgr manager.Manager, r reconcile.Reconciler, ci hcoutil.ClusterInfo, ingressEventCh <-chan event.GenericEvent, nodeEventChannel <-chan event.GenericEvent) error {
×
139
        // Create a new controller
×
140
        c, err := controller.New("hyperconverged-controller", mgr, controller.Options{Reconciler: r})
×
141
        if err != nil {
×
142
                return err
×
143
        }
×
144

145
        // Watch for changes to primary resource HyperConverged
146
        err = c.Watch(
×
147
                source.Kind(
×
148
                        mgr.GetCache(), client.Object(&hcov1beta1.HyperConverged{}),
×
149
                        &operatorhandler.InstrumentedEnqueueRequestForObject[client.Object]{},
×
150
                        predicate.Or[client.Object](predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{},
×
151
                                predicate.ResourceVersionChangedPredicate{}),
×
152
                ))
×
153
        if err != nil {
×
154
                return err
×
155
        }
×
156

157
        // To limit the memory usage, the controller manager got instantiated with a custom cache
158
        // that is watching only a specific set of objects with selectors.
159
        // When a new object got added here, it has also to be added to the custom cache
160
        // managed by getNewManagerCache()
161
        secondaryResources := []client.Object{
×
162
                &kubevirtcorev1.KubeVirt{},
×
163
                &cdiv1beta1.CDI{},
×
164
                &networkaddonsv1.NetworkAddonsConfig{},
×
165
                &aaqv1alpha1.AAQ{},
×
166
                &schedulingv1.PriorityClass{},
×
167
                &corev1.ConfigMap{},
×
168
                &corev1.Service{},
×
NEW
169
                &corev1.ServiceAccount{},
×
NEW
170
                &appsv1.DaemonSet{},
×
171
                &rbacv1.Role{},
×
172
                &rbacv1.RoleBinding{},
×
173
        }
×
174
        if ci.IsMonitoringAvailable() {
×
175
                secondaryResources = append(secondaryResources, []client.Object{
×
176
                        &monitoringv1.ServiceMonitor{},
×
177
                        &monitoringv1.PrometheusRule{},
×
178
                }...)
×
179
        }
×
180
        if ci.IsOpenshift() {
×
181
                secondaryResources = append(secondaryResources, []client.Object{
×
182
                        &sspv1beta3.SSP{},
×
183
                        &corev1.Service{},
×
184
                        &routev1.Route{},
×
185
                        &consolev1.ConsoleCLIDownload{},
×
186
                        &consolev1.ConsoleQuickStart{},
×
187
                        &consolev1.ConsolePlugin{},
×
188
                        &imagev1.ImageStream{},
×
189
                        &corev1.Namespace{},
×
190
                        &appsv1.Deployment{},
×
NEW
191
                        &securityv1.SecurityContextConstraints{},
×
NEW
192
                }...)
×
NEW
193
        }
×
194

NEW
195
        if ci.IsNADAvailable() {
×
NEW
196
                secondaryResources = append(secondaryResources, []client.Object{
×
NEW
197
                        &netattdefv1.NetworkAttachmentDefinition{},
×
198
                }...)
×
199
        }
×
200

201
        // Watch secondary resources
202
        for _, resource := range secondaryResources {
×
203
                msg := fmt.Sprintf("Reconciling for %T", resource)
×
204
                err = c.Watch(
×
205
                        source.Kind(mgr.GetCache(), resource,
×
206
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
×
207
                                        // enqueue using a placeholder to be able to discriminate request triggered
×
208
                                        // by changes on the HyperConverged object from request triggered by changes
×
209
                                        // on a secondary CR controlled by HCO
×
210
                                        log.Info(msg)
×
211
                                        return []reconcile.Request{
×
212
                                                reqresolver.GetSecondaryCRRequest(),
×
213
                                        }
×
214
                                }),
×
215
                        ))
216
                if err != nil {
×
217
                        return err
×
218
                }
×
219
        }
220

221
        if ci.IsOpenshift() {
×
222
                err = c.Watch(
×
223
                        source.Kind(
×
224
                                mgr.GetCache(),
×
225
                                client.Object(&openshiftconfigv1.APIServer{}),
×
226
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
×
227
                                        // enqueue using a placeholder to signal that the change is not
×
228
                                        // directly on HCO CR but on the APIServer CR that we want to reload
×
229
                                        // only if really changed
×
230
                                        log.Info("Reconciling for openshiftconfigv1.APIServer")
×
231
                                        return []reconcile.Request{
×
232
                                                reqresolver.GetAPIServerCRRequest(),
×
233
                                        }
×
234
                                }),
×
235
                        ))
236
                if err != nil {
×
237
                        return err
×
238
                }
×
239

240
                err = c.Watch(
×
241
                        source.Channel(
×
242
                                ingressEventCh,
×
243
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
×
244
                                        // the ingress-cluster controller initiate this by pushing an event to the ingressEventCh channel
×
245
                                        // This will force this controller to update the URL of the cli download route, if the user
×
246
                                        // customized the hostname.
×
247
                                        log.Info("Reconciling for openshiftconfigv1.Ingress")
×
248
                                        return []reconcile.Request{
×
249
                                                reqresolver.GetIngressCRResource(),
×
250
                                        }
×
251
                                }),
×
252
                        ))
253
                if err != nil {
×
254
                        return err
×
255
                }
×
256

257
                err = c.Watch(
×
258
                        source.Channel(
×
259
                                nodeEventChannel,
×
260
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
×
261
                                        // the nodes controller initiate this by pushing an event to the nodeEventChannel channel
×
262
                                        // This will force this controller to update the status fields related to the cluster nodes, and
×
263
                                        // to re-generate the DataImportCronTemplates in the SSP CR.
×
264
                                        log.Info("Reconciling for core.Node")
×
265
                                        return []reconcile.Request{
×
266
                                                reqresolver.GetNodeResource(),
×
267
                                        }
×
268
                                }),
×
269
                        ))
270
                if err != nil {
×
271
                        return err
×
272
                }
×
273
        }
274

275
        return nil
×
276
}
277

278
var _ reconcile.Reconciler = &ReconcileHyperConverged{}
279

280
// ReconcileHyperConverged reconciles a HyperConverged object
281
type ReconcileHyperConverged struct {
282
        // This client, initialized using mgr.Client() above, is a split client
283
        // that reads objects from the cache and writes to the apiserver
284
        client               client.Client
285
        scheme               *runtime.Scheme
286
        operandHandler       *operandhandler.OperandHandler
287
        upgradeMode          bool
288
        ownVersion           string
289
        eventEmitter         hcoutil.EventEmitter
290
        firstLoop            bool
291
        upgradeableCondition hcoutil.Condition
292
        monitoringReconciler *alerts.MonitoringReconciler
293
}
294

295
// Reconcile reads that state of the cluster for a HyperConverged object and makes changes based on the state read
296
// and what is in the HyperConverged.Spec
297
// Note:
298
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
299
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
300
func (r *ReconcileHyperConverged) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
1✔
301
        logger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
1✔
302
        err := r.refreshAPIServerCR(ctx, logger, request)
1✔
303
        if err != nil {
1✔
304
                return reconcile.Result{}, err
×
305
        }
×
306

307
        resolvedRequest, hcoTriggered := reqresolver.ResolveReconcileRequest(log, request)
1✔
308
        hcoRequest := common.NewHcoRequest(ctx, resolvedRequest, log, r.upgradeMode, hcoTriggered)
1✔
309

1✔
310
        if hcoTriggered {
2✔
311
                r.operandHandler.Reset()
1✔
312
        }
1✔
313

314
        err = r.monitoringReconciler.Reconcile(hcoRequest, r.firstLoop)
1✔
315
        if err != nil {
1✔
316
                return reconcile.Result{}, err
×
317
        }
×
318

319
        // Fetch the HyperConverged instance
320
        instance, err := r.getHyperConverged(hcoRequest)
1✔
321
        if err != nil {
1✔
322
                return reconcile.Result{}, err
×
323
        }
×
324

325
        hcoRequest.Instance = instance
1✔
326

1✔
327
        if instance == nil {
2✔
328
                // if the HyperConverged CR was deleted during an upgrade process, then this is not an upgrade anymore
1✔
329
                r.upgradeMode = false
1✔
330
                err = r.setOperatorUpgradeableStatus(hcoRequest)
1✔
331

1✔
332
                return reconcile.Result{}, err
1✔
333
        }
1✔
334

335
        if r.firstLoop {
2✔
336
                r.firstLoopInitialization(hcoRequest)
1✔
337
        }
1✔
338

339
        if err = r.monitoringReconciler.UpdateRelatedObjects(hcoRequest); err != nil {
1✔
340
                logger.Error(err, "Failed to update the PrometheusRule as a related object")
×
341
                return reconcile.Result{}, err
×
342
        }
×
343

344
        result, err := r.doReconcile(hcoRequest)
1✔
345
        if err != nil {
2✔
346
                r.eventEmitter.EmitEvent(hcoRequest.Instance, corev1.EventTypeWarning, "ReconcileError", err.Error())
1✔
347
                return result, err
1✔
348
        }
1✔
349

350
        if err = r.setOperatorUpgradeableStatus(hcoRequest); err != nil {
1✔
351
                return reconcile.Result{}, err
×
352
        }
×
353

354
        requeue, err := r.updateHyperConverged(hcoRequest)
1✔
355
        if requeue || apierrors.IsConflict(err) {
2✔
356
                result.RequeueAfter = requeueAfter
1✔
357
        }
1✔
358

359
        return result, err
1✔
360
}
361

362
// refreshAPIServerCR refreshes the APIServer cR, if the request is triggered by this CR.
363
func (r *ReconcileHyperConverged) refreshAPIServerCR(ctx context.Context, logger logr.Logger, originalRequest reconcile.Request) error {
1✔
364
        if reqresolver.IsTriggeredByAPIServerCR(originalRequest) {
2✔
365
                logger.Info("Refreshing the ApiServer CR")
1✔
366
                return hcoutil.GetClusterInfo().RefreshAPIServerCR(ctx, r.client)
1✔
367
        }
1✔
368

369
        return nil
1✔
370
}
371

372
func (r *ReconcileHyperConverged) doReconcile(req *common.HcoRequest) (reconcile.Result, error) {
1✔
373

1✔
374
        valid := r.validateNamespace(req)
1✔
375
        if !valid {
2✔
376
                return reconcile.Result{}, nil
1✔
377
        }
1✔
378

379
        // Add conditions if there are none
380
        init := req.Instance.Status.Conditions == nil
1✔
381
        if init {
2✔
382
                r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "InitHCO", "Initiating the HyperConverged")
1✔
383
                r.setInitialConditions(req)
1✔
384

1✔
385
                req.StatusDirty = true
1✔
386
        }
1✔
387

388
        r.setLabels(req)
1✔
389

1✔
390
        updateStatus(req)
1✔
391

1✔
392
        // in-memory conditions should start off empty. It will only ever hold
1✔
393
        // negative conditions (!Available, Degraded, Progressing)
1✔
394
        req.Conditions = common.NewHcoConditions()
1✔
395

1✔
396
        // Handle finalizers
1✔
397
        if !checkFinalizers(req) {
2✔
398
                if !req.HCOTriggered {
1✔
399
                        // this is just the effect of a delete request created by HCO
×
400
                        // in the previous iteration, ignore it
×
401
                        return reconcile.Result{}, nil
×
402
                }
×
403
                return r.ensureHcoDeleted(req)
1✔
404
        }
405

406
        applyDataImportSchedule(req)
1✔
407

1✔
408
        // If the current version is not updated in CR ,then we're updating. This is also works when updating from
1✔
409
        // an old version, since Status.Versions will be empty.
1✔
410
        knownHcoVersion, _ := GetVersion(&req.Instance.Status, hcoVersionName)
1✔
411

1✔
412
        // detect upgrade mode
1✔
413
        if !r.upgradeMode && !init && knownHcoVersion != r.ownVersion {
2✔
414
                // get into upgrade mode
1✔
415

1✔
416
                r.upgradeMode = true
1✔
417
                r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "UpgradeHCO", "Upgrading the HyperConverged to version "+r.ownVersion)
1✔
418
                req.Logger.Info(fmt.Sprintf("Start upgrading from version %s to version %s", knownHcoVersion, r.ownVersion))
1✔
419
        }
1✔
420

421
        req.SetUpgradeMode(r.upgradeMode)
1✔
422

1✔
423
        if r.upgradeMode {
2✔
424
                if result, err := r.handleUpgrade(req); result != nil {
2✔
425
                        return *result, err
1✔
426
                }
1✔
427
        }
428

429
        return r.EnsureOperandAndComplete(req, init)
1✔
430
}
431

432
func (r *ReconcileHyperConverged) handleUpgrade(req *common.HcoRequest) (*reconcile.Result, error) {
1✔
433
        modified, err := r.migrateBeforeUpgrade(req)
1✔
434
        if err != nil {
2✔
435
                return &reconcile.Result{RequeueAfter: requeueAfter}, err
1✔
436
        }
1✔
437

438
        if modified {
2✔
439
                r.updateConditions(req)
1✔
440
                return &reconcile.Result{RequeueAfter: requeueAfter}, nil
1✔
441
        }
1✔
442
        return nil, nil
1✔
443
}
444

445
func (r *ReconcileHyperConverged) EnsureOperandAndComplete(req *common.HcoRequest, init bool) (reconcile.Result, error) {
1✔
446
        if err := r.operandHandler.Ensure(req); err != nil {
2✔
447
                r.updateConditions(req)
1✔
448
                requeue := time.Duration(0)
1✔
449
                if init {
2✔
450
                        requeue = requeueAfter
1✔
451
                }
1✔
452
                return reconcile.Result{RequeueAfter: requeue}, nil
1✔
453
        }
454

455
        req.Logger.Info("Reconcile complete")
1✔
456

1✔
457
        // Requeue if we just created everything
1✔
458
        if init {
2✔
459
                return reconcile.Result{RequeueAfter: requeueAfter}, nil
1✔
460
        }
1✔
461

462
        r.completeReconciliation(req)
1✔
463

1✔
464
        return reconcile.Result{}, nil
1✔
465
}
466

467
func updateStatus(req *common.HcoRequest) {
1✔
468
        if req.Instance.Generation != req.Instance.Status.ObservedGeneration {
2✔
469
                req.Instance.Status.ObservedGeneration = req.Instance.Generation
1✔
470
                req.StatusDirty = true
1✔
471
        }
1✔
472

473
        if infraHighlyAvailable := nodeinfo.IsInfrastructureHighlyAvailable(); req.Instance.Status.InfrastructureHighlyAvailable == nil ||
1✔
474
                *req.Instance.Status.InfrastructureHighlyAvailable != infraHighlyAvailable {
2✔
475

1✔
476
                if infraHighlyAvailable {
1✔
477
                        req.Logger.Info("infrastructure became highly available")
×
478
                } else {
1✔
479
                        req.Logger.Info("infrastructure became not highly available")
1✔
480
                }
1✔
481

482
                req.Instance.Status.InfrastructureHighlyAvailable = ptr.To(infraHighlyAvailable)
1✔
483
                req.StatusDirty = true
1✔
484
        }
485

486
        if cpArch := nodeinfo.GetControlPlaneArchitectures(); slices.Compare(req.Instance.Status.NodeInfo.ControlPlaneArchitectures, cpArch) != 0 {
1✔
487
                req.Instance.Status.NodeInfo.ControlPlaneArchitectures = cpArch
×
488
                req.StatusDirty = true
×
489
        }
×
490

491
        if workloadsArch := nodeinfo.GetWorkloadsArchitectures(); slices.Compare(req.Instance.Status.NodeInfo.WorkloadsArchitectures, workloadsArch) != 0 {
1✔
492
                req.Instance.Status.NodeInfo.WorkloadsArchitectures = workloadsArch
×
493
                req.StatusDirty = true
×
494
        }
×
495
}
496

497
// getHyperConverged gets the HyperConverged resource from the Kubernetes API.
498
func (r *ReconcileHyperConverged) getHyperConverged(req *common.HcoRequest) (*hcov1beta1.HyperConverged, error) {
1✔
499
        instance := &hcov1beta1.HyperConverged{}
1✔
500
        err := r.client.Get(req.Ctx, req.NamespacedName, instance)
1✔
501

1✔
502
        // Green path first
1✔
503
        if err == nil {
2✔
504
                metrics.SetHCOMetricHyperConvergedExists()
1✔
505
                return instance, nil
1✔
506
        }
1✔
507

508
        // Error path
509
        if apierrors.IsNotFound(err) {
2✔
510
                req.Logger.Info("No HyperConverged resource")
1✔
511
                metrics.SetHCOMetricHyperConvergedNotExists()
1✔
512

1✔
513
                // Request object not found, could have been deleted after reconcile request.
1✔
514
                // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
1✔
515
                // Return and don't requeue
1✔
516
                return nil, nil
1✔
517
        }
1✔
518

519
        // Another error reading the object.
520
        // Just return the error so that the request is requeued.
521
        return nil, err
×
522
}
523

524
// updateHyperConverged updates the HyperConverged resource according to its state in the request.
525
func (r *ReconcileHyperConverged) updateHyperConverged(request *common.HcoRequest) (bool, error) {
1✔
526

1✔
527
        // Since the status subresource is enabled for the HyperConverged kind,
1✔
528
        // we need to update the status and the metadata separately.
1✔
529
        // Moreover, we need to update the status first, in order to prevent a conflict.
1✔
530
        // In addition, metadata and spec changes are removed by status update, but since status update done first, we need
1✔
531
        // to store metadata and spec and recover it after status update
1✔
532

1✔
533
        var spec hcov1beta1.HyperConvergedSpec
1✔
534
        var meta metav1.ObjectMeta
1✔
535
        if request.Dirty {
2✔
536
                request.Instance.Spec.DeepCopyInto(&spec)
1✔
537
                request.Instance.ObjectMeta.DeepCopyInto(&meta)
1✔
538
        }
1✔
539

540
        err := r.updateHyperConvergedStatus(request)
1✔
541
        if err != nil {
2✔
542
                request.Logger.Error(err, "Failed to update HCO Status")
1✔
543
                return false, err
1✔
544
        }
1✔
545

546
        if request.Dirty {
2✔
547
                request.Instance.Annotations = meta.Annotations
1✔
548
                request.Instance.Finalizers = meta.Finalizers
1✔
549
                request.Instance.Labels = meta.Labels
1✔
550
                request.Instance.Spec = spec
1✔
551

1✔
552
                err = r.updateHyperConvergedSpecMetadata(request)
1✔
553
                if err != nil {
2✔
554
                        request.Logger.Error(err, "Failed to update HCO CR")
1✔
555
                        return false, err
1✔
556
                }
1✔
557
                // version update is a two steps process
558
                knownHcoVersion, _ := GetVersion(&request.Instance.Status, hcoVersionName)
1✔
559
                if r.ownVersion != knownHcoVersion && request.StatusDirty {
2✔
560
                        return true, nil
1✔
561
                }
1✔
562
        }
563

564
        return false, nil
1✔
565
}
566

567
// updateHyperConvergedSpecMetadata updates the HyperConverged resource's spec and metadata.
568
func (r *ReconcileHyperConverged) updateHyperConvergedSpecMetadata(request *common.HcoRequest) error {
1✔
569
        if !request.Dirty {
1✔
570
                return nil
×
571
        }
×
572

573
        return r.client.Update(request.Ctx, request.Instance)
1✔
574
}
575

576
// updateHyperConvergedSpecMetadata updates the HyperConverged resource's status (and metadata).
577
func (r *ReconcileHyperConverged) updateHyperConvergedStatus(request *common.HcoRequest) error {
1✔
578
        if !request.StatusDirty {
2✔
579
                return nil
1✔
580
        }
1✔
581

582
        return r.client.Status().Update(request.Ctx, request.Instance)
1✔
583
}
584

585
func (r *ReconcileHyperConverged) validateNamespace(req *common.HcoRequest) bool {
1✔
586
        // Ignore invalid requests
1✔
587
        if !reqresolver.IsTriggeredByHyperConverged(req.NamespacedName) {
2✔
588
                req.Logger.Info("Invalid request", "HyperConverged.Namespace", req.Namespace, "HyperConverged.Name", req.Name)
1✔
589
                hc := reqresolver.GetHyperConvergedNamespacedName()
1✔
590
                req.Conditions.SetStatusCondition(metav1.Condition{
1✔
591
                        Type:               hcov1beta1.ConditionReconcileComplete,
1✔
592
                        Status:             metav1.ConditionFalse,
1✔
593
                        Reason:             invalidRequestReason,
1✔
594
                        Message:            fmt.Sprintf(invalidRequestMessageFormat, hc.Name, hc.Namespace),
1✔
595
                        ObservedGeneration: req.Instance.Generation,
1✔
596
                })
1✔
597
                r.updateConditions(req)
1✔
598
                return false
1✔
599
        }
1✔
600
        return true
1✔
601
}
602

603
func (r *ReconcileHyperConverged) setInitialConditions(req *common.HcoRequest) {
1✔
604
        UpdateVersion(&req.Instance.Status, hcoVersionName, r.ownVersion)
1✔
605

1✔
606
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
607
                Type:               hcov1beta1.ConditionReconcileComplete,
1✔
608
                Status:             metav1.ConditionUnknown, // we just started trying to reconcile
1✔
609
                Reason:             reconcileInit,
1✔
610
                Message:            reconcileInitMessage,
1✔
611
                ObservedGeneration: req.Instance.Generation,
1✔
612
        })
1✔
613
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
614
                Type:               hcov1beta1.ConditionAvailable,
1✔
615
                Status:             metav1.ConditionFalse,
1✔
616
                Reason:             reconcileInit,
1✔
617
                Message:            reconcileInitMessage,
1✔
618
                ObservedGeneration: req.Instance.Generation,
1✔
619
        })
1✔
620
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
621
                Type:               hcov1beta1.ConditionProgressing,
1✔
622
                Status:             metav1.ConditionTrue,
1✔
623
                Reason:             reconcileInit,
1✔
624
                Message:            reconcileInitMessage,
1✔
625
                ObservedGeneration: req.Instance.Generation,
1✔
626
        })
1✔
627
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
628
                Type:               hcov1beta1.ConditionDegraded,
1✔
629
                Status:             metav1.ConditionFalse,
1✔
630
                Reason:             reconcileInit,
1✔
631
                Message:            reconcileInitMessage,
1✔
632
                ObservedGeneration: req.Instance.Generation,
1✔
633
        })
1✔
634
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
635
                Type:               hcov1beta1.ConditionUpgradeable,
1✔
636
                Status:             metav1.ConditionUnknown,
1✔
637
                Reason:             reconcileInit,
1✔
638
                Message:            reconcileInitMessage,
1✔
639
                ObservedGeneration: req.Instance.Generation,
1✔
640
        })
1✔
641

1✔
642
        r.updateConditions(req)
1✔
643
}
1✔
644

645
func (r *ReconcileHyperConverged) ensureHcoDeleted(req *common.HcoRequest) (reconcile.Result, error) {
1✔
646
        err := r.operandHandler.EnsureDeleted(req)
1✔
647
        if err != nil {
1✔
648
                return reconcile.Result{}, err
×
649
        }
×
650

651
        requeue := time.Duration(0)
1✔
652

1✔
653
        // Remove the finalizers
1✔
654
        if idx := slices.Index(req.Instance.Finalizers, FinalizerName); idx >= 0 {
2✔
655
                req.Instance.Finalizers = slices.Delete(req.Instance.Finalizers, idx, idx+1)
1✔
656
                req.Dirty = true
1✔
657
                requeue = requeueAfter
1✔
658
        }
1✔
659

660
        // Need to requeue because finalizer update does not change metadata.generation
661
        return reconcile.Result{RequeueAfter: requeue}, nil
1✔
662
}
663

664
func (r *ReconcileHyperConverged) aggregateComponentConditions(req *common.HcoRequest) bool {
1✔
665
        /*
1✔
666
                See the chart at design/aggregateComponentConditions.svg; The numbers below follows the numbers in the chart
1✔
667
                Here is the PlantUML code for the chart that describes the aggregation of the sub-components conditions.
1✔
668
                Find the PlantURL syntax here: https://plantuml.com/activity-diagram-beta
1✔
669

1✔
670
                @startuml ../../../design/aggregateComponentConditions.svg
1✔
671
                title Aggregate Component Conditions
1✔
672

1✔
673
                start
1✔
674
                  #springgreen:Set **ReconcileComplete = True**]
1✔
675
                  !x=1
1✔
676
                if ((x) [Degraded = True] Exists) then
1✔
677
                  !x=x+1
1✔
678
                  #orangered:<<implicit>>\n**Degraded = True** /
1✔
679
                  -[#orangered]-> yes;
1✔
680
                  if ((x) [Progressing = True] Exists) then
1✔
681
                        !x=x+1
1✔
682
                        -[#springgreen]-> no;
1✔
683
                        #springgreen:(x) Set **Progressing = False**]
1✔
684
                        !x=x+1
1✔
685
                  else
1✔
686
                        -[#orangered]-> yes;
1✔
687
                        #orangered:<<implicit>>\n**Progressing = True** /
1✔
688
                  endif
1✔
689
                  if ((x) [Upgradable = False] Exists) then
1✔
690
                        !x=x+1
1✔
691
                        -[#springgreen]-> no;
1✔
692
                        #orangered:(x) Set **Upgradable = False**]
1✔
693
                        !x=x+1
1✔
694
                  else
1✔
695
                        -[#orangered]-> yes;
1✔
696
                        #orangered:<<implicit>>\n**Upgradable = False** /
1✔
697
                  endif
1✔
698
                  if ((x) [Available = False] Exists) then
1✔
699
                        !x=x+1
1✔
700
                        -[#springgreen]-> no;
1✔
701
                        #orangered:(x) Set **Available = False**]
1✔
702
                        !x=x+1
1✔
703
                  else
1✔
704
                        -[#orangered]-> yes;
1✔
705
                        #orangered:<<implicit>>\n**Available = False** /
1✔
706
                  endif
1✔
707
                else
1✔
708
                  -[#springgreen]-> no;
1✔
709
                  #springgreen:(x) Set **Degraded = False**]
1✔
710
                  !x=x+1
1✔
711
                  if ((x) [Progressing = True] Exists) then
1✔
712
                        !x=x+1
1✔
713
                        -[#orangered]-> yes;
1✔
714
                        #orangered:<<implicit>>\n**Progressing = True** /
1✔
715
                        if ((x) [Upgradable = False] Exists) then
1✔
716
                          !x=x+1
1✔
717
                          -[#springgreen]-> no;
1✔
718
                          #orangered:(x) Set **Upgradable = False**]
1✔
719
                          !x=x+1
1✔
720
                        else
1✔
721
                          -[#orangered]-> yes;
1✔
722
                          #orangered:<<implicit>>\n**Upgradable = False** /
1✔
723
                        endif
1✔
724
                        if ((x) [Available = False] Exists) then
1✔
725
                          !x=x+1
1✔
726
                          -[#springgreen]-> no;
1✔
727
                          #springgreen:(x) Set **Available = True**]
1✔
728
                          !x=x+1
1✔
729
                        else
1✔
730
                          #orangered:<<implicit>>\n**Available = False** /
1✔
731
                          -[#orangered]-> yes;
1✔
732
                        endif
1✔
733
                  else
1✔
734
                        -[#springgreen]-> no;
1✔
735
                        #springgreen:(x) Set **Progressing = False**]
1✔
736
                        !x=x+1
1✔
737
                        if ((x) [Upgradable = False] Exists) then
1✔
738
                          !x=x+1
1✔
739
                          -[#springgreen]-> no;
1✔
740
                          #springgreen:(x) Set **Upgradable = True**]
1✔
741
                          !x=x+1
1✔
742
                        else
1✔
743
                        #orangered:<<implicit>>\n**Upgradable = False** /
1✔
744
                          -[#orangered]-> yes;
1✔
745
                        endif
1✔
746
                        if ((x) [Available = False] Exists) then
1✔
747
                          !x=x+1
1✔
748
                          -[#springgreen]-> no;
1✔
749
                          #springgreen:(x) Set **Available = True**]
1✔
750
                          !x=x+1
1✔
751
                        else
1✔
752
                          -[#orangered]-> yes;
1✔
753
                          #orangered:<<implicit>>\n**Available = False** /
1✔
754
                        endif
1✔
755
                  endif
1✔
756
                endif
1✔
757
                end
1✔
758
                @enduml
1✔
759
        */
1✔
760

1✔
761
        /*
1✔
762
                    If any component operator reports negatively we want to write that to
1✔
763
                        the instance while preserving it's lastTransitionTime.
1✔
764
                        For example, consider the KubeVirt resource has the Available condition
1✔
765
                        type with type "False". When reconciling KubeVirt's resource we would
1✔
766
                        add it to the in-memory representation of HCO's conditions (r.conditions)
1✔
767
                        and here we are simply writing it back to the server.
1✔
768
                        One shortcoming is that only one failure of a particular condition can be
1✔
769
                        captured at one time (ie. if KubeVirt and CDI are both reporting !Available,
1✔
770
                    you will only see CDI as it updates last).
1✔
771
        */
1✔
772
        allComponentsAreUp := req.Conditions.IsEmpty()
1✔
773
        req.Conditions.SetStatusCondition(metav1.Condition{
1✔
774
                Type:               hcov1beta1.ConditionReconcileComplete,
1✔
775
                Status:             metav1.ConditionTrue,
1✔
776
                Reason:             reconcileCompleted,
1✔
777
                Message:            reconcileCompletedMessage,
1✔
778
                ObservedGeneration: req.Instance.Generation,
1✔
779
        })
1✔
780

1✔
781
        if req.Conditions.HasCondition(hcov1beta1.ConditionDegraded) { // (#chart 1)
2✔
782

1✔
783
                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 2,3)
1✔
784
                        Type:               hcov1beta1.ConditionProgressing,
1✔
785
                        Status:             metav1.ConditionFalse,
1✔
786
                        Reason:             reconcileCompleted,
1✔
787
                        Message:            reconcileCompletedMessage,
1✔
788
                        ObservedGeneration: req.Instance.Generation,
1✔
789
                })
1✔
790

1✔
791
                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 4,5)
1✔
792
                        Type:               hcov1beta1.ConditionUpgradeable,
1✔
793
                        Status:             metav1.ConditionFalse,
1✔
794
                        Reason:             commonDegradedReason,
1✔
795
                        Message:            "HCO is not Upgradeable due to degraded components",
1✔
796
                        ObservedGeneration: req.Instance.Generation,
1✔
797
                })
1✔
798

1✔
799
                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 6,7)
1✔
800
                        Type:               hcov1beta1.ConditionAvailable,
1✔
801
                        Status:             metav1.ConditionFalse,
1✔
802
                        Reason:             commonDegradedReason,
1✔
803
                        Message:            "HCO is not available due to degraded components",
1✔
804
                        ObservedGeneration: req.Instance.Generation,
1✔
805
                })
1✔
806

1✔
807
        } else {
2✔
808

1✔
809
                // Degraded is not found. add it.
1✔
810
                req.Conditions.SetStatusCondition(metav1.Condition{ // (#chart 8)
1✔
811
                        Type:               hcov1beta1.ConditionDegraded,
1✔
812
                        Status:             metav1.ConditionFalse,
1✔
813
                        Reason:             reconcileCompleted,
1✔
814
                        Message:            reconcileCompletedMessage,
1✔
815
                        ObservedGeneration: req.Instance.Generation,
1✔
816
                })
1✔
817

1✔
818
                if req.Conditions.HasCondition(hcov1beta1.ConditionProgressing) { // (#chart 9)
2✔
819

1✔
820
                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 10,11)
1✔
821
                                Type:               hcov1beta1.ConditionUpgradeable,
1✔
822
                                Status:             metav1.ConditionFalse,
1✔
823
                                Reason:             commonProgressingReason,
1✔
824
                                Message:            "HCO is not Upgradeable due to progressing components",
1✔
825
                                ObservedGeneration: req.Instance.Generation,
1✔
826
                        })
1✔
827

1✔
828
                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 12,13)
1✔
829
                                Type:               hcov1beta1.ConditionAvailable,
1✔
830
                                Status:             metav1.ConditionTrue,
1✔
831
                                Reason:             reconcileCompleted,
1✔
832
                                Message:            reconcileCompletedMessage,
1✔
833
                                ObservedGeneration: req.Instance.Generation,
1✔
834
                        })
1✔
835

1✔
836
                } else {
2✔
837

1✔
838
                        req.Conditions.SetStatusCondition(metav1.Condition{ // (#chart 14)
1✔
839
                                Type:               hcov1beta1.ConditionProgressing,
1✔
840
                                Status:             metav1.ConditionFalse,
1✔
841
                                Reason:             reconcileCompleted,
1✔
842
                                Message:            reconcileCompletedMessage,
1✔
843
                                ObservedGeneration: req.Instance.Generation,
1✔
844
                        })
1✔
845

1✔
846
                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 15,16)
1✔
847
                                Type:               hcov1beta1.ConditionUpgradeable,
1✔
848
                                Status:             metav1.ConditionTrue,
1✔
849
                                Reason:             reconcileCompleted,
1✔
850
                                Message:            reconcileCompletedMessage,
1✔
851
                                ObservedGeneration: req.Instance.Generation,
1✔
852
                        })
1✔
853

1✔
854
                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 17,18)
1✔
855
                                Type:               hcov1beta1.ConditionAvailable,
1✔
856
                                Status:             metav1.ConditionTrue,
1✔
857
                                Reason:             reconcileCompleted,
1✔
858
                                Message:            reconcileCompletedMessage,
1✔
859
                                ObservedGeneration: req.Instance.Generation,
1✔
860
                        })
1✔
861

1✔
862
                }
1✔
863
        }
864

865
        return allComponentsAreUp
1✔
866
}
867

868
func (r *ReconcileHyperConverged) completeReconciliation(req *common.HcoRequest) {
1✔
869
        allComponentsAreUp := r.aggregateComponentConditions(req)
1✔
870

1✔
871
        hcoReady := false
1✔
872

1✔
873
        if allComponentsAreUp {
2✔
874
                req.Logger.Info("No component operator reported negatively")
1✔
875

1✔
876
                // if in upgrade mode, and all the components are upgraded, and nothing pending to be written - upgrade is completed
1✔
877
                if r.upgradeMode && req.ComponentUpgradeInProgress && !req.Dirty {
2✔
878
                        // update the new version only when upgrade is completed
1✔
879
                        UpdateVersion(&req.Instance.Status, hcoVersionName, r.ownVersion)
1✔
880
                        req.StatusDirty = true
1✔
881

1✔
882
                        r.upgradeMode = false
1✔
883
                        req.ComponentUpgradeInProgress = false
1✔
884
                        req.Logger.Info(fmt.Sprintf("Successfully upgraded to version %s", r.ownVersion))
1✔
885
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "UpgradeHCO", fmt.Sprintf("Successfully upgraded to version %s", r.ownVersion))
1✔
886
                }
1✔
887

888
                // If not in upgrade mode, then we're ready, because all the operators reported positive conditions.
889
                // if upgrade was done successfully, r.upgradeMode is already false here.
890
                hcoReady = !r.upgradeMode
1✔
891
        }
892

893
        if r.upgradeMode {
2✔
894
                // override the Progressing condition during upgrade
1✔
895
                req.Conditions.SetStatusCondition(metav1.Condition{
1✔
896
                        Type:               hcov1beta1.ConditionProgressing,
1✔
897
                        Status:             metav1.ConditionTrue,
1✔
898
                        Reason:             "HCOUpgrading",
1✔
899
                        Message:            "HCO is now upgrading to version " + r.ownVersion,
1✔
900
                        ObservedGeneration: req.Instance.Generation,
1✔
901
                })
1✔
902
        }
1✔
903

904
        // check if HCO was available before this reconcile loop
905
        hcoWasAvailable := apimetav1.IsStatusConditionTrue(req.Instance.Status.Conditions, hcov1beta1.ConditionAvailable) &&
1✔
906
                apimetav1.IsStatusConditionFalse(req.Instance.Status.Conditions, hcov1beta1.ConditionProgressing)
1✔
907

1✔
908
        if hcoReady {
2✔
909
                // If no operator whose conditions we are watching reports an error, then it is safe
1✔
910
                // to set readiness.
1✔
911
                if !hcoWasAvailable { // only when become available
2✔
912
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "ReconcileHCO", "HCO Reconcile completed successfully")
1✔
913
                }
1✔
914
        } else {
1✔
915
                // If for any reason we marked ourselves !upgradeable...then unset readiness
1✔
916
                if !r.upgradeMode && hcoWasAvailable { // only when become not ready
1✔
917
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeWarning, "ReconcileHCO", "Not all the operators are ready")
×
918
                }
×
919
        }
920

921
        r.updateConditions(req)
1✔
922
}
923

924
// This function is used to exit from the reconcile function, updating the conditions and returns the reconcile result
925
func (r *ReconcileHyperConverged) updateConditions(req *common.HcoRequest) {
1✔
926
        conditions := slices.Clone(req.Instance.Status.Conditions)
1✔
927

1✔
928
        for _, condType := range common.HcoConditionTypes {
2✔
929
                cond, found := req.Conditions[condType]
1✔
930
                if !found {
2✔
931
                        cond = metav1.Condition{
1✔
932
                                Type:               condType,
1✔
933
                                Status:             metav1.ConditionUnknown,
1✔
934
                                Message:            "Unknown Status",
1✔
935
                                Reason:             "StatusUnknown",
1✔
936
                                ObservedGeneration: req.Instance.Generation,
1✔
937
                        }
1✔
938
                }
1✔
939

940
                apimetav1.SetStatusCondition(&conditions, cond)
1✔
941
        }
942

943
        // Detect a "TaintedConfiguration" state, and raise a corresponding event
944
        r.detectTaintedConfiguration(req, &conditions)
1✔
945

1✔
946
        if !reflect.DeepEqual(conditions, req.Instance.Status.Conditions) {
2✔
947
                req.Instance.Status.Conditions = conditions
1✔
948
                req.StatusDirty = true
1✔
949
        }
1✔
950

951
        systemHealthStatus := r.getSystemHealthStatus(req.Conditions)
1✔
952

1✔
953
        if systemHealthStatus != req.Instance.Status.SystemHealthStatus {
2✔
954
                req.Instance.Status.SystemHealthStatus = systemHealthStatus
1✔
955
                req.StatusDirty = true
1✔
956
        }
1✔
957

958
        metrics.SetHCOMetricSystemHealthStatus(getNumericalHealthStatus(systemHealthStatus))
1✔
959
}
960

961
func (r *ReconcileHyperConverged) setLabels(req *common.HcoRequest) {
1✔
962
        if req.Instance.Labels == nil {
2✔
963
                req.Instance.Labels = map[string]string{}
1✔
964
        }
1✔
965
        if req.Instance.Labels[hcoutil.AppLabel] == "" {
2✔
966
                req.Instance.Labels[hcoutil.AppLabel] = req.Instance.Name
1✔
967
                req.Dirty = true
1✔
968
        }
1✔
969
}
970

971
func (r *ReconcileHyperConverged) detectTaintedConfiguration(req *common.HcoRequest, conditions *[]metav1.Condition) {
1✔
972
        conditionExists := apimetav1.IsStatusConditionTrue(req.Instance.Status.Conditions, hcov1beta1.ConditionTaintedConfiguration)
1✔
973

1✔
974
        // A tainted configuration state is indicated by the
1✔
975
        // presence of at least one of the JSON Patch annotations
1✔
976
        tainted := false
1✔
977
        for _, jpa := range JSONPatchAnnotationNames {
2✔
978
                NumOfChanges := 0
1✔
979
                jsonPatch, exists := req.Instance.Annotations[jpa]
1✔
980
                if exists {
2✔
981
                        if NumOfChanges = getNumOfChangesJSONPatch(jsonPatch); NumOfChanges > 0 {
2✔
982
                                tainted = true
1✔
983
                        }
1✔
984
                }
985
                metrics.SetUnsafeModificationCount(NumOfChanges, jpa)
1✔
986
        }
987

988
        if tainted {
2✔
989
                apimetav1.SetStatusCondition(conditions, metav1.Condition{
1✔
990
                        Type:               hcov1beta1.ConditionTaintedConfiguration,
1✔
991
                        Status:             metav1.ConditionTrue,
1✔
992
                        Reason:             taintedConfigurationReason,
1✔
993
                        Message:            taintedConfigurationMessage,
1✔
994
                        ObservedGeneration: req.Instance.Generation,
1✔
995
                })
1✔
996

1✔
997
                if !conditionExists {
2✔
998
                        // Only log at "first occurrence" of detection
1✔
999
                        req.Logger.Info("Detected tainted configuration state for HCO")
1✔
1000
                }
1✔
1001
        } else { // !tainted
1✔
1002

1✔
1003
                // For the sake of keeping the JSONPatch backdoor in low profile,
1✔
1004
                // we just remove the condition instead of False'ing it.
1✔
1005
                if conditionExists {
2✔
1006
                        apimetav1.RemoveStatusCondition(conditions, hcov1beta1.ConditionTaintedConfiguration)
1✔
1007

1✔
1008
                        req.Logger.Info("Detected untainted configuration state for HCO")
1✔
1009
                }
1✔
1010
        }
1011
}
1012

1013
func (r *ReconcileHyperConverged) getSystemHealthStatus(conditions common.HcoConditions) string {
1✔
1014
        if isSystemHealthStatusError(conditions) {
2✔
1015
                return systemHealthStatusError
1✔
1016
        }
1✔
1017

1018
        if isSystemHealthStatusWarning(conditions) {
2✔
1019
                return systemHealthStatusWarning
1✔
1020
        }
1✔
1021

1022
        return systemHealthStatusHealthy
1✔
1023
}
1024

1025
func isSystemHealthStatusError(conditions common.HcoConditions) bool {
1✔
1026
        return !conditions.IsStatusConditionTrue(hcov1beta1.ConditionAvailable) || conditions.IsStatusConditionTrue(hcov1beta1.ConditionDegraded)
1✔
1027
}
1✔
1028

1029
func isSystemHealthStatusWarning(conditions common.HcoConditions) bool {
1✔
1030
        return !conditions.IsStatusConditionTrue(hcov1beta1.ConditionReconcileComplete) || conditions.IsStatusConditionTrue(hcov1beta1.ConditionProgressing)
1✔
1031
}
1✔
1032

1033
func getNumOfChangesJSONPatch(jsonPatch string) int {
1✔
1034
        patches, err := jsonpatch.DecodePatch([]byte(jsonPatch))
1✔
1035
        if err != nil {
2✔
1036
                return 0
1✔
1037
        }
1✔
1038
        return len(patches)
1✔
1039
}
1040

1041
func getNumericalHealthStatus(status string) float64 {
1✔
1042
        healthStatusCodes := map[string]float64{
1✔
1043
                systemHealthStatusHealthy: metrics.SystemHealthStatusHealthy,
1✔
1044
                systemHealthStatusWarning: metrics.SystemHealthStatusWarning,
1✔
1045
                systemHealthStatusError:   metrics.SystemHealthStatusError,
1✔
1046
        }
1✔
1047

1✔
1048
        return healthStatusCodes[status]
1✔
1049
}
1✔
1050

1051
func (r *ReconcileHyperConverged) firstLoopInitialization(request *common.HcoRequest) {
1✔
1052
        // Initialize operand handler.
1✔
1053
        r.operandHandler.FirstUseInitiation(r.scheme, hcoutil.GetClusterInfo(), request.Instance)
1✔
1054

1✔
1055
        // Avoid re-initializing.
1✔
1056
        r.firstLoop = false
1✔
1057
}
1✔
1058

1059
func (r *ReconcileHyperConverged) setOperatorUpgradeableStatus(request *common.HcoRequest) error {
	if hcoutil.GetClusterInfo().IsManagedByOLM() {

		upgradeable := !r.upgradeMode && request.Upgradeable

		request.Logger.Info("setting the Upgradeable operator condition", requestedStatusKey, upgradeable)

		msg := hcoutil.UpgradeableAllowMessage
		status := metav1.ConditionTrue
		reason := hcoutil.UpgradeableAllowReason

		if !upgradeable {
			status = metav1.ConditionFalse

			if r.upgradeMode {
				msg = hcoutil.UpgradeableUpgradingMessage + r.ownVersion
				reason = hcoutil.UpgradeableUpgradingReason
			} else {
				condition, found := request.Conditions.GetCondition(hcov1beta1.ConditionUpgradeable)
				if found && condition.Status == metav1.ConditionFalse {
					reason = condition.Reason
					msg = condition.Message
				}
			}
		}

		if err := r.upgradeableCondition.Set(request.Ctx, status, reason, msg); err != nil {
			request.Logger.Error(err, "can't set the Upgradeable operator condition", requestedStatusKey, upgradeable)
			return err
		}

	}

	return nil
}

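// migrateBeforeUpgrade performs pre-upgrade migrations: it applies the
// version-specific upgrade patches and removes quick-start guides and image
// streams that are no longer shipped. It returns true if the HCO spec was
// modified by an upgrade patch.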
func (r *ReconcileHyperConverged) migrateBeforeUpgrade(req *common.HcoRequest) (bool, error) {
	upgradePatched, err := r.applyUpgradePatches(req)
	if err != nil {
		return false, err
	}

	removeOldQuickStartGuides(req, r.client, r.operandHandler.GetQuickStartNames())
	removeOldImageStream(req, r.client, r.operandHandler.GetImageStreamNames())

	return upgradePatched, nil
}

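// applyUpgradePatches applies the declarative upgrade patches to a temporary
// copy of the HCO CR, based on the HCO version recorded in its status (falling
// back to "0.0.0" when none is recorded), and removes leftover objects that
// are no longer deployed. If the patched spec differs from the current one, it
// is copied back into the request and the request is marked dirty.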
func (r *ReconcileHyperConverged) applyUpgradePatches(req *common.HcoRequest) (bool, error) {
	modified := false

	knownHcoVersion, _ := GetVersion(&req.Instance.Status, hcoVersionName)
	if knownHcoVersion == "" {
		knownHcoVersion = "0.0.0"
	}
	knownHcoSV, err := semver.ParseTolerant(knownHcoVersion)
	if err != nil {
		req.Logger.Error(err, "failed to parse the known HyperConverged version")
		return false, err
	}

	tmpInstance, err := upgradepatch.ApplyUpgradePatch(req.Logger, req.Instance, knownHcoSV)
	if err != nil {
		return false, err
	}

	for _, p := range upgradepatch.GetObjectsToBeRemoved() {
		removed, err := r.removeLeftover(req, knownHcoSV, p)
		if err != nil {
			return removed, err
		}
	}

	if !reflect.DeepEqual(tmpInstance.Spec, req.Instance.Spec) {
		req.Logger.Info("updating HCO spec as a result of upgrade patches")
		tmpInstance.Spec.DeepCopyInto(&req.Instance.Spec)
		modified = true
		req.Dirty = true
	}

	return modified, nil
}

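// removeLeftover deletes a leftover object if the previously installed HCO
// version falls within the object's affected version range, dropping its entry
// from status.relatedObjects as well. An object that is already gone is not an
// error; the function simply reports that nothing was removed.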
func (r *ReconcileHyperConverged) removeLeftover(req *common.HcoRequest, knownHcoSV semver.Version, p upgradepatch.ObjectToBeRemoved) (bool, error) {
	if p.IsAffectedRange(knownHcoSV) {
		removeRelatedObject(req, r.client, p.GroupVersionKind, p.ObjectKey)
		u := &unstructured.Unstructured{}
		u.SetGroupVersionKind(p.GroupVersionKind)
		gerr := r.client.Get(req.Ctx, p.ObjectKey, u)
		if gerr != nil {
			if apierrors.IsNotFound(gerr) {
				return false, nil
			}

			req.Logger.Error(gerr, "failed looking for leftovers", "objectToBeRemoved", p)
			return false, gerr
		}
		return r.deleteObj(req, u, false)
	}
	return false, nil
}

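// deleteObj ensures the given object is deleted and, when a deletion actually
// took place, emits a "Killing" event on the HyperConverged CR. The
// protectNonHCOObjects flag is forwarded as-is to hcoutil.EnsureDeleted.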
func (r *ReconcileHyperConverged) deleteObj(req *common.HcoRequest, obj client.Object, protectNonHCOObjects bool) (bool, error) {
	removed, err := hcoutil.EnsureDeleted(req.Ctx, r.client, obj, req.Instance.Name, req.Logger, false, false, protectNonHCOObjects)

	if err != nil {
		req.Logger.Error(
			err,
			fmt.Sprintf("failed to delete %s", obj.GetObjectKind().GroupVersionKind().Kind),
			"name",
			obj.GetName(),
		)

		return removed, err
	}

	if removed {
		r.eventEmitter.EmitEvent(
			req.Instance, corev1.EventTypeNormal, "Killing",
			fmt.Sprintf("Removed %s %s", obj.GetName(), obj.GetObjectKind().GroupVersionKind().Kind),
		)
	}

	return removed, nil
}

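// removeRelatedObject drops the entry matching the given GVK and object key
// from status.relatedObjects. When the object being removed is a CRD, entries
// whose resource is served by that CRD are dropped too, since their API goes
// away with it. The status is marked dirty only if an entry was removed.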
func removeRelatedObject(req *common.HcoRequest, cl client.Client, gvk schema.GroupVersionKind, objectKey types.NamespacedName) {
	refs := make([]corev1.ObjectReference, 0, len(req.Instance.Status.RelatedObjects))
	foundRO := false

	crdGVK := schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}

	for _, obj := range req.Instance.Status.RelatedObjects {
		apiVersion, kind := gvk.ToAPIVersionAndKind()
		if obj.APIVersion == apiVersion && obj.Kind == kind && obj.Namespace == objectKey.Namespace && obj.Name == objectKey.Name {
			foundRO = true
			req.Logger.Info("Removed relatedObject entry for", "gvk", gvk, "objectKey", objectKey)
			continue
		}
		if reflect.DeepEqual(gvk, crdGVK) {
			mapping, err := cl.RESTMapper().RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version)
			if err == nil && mapping != nil && mapping.Resource.GroupResource().String() == objectKey.Name {
				foundRO = true
				req.Logger.Info("Removed relatedObject on CRD removal for", "gvk", gvk, "objectKey", objectKey)
				continue
			}
		}
		refs = append(refs, obj)
	}

	if foundRO {
		req.Instance.Status.RelatedObjects = refs
		req.StatusDirty = true
	}
}

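// checkFinalizers returns true as long as the HyperConverged CR is not being
// deleted, adding the HCO finalizer on the way if it is missing; it returns
// false once a deletion timestamp is set.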
func checkFinalizers(req *common.HcoRequest) bool {
	if req.Instance.DeletionTimestamp.IsZero() {
		// Add the finalizer if it's not there
		if !slices.Contains(req.Instance.Finalizers, FinalizerName) {
			req.Logger.Info("setting a finalizer (with fully qualified name)")
			req.Instance.Finalizers = append(req.Instance.Finalizers, FinalizerName)
			req.Dirty = true
		}
		return true
	}
	return false
}