
kubevirt / hyperconverged-cluster-operator / build 15967738050

30 Jun 2025 08:23AM UTC coverage: 75.41% (-0.2%), down from 75.563%

Pull Request #3594: Monitor the cluster architectures
Merge 0dead85ad into fd650c52c (via github web-flow)

234 of 341 new or added lines in 8 files covered. (68.62%)
3 existing lines in 2 files are now uncovered.
6581 of 8727 relevant lines covered (75.41%)
1.5 hits per line

Source File

80.18% covered
/controllers/hyperconverged/hyperconverged_controller.go
package hyperconverged

import (
        "cmp"
        "context"
        "fmt"
        "os"
        "reflect"
        "slices"
        "time"

        "github.com/blang/semver/v4"
        jsonpatch "github.com/evanphx/json-patch/v5"
        "github.com/go-logr/logr"
        openshiftconfigv1 "github.com/openshift/api/config/v1"
        consolev1 "github.com/openshift/api/console/v1"
        imagev1 "github.com/openshift/api/image/v1"
        routev1 "github.com/openshift/api/route/v1"
        operatorhandler "github.com/operator-framework/operator-lib/handler"
        monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
        appsv1 "k8s.io/api/apps/v1"
        corev1 "k8s.io/api/core/v1"
        rbacv1 "k8s.io/api/rbac/v1"
        schedulingv1 "k8s.io/api/scheduling/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        apimetav1 "k8s.io/apimachinery/pkg/api/meta"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/utils/ptr"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/event"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        logf "sigs.k8s.io/controller-runtime/pkg/log"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/predicate"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        "sigs.k8s.io/controller-runtime/pkg/source"

        networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1"
        kubevirtcorev1 "kubevirt.io/api/core/v1"
        aaqv1alpha1 "kubevirt.io/application-aware-quota/staging/src/kubevirt.io/application-aware-quota-api/pkg/apis/core/v1alpha1"
        cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
        sspv1beta3 "kubevirt.io/ssp-operator/api/v1beta3"

        hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1"
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/alerts"
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/common"
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/operands"
        "github.com/kubevirt/hyperconverged-cluster-operator/controllers/reqresolver"
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/monitoring/hyperconverged/metrics"
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/nodeinfo"
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/upgradepatch"
        hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
        "github.com/kubevirt/hyperconverged-cluster-operator/version"
)

var (
        log = logf.Log.WithName("controller_hyperconverged")
)

const (
        // We cannot set owner references of cluster-wide resources to the namespaced HyperConverged object.
        // Therefore, use finalizers to manage the cleanup.
        FinalizerName = "kubevirt.io/hyperconverged"

        reconcileInit               = "Init"
        reconcileInitMessage        = "Initializing HyperConverged cluster"
        reconcileCompleted          = "ReconcileCompleted"
        reconcileCompletedMessage   = "Reconcile completed successfully"
        invalidRequestReason        = "InvalidRequest"
        invalidRequestMessageFormat = "Request does not match expected name (%v) and namespace (%v)"
        commonDegradedReason        = "HCODegraded"
        commonProgressingReason     = "HCOProgressing"
        taintedConfigurationReason  = "UnsupportedFeatureAnnotation"
        taintedConfigurationMessage = "Unsupported feature was activated via an HCO annotation"
        systemHealthStatusHealthy   = "healthy"
        systemHealthStatusWarning   = "warning"
        systemHealthStatusError     = "error"

        hcoVersionName = "operator"

        requestedStatusKey = "requested status"

        requeueAfter = time.Millisecond * 100
)

// JSONPatchAnnotationNames - annotations used to patch operand CRs with unsupported/unofficial/hidden features.
// The presence of any of these annotations raises the hcov1beta1.ConditionTaintedConfiguration condition.
var JSONPatchAnnotationNames = []string{
        common.JSONPatchKVAnnotationName,
        common.JSONPatchCDIAnnotationName,
        common.JSONPatchCNAOAnnotationName,
        common.JSONPatchSSPAnnotationName,
}

// RegisterReconciler creates a new HyperConverged Reconciler and registers it into the manager.
func RegisterReconciler(mgr manager.Manager,
        ci hcoutil.ClusterInfo,
        upgradeableCond hcoutil.Condition,
        ingressEventCh <-chan event.GenericEvent,
        nodeEventChannel <-chan event.GenericEvent) error {

        return add(mgr, newReconciler(mgr, ci, upgradeableCond), ci, ingressEventCh, nodeEventChannel)
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, ci hcoutil.ClusterInfo, upgradeableCond hcoutil.Condition) reconcile.Reconciler {

        ownVersion := cmp.Or(os.Getenv(hcoutil.HcoKvIoVersionName), version.Version)

        r := &ReconcileHyperConverged{
                client:               mgr.GetClient(),
                scheme:               mgr.GetScheme(),
                operandHandler:       operands.NewOperandHandler(mgr.GetClient(), mgr.GetScheme(), ci, hcoutil.GetEventEmitter()),
                upgradeMode:          false,
                ownVersion:           ownVersion,
                eventEmitter:         hcoutil.GetEventEmitter(),
                firstLoop:            true,
                upgradeableCondition: upgradeableCond,
        }

        if ci.IsMonitoringAvailable() {
                r.monitoringReconciler = alerts.NewMonitoringReconciler(ci, r.client, hcoutil.GetEventEmitter(), r.scheme)
        }

        return r
}

// add creates a new controller and sets up the watches that feed it.
func add(mgr manager.Manager, r reconcile.Reconciler, ci hcoutil.ClusterInfo, ingressEventCh <-chan event.GenericEvent, nodeEventChannel <-chan event.GenericEvent) error {
        // Create a new controller
        c, err := controller.New("hyperconverged-controller", mgr, controller.Options{Reconciler: r})
        if err != nil {
                return err
        }

        // Watch for changes to primary resource HyperConverged
        err = c.Watch(
                source.Kind(
                        mgr.GetCache(), client.Object(&hcov1beta1.HyperConverged{}),
                        &operatorhandler.InstrumentedEnqueueRequestForObject[client.Object]{},
                        predicate.Or[client.Object](predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{},
                                predicate.ResourceVersionChangedPredicate{}),
                ))
        if err != nil {
                return err
        }

        // To limit the memory usage, the controller manager was instantiated with a custom cache
        // that is watching only a specific set of objects with selectors.
        // When a new object is added here, it must also be added to the custom cache
        // managed by getNewManagerCache()
        secondaryResources := []client.Object{
                &kubevirtcorev1.KubeVirt{},
                &cdiv1beta1.CDI{},
                &networkaddonsv1.NetworkAddonsConfig{},
                &aaqv1alpha1.AAQ{},
                &schedulingv1.PriorityClass{},
                &corev1.ConfigMap{},
                &corev1.Service{},
                &rbacv1.Role{},
                &rbacv1.RoleBinding{},
        }
        if ci.IsMonitoringAvailable() {
                secondaryResources = append(secondaryResources, []client.Object{
                        &monitoringv1.ServiceMonitor{},
                        &monitoringv1.PrometheusRule{},
                }...)
        }
        if ci.IsOpenshift() {
                secondaryResources = append(secondaryResources, []client.Object{
                        &sspv1beta3.SSP{},
                        &corev1.Service{},
                        &routev1.Route{},
                        &consolev1.ConsoleCLIDownload{},
                        &consolev1.ConsoleQuickStart{},
                        &consolev1.ConsolePlugin{},
                        &imagev1.ImageStream{},
                        &corev1.Namespace{},
                        &appsv1.Deployment{},
                }...)
        }

        // Watch secondary resources
        for _, resource := range secondaryResources {
                msg := fmt.Sprintf("Reconciling for %T", resource)
                err = c.Watch(
                        source.Kind(mgr.GetCache(), resource,
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
                                        // enqueue using a placeholder, to be able to discriminate requests triggered
                                        // by changes on the HyperConverged object from requests triggered by changes
                                        // on a secondary CR controlled by HCO
                                        log.Info(msg)
                                        return []reconcile.Request{
                                                reqresolver.GetSecondaryCRRequest(),
                                        }
                                }),
                        ))
                if err != nil {
                        return err
                }
        }

        if ci.IsOpenshift() {
                err = c.Watch(
                        source.Kind(
                                mgr.GetCache(),
                                client.Object(&openshiftconfigv1.APIServer{}),
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
                                        // enqueue using a placeholder to signal that the change is not
                                        // directly on the HCO CR but on the APIServer CR, which we want to reload
                                        // only if it really changed
                                        log.Info("Reconciling for openshiftconfigv1.APIServer")
                                        return []reconcile.Request{
                                                reqresolver.GetAPIServerCRRequest(),
                                        }
                                }),
                        ))
                if err != nil {
                        return err
                }

                err = c.Watch(
                        source.Channel(
                                ingressEventCh,
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
                                        // the ingress-cluster controller initiates this by pushing an event to the ingressEventCh
                                        // channel. This forces this controller to update the URL of the CLI download route, if the
                                        // user customized the hostname.
                                        log.Info("Reconciling for openshiftconfigv1.Ingress")
                                        return []reconcile.Request{
                                                reqresolver.GetIngressCRResource(),
                                        }
                                }),
                        ))
                if err != nil {
                        return err
                }

                err = c.Watch(
                        source.Channel(
                                nodeEventChannel,
                                handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request {
                                        // the nodes controller initiates this by pushing an event to the nodeEventChannel channel.
                                        // This forces this controller to update the status fields related to the cluster nodes, and
                                        // to re-generate the DataImportCronTemplates in the SSP CR.
                                        log.Info("Reconciling for core.Node")
                                        return []reconcile.Request{
                                                reqresolver.GetNodeResource(),
                                        }
                                }),
                        ))
                if err != nil {
                        return err
                }
        }

        return nil
}

var _ reconcile.Reconciler = &ReconcileHyperConverged{}

// ReconcileHyperConverged reconciles a HyperConverged object
type ReconcileHyperConverged struct {
        // This client, initialized using mgr.Client() above, is a split client
        // that reads objects from the cache and writes to the apiserver
        client               client.Client
        scheme               *runtime.Scheme
        operandHandler       *operands.OperandHandler
        upgradeMode          bool
        ownVersion           string
        eventEmitter         hcoutil.EventEmitter
        firstLoop            bool
        upgradeableCondition hcoutil.Condition
        monitoringReconciler *alerts.MonitoringReconciler
}

// Reconcile reads the state of the cluster for a HyperConverged object and makes changes based on the state read
// and what is in the HyperConverged.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileHyperConverged) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
        logger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
        err := r.refreshAPIServerCR(ctx, logger, request)
        if err != nil {
                return reconcile.Result{}, err
        }

        resolvedRequest, hcoTriggered := reqresolver.ResolveReconcileRequest(log, request)
        hcoRequest := common.NewHcoRequest(ctx, resolvedRequest, log, r.upgradeMode, hcoTriggered)

        if hcoTriggered {
                r.operandHandler.Reset()
        }

        err = r.monitoringReconciler.Reconcile(hcoRequest, r.firstLoop)
        if err != nil {
                return reconcile.Result{}, err
        }

        // Fetch the HyperConverged instance
        instance, err := r.getHyperConverged(hcoRequest)
        if err != nil {
                return reconcile.Result{}, err
        }

        hcoRequest.Instance = instance

        if instance == nil {
                // if the HyperConverged CR was deleted during an upgrade process, then this is not an upgrade anymore
                r.upgradeMode = false
                err = r.setOperatorUpgradeableStatus(hcoRequest)

                return reconcile.Result{}, err
        }

        if r.firstLoop {
                r.firstLoopInitialization(hcoRequest)
        }

        if err = r.monitoringReconciler.UpdateRelatedObjects(hcoRequest); err != nil {
                logger.Error(err, "Failed to update the PrometheusRule as a related object")
                return reconcile.Result{}, err
        }

        result, err := r.doReconcile(hcoRequest)
        if err != nil {
                r.eventEmitter.EmitEvent(hcoRequest.Instance, corev1.EventTypeWarning, "ReconcileError", err.Error())
                return result, err
        }

        if err = r.setOperatorUpgradeableStatus(hcoRequest); err != nil {
                return reconcile.Result{}, err
        }

        requeue, err := r.updateHyperConverged(hcoRequest)
        if requeue || apierrors.IsConflict(err) {
                result.RequeueAfter = requeueAfter
        }

        return result, err
}

// refreshAPIServerCR refreshes the APIServer CR, if the request was triggered by that CR.
func (r *ReconcileHyperConverged) refreshAPIServerCR(ctx context.Context, logger logr.Logger, originalRequest reconcile.Request) error {
        if reqresolver.IsTriggeredByAPIServerCR(originalRequest) {
                logger.Info("Refreshing the ApiServer CR")
                return hcoutil.GetClusterInfo().RefreshAPIServerCR(ctx, r.client)
        }

        return nil
}

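// doReconcile runs a single reconciliation pass: it validates the request, initializes
// conditions and labels on a fresh CR, handles finalizers and deletion, detects upgrade
// mode, and finally ensures all the operands.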
func (r *ReconcileHyperConverged) doReconcile(req *common.HcoRequest) (reconcile.Result, error) {

        valid := r.validateNamespace(req)
        if !valid {
                return reconcile.Result{}, nil
        }

        // Add conditions if there are none
        init := req.Instance.Status.Conditions == nil
        if init {
                r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "InitHCO", "Initiating the HyperConverged")
                r.setInitialConditions(req)

                req.StatusDirty = true
        }

        r.setLabels(req)

        updateStatus(req)

        // in-memory conditions should start off empty. They will only ever hold
        // negative conditions (!Available, Degraded, Progressing)
        req.Conditions = common.NewHcoConditions()

        // Handle finalizers
        if !checkFinalizers(req) {
                if !req.HCOTriggered {
                        // this is just the effect of a delete request created by HCO
                        // in the previous iteration; ignore it
                        return reconcile.Result{}, nil
                }
                return r.ensureHcoDeleted(req)
        }

        applyDataImportSchedule(req)

        // If the current version is not yet updated in the CR, then we're upgrading. This also works when
        // upgrading from an old version, since Status.Versions will be empty.
        knownHcoVersion, _ := GetVersion(&req.Instance.Status, hcoVersionName)

        // detect upgrade mode
        if !r.upgradeMode && !init && knownHcoVersion != r.ownVersion {
                // get into upgrade mode

                r.upgradeMode = true
                r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "UpgradeHCO", "Upgrading the HyperConverged to version "+r.ownVersion)
                req.Logger.Info(fmt.Sprintf("Start upgrading from version %s to version %s", knownHcoVersion, r.ownVersion))
        }

        req.SetUpgradeMode(r.upgradeMode)

        if r.upgradeMode {
                if result, err := r.handleUpgrade(req); result != nil {
                        return *result, err
                }
        }

        return r.EnsureOperandAndComplete(req, init)
}

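// handleUpgrade applies the pre-upgrade migrations; it returns a non-nil result when the
// caller should requeue instead of continuing with the normal reconciliation flow.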
func (r *ReconcileHyperConverged) handleUpgrade(req *common.HcoRequest) (*reconcile.Result, error) {
        modified, err := r.migrateBeforeUpgrade(req)
        if err != nil {
                return &reconcile.Result{RequeueAfter: requeueAfter}, err
        }

        if modified {
                r.updateConditions(req)
                return &reconcile.Result{RequeueAfter: requeueAfter}, nil
        }
        return nil, nil
}

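// EnsureOperandAndComplete deploys all the operands and completes the reconciliation, or
// requeues when this is the initial loop for the CR or an operand failed to deploy.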
func (r *ReconcileHyperConverged) EnsureOperandAndComplete(req *common.HcoRequest, init bool) (reconcile.Result, error) {
        if err := r.operandHandler.Ensure(req); err != nil {
                r.updateConditions(req)
                requeue := time.Duration(0)
                if init {
                        requeue = requeueAfter
                }
                return reconcile.Result{RequeueAfter: requeue}, nil
        }

        req.Logger.Info("Reconcile complete")

        // Requeue if we just created everything
        if init {
                return reconcile.Result{RequeueAfter: requeueAfter}, nil
        }

        r.completeReconciliation(req)

        return reconcile.Result{}, nil
}

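// updateStatus refreshes the node-related status fields (observed generation, infrastructure
// high availability, and the control-plane and workloads architectures), marking the status
// as dirty whenever one of them changed.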
func updateStatus(req *common.HcoRequest) {
        if req.Instance.Generation != req.Instance.Status.ObservedGeneration {
                req.Instance.Status.ObservedGeneration = req.Instance.Generation
                req.StatusDirty = true
        }

        if infraHighlyAvailable := nodeinfo.IsInfrastructureHighlyAvailable(); req.Instance.Status.InfrastructureHighlyAvailable == nil ||
                *req.Instance.Status.InfrastructureHighlyAvailable != infraHighlyAvailable {

                if infraHighlyAvailable {
                        req.Logger.Info("infrastructure became highly available")
                } else {
                        req.Logger.Info("infrastructure became not highly available")
                }

                req.Instance.Status.InfrastructureHighlyAvailable = ptr.To(infraHighlyAvailable)
                req.StatusDirty = true
        }

        if cpArch := nodeinfo.GetControlPlaneArchitectures(); slices.Compare(req.Instance.Status.NodeInfo.ControllerNodeArchitecture, cpArch) != 0 {
                req.Instance.Status.NodeInfo.ControllerNodeArchitecture = cpArch
                req.StatusDirty = true
        }

        if workloadsArch := nodeinfo.GetWorkloadsArchitectures(); slices.Compare(req.Instance.Status.NodeInfo.WorkloadsArchitectures, workloadsArch) != 0 {
                req.Instance.Status.NodeInfo.WorkloadsArchitectures = workloadsArch
                req.StatusDirty = true
        }
}

// getHyperConverged gets the HyperConverged resource from the Kubernetes API.
func (r *ReconcileHyperConverged) getHyperConverged(req *common.HcoRequest) (*hcov1beta1.HyperConverged, error) {
        instance := &hcov1beta1.HyperConverged{}
        err := r.client.Get(req.Ctx, req.NamespacedName, instance)

        // Green path first
        if err == nil {
                metrics.SetHCOMetricHyperConvergedExists()
                return instance, nil
        }

        // Error path
        if apierrors.IsNotFound(err) {
                req.Logger.Info("No HyperConverged resource")
                metrics.SetHCOMetricHyperConvergedNotExists()

                // Request object not found, could have been deleted after reconcile request.
                // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
                // Return and don't requeue
                return nil, nil
        }

        // Another error reading the object.
        // Just return the error so that the request is requeued.
        return nil, err
}

// updateHyperConverged updates the HyperConverged resource according to its state in the request.
func (r *ReconcileHyperConverged) updateHyperConverged(request *common.HcoRequest) (bool, error) {

        // Since the status subresource is enabled for the HyperConverged kind,
        // we need to update the status and the metadata separately.
        // Moreover, we need to update the status first, in order to prevent a conflict.
        // In addition, metadata and spec changes are dropped by the status update; since the status
        // update is done first, we store the metadata and the spec, and restore them afterwards.

        var spec hcov1beta1.HyperConvergedSpec
        var meta metav1.ObjectMeta
        if request.Dirty {
                request.Instance.Spec.DeepCopyInto(&spec)
                request.Instance.ObjectMeta.DeepCopyInto(&meta)
        }

        err := r.updateHyperConvergedStatus(request)
        if err != nil {
                request.Logger.Error(err, "Failed to update HCO Status")
                return false, err
        }

        if request.Dirty {
                request.Instance.Annotations = meta.Annotations
                request.Instance.Finalizers = meta.Finalizers
                request.Instance.Labels = meta.Labels
                request.Instance.Spec = spec

                err = r.updateHyperConvergedSpecMetadata(request)
                if err != nil {
                        request.Logger.Error(err, "Failed to update HCO CR")
                        return false, err
                }
                // version update is a two-step process
                knownHcoVersion, _ := GetVersion(&request.Instance.Status, hcoVersionName)
                if r.ownVersion != knownHcoVersion && request.StatusDirty {
                        return true, nil
                }
        }

        return false, nil
}

// updateHyperConvergedSpecMetadata updates the HyperConverged resource's spec and metadata.
func (r *ReconcileHyperConverged) updateHyperConvergedSpecMetadata(request *common.HcoRequest) error {
        if !request.Dirty {
                return nil
        }

        return r.client.Update(request.Ctx, request.Instance)
}

// updateHyperConvergedStatus updates the HyperConverged resource's status.
func (r *ReconcileHyperConverged) updateHyperConvergedStatus(request *common.HcoRequest) error {
        if !request.StatusDirty {
                return nil
        }

        return r.client.Status().Update(request.Ctx, request.Instance)
}

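// validateNamespace rejects requests that do not target the expected HyperConverged name and
// namespace, setting the ReconcileComplete condition to False with an InvalidRequest reason.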
func (r *ReconcileHyperConverged) validateNamespace(req *common.HcoRequest) bool {
        // Ignore invalid requests
        if !reqresolver.IsTriggeredByHyperConverged(req.NamespacedName) {
                req.Logger.Info("Invalid request", "HyperConverged.Namespace", req.Namespace, "HyperConverged.Name", req.Name)
                hc := reqresolver.GetHyperConvergedNamespacedName()
                req.Conditions.SetStatusCondition(metav1.Condition{
                        Type:               hcov1beta1.ConditionReconcileComplete,
                        Status:             metav1.ConditionFalse,
                        Reason:             invalidRequestReason,
                        Message:            fmt.Sprintf(invalidRequestMessageFormat, hc.Name, hc.Namespace),
                        ObservedGeneration: req.Instance.Generation,
                })
                r.updateConditions(req)
                return false
        }
        return true
}

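// setInitialConditions writes the initial version and the initial set of conditions
// (ReconcileComplete and Upgradeable unknown, Progressing true, Available and Degraded false)
// into a freshly created HyperConverged CR.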
func (r *ReconcileHyperConverged) setInitialConditions(req *common.HcoRequest) {
        UpdateVersion(&req.Instance.Status, hcoVersionName, r.ownVersion)

        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionReconcileComplete,
                Status:             metav1.ConditionUnknown, // we just started trying to reconcile
                Reason:             reconcileInit,
                Message:            reconcileInitMessage,
                ObservedGeneration: req.Instance.Generation,
        })
        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionAvailable,
                Status:             metav1.ConditionFalse,
                Reason:             reconcileInit,
                Message:            reconcileInitMessage,
                ObservedGeneration: req.Instance.Generation,
        })
        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionProgressing,
                Status:             metav1.ConditionTrue,
                Reason:             reconcileInit,
                Message:            reconcileInitMessage,
                ObservedGeneration: req.Instance.Generation,
        })
        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionDegraded,
                Status:             metav1.ConditionFalse,
                Reason:             reconcileInit,
                Message:            reconcileInitMessage,
                ObservedGeneration: req.Instance.Generation,
        })
        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionUpgradeable,
                Status:             metav1.ConditionUnknown,
                Reason:             reconcileInit,
                Message:            reconcileInitMessage,
                ObservedGeneration: req.Instance.Generation,
        })

        r.updateConditions(req)
}

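// ensureHcoDeleted deletes all the operands and then removes the HCO finalizer from the CR.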
func (r *ReconcileHyperConverged) ensureHcoDeleted(req *common.HcoRequest) (reconcile.Result, error) {
        err := r.operandHandler.EnsureDeleted(req)
        if err != nil {
                return reconcile.Result{}, err
        }

        requeue := time.Duration(0)

        // Remove the finalizers
        if idx := slices.Index(req.Instance.Finalizers, FinalizerName); idx >= 0 {
                req.Instance.Finalizers = slices.Delete(req.Instance.Finalizers, idx, idx+1)
                req.Dirty = true
                requeue = requeueAfter
        }

        // Need to requeue because finalizer update does not change metadata.generation
        return reconcile.Result{RequeueAfter: requeue}, nil
}

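// aggregateComponentConditions folds the negative conditions collected from the component
// operators into the HCO-level conditions; it returns true when no component reported
// negatively.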
func (r *ReconcileHyperConverged) aggregateComponentConditions(req *common.HcoRequest) bool {
        /*
                See the chart at design/aggregateComponentConditions.svg; the numbers below follow the numbers in the chart.
                Here is the PlantUML code for the chart that describes the aggregation of the sub-components conditions.
                Find the PlantUML syntax here: https://plantuml.com/activity-diagram-beta

                @startuml ../../../design/aggregateComponentConditions.svg
                title Aggregate Component Conditions

                start
                  #springgreen:Set **ReconcileComplete = True**]
                  !x=1
                if ((x) [Degraded = True] Exists) then
                  !x=x+1
                  #orangered:<<implicit>>\n**Degraded = True** /
                  -[#orangered]-> yes;
                  if ((x) [Progressing = True] Exists) then
                        !x=x+1
                        -[#springgreen]-> no;
                        #springgreen:(x) Set **Progressing = False**]
                        !x=x+1
                  else
                        -[#orangered]-> yes;
                        #orangered:<<implicit>>\n**Progressing = True** /
                  endif
                  if ((x) [Upgradable = False] Exists) then
                        !x=x+1
                        -[#springgreen]-> no;
                        #orangered:(x) Set **Upgradable = False**]
                        !x=x+1
                  else
                        -[#orangered]-> yes;
                        #orangered:<<implicit>>\n**Upgradable = False** /
                  endif
                  if ((x) [Available = False] Exists) then
                        !x=x+1
                        -[#springgreen]-> no;
                        #orangered:(x) Set **Available = False**]
                        !x=x+1
                  else
                        -[#orangered]-> yes;
                        #orangered:<<implicit>>\n**Available = False** /
                  endif
                else
                  -[#springgreen]-> no;
                  #springgreen:(x) Set **Degraded = False**]
                  !x=x+1
                  if ((x) [Progressing = True] Exists) then
                        !x=x+1
                        -[#orangered]-> yes;
                        #orangered:<<implicit>>\n**Progressing = True** /
                        if ((x) [Upgradable = False] Exists) then
                          !x=x+1
                          -[#springgreen]-> no;
                          #orangered:(x) Set **Upgradable = False**]
                          !x=x+1
                        else
                          -[#orangered]-> yes;
                          #orangered:<<implicit>>\n**Upgradable = False** /
                        endif
                        if ((x) [Available = False] Exists) then
                          !x=x+1
                          -[#springgreen]-> no;
                          #springgreen:(x) Set **Available = True**]
                          !x=x+1
                        else
                          #orangered:<<implicit>>\n**Available = False** /
                          -[#orangered]-> yes;
                        endif
                  else
                        -[#springgreen]-> no;
                        #springgreen:(x) Set **Progressing = False**]
                        !x=x+1
                        if ((x) [Upgradable = False] Exists) then
                          !x=x+1
                          -[#springgreen]-> no;
                          #springgreen:(x) Set **Upgradable = True**]
                          !x=x+1
                        else
                          #orangered:<<implicit>>\n**Upgradable = False** /
                          -[#orangered]-> yes;
                        endif
                        if ((x) [Available = False] Exists) then
                          !x=x+1
                          -[#springgreen]-> no;
                          #springgreen:(x) Set **Available = True**]
                          !x=x+1
                        else
                          -[#orangered]-> yes;
                          #orangered:<<implicit>>\n**Available = False** /
                        endif
                  endif
                endif
                end
                @enduml
        */

        /*
                If any component operator reports negatively we want to write that to
                the instance while preserving its lastTransitionTime.
                For example, consider the KubeVirt resource has the Available condition
                type with type "False". When reconciling KubeVirt's resource we would
                add it to the in-memory representation of HCO's conditions (r.conditions)
                and here we are simply writing it back to the server.
                One shortcoming is that only one failure of a particular condition can be
                captured at one time (i.e. if KubeVirt and CDI are both reporting !Available,
                you will only see CDI as it updates last).
        */
        allComponentsAreUp := req.Conditions.IsEmpty()
        req.Conditions.SetStatusCondition(metav1.Condition{
                Type:               hcov1beta1.ConditionReconcileComplete,
                Status:             metav1.ConditionTrue,
                Reason:             reconcileCompleted,
                Message:            reconcileCompletedMessage,
                ObservedGeneration: req.Instance.Generation,
        })

        if req.Conditions.HasCondition(hcov1beta1.ConditionDegraded) { // (#chart 1)

                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 2,3)
                        Type:               hcov1beta1.ConditionProgressing,
                        Status:             metav1.ConditionFalse,
                        Reason:             reconcileCompleted,
                        Message:            reconcileCompletedMessage,
                        ObservedGeneration: req.Instance.Generation,
                })

                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 4,5)
                        Type:               hcov1beta1.ConditionUpgradeable,
                        Status:             metav1.ConditionFalse,
                        Reason:             commonDegradedReason,
                        Message:            "HCO is not Upgradeable due to degraded components",
                        ObservedGeneration: req.Instance.Generation,
                })

                req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 6,7)
                        Type:               hcov1beta1.ConditionAvailable,
                        Status:             metav1.ConditionFalse,
                        Reason:             commonDegradedReason,
                        Message:            "HCO is not available due to degraded components",
                        ObservedGeneration: req.Instance.Generation,
                })

        } else {

                // Degraded is not found. Add it.
                req.Conditions.SetStatusCondition(metav1.Condition{ // (#chart 8)
                        Type:               hcov1beta1.ConditionDegraded,
                        Status:             metav1.ConditionFalse,
                        Reason:             reconcileCompleted,
                        Message:            reconcileCompletedMessage,
                        ObservedGeneration: req.Instance.Generation,
                })

                if req.Conditions.HasCondition(hcov1beta1.ConditionProgressing) { // (#chart 9)

                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 10,11)
                                Type:               hcov1beta1.ConditionUpgradeable,
                                Status:             metav1.ConditionFalse,
                                Reason:             commonProgressingReason,
                                Message:            "HCO is not Upgradeable due to progressing components",
                                ObservedGeneration: req.Instance.Generation,
                        })

                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 12,13)
                                Type:               hcov1beta1.ConditionAvailable,
                                Status:             metav1.ConditionTrue,
                                Reason:             reconcileCompleted,
                                Message:            reconcileCompletedMessage,
                                ObservedGeneration: req.Instance.Generation,
                        })

                } else {

                        req.Conditions.SetStatusCondition(metav1.Condition{ // (#chart 14)
                                Type:               hcov1beta1.ConditionProgressing,
                                Status:             metav1.ConditionFalse,
                                Reason:             reconcileCompleted,
                                Message:            reconcileCompletedMessage,
                                ObservedGeneration: req.Instance.Generation,
                        })

                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 15,16)
                                Type:               hcov1beta1.ConditionUpgradeable,
                                Status:             metav1.ConditionTrue,
                                Reason:             reconcileCompleted,
                                Message:            reconcileCompletedMessage,
                                ObservedGeneration: req.Instance.Generation,
                        })

                        req.Conditions.SetStatusConditionIfUnset(metav1.Condition{ // (#chart 17,18)
                                Type:               hcov1beta1.ConditionAvailable,
                                Status:             metav1.ConditionTrue,
                                Reason:             reconcileCompleted,
                                Message:            reconcileCompletedMessage,
                                ObservedGeneration: req.Instance.Generation,
                        })

                }
        }

        return allComponentsAreUp
}

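// completeReconciliation aggregates the component conditions, closes the upgrade mode when
// the upgrade is done, and emits an event whenever HCO's readiness changes.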
func (r *ReconcileHyperConverged) completeReconciliation(req *common.HcoRequest) {
        allComponentsAreUp := r.aggregateComponentConditions(req)

        hcoReady := false

        if allComponentsAreUp {
                req.Logger.Info("No component operator reported negatively")

                // if in upgrade mode, all the components are upgraded, and nothing is pending to be written,
                // then the upgrade is completed
                if r.upgradeMode && req.ComponentUpgradeInProgress && !req.Dirty {
                        // update the new version only when upgrade is completed
                        UpdateVersion(&req.Instance.Status, hcoVersionName, r.ownVersion)
                        req.StatusDirty = true

                        r.upgradeMode = false
                        req.ComponentUpgradeInProgress = false
                        req.Logger.Info(fmt.Sprintf("Successfully upgraded to version %s", r.ownVersion))
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "UpgradeHCO", fmt.Sprintf("Successfully upgraded to version %s", r.ownVersion))
                }

                // If not in upgrade mode, then we're ready, because all the operators reported positive conditions.
                // If the upgrade was done successfully, r.upgradeMode is already false here.
                hcoReady = !r.upgradeMode
        }

        if r.upgradeMode {
                // override the Progressing condition during upgrade
                req.Conditions.SetStatusCondition(metav1.Condition{
                        Type:               hcov1beta1.ConditionProgressing,
                        Status:             metav1.ConditionTrue,
                        Reason:             "HCOUpgrading",
                        Message:            "HCO is now upgrading to version " + r.ownVersion,
                        ObservedGeneration: req.Instance.Generation,
                })
        }

        // check if HCO was available before this reconcile loop
        hcoWasAvailable := apimetav1.IsStatusConditionTrue(req.Instance.Status.Conditions, hcov1beta1.ConditionAvailable) &&
                apimetav1.IsStatusConditionFalse(req.Instance.Status.Conditions, hcov1beta1.ConditionProgressing)

        if hcoReady {
                // If no operator whose conditions we are watching reports an error, then it is safe
                // to set readiness.
                if !hcoWasAvailable { // only when it becomes available
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "ReconcileHCO", "HCO Reconcile completed successfully")
                }
        } else {
                // If for any reason we marked ourselves !upgradeable...then unset readiness
                if !r.upgradeMode && hcoWasAvailable { // only when it becomes not ready
                        r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeWarning, "ReconcileHCO", "Not all the operators are ready")
                }
        }

        r.updateConditions(req)
}

// updateConditions syncs the in-memory conditions into the instance's status, and refreshes
// the system health status and its metric.
func (r *ReconcileHyperConverged) updateConditions(req *common.HcoRequest) {
        conditions := slices.Clone(req.Instance.Status.Conditions)

        for _, condType := range common.HcoConditionTypes {
                cond, found := req.Conditions[condType]
                if !found {
                        cond = metav1.Condition{
                                Type:               condType,
                                Status:             metav1.ConditionUnknown,
                                Message:            "Unknown Status",
                                Reason:             "StatusUnknown",
                                ObservedGeneration: req.Instance.Generation,
                        }
                }

                apimetav1.SetStatusCondition(&conditions, cond)
        }

        // Detect a "TaintedConfiguration" state, and raise a corresponding event
        r.detectTaintedConfiguration(req, &conditions)

        if !reflect.DeepEqual(conditions, req.Instance.Status.Conditions) {
                req.Instance.Status.Conditions = conditions
                req.StatusDirty = true
        }

        systemHealthStatus := r.getSystemHealthStatus(req.Conditions)

        if systemHealthStatus != req.Instance.Status.SystemHealthStatus {
                req.Instance.Status.SystemHealthStatus = systemHealthStatus
                req.StatusDirty = true
        }

        metrics.SetHCOMetricSystemHealthStatus(getNumericalHealthStatus(systemHealthStatus))
}

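// setLabels makes sure the HyperConverged CR carries the app label, defaulting it to the CR name.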
func (r *ReconcileHyperConverged) setLabels(req *common.HcoRequest) {
        if req.Instance.Labels == nil {
                req.Instance.Labels = map[string]string{}
        }
        if req.Instance.Labels[hcoutil.AppLabel] == "" {
                req.Instance.Labels[hcoutil.AppLabel] = req.Instance.Name
                req.Dirty = true
        }
}

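// detectTaintedConfiguration raises the TaintedConfiguration condition when any of the JSON
// Patch annotations is present with at least one change, and drops the condition once the
// annotations are gone.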
func (r *ReconcileHyperConverged) detectTaintedConfiguration(req *common.HcoRequest, conditions *[]metav1.Condition) {
        conditionExists := apimetav1.IsStatusConditionTrue(req.Instance.Status.Conditions, hcov1beta1.ConditionTaintedConfiguration)

        // A tainted configuration state is indicated by the
        // presence of at least one of the JSON Patch annotations
        tainted := false
        for _, jpa := range JSONPatchAnnotationNames {
                NumOfChanges := 0
                jsonPatch, exists := req.Instance.Annotations[jpa]
                if exists {
                        if NumOfChanges = getNumOfChangesJSONPatch(jsonPatch); NumOfChanges > 0 {
                                tainted = true
                        }
                }
                metrics.SetUnsafeModificationCount(NumOfChanges, jpa)
        }

        if tainted {
                apimetav1.SetStatusCondition(conditions, metav1.Condition{
                        Type:               hcov1beta1.ConditionTaintedConfiguration,
                        Status:             metav1.ConditionTrue,
                        Reason:             taintedConfigurationReason,
                        Message:            taintedConfigurationMessage,
                        ObservedGeneration: req.Instance.Generation,
                })

                if !conditionExists {
                        // Only log at "first occurrence" of detection
                        req.Logger.Info("Detected tainted configuration state for HCO")
                }
        } else { // !tainted

                // For the sake of keeping the JSONPatch backdoor in low profile,
                // we just remove the condition instead of False'ing it.
                if conditionExists {
                        apimetav1.RemoveStatusCondition(conditions, hcov1beta1.ConditionTaintedConfiguration)

                        req.Logger.Info("Detected untainted configuration state for HCO")
                }
        }
}

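// getSystemHealthStatus maps the aggregated conditions to a coarse health string: not
// Available or Degraded means "error", ReconcileComplete false or Progressing means
// "warning", and anything else is "healthy".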
func (r *ReconcileHyperConverged) getSystemHealthStatus(conditions common.HcoConditions) string {
        if isSystemHealthStatusError(conditions) {
                return systemHealthStatusError
        }

        if isSystemHealthStatusWarning(conditions) {
                return systemHealthStatusWarning
        }

        return systemHealthStatusHealthy
}

func isSystemHealthStatusError(conditions common.HcoConditions) bool {
        return !conditions.IsStatusConditionTrue(hcov1beta1.ConditionAvailable) || conditions.IsStatusConditionTrue(hcov1beta1.ConditionDegraded)
}

func isSystemHealthStatusWarning(conditions common.HcoConditions) bool {
        return !conditions.IsStatusConditionTrue(hcov1beta1.ConditionReconcileComplete) || conditions.IsStatusConditionTrue(hcov1beta1.ConditionProgressing)
}

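// getNumOfChangesJSONPatch counts the operations in a JSON Patch annotation; an unparsable
// patch counts as zero changes.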
func getNumOfChangesJSONPatch(jsonPatch string) int {
        patches, err := jsonpatch.DecodePatch([]byte(jsonPatch))
        if err != nil {
                return 0
        }
        return len(patches)
}

func getNumericalHealthStatus(status string) float64 {
        healthStatusCodes := map[string]float64{
                systemHealthStatusHealthy: metrics.SystemHealthStatusHealthy,
                systemHealthStatusWarning: metrics.SystemHealthStatusWarning,
                systemHealthStatusError:   metrics.SystemHealthStatusError,
        }

        return healthStatusCodes[status]
}

func (r *ReconcileHyperConverged) firstLoopInitialization(request *common.HcoRequest) {
        // Initialize operand handler.
        r.operandHandler.FirstUseInitiation(r.scheme, hcoutil.GetClusterInfo(), request.Instance)

        // Avoid re-initializing.
        r.firstLoop = false
}

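// setOperatorUpgradeableStatus propagates the Upgradeable state to the OLM operator
// condition, when running under OLM: the operator is not upgradeable while in upgrade mode,
// or when a component reported Upgradeable=False.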
func (r *ReconcileHyperConverged) setOperatorUpgradeableStatus(request *common.HcoRequest) error {
        if hcoutil.GetClusterInfo().IsManagedByOLM() {

                upgradeable := !r.upgradeMode && request.Upgradeable

                request.Logger.Info("setting the Upgradeable operator condition", requestedStatusKey, upgradeable)

                msg := hcoutil.UpgradeableAllowMessage
                status := metav1.ConditionTrue
                reason := hcoutil.UpgradeableAllowReason

                if !upgradeable {
                        status = metav1.ConditionFalse

                        if r.upgradeMode {
                                msg = hcoutil.UpgradeableUpgradingMessage + r.ownVersion
                                reason = hcoutil.UpgradeableUpgradingReason
                        } else {
                                condition, found := request.Conditions.GetCondition(hcov1beta1.ConditionUpgradeable)
                                if found && condition.Status == metav1.ConditionFalse {
                                        reason = condition.Reason
                                        msg = condition.Message
                                }
                        }
                }

                if err := r.upgradeableCondition.Set(request.Ctx, status, reason, msg); err != nil {
                        request.Logger.Error(err, "can't set the Upgradeable operator condition", requestedStatusKey, upgradeable)
                        return err
                }

        }

        return nil
}

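// migrateBeforeUpgrade applies the version-specific upgrade patches, and removes quick start
// guides and image streams that are no longer deployed by the operator.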
func (r *ReconcileHyperConverged) migrateBeforeUpgrade(req *common.HcoRequest) (bool, error) {
        upgradePatched, err := r.applyUpgradePatches(req)
        if err != nil {
                return false, err
        }

        removeOldQuickStartGuides(req, r.client, r.operandHandler.GetQuickStartNames())
        removeOldImageStream(req, r.client, r.operandHandler.GetImageStreamNames())

        return upgradePatched, nil
}

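// applyUpgradePatches applies the semver-gated upgrade patches to a temporary copy of the CR,
// removes leftover objects listed for removal, and copies the patched spec back into the
// request when it changed; it reports whether the spec was modified.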
func (r *ReconcileHyperConverged) applyUpgradePatches(req *common.HcoRequest) (bool, error) {
        modified := false

        knownHcoVersion, _ := GetVersion(&req.Instance.Status, hcoVersionName)
        if knownHcoVersion == "" {
                knownHcoVersion = "0.0.0"
        }
        knownHcoSV, err := semver.ParseTolerant(knownHcoVersion)
        if err != nil {
                req.Logger.Error(err, "failed to parse the known HyperConverged version")
                return false, err
        }

        tmpInstance, err := upgradepatch.ApplyUpgradePatch(req.Logger, req.Instance, knownHcoSV)
        if err != nil {
                return false, err
        }

        for _, p := range upgradepatch.GetObjectsToBeRemoved() {
                removed, err := r.removeLeftover(req, knownHcoSV, p)
                if err != nil {
                        return removed, err
                }
        }

        if !reflect.DeepEqual(tmpInstance.Spec, req.Instance.Spec) {
                req.Logger.Info("updating HCO spec as a result of upgrade patches")
                tmpInstance.Spec.DeepCopyInto(&req.Instance.Spec)
                modified = true
                req.Dirty = true
        }

        return modified, nil
}

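// removeLeftover deletes a leftover object when the known version falls in the affected
// range, after dropping its entry from the relatedObjects list.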
func (r *ReconcileHyperConverged) removeLeftover(req *common.HcoRequest, knownHcoSV semver.Version, p upgradepatch.ObjectToBeRemoved) (bool, error) {
        if p.IsAffectedRange(knownHcoSV) {
                removeRelatedObject(req, r.client, p.GroupVersionKind, p.ObjectKey)
                u := &unstructured.Unstructured{}
                u.SetGroupVersionKind(p.GroupVersionKind)
                gerr := r.client.Get(req.Ctx, p.ObjectKey, u)
                if gerr != nil {
                        if apierrors.IsNotFound(gerr) {
                                return false, nil
                        }

                        req.Logger.Error(gerr, "failed looking for leftovers", "objectToBeRemoved", p)
                        return false, gerr
                }
                return r.deleteObj(req, u, false)
        }
        return false, nil
}

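// deleteObj deletes an object through hcoutil.EnsureDeleted, and emits a "Killing" event
// when something was actually removed.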
func (r *ReconcileHyperConverged) deleteObj(req *common.HcoRequest, obj client.Object, protectNonHCOObjects bool) (bool, error) {
        removed, err := hcoutil.EnsureDeleted(req.Ctx, r.client, obj, req.Instance.Name, req.Logger, false, false, protectNonHCOObjects)

        if err != nil {
                req.Logger.Error(
                        err,
                        fmt.Sprintf("failed to delete %s", obj.GetObjectKind().GroupVersionKind().Kind),
                        "name",
                        obj.GetName(),
                )

                return removed, err
        }

        if removed {
                r.eventEmitter.EmitEvent(
                        req.Instance, corev1.EventTypeNormal, "Killing",
                        fmt.Sprintf("Removed %s %s", obj.GetName(), obj.GetObjectKind().GroupVersionKind().Kind),
                )
        }

        return removed, nil
}

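// removeRelatedObject drops the object from the status.relatedObjects list; when the removed
// object is a CRD, entries whose resource was served by that CRD are dropped as well.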
func removeRelatedObject(req *common.HcoRequest, cl client.Client, gvk schema.GroupVersionKind, objectKey types.NamespacedName) {
        refs := make([]corev1.ObjectReference, 0, len(req.Instance.Status.RelatedObjects))
        foundRO := false

        crdGVK := schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}

        for _, obj := range req.Instance.Status.RelatedObjects {
                apiVersion, kind := gvk.ToAPIVersionAndKind()
                if obj.APIVersion == apiVersion && obj.Kind == kind && obj.Namespace == objectKey.Namespace && obj.Name == objectKey.Name {
                        foundRO = true
                        req.Logger.Info("Removed relatedObject entry for", "gvk", gvk, "objectKey", objectKey)
                        continue
                }
                if reflect.DeepEqual(gvk, crdGVK) {
                        mapping, err := cl.RESTMapper().RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version)
                        if err == nil && mapping != nil && mapping.Resource.GroupResource().String() == objectKey.Name {
                                foundRO = true
                                req.Logger.Info("Removed relatedObject on CRD removal for", "gvk", gvk, "objectKey", objectKey)
                                continue
                        }
                }
                refs = append(refs, obj)
        }

        if foundRO {
                req.Instance.Status.RelatedObjects = refs
                req.StatusDirty = true
        }
}

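// checkFinalizers adds the HCO finalizer when the CR is not being deleted; it returns false
// when a deletion is in progress.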
func checkFinalizers(req *common.HcoRequest) bool {
        if req.Instance.DeletionTimestamp.IsZero() {
                // Add the finalizer if it's not there
                if !slices.Contains(req.Instance.Finalizers, FinalizerName) {
                        req.Logger.Info("setting a finalizer (with fully qualified name)")
                        req.Instance.Finalizers = append(req.Instance.Finalizers, FinalizerName)
                        req.Dirty = true
                }
                return true
        }
        return false
}