kubevirt / hyperconverged-cluster-operator / 19509545460

19 Nov 2025 04:57PM UTC coverage: 77.008% (-0.2%) from 77.24%
Pull Request #3871: Allow virt-operator deployment on Hosted Control Planes Cluster
Merge d84db5bfe into 2350d213b

65 of 127 new or added lines in 3 files covered (51.18%).

15 existing lines in 1 file now uncovered.

7988 of 10373 relevant lines covered (77.01%)

1.84 hits per line
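
(Consistency check: 7988 / 10373 ≈ 77.008%, matching the headline coverage; at 1.84 hits per line, the run recorded roughly 1.84 × 10373 ≈ 19,000 line hits in total.)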

Source File

/controllers/nodes/nodes_controller.go (file coverage: 50.28%)
package nodes

import (
        "context"
        "fmt"
        "time"

        "github.com/go-logr/logr"
        "github.com/google/uuid"
        operatorhandler "github.com/operator-framework/operator-lib/handler"
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/errors"
        k8stypes "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/util/workqueue"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/event"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        logf "sigs.k8s.io/controller-runtime/pkg/log"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        "sigs.k8s.io/controller-runtime/pkg/source"

        hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1"
        "github.com/kubevirt/hyperconverged-cluster-operator/pkg/nodeinfo"
        hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
)

const (
        // Node role labels
        labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
        labelNodeRoleMaster       = "node-role.kubernetes.io/master"
        labelNodeRoleWorker       = "node-role.kubernetes.io/worker"

        // HyperShift label value for worker nodes
        hypershiftLabelValue = "set-to-allow-kubevirt-deployment"
)

var (
        log               = logf.Log.WithName("controller_nodes")
        randomConstSuffix = uuid.New().String()

        hcoReq = reconcile.Request{
                NamespacedName: k8stypes.NamespacedName{
                        Name:      "hyperconverged-req-" + randomConstSuffix,
                        Namespace: hcoutil.GetOperatorNamespaceFromEnv(),
                },
        }
)

// startupNodeLabeler is a runnable that labels all nodes after the manager cache starts
type startupNodeLabeler struct {
        reconciler *ReconcileNodeCounter
}

// Start implements manager.Runnable
func (s *startupNodeLabeler) Start(ctx context.Context) error {
        log.Info("Starting node labeling after cache is ready")

        // Label all nodes now that the cache is ready
        err := s.reconciler.labelAllNodesAtStartup(ctx)
        if err != nil {
                log.Error(err, "Failed to label nodes at startup")
                // Don't return error to avoid stopping the manager
                for range time.Tick(30 * time.Second) {
                        if err = s.reconciler.labelAllNodesAtStartup(ctx); err == nil {
                                break
                        }

                        log.Error(err, "Failed to label nodes at startup")
                }
        }

        // Keep running until context is cancelled
        <-ctx.Done()
        return nil
}

// NeedLeaderElection implements manager.LeaderElectionRunnable
func (s *startupNodeLabeler) NeedLeaderElection() bool {
        return true
}

// RegisterReconciler creates a new Nodes Reconciler and registers it into manager.
func RegisterReconciler(mgr manager.Manager, nodeEvents chan<- event.GenericEvent) error {
        reconciler := newReconciler(mgr, nodeEvents)

        // Add a runnable to label all nodes after the cache starts
        if reconciler.shouldLabelNodes {
                if err := mgr.Add(&startupNodeLabeler{reconciler: reconciler}); err != nil {
                        return fmt.Errorf("failed to add startup node labeler: %w", err)
                }
        }

        return add(mgr, reconciler)
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, nodeEvents chan<- event.GenericEvent) *ReconcileNodeCounter {
        clusterInfo := hcoutil.GetClusterInfo()

        // Evaluate once at initialization whether we should label nodes for HyperShift
        shouldLabelNodes := clusterInfo.IsOpenshift() && clusterInfo.IsHyperShiftManaged()

        log.Info("Initializing nodes controller",
                "isOpenshift", clusterInfo.IsOpenshift(),
                "isHyperShiftManaged", clusterInfo.IsHyperShiftManaged(),
                "shouldLabelNodes", shouldLabelNodes,
        )

        r := &ReconcileNodeCounter{
                Client:           mgr.GetClient(),
                nodeEvents:       nodeEvents,
                shouldLabelNodes: shouldLabelNodes,
        }

        if shouldLabelNodes {
                r.HandleHyperShiftNodeLabeling = HandleHyperShiftNodeLabeling
        } else {
                r.HandleHyperShiftNodeLabeling = func(_ context.Context, _ client.Client, _ string, _ logr.Logger) error {
                        return nil
                }
        }

        return r
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
        // Create a new controller
        c, err := controller.New("nodes-controller", mgr, controller.Options{Reconciler: r})
        if err != nil {
                return err
        }

        // Watch for changes to the cluster's nodes
        err = c.Watch(
                source.Kind[*corev1.Node](
                        mgr.GetCache(), &corev1.Node{},
                        &operatorhandler.InstrumentedEnqueueRequestForObject[*corev1.Node]{},
                        nodeCountChangePredicate{},
                ))
        if err != nil {
                return err
        }

        return c.Watch(
                source.Kind[*hcov1beta1.HyperConverged](
                        mgr.GetCache(), &hcov1beta1.HyperConverged{},
                        &handler.TypedEnqueueRequestForObject[*hcov1beta1.HyperConverged]{},
                        hyperconvergedPredicate{},
                ))
}

// ReconcileNodeCounter reconciles the nodes count
type ReconcileNodeCounter struct {
        // This client, initialized using mgr.Client() above, is a split client
        // that reads objects from the cache and writes to the apiserver
        client.Client
        HyperConvergedQueue          workqueue.TypedRateLimitingInterface[reconcile.Request]
        nodeEvents                   chan<- event.GenericEvent
        shouldLabelNodes             bool // Cached value for HyperShift node labeling decision
        HandleHyperShiftNodeLabeling func(ctx context.Context, cli client.Client, nodeName string, logger logr.Logger) error
}

// Reconcile updates the nodes count on ClusterInfo singleton
func (r *ReconcileNodeCounter) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
        logger, err := logr.FromContext(ctx)
        if err != nil {
                logger = log
        }
        if req == hcoReq {
                // This is a request triggered by a change in the HyperConverged CR
                logger.Info("Triggered by a HyperConverged CR change")
        } else {
                logger.Info("Triggered by a node change", "node name", req.Name)
        }

        logger.Info("Reading the latest HyperConverged CR")
        hc, err := r.readHyperConverged(ctx)
        if err != nil {
                return reconcile.Result{}, fmt.Errorf("failed to read the HyperConverged CR; %v", err)
        }

        nodeInfoChanged, err := nodeinfo.HandleNodeChanges(ctx, r, hc, logger)
        if err != nil {
                return reconcile.Result{}, err
        }

        // Handle HyperShift node labeling for hosted control plane clusters
        // Only process if this is a node event (not HCO event)
        if req != hcoReq {
                if err := r.HandleHyperShiftNodeLabeling(ctx, r.Client, req.Name, logger); err != nil {
                        logger.Error(err, "Failed to handle HyperShift node labeling")
                        return reconcile.Result{}, err
                }
        }

        if hc == nil || !hc.DeletionTimestamp.IsZero() {
                return reconcile.Result{}, nil
        }

        if nodeInfoChanged {
                r.nodeEvents <- event.GenericEvent{}
        }

        return reconcile.Result{}, nil
}

func (r *ReconcileNodeCounter) readHyperConverged(ctx context.Context) (*hcov1beta1.HyperConverged, error) {
        hc := &hcov1beta1.HyperConverged{}
        hcoKey := k8stypes.NamespacedName{
                Name:      hcoutil.HyperConvergedName,
                Namespace: hcoutil.GetOperatorNamespaceFromEnv(),
        }

        err := r.Get(ctx, hcoKey, hc)
        if err != nil {
                if errors.IsNotFound(err) {
                        return nil, nil
                }
                return nil, err
        }

        return hc, nil
}

// labelAllNodesAtStartup labels all worker nodes at controller startup for HyperShift clusters
func (r *ReconcileNodeCounter) labelAllNodesAtStartup(ctx context.Context) error {
        log.Info("Labeling all worker nodes at startup for HyperShift")

        // Get all nodes
        nodesList := &corev1.NodeList{}
        if err := r.List(ctx, nodesList); err != nil {
                return fmt.Errorf("failed to list nodes for HyperShift labeling at startup: %w", err)
        }

        for i := range nodesList.Items {
                node := &nodesList.Items[i]
                if err := labelNode(ctx, r.Client, node, log); err != nil {
                        log.Error(err, "Failed to label node at startup", "node", node.Name)
                        // Continue with other nodes even if one fails
                }
        }

        log.Info("Completed labeling nodes at startup", "totalNodes", len(nodesList.Items))
        return nil
}

// HandleHyperShiftNodeLabeling manages the control-plane label on a specific worker node for HyperShift managed clusters
func HandleHyperShiftNodeLabeling(ctx context.Context, cli client.Client, nodeName string, logger logr.Logger) error {
        // Get the specific node
        node := &corev1.Node{}
        if err := cli.Get(ctx, client.ObjectKey{Name: nodeName}, node); err != nil {
                if errors.IsNotFound(err) {
                        // Node was deleted, nothing to do
                        logger.V(1).Info("Node not found, skipping HyperShift labeling", "node", nodeName)
                        return nil
                }
                return fmt.Errorf("failed to get node %s for HyperShift labeling: %w", nodeName, err)
        }

        return labelNode(ctx, cli, node, logger)
}

// labelNode applies the HyperShift label on a single node
func labelNode(ctx context.Context, cli client.Client, node *corev1.Node, logger logr.Logger) error {
        needsUpdate := false
        updatedNode := node.DeepCopy()

        // Add label to worker nodes that don't have it
        if isWorkerNode(node) {
                currentValue, hasLabel := updatedNode.Labels[labelNodeRoleControlPlane]
                if !hasLabel || currentValue != hypershiftLabelValue {
                        if updatedNode.Labels == nil {
                                updatedNode.Labels = make(map[string]string)
                        }
                        updatedNode.Labels[labelNodeRoleControlPlane] = hypershiftLabelValue
                        needsUpdate = true
                        logger.Info("Adding control-plane label to worker node",
                                "node", node.Name,
                                "labelValue", hypershiftLabelValue,
                        )
                }
        }

        if needsUpdate {
                if err := cli.Update(ctx, updatedNode); err != nil {
                        return fmt.Errorf("failed to update node %s: %w", node.Name, err)
                }
                logger.Info("Successfully updated node labels",
                        "node", node.Name,
                )
        }

        return nil
}

// isWorkerNode checks if a node has the worker role label
func isWorkerNode(node *corev1.Node) bool {
        // A node is a worker if it has the worker label and doesn't have control-plane label
        _, hasWorkerLabel := node.Labels[labelNodeRoleWorker]
        _, hasControlPlaneLabel := node.Labels[labelNodeRoleControlPlane]

        return hasWorkerLabel && !hasControlPlaneLabel
}
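
Most of the uncovered lines above sit in the new HyperShift labeling paths (startupNodeLabeler, newReconciler, and the error branches of labelNode). A minimal sketch of how the happy path of labelNode could be exercised with the controller-runtime fake client follows; the test and node names are hypothetical, and it assumes the package-level identifiers defined above.

package nodes

import (
        "context"
        "testing"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/client/fake"
        logf "sigs.k8s.io/controller-runtime/pkg/log"
)

// Hypothetical test sketch: seed a worker node without the control-plane
// label, run labelNode against a fake client, and check that the HyperShift
// label value was applied.
func TestLabelNodeAddsHyperShiftLabel(t *testing.T) {
        ctx := context.Background()

        node := &corev1.Node{
                ObjectMeta: metav1.ObjectMeta{
                        Name:   "worker-0",
                        Labels: map[string]string{labelNodeRoleWorker: ""},
                },
        }

        // The fake client's default scheme already registers core/v1 types.
        cli := fake.NewClientBuilder().WithObjects(node).Build()

        // Read the node back first so the update carries the tracked resourceVersion.
        fetched := &corev1.Node{}
        if err := cli.Get(ctx, client.ObjectKey{Name: "worker-0"}, fetched); err != nil {
                t.Fatalf("failed to get node: %v", err)
        }

        if err := labelNode(ctx, cli, fetched, logf.Log); err != nil {
                t.Fatalf("labelNode failed: %v", err)
        }

        updated := &corev1.Node{}
        if err := cli.Get(ctx, client.ObjectKey{Name: "worker-0"}, updated); err != nil {
                t.Fatalf("failed to read node back: %v", err)
        }
        if got := updated.Labels[labelNodeRoleControlPlane]; got != hypershiftLabelValue {
                t.Errorf("expected label value %q, got %q", hypershiftLabelValue, got)
        }
}

The startup path could be covered the same way: seed several nodes into the fake client, build a ReconcileNodeCounter around it, and call labelAllNodesAtStartup directly.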