
heathcliff26 / kube-upgrade, build 19362472155

14 Nov 2025 10:59AM UTC. Coverage: 61.734% (-3.9%) from 65.627%.

Pull Request #176: upgrade-controller: Use controller-runtime client with cache
Merge 470120908 into 4aba69003

18 of 36 new or added lines in 1 file covered (50.0%).
53 existing lines in 3 files are now uncovered.
997 of 1615 relevant lines covered (61.73%).
10.82 hits per line.

Source File: /pkg/upgrade-controller/controller/controller.go (file coverage: 55.15%)

Annotations in the listing: n✔ = number of hits on the line, × = line not covered, NEW = line new or added in this pull request, UNCOV = existing line that is now uncovered.
Line | Hits    | Source
   1 |         | package controller
   2 |         | 
   3 |         | import (
   4 |         |         "context"
   5 |         |         "fmt"
   6 |         |         "time"
   7 |         | 
   8 |         |         "github.com/go-logr/logr"
   9 |         |         api "github.com/heathcliff26/kube-upgrade/pkg/apis/kubeupgrade/v1alpha3"
  10 |         |         "github.com/heathcliff26/kube-upgrade/pkg/constants"
  11 |         |         "golang.org/x/mod/semver"
  12 |         |         appv1 "k8s.io/api/apps/v1"
  13 |         |         corev1 "k8s.io/api/core/v1"
  14 |         |         "k8s.io/apimachinery/pkg/runtime"
  15 |         |         clientgoscheme "k8s.io/client-go/kubernetes/scheme"
  16 |         |         "k8s.io/client-go/rest"
  17 |         |         "k8s.io/klog/v2"
  18 |         |         ctrl "sigs.k8s.io/controller-runtime"
  19 |         |         "sigs.k8s.io/controller-runtime/pkg/cache"
  20 |         |         "sigs.k8s.io/controller-runtime/pkg/client"
  21 |         |         "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
  22 |         |         "sigs.k8s.io/controller-runtime/pkg/healthz"
  23 |         |         "sigs.k8s.io/controller-runtime/pkg/manager"
  24 |         |         "sigs.k8s.io/controller-runtime/pkg/manager/signals"
  25 |         | )
  26 |         | 
  27 |         | const (
  28 |         |         defaultUpgradedImage = "ghcr.io/heathcliff26/kube-upgraded"
  29 |         |         upgradedImageEnv     = "UPGRADED_IMAGE"
  30 |         |         upgradedTagEnv       = "UPGRADED_TAG"
  31 |         | )
  32 |         | 
  33 | 2✔      | func init() {
  34 | 2✔      |         ctrl.SetLogger(klog.NewKlogr())
  35 | 2✔      | }
  36 |         | 
  37 |         | type controller struct {
  38 |         |         client.Client
  39 |         |         manager       manager.Manager
  40 |         |         namespace     string
  41 |         |         upgradedImage string
  42 |         | }
  43 |         | 
  44 |         | // Run make generate when changing these comments
  45 |         | // +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans,verbs=get;list;watch;create;update;patch;delete
  46 |         | // +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans/status,verbs=get;update;patch
  47 |         | // +kubebuilder:rbac:groups="",resources=nodes,verbs=list;update
  48 |         | 
  49 | 1✔      | func NewController(name string) (*controller, error) {
  50 | 1✔      |         config, err := rest.InClusterConfig()
  51 | 2✔      |         if err != nil {
  52 | 1✔      |                 return nil, err
  53 | 1✔      |         }
  54 |         | 
  55 | NEW ×   |         ns, err := GetNamespace()
  56 | ×       |         if err != nil {
  57 | ×       |                 return nil, err
  58 | ×       |         }
  59 |         | 
  60 | NEW ×   |         scheme := runtime.NewScheme()
  61 | NEW ×   |         err = api.AddToScheme(scheme)
  62 | NEW ×   |         if err != nil {
  63 | NEW ×   |                 return nil, err
  64 | NEW ×   |         }
  65 | NEW ×   |         err = clientgoscheme.AddToScheme(scheme)
  66 | ×       |         if err != nil {
  67 | ×       |                 return nil, err
  68 | ×       |         }
  69 |         | 
  70 | ×       |         mgr, err := ctrl.NewManager(config, manager.Options{
  71 | NEW ×   |                 Scheme:                        scheme,
  72 | ×       |                 LeaderElection:                true,
  73 | ×       |                 LeaderElectionNamespace:       ns,
  74 | ×       |                 LeaderElectionID:              name,
  75 | ×       |                 LeaderElectionReleaseOnCancel: true,
  76 | ×       |                 LeaseDuration:                 Pointer(time.Minute),
  77 | ×       |                 RenewDeadline:                 Pointer(10 * time.Second),
  78 | ×       |                 RetryPeriod:                   Pointer(5 * time.Second),
  79 | ×       |                 HealthProbeBindAddress:        ":9090",
  80 | NEW ×   |                 Cache: cache.Options{
  81 | NEW ×   |                         DefaultNamespaces: map[string]cache.Config{ns: {}},
  82 | NEW ×   |                 },
  83 | ×       |         })
  84 | ×       |         if err != nil {
  85 | ×       |                 return nil, err
  86 | ×       |         }
  87 | ×       |         err = mgr.AddHealthzCheck("healthz", healthz.Ping)
  88 | ×       |         if err != nil {
  89 | ×       |                 return nil, err
  90 | ×       |         }
  91 | ×       |         err = mgr.AddReadyzCheck("readyz", healthz.Ping)
  92 | ×       |         if err != nil {
  93 | ×       |                 return nil, err
  94 | ×       |         }
  95 |         | 
  96 | ×       |         return &controller{
  97 | ×       |                 Client:        mgr.GetClient(),
  98 | ×       |                 manager:       mgr,
  99 | ×       |                 namespace:     ns,
 100 | ×       |                 upgradedImage: GetUpgradedImage(),
 101 | ×       |         }, nil
 102 |         | }
 103 |         | 
 104 | ×       | func (c *controller) Run() error {
 105 | NEW ×   |         err := ctrl.NewControllerManagedBy(c.manager).
 106 | NEW ×   |                 For(&api.KubeUpgradePlan{}).
 107 | NEW ×   |                 Owns(&appv1.DaemonSet{}).
 108 | NEW ×   |                 Owns(&corev1.ConfigMap{}).
 109 | NEW ×   |                 Complete(c)
 110 | ×       |         if err != nil {
 111 | ×       |                 return err
 112 | ×       |         }
 113 |         | 
 114 | ×       |         err = ctrl.NewWebhookManagedBy(c.manager).
 115 | ×       |                 For(&api.KubeUpgradePlan{}).
 116 | ×       |                 WithDefaulter(&planMutatingHook{}).
 117 | ×       |                 WithValidator(&planValidatingHook{}).
 118 | ×       |                 Complete()
 119 | ×       |         if err != nil {
 120 | ×       |                 return err
 121 | ×       |         }
 122 |         | 
 123 | ×       |         return c.manager.Start(signals.SetupSignalHandler())
 124 |         | }
 125 |         | 
 126 | ×       | func (c *controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 127 | ×       |         logger := klog.LoggerWithValues(klog.NewKlogr(), "plan", req.Name)
 128 | ×       | 
 129 | ×       |         var plan api.KubeUpgradePlan
 130 | ×       |         err := c.Get(ctx, req.NamespacedName, &plan)
 131 | ×       |         if err != nil {
 132 | ×       |                 logger.Error(err, "Failed to get Plan")
 133 | ×       |                 return ctrl.Result{}, err
 134 | ×       |         }
 135 |         | 
 136 | ×       |         err = c.reconcile(ctx, &plan, logger)
 137 | ×       |         if err != nil {
 138 | ×       |                 return ctrl.Result{}, err
 139 | ×       |         }
 140 |         | 
 141 | ×       |         err = c.Status().Update(ctx, &plan)
 142 | ×       |         if err != nil {
 143 | ×       |                 logger.Error(err, "Failed to update plan status")
 144 | ×       |                 return ctrl.Result{}, err
 145 | ×       |         }
 146 |         | 
 147 | ×       |         return ctrl.Result{
 148 | ×       |                 Requeue:      plan.Status.Summary != api.PlanStatusComplete,
 149 | ×       |                 RequeueAfter: time.Minute,
 150 | ×       |         }, nil
 151 |         | }
 152 |         | 
 153 | 18✔     | func (c *controller) reconcile(ctx context.Context, plan *api.KubeUpgradePlan, logger logr.Logger) error {
 154 | 28✔     |         if plan.Status.Groups == nil {
 155 | 10✔     |                 plan.Status.Groups = make(map[string]string, len(plan.Spec.Groups))
 156 | 10✔     |         }
 157 |         | 
 158 | 35✔     |         if controllerutil.AddFinalizer(plan, constants.Finalizer) {
 159 | 17✔     |                 err := c.Update(ctx, plan)
 160 | 17✔     |                 if err != nil {
 161 | ×       |                         return fmt.Errorf("failed to add finalizer to plan %s: %v", plan.Name, err)
 162 | ×       |                 }
 163 |         |         }
 164 |         | 
 165 | 18✔     |         cmList := &corev1.ConfigMapList{}
 166 | 18✔     |         err := c.List(ctx, cmList, client.InNamespace(c.namespace), client.MatchingLabels{
 167 | 18✔     |                 constants.LabelPlanName: plan.Name,
 168 | 18✔     |         })
 169 | 18✔     |         if err != nil {
 170 | ×       |                 logger.WithValues("plan", plan.Name).Error(err, "Failed to fetch upgraded ConfigMaps")
 171 | ×       |                 return err
 172 | ×       |         }
 173 |         | 
 174 | 18✔     |         dsList := &appv1.DaemonSetList{}
 175 | 18✔     |         err = c.List(ctx, dsList, client.InNamespace(c.namespace), client.MatchingLabels{
 176 | 18✔     |                 constants.LabelPlanName: plan.Name,
 177 | 18✔     |         })
 178 | 18✔     |         if err != nil {
 179 | ×       |                 logger.WithValues("plan", plan.Name).Error(err, "Failed to fetch upgraded DaemonSets")
 180 | ×       |                 return err
 181 | ×       |         }
 182 |         | 
 183 | 19✔     |         if !plan.DeletionTimestamp.IsZero() {
 184 | 1✔      |                 logger.WithValues("plan", plan.Name).Info("Plan is being deleted, cleaning up resources")
 185 | 1✔      |                 for _, daemon := range dsList.Items {
 186 | NEW ×   |                         err = c.Delete(ctx, &daemon)
 187 | ×       |                         if err != nil {
 188 | ×       |                                 return fmt.Errorf("failed to delete DaemonSet %s: %v", daemon.Name, err)
 189 | ×       |                         }
 190 | UNCOV × |                         logger.WithValues("name", daemon.Name).Info("Deleted DaemonSet")
 191 |         |                 }
 192 | 1✔      |                 for _, cm := range cmList.Items {
 193 | NEW ×   |                         err := c.Delete(ctx, &cm)
 194 | UNCOV × |                         if err != nil {
 195 | ×       |                                 return fmt.Errorf("failed to delete ConfigMap %s: %v", cm.Name, err)
 196 | ×       |                         }
 197 | UNCOV × |                         logger.WithValues("name", cm.Name).Info("Deleted ConfigMap")
 198 |         |                 }
 199 | 1✔      |                 controllerutil.RemoveFinalizer(plan, constants.Finalizer)
 200 | 1✔      |                 err = c.Update(ctx, plan)
 201 | 1✔      |                 if err != nil {
 202 | ×       |                         return fmt.Errorf("failed to remove finalizer from plan %s: %v", plan.Name, err)
 203 | ×       |                 }
 204 | 1✔      |                 logger.WithValues("plan", plan.Name).Info("Finished cleanup of resources")
 205 | 1✔      |                 return nil
 206 |         |         }
 207 |         | 
 208 | 17✔     |         daemons := make(map[string]appv1.DaemonSet, len(plan.Spec.Groups))
 209 | 25✔     |         for _, daemon := range dsList.Items {
 210 | 8✔      |                 group := daemon.Labels[constants.LabelNodeGroup]
 211 | 14✔     |                 if _, ok := plan.Spec.Groups[group]; ok {
 212 | 6✔      |                         daemons[group] = daemon
 213 | 8✔      |                 } else {
 214 | 2✔      |                         err = c.Delete(ctx, &daemon)
 215 | 2✔      |                         if err != nil {
 216 | ×       |                                 return fmt.Errorf("failed to delete DaemonSet %s: %v", daemon.Name, err)
 217 | ×       |                         }
 218 | 2✔      |                         logger.WithValues("name", daemon.Name).Info("Deleted obsolete DaemonSet")
 219 |         |                 }
 220 |         |         }
 221 |         | 
 222 | 17✔     |         cms := make(map[string]corev1.ConfigMap, len(plan.Spec.Groups))
 223 | 25✔     |         for _, cm := range cmList.Items {
 224 | 8✔      |                 group := cm.Labels[constants.LabelNodeGroup]
 225 | 14✔     |                 if _, ok := plan.Spec.Groups[group]; ok {
 226 | 6✔      |                         cms[group] = cm
 227 | 8✔      |                 } else {
 228 | 2✔      |                         err = c.Delete(ctx, &cm)
 229 | 2✔      |                         if err != nil {
 230 | ×       |                                 return fmt.Errorf("failed to delete ConfigMap %s: %v", cm.Name, err)
 231 | ×       |                         }
 232 | 2✔      |                         logger.WithValues("name", cm.Name).Info("Deleted obsolete ConfigMap")
 233 |         |                 }
 234 |         |         }
 235 |         | 
 236 | 17✔     |         nodesToUpdate := make(map[string][]corev1.Node, len(plan.Spec.Groups))
 237 | 17✔     |         newGroupStatus := make(map[string]string, len(plan.Spec.Groups))
 238 | 17✔     | 
 239 | 54✔     |         for name, cfg := range plan.Spec.Groups {
 240 | 37✔     |                 upgradedCfg := combineConfig(plan.Spec.Upgraded, plan.Spec.Groups[name].Upgraded)
 241 | 37✔     | 
 242 | 37✔     |                 cm, ok := cms[name]
 243 | 68✔     |                 if !ok {
 244 | 31✔     |                         cm = c.NewEmptyUpgradedConfigMap(plan.Name, name)
 245 | 31✔     |                 }
 246 | 37✔     |                 err = c.AttachUpgradedConfigMapData(&cm, upgradedCfg)
 247 | 37✔     |                 if err != nil {
 248 | ×       |                         return fmt.Errorf("failed to attach data to ConfigMap %s: %v", cm.Name, err)
 249 | ×       |                 }
 250 | 43✔     |                 if ok {
 251 | 6✔      |                         err = c.Update(ctx, &cm)
 252 | 37✔     |                 } else {
 253 | 31✔     |                         logger.WithValues("group", name, "config", cm.Name).Info("Creating upgraded ConfigMap for group")
 254 | 31✔     |                         err = c.Create(ctx, &cm)
 255 | 31✔     |                 }
 256 | 37✔     |                 if err != nil {
 257 | ×       |                         return fmt.Errorf("failed to create/update ConfigMap %s: %v", cm.Name, err)
 258 | ×       |                 }
 259 |         | 
 260 | 37✔     |                 daemon, ok := daemons[name]
 261 | 68✔     |                 if !ok {
 262 | 31✔     |                         daemon = c.NewEmptyUpgradedDaemonSet(plan.Name, name)
 263 | 31✔     |                 }
 264 | 37✔     |                 daemon.Spec = c.NewUpgradedDaemonSetSpec(plan.Name, name)
 265 | 37✔     |                 daemon.Spec.Template.Spec.NodeSelector = cfg.Labels
 266 | 37✔     |                 daemon.Spec.Template.Spec.Tolerations = cfg.Tolerations
 267 | 43✔     |                 if ok {
 268 | 6✔      |                         err = c.Update(ctx, &daemon)
 269 | 37✔     |                 } else {
 270 | 31✔     |                         logger.WithValues("group", name, "daemon", daemon.Name).Info("Creating upgraded DaemonSet for group")
 271 | 31✔     |                         err = c.Create(ctx, &daemon)
 272 | 31✔     |                 }
 273 | 37✔     |                 if err != nil {
 274 | ×       |                         return fmt.Errorf("failed to create/update DaemonSet %s: %v", daemon.Name, err)
 275 | ×       |                 }
 276 |         | 
 277 | 37✔     |                 nodeList := &corev1.NodeList{}
 278 | 37✔     |                 err = c.List(ctx, nodeList, client.MatchingLabels(cfg.Labels))
 279 | 37✔     |                 if err != nil {
 280 | ×       |                         logger.WithValues("group", name).Error(err, "Failed to get nodes for group")
 281 | ×       |                         return err
 282 | ×       |                 }
 283 |         | 
 284 | 37✔     |                 status, update, nodes, err := c.reconcileNodes(plan.Spec.KubernetesVersion, plan.Spec.AllowDowngrade, nodeList.Items)
 285 | 37✔     |                 if err != nil {
 286 | ×       |                         logger.WithValues("group", name).Error(err, "Failed to reconcile nodes for group")
 287 | ×       |                         return err
 288 | ×       |                 }
 289 |         | 
 290 | 37✔     |                 newGroupStatus[name] = status
 291 | 37✔     | 
 292 | 60✔     |                 if update {
 293 | 23✔     |                         nodesToUpdate[name] = nodes
 294 | 47✔     |                 } else if plan.Status.Groups[name] != newGroupStatus[name] {
 295 | 10✔     |                         logger.WithValues("group", name, "status", newGroupStatus[name]).Info("Group changed status")
 296 | 10✔     |                 }
 297 |         |         }
 298 |         | 
 299 | 40✔     |         for name, nodes := range nodesToUpdate {
 300 | 29✔     |                 if groupWaitForDependency(plan.Spec.Groups[name].DependsOn, newGroupStatus) {
 301 | 6✔      |                         logger.WithValues("group", name).Info("Group is waiting on dependencies")
 302 | 6✔      |                         newGroupStatus[name] = api.PlanStatusWaiting
 303 | 6✔      |                         continue
 304 | 32✔     |                 } else if plan.Status.Groups[name] != newGroupStatus[name] {
 305 | 15✔     |                         logger.WithValues("group", name, "status", newGroupStatus[name]).Info("Group changed status")
 306 | 15✔     |                 }
 307 |         | 
 308 | 34✔     |                 for _, node := range nodes {
 309 | 17✔     |                         err = c.Update(ctx, &node)
 310 | 17✔     |                         if err != nil {
 311 | ×       |                                 return fmt.Errorf("failed to update node %s: %v", node.GetName(), err)
 312 | ×       |                         }
 313 |         |                 }
 314 |         |         }
 315 |         | 
 316 | 17✔     |         plan.Status.Groups = newGroupStatus
 317 | 17✔     |         plan.Status.Summary = createStatusSummary(plan.Status.Groups)
 318 | 17✔     | 
 319 | 17✔     |         return nil
 320 |         | }
 321 |         | 
 322 | 39✔     | func (c *controller) reconcileNodes(kubeVersion string, downgrade bool, nodes []corev1.Node) (string, bool, []corev1.Node, error) {
 323 | 43✔     |         if len(nodes) == 0 {
 324 | 4✔      |                 return api.PlanStatusUnknown, false, nil, nil
 325 | 4✔      |         }
 326 |         | 
 327 | 35✔     |         completed := 0
 328 | 35✔     |         needUpdate := false
 329 | 35✔     |         errorNodes := make([]string, 0)
 330 | 35✔     | 
 331 | 70✔     |         for i := range nodes {
 332 | 55✔     |                 if nodes[i].Annotations == nil {
 333 | 20✔     |                         nodes[i].Annotations = make(map[string]string)
 334 | 20✔     |                 }
 335 |         | 
 336 |         |                 // Step to cleanup after migration to v0.6.0
 337 |         |                 // TODO: Remove in v0.7.0
 338 | 37✔     |                 if deleteConfigAnnotations(nodes[i].Annotations) {
 339 | 2✔      |                         needUpdate = true
 340 | 2✔      |                 }
 341 |         | 
 342 | 36✔     |                 if !downgrade && semver.Compare(kubeVersion, nodes[i].Status.NodeInfo.KubeletVersion) < 0 {
 343 | 1✔      |                         return api.PlanStatusError, false, nil, fmt.Errorf("node %s version %s is newer than %s, but downgrade is disabled", nodes[i].GetName(), nodes[i].Status.NodeInfo.KubeletVersion, kubeVersion)
 344 | 1✔      |                 }
 345 |         | 
 346 | 46✔     |                 if nodes[i].Annotations[constants.NodeKubernetesVersion] == kubeVersion {
 347 | 12✔     |                         switch nodes[i].Annotations[constants.NodeUpgradeStatus] {
 348 | 11✔     |                         case constants.NodeUpgradeStatusCompleted:
 349 | 11✔     |                                 completed++
 350 | 1✔      |                         case constants.NodeUpgradeStatusError:
 351 | 1✔      |                                 errorNodes = append(errorNodes, nodes[i].GetName())
 352 |         |                         }
 353 | 12✔     |                         continue
 354 |         |                 }
 355 |         | 
 356 | 22✔     |                 nodes[i].Annotations[constants.NodeKubernetesVersion] = kubeVersion
 357 | 22✔     |                 nodes[i].Annotations[constants.NodeUpgradeStatus] = constants.NodeUpgradeStatusPending
 358 | 22✔     | 
 359 | 22✔     |                 needUpdate = true
 360 |         |         }
 361 |         | 
 362 | 34✔     |         var status string
 363 | 35✔     |         if len(errorNodes) > 0 {
 364 | 1✔      |                 status = fmt.Sprintf("%s: The nodes %v are reporting errors", api.PlanStatusError, errorNodes)
 365 | 45✔     |         } else if len(nodes) == completed {
 366 | 11✔     |                 status = api.PlanStatusComplete
 367 | 33✔     |         } else {
 368 | 22✔     |                 status = fmt.Sprintf("%s: %d/%d nodes upgraded", api.PlanStatusProgressing, completed, len(nodes))
 369 | 22✔     |         }
 370 | 34✔     |         return status, needUpdate, nodes, nil
 371 |         | }
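
For context, here is a minimal sketch of how this controller could be started from a main package. The import path is inferred from the file path above and the module path in the imports; the entry-point package, the leader-election ID string, and the error handling are assumptions for illustration, not code from this repository.

// Hypothetical entry point; the actual cmd package in the repository may differ.
package main

import (
        "k8s.io/klog/v2"

        "github.com/heathcliff26/kube-upgrade/pkg/upgrade-controller/controller"
)

func main() {
        // The argument becomes the leader-election ID; the value here is assumed.
        c, err := controller.NewController("kube-upgrade-controller")
        if err != nil {
                // NewController fails outside a cluster (no in-cluster config) or when the scheme/manager cannot be set up.
                klog.Fatalf("failed to create controller: %v", err)
        }

        // Run registers the KubeUpgradePlan reconciler and webhooks, then blocks
        // until the manager receives a shutdown signal.
        if err := c.Run(); err != nil {
                klog.Fatalf("controller exited with error: %v", err)
        }
}

The change under test in PR #176 is the client returned by mgr.GetClient(), which reads from the manager's cache, with the cache limited to the controller's own namespace via cache.Options.DefaultNamespaces (lines 80-82 above); those new lines are currently uncovered.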