• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

heathcliff26 / kube-upgrade / 19367666174

14 Nov 2025 02:30PM UTC coverage: 61.666% (-0.07%) from 61.734%
19367666174

push

github

heathcliff26
upgrade-controller: Only update resources when changes have been made

Move the reconcile logic out of the main Reconcile function.
Only call update if changes have been made.

Signed-off-by: Heathcliff <heathcliff@heathcliff.eu>

103 of 115 new or added lines in 2 files covered. (89.57%)

2 existing lines in 1 file now uncovered.

1007 of 1633 relevant lines covered (61.67%)

10.5 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

51.42
/pkg/upgrade-controller/controller/controller.go
1
package controller
2

3
import (
4
        "context"
5
        "fmt"
6
        "time"
7

8
        "github.com/go-logr/logr"
9
        api "github.com/heathcliff26/kube-upgrade/pkg/apis/kubeupgrade/v1alpha3"
10
        "github.com/heathcliff26/kube-upgrade/pkg/constants"
11
        "golang.org/x/mod/semver"
12
        appv1 "k8s.io/api/apps/v1"
13
        corev1 "k8s.io/api/core/v1"
14
        "k8s.io/apimachinery/pkg/runtime"
15
        clientgoscheme "k8s.io/client-go/kubernetes/scheme"
16
        "k8s.io/client-go/rest"
17
        "k8s.io/klog/v2"
18
        ctrl "sigs.k8s.io/controller-runtime"
19
        "sigs.k8s.io/controller-runtime/pkg/cache"
20
        "sigs.k8s.io/controller-runtime/pkg/client"
21
        "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
22
        "sigs.k8s.io/controller-runtime/pkg/healthz"
23
        "sigs.k8s.io/controller-runtime/pkg/manager"
24
        "sigs.k8s.io/controller-runtime/pkg/manager/signals"
25
)
26

27
const (
	// defaultUpgradedImage is the image used for the upgraded daemon when no
	// override is provided — presumably consumed by GetUpgradedImage; confirm.
	defaultUpgradedImage = "ghcr.io/heathcliff26/kube-upgraded"
	// upgradedImageEnv names the environment variable that overrides the image repository.
	upgradedImageEnv     = "UPGRADED_IMAGE"
	// upgradedTagEnv names the environment variable that overrides the image tag.
	upgradedTagEnv       = "UPGRADED_TAG"
)
32

33
func init() {
2✔
34
        ctrl.SetLogger(klog.NewKlogr())
2✔
35
}
2✔
36

37
// controller reconciles KubeUpgradePlan resources together with the
// per-group ConfigMaps and DaemonSets derived from them.
type controller struct {
	client.Client
	manager       manager.Manager // controller-runtime manager driving the reconcile loop and webhooks
	namespace     string          // namespace the controller runs in and scopes its List/Create calls to
	upgradedImage string          // image for the upgraded DaemonSets, from GetUpgradedImage()
}
43

44
// Run make generate when changing these comments
45
// +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans,verbs=get;list;watch;create;update;patch;delete
46
// +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans/status,verbs=get;update;patch
47
// +kubebuilder:rbac:groups="",resources=nodes,verbs=list;update
48
// +kubebuilder:rbac:groups="",namespace=kube-upgrade,resources=events,verbs=create;patch
49
// +kubebuilder:rbac:groups="coordination.k8s.io",namespace=kube-upgrade,resources=leases,verbs=create;get;update
50
// +kubebuilder:rbac:groups="apps",namespace=kube-upgrade,resources=daemonsets,verbs=list;watch;create;update;delete
51
// +kubebuilder:rbac:groups="",namespace=kube-upgrade,resources=configmaps,verbs=list;watch;create;update;delete
52

53
func NewController(name string) (*controller, error) {
1✔
54
        config, err := rest.InClusterConfig()
1✔
55
        if err != nil {
2✔
56
                return nil, err
1✔
57
        }
1✔
58

59
        ns, err := GetNamespace()
×
60
        if err != nil {
×
61
                return nil, err
×
62
        }
×
63

64
        scheme := runtime.NewScheme()
×
65
        err = api.AddToScheme(scheme)
×
66
        if err != nil {
×
67
                return nil, err
×
68
        }
×
69
        err = clientgoscheme.AddToScheme(scheme)
×
70
        if err != nil {
×
71
                return nil, err
×
72
        }
×
73

74
        mgr, err := ctrl.NewManager(config, manager.Options{
×
75
                Scheme:                        scheme,
×
76
                LeaderElection:                true,
×
77
                LeaderElectionNamespace:       ns,
×
78
                LeaderElectionID:              name,
×
79
                LeaderElectionReleaseOnCancel: true,
×
80
                LeaseDuration:                 Pointer(time.Minute),
×
81
                RenewDeadline:                 Pointer(10 * time.Second),
×
82
                RetryPeriod:                   Pointer(5 * time.Second),
×
83
                HealthProbeBindAddress:        ":9090",
×
84
                Cache: cache.Options{
×
85
                        DefaultNamespaces: map[string]cache.Config{ns: {}},
×
86
                },
×
87
        })
×
88
        if err != nil {
×
89
                return nil, err
×
90
        }
×
91
        err = mgr.AddHealthzCheck("healthz", healthz.Ping)
×
92
        if err != nil {
×
93
                return nil, err
×
94
        }
×
95
        err = mgr.AddReadyzCheck("readyz", healthz.Ping)
×
96
        if err != nil {
×
97
                return nil, err
×
98
        }
×
99

100
        return &controller{
×
101
                Client:        mgr.GetClient(),
×
102
                manager:       mgr,
×
103
                namespace:     ns,
×
104
                upgradedImage: GetUpgradedImage(),
×
105
        }, nil
×
106
}
107

108
func (c *controller) Run() error {
×
109
        err := ctrl.NewControllerManagedBy(c.manager).
×
110
                For(&api.KubeUpgradePlan{}).
×
111
                Owns(&appv1.DaemonSet{}).
×
112
                Owns(&corev1.ConfigMap{}).
×
113
                Complete(c)
×
114
        if err != nil {
×
115
                return err
×
116
        }
×
117

118
        err = ctrl.NewWebhookManagedBy(c.manager).
×
119
                For(&api.KubeUpgradePlan{}).
×
120
                WithDefaulter(&planMutatingHook{}).
×
121
                WithValidator(&planValidatingHook{}).
×
122
                Complete()
×
123
        if err != nil {
×
124
                return err
×
125
        }
×
126

127
        return c.manager.Start(signals.SetupSignalHandler())
×
128
}
129

130
func (c *controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
×
131
        logger := klog.LoggerWithValues(klog.NewKlogr(), "plan", req.Name)
×
132

×
133
        var plan api.KubeUpgradePlan
×
134
        err := c.Get(ctx, req.NamespacedName, &plan)
×
135
        if err != nil {
×
136
                logger.Error(err, "Failed to get Plan")
×
137
                return ctrl.Result{}, err
×
138
        }
×
139

140
        err = c.reconcile(ctx, &plan, logger)
×
141
        if err != nil {
×
142
                return ctrl.Result{}, err
×
143
        }
×
144

145
        err = c.Status().Update(ctx, &plan)
×
146
        if err != nil {
×
147
                logger.Error(err, "Failed to update plan status")
×
148
                return ctrl.Result{}, err
×
149
        }
×
150

151
        return ctrl.Result{
×
152
                Requeue:      plan.Status.Summary != api.PlanStatusComplete,
×
153
                RequeueAfter: time.Minute,
×
154
        }, nil
×
155
}
156

157
// reconcile drives a single plan towards its desired state: it ensures the
// finalizer is set, handles plan deletion (cleanup of owned resources),
// prunes resources for removed groups, reconciles the per-group ConfigMap,
// DaemonSet and node annotations, and finally recomputes the group and
// summary status on the plan object. The caller is responsible for
// persisting plan.Status afterwards.
func (c *controller) reconcile(ctx context.Context, plan *api.KubeUpgradePlan, logger logr.Logger) error {
	if plan.Status.Groups == nil {
		plan.Status.Groups = make(map[string]string, len(plan.Spec.Groups))
	}

	// Only write the plan back when the finalizer was actually added.
	if controllerutil.AddFinalizer(plan, constants.Finalizer) {
		err := c.Update(ctx, plan)
		if err != nil {
			return fmt.Errorf("failed to add finalizer to plan %s: %v", plan.Name, err)
		}
	}

	// Fetch all ConfigMaps and DaemonSets belonging to this plan, identified
	// by the plan-name label in the controller's namespace.
	cmList := &corev1.ConfigMapList{}
	err := c.List(ctx, cmList, client.InNamespace(c.namespace), client.MatchingLabels{
		constants.LabelPlanName: plan.Name,
	})
	if err != nil {
		logger.WithValues("plan", plan.Name).Error(err, "Failed to fetch upgraded ConfigMaps")
		return err
	}

	dsList := &appv1.DaemonSetList{}
	err = c.List(ctx, dsList, client.InNamespace(c.namespace), client.MatchingLabels{
		constants.LabelPlanName: plan.Name,
	})
	if err != nil {
		logger.WithValues("plan", plan.Name).Error(err, "Failed to fetch upgraded DaemonSets")
		return err
	}

	// Deletion branch: remove every owned resource, then release the
	// finalizer so the API server can delete the plan itself.
	if !plan.DeletionTimestamp.IsZero() {
		logger.WithValues("plan", plan.Name).Info("Plan is being deleted, cleaning up resources")
		for _, daemon := range dsList.Items {
			err = c.Delete(ctx, &daemon)
			if err != nil {
				return fmt.Errorf("failed to delete DaemonSet %s: %v", daemon.Name, err)
			}
			logger.WithValues("name", daemon.Name).Info("Deleted DaemonSet")
		}
		for _, cm := range cmList.Items {
			err := c.Delete(ctx, &cm)
			if err != nil {
				return fmt.Errorf("failed to delete ConfigMap %s: %v", cm.Name, err)
			}
			logger.WithValues("name", cm.Name).Info("Deleted ConfigMap")
		}
		controllerutil.RemoveFinalizer(plan, constants.Finalizer)
		err = c.Update(ctx, plan)
		if err != nil {
			return fmt.Errorf("failed to remove finalizer from plan %s: %v", plan.Name, err)
		}
		logger.WithValues("plan", plan.Name).Info("Finished cleanup of resources")
		return nil
	}

	// Index the existing DaemonSets by node group and delete those whose
	// group no longer exists in the spec.
	daemons := make(map[string]*appv1.DaemonSet, len(plan.Spec.Groups))
	for i := range dsList.Items {
		daemon := &dsList.Items[i]
		group := daemon.Labels[constants.LabelNodeGroup]
		if _, ok := plan.Spec.Groups[group]; ok {
			daemons[group] = daemon
		} else {
			err = c.Delete(ctx, daemon)
			if err != nil {
				return fmt.Errorf("failed to delete DaemonSet %s: %v", daemon.Name, err)
			}
			logger.WithValues("name", daemon.Name).Info("Deleted obsolete DaemonSet")
		}
	}

	// Same indexing/pruning for the ConfigMaps.
	cms := make(map[string]*corev1.ConfigMap, len(plan.Spec.Groups))
	for i := range cmList.Items {
		cm := &cmList.Items[i]
		group := cm.Labels[constants.LabelNodeGroup]
		if _, ok := plan.Spec.Groups[group]; ok {
			cms[group] = cm
		} else {
			err = c.Delete(ctx, cm)
			if err != nil {
				return fmt.Errorf("failed to delete ConfigMap %s: %v", cm.Name, err)
			}
			logger.WithValues("name", cm.Name).Info("Deleted obsolete ConfigMap")
		}
	}

	// Collect per-group node updates here instead of writing them right
	// away, so dependency gating below can decide which groups may proceed.
	nodesToUpdate := make(map[string][]corev1.Node, len(plan.Spec.Groups))
	newGroupStatus := make(map[string]string, len(plan.Spec.Groups))

	for name, cfg := range plan.Spec.Groups {
		err = c.reconcileUpgradedConfigMap(ctx, plan, logger, cms[name], name)
		if err != nil {
			return fmt.Errorf("failed to reconcile ConfigMap for group %s: %v", name, err)
		}

		err = c.reconcileUpgradedDaemonSet(ctx, plan, logger, daemons[name], name, cfg)
		if err != nil {
			return fmt.Errorf("failed to reconcile DaemonSet for group %s: %v", name, err)
		}

		// Nodes belong to a group via the group's label selector.
		nodeList := &corev1.NodeList{}
		err = c.List(ctx, nodeList, client.MatchingLabels(cfg.Labels))
		if err != nil {
			logger.WithValues("group", name).Error(err, "Failed to get nodes for group")
			return err
		}

		status, update, nodes, err := c.reconcileNodes(plan.Spec.KubernetesVersion, plan.Spec.AllowDowngrade, nodeList.Items)
		if err != nil {
			logger.WithValues("group", name).Error(err, "Failed to reconcile nodes for group")
			return err
		}

		newGroupStatus[name] = status

		if update {
			nodesToUpdate[name] = nodes
		} else if plan.Status.Groups[name] != newGroupStatus[name] {
			logger.WithValues("group", name, "status", newGroupStatus[name]).Info("Group changed status")
		}
	}

	// Apply the pending node updates, but hold back groups whose
	// dependencies have not finished yet.
	for name, nodes := range nodesToUpdate {
		if groupWaitForDependency(plan.Spec.Groups[name].DependsOn, newGroupStatus) {
			logger.WithValues("group", name).Info("Group is waiting on dependencies")
			newGroupStatus[name] = api.PlanStatusWaiting
			continue
		} else if plan.Status.Groups[name] != newGroupStatus[name] {
			logger.WithValues("group", name, "status", newGroupStatus[name]).Info("Group changed status")
		}

		for _, node := range nodes {
			err = c.Update(ctx, &node)
			if err != nil {
				return fmt.Errorf("failed to update node %s: %v", node.GetName(), err)
			}
		}
	}

	plan.Status.Groups = newGroupStatus
	plan.Status.Summary = createStatusSummary(plan.Status.Groups)

	return nil
}
300

301
func (c *controller) reconcileNodes(kubeVersion string, downgrade bool, nodes []corev1.Node) (string, bool, []corev1.Node, error) {
39✔
302
        if len(nodes) == 0 {
43✔
303
                return api.PlanStatusUnknown, false, nil, nil
4✔
304
        }
4✔
305

306
        completed := 0
35✔
307
        needUpdate := false
35✔
308
        errorNodes := make([]string, 0)
35✔
309

35✔
310
        for i := range nodes {
70✔
311
                if nodes[i].Annotations == nil {
55✔
312
                        nodes[i].Annotations = make(map[string]string)
20✔
313
                }
20✔
314

315
                // Step to cleanup after migration to v0.6.0
316
                // TODO: Remove in v0.7.0
317
                if deleteConfigAnnotations(nodes[i].Annotations) {
37✔
318
                        needUpdate = true
2✔
319
                }
2✔
320

321
                if !downgrade && semver.Compare(kubeVersion, nodes[i].Status.NodeInfo.KubeletVersion) < 0 {
36✔
322
                        return api.PlanStatusError, false, nil, fmt.Errorf("node %s version %s is newer than %s, but downgrade is disabled", nodes[i].GetName(), nodes[i].Status.NodeInfo.KubeletVersion, kubeVersion)
1✔
323
                }
1✔
324

325
                if nodes[i].Annotations[constants.NodeKubernetesVersion] == kubeVersion {
46✔
326
                        switch nodes[i].Annotations[constants.NodeUpgradeStatus] {
12✔
327
                        case constants.NodeUpgradeStatusCompleted:
11✔
328
                                completed++
11✔
329
                        case constants.NodeUpgradeStatusError:
1✔
330
                                errorNodes = append(errorNodes, nodes[i].GetName())
1✔
331
                        }
332
                        continue
12✔
333
                }
334

335
                nodes[i].Annotations[constants.NodeKubernetesVersion] = kubeVersion
22✔
336
                nodes[i].Annotations[constants.NodeUpgradeStatus] = constants.NodeUpgradeStatusPending
22✔
337

22✔
338
                needUpdate = true
22✔
339
        }
340

341
        var status string
34✔
342
        if len(errorNodes) > 0 {
35✔
343
                status = fmt.Sprintf("%s: The nodes %v are reporting errors", api.PlanStatusError, errorNodes)
1✔
344
        } else if len(nodes) == completed {
45✔
345
                status = api.PlanStatusComplete
11✔
346
        } else {
33✔
347
                status = fmt.Sprintf("%s: %d/%d nodes upgraded", api.PlanStatusProgressing, completed, len(nodes))
22✔
348
        }
22✔
349
        return status, needUpdate, nodes, nil
34✔
350
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc