• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

heathcliff26 / kube-upgrade / 19390467233

15 Nov 2025 01:23PM UTC coverage: 72.155% (-0.3%) from 72.49%
19390467233

push

github

heathcliff26
upgrade-controller: Only reconcile DaemonSet when it changed

Use hashes to check if the spec changed.
This should reduce the amount of updates.

Fixes: #186

Signed-off-by: Heathcliff <heathcliff@heathcliff.eu>

32 of 46 new or added lines in 1 file covered. (69.57%)

4 existing lines in 2 files now uncovered.

1078 of 1494 relevant lines covered (72.16%)

11.74 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

51.33
/pkg/upgrade-controller/controller/controller.go
1
package controller
2

3
import (
	"context"
	"fmt"
	"log/slog"
	"time"

	api "github.com/heathcliff26/kube-upgrade/pkg/apis/kubeupgrade/v1alpha3"
	"github.com/heathcliff26/kube-upgrade/pkg/constants"
	"golang.org/x/mod/semver"
	appv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)
25

26
// Defaults and environment variable names for the upgraded image.
const (
	defaultUpgradedImage = "ghcr.io/heathcliff26/kube-upgraded" // fallback image, presumably used when UPGRADED_IMAGE is unset — see GetUpgradedImage
	upgradedImageEnv     = "UPGRADED_IMAGE"                     // env var overriding the image repository
	upgradedTagEnv       = "UPGRADED_TAG"                       // env var overriding the image tag
)
31

32
// controller reconciles KubeUpgradePlan resources together with the
// per-group DaemonSets and ConfigMaps derived from them.
// It embeds the manager's client for direct API access.
type controller struct {
	client.Client
	manager       manager.Manager // controller-runtime manager driving the reconcile loop
	namespace     string          // namespace the controller operates in (from GetNamespace)
	upgradedImage string          // image reference for the upgraded daemon (from GetUpgradedImage)
}
38

39
// Run make generate when changing these comments
40
// +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans,verbs=get;list;watch;create;update;patch;delete
41
// +kubebuilder:rbac:groups=kubeupgrade.heathcliff.eu,resources=kubeupgradeplans/status,verbs=get;update;patch
42
// +kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch;update
43
// +kubebuilder:rbac:groups="",namespace=kube-upgrade,resources=events,verbs=create;patch
44
// +kubebuilder:rbac:groups="coordination.k8s.io",namespace=kube-upgrade,resources=leases,verbs=create;get;update
45
// +kubebuilder:rbac:groups="apps",namespace=kube-upgrade,resources=daemonsets,verbs=list;watch;create;update;delete
46
// +kubebuilder:rbac:groups="",namespace=kube-upgrade,resources=configmaps,verbs=list;watch;create;update;delete
47

48
func NewController(name string) (*controller, error) {
1✔
49
        config, err := rest.InClusterConfig()
1✔
50
        if err != nil {
2✔
51
                return nil, err
1✔
52
        }
1✔
53

54
        ns, err := GetNamespace()
×
55
        if err != nil {
×
56
                return nil, err
×
57
        }
×
58

59
        scheme := runtime.NewScheme()
×
60
        err = api.AddToScheme(scheme)
×
61
        if err != nil {
×
62
                return nil, err
×
63
        }
×
64
        err = clientgoscheme.AddToScheme(scheme)
×
65
        if err != nil {
×
66
                return nil, err
×
67
        }
×
68

69
        mgr, err := ctrl.NewManager(config, manager.Options{
×
70
                Scheme:                        scheme,
×
71
                LeaderElection:                true,
×
72
                LeaderElectionNamespace:       ns,
×
73
                LeaderElectionID:              name,
×
74
                LeaderElectionReleaseOnCancel: true,
×
75
                LeaseDuration:                 Pointer(time.Minute),
×
76
                RenewDeadline:                 Pointer(10 * time.Second),
×
77
                RetryPeriod:                   Pointer(5 * time.Second),
×
78
                HealthProbeBindAddress:        ":9090",
×
79
                Cache: cache.Options{
×
80
                        DefaultNamespaces: map[string]cache.Config{ns: {}},
×
81
                },
×
82
        })
×
83
        if err != nil {
×
84
                return nil, err
×
85
        }
×
86
        err = mgr.AddHealthzCheck("healthz", healthz.Ping)
×
87
        if err != nil {
×
88
                return nil, err
×
89
        }
×
90
        err = mgr.AddReadyzCheck("readyz", healthz.Ping)
×
91
        if err != nil {
×
92
                return nil, err
×
93
        }
×
94

95
        return &controller{
×
96
                Client:        mgr.GetClient(),
×
97
                manager:       mgr,
×
98
                namespace:     ns,
×
99
                upgradedImage: GetUpgradedImage(),
×
100
        }, nil
×
101
}
102

103
func (c *controller) Run() error {
×
104
        err := ctrl.NewControllerManagedBy(c.manager).
×
105
                For(&api.KubeUpgradePlan{}).
×
106
                Owns(&appv1.DaemonSet{}).
×
107
                Owns(&corev1.ConfigMap{}).
×
108
                Complete(c)
×
109
        if err != nil {
×
110
                return err
×
111
        }
×
112

113
        err = ctrl.NewWebhookManagedBy(c.manager).
×
114
                For(&api.KubeUpgradePlan{}).
×
115
                WithDefaulter(&planMutatingHook{}).
×
116
                WithValidator(&planValidatingHook{}).
×
117
                Complete()
×
118
        if err != nil {
×
119
                return err
×
120
        }
×
121

122
        return c.manager.Start(signals.SetupSignalHandler())
×
123
}
124

125
func (c *controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
×
126
        logger := slog.With("plan", req.Name)
×
127

×
128
        var plan api.KubeUpgradePlan
×
129
        err := c.Get(ctx, req.NamespacedName, &plan)
×
130
        if err != nil {
×
131
                logger.Error("Failed to get Plan", "err", err)
×
132
                return ctrl.Result{}, err
×
133
        }
×
134

135
        err = c.reconcile(ctx, &plan, logger)
×
136
        if err != nil {
×
137
                return ctrl.Result{}, err
×
138
        }
×
139

140
        err = c.Status().Update(ctx, &plan)
×
141
        if err != nil {
×
142
                logger.Error("Failed to update plan status", "err", err)
×
143
                return ctrl.Result{}, err
×
144
        }
×
145

146
        return ctrl.Result{
×
147
                Requeue:      plan.Status.Summary != api.PlanStatusComplete,
×
148
                RequeueAfter: time.Minute,
×
149
        }, nil
×
150
}
151

152
// reconcile brings the cluster in line with the given plan: it removes the
// legacy finalizer, garbage-collects ConfigMaps/DaemonSets for groups that
// were removed from the spec, reconciles the per-group ConfigMap/DaemonSet
// pair, annotates nodes that need an upgrade (unless the group is waiting on
// dependencies) and recomputes the per-group status plus the overall summary.
// plan.Status is modified in place; persisting it is the caller's job.
func (c *controller) reconcile(ctx context.Context, plan *api.KubeUpgradePlan, logger *slog.Logger) error {
	if plan.Status.Groups == nil {
		plan.Status.Groups = make(map[string]string, len(plan.Spec.Groups))
	}

	// Migration from v0.6.0: Remove the finalizer as it is not needed
	// TODO: Remove in future release
	if controllerutil.RemoveFinalizer(plan, constants.Finalizer) {
		logger.Debug("Removing finalizer from plan")
		err := c.Update(ctx, plan)
		if err != nil {
			return fmt.Errorf("failed to remove finalizer from plan %s: %v", plan.Name, err)
		}
	}

	// Fetch all ConfigMaps belonging to this plan, identified by the
	// plan-name label.
	cmList := &corev1.ConfigMapList{}
	err := c.List(ctx, cmList, client.InNamespace(c.namespace), client.MatchingLabels{
		constants.LabelPlanName: plan.Name,
	})
	if err != nil {
		logger.Error("Failed to fetch upgraded ConfigMaps", "err", err)
		return err
	}

	// Same for the DaemonSets belonging to this plan.
	dsList := &appv1.DaemonSetList{}
	err = c.List(ctx, dsList, client.InNamespace(c.namespace), client.MatchingLabels{
		constants.LabelPlanName: plan.Name,
	})
	if err != nil {
		logger.Error("Failed to fetch upgraded DaemonSets", "err", err)
		return err
	}

	// Index the DaemonSets by node group and delete those whose group no
	// longer exists in the plan spec.
	daemons := make(map[string]*appv1.DaemonSet, len(plan.Spec.Groups))
	for i := range dsList.Items {
		daemon := &dsList.Items[i]
		group := daemon.Labels[constants.LabelNodeGroup]
		if _, ok := plan.Spec.Groups[group]; ok {
			daemons[group] = daemon
		} else {
			err = c.Delete(ctx, daemon)
			if err != nil {
				return fmt.Errorf("failed to delete DaemonSet %s: %v", daemon.Name, err)
			}
			logger.Info("Deleted obsolete DaemonSet", "name", daemon.Name)
		}
	}

	// Likewise index the ConfigMaps by node group, deleting orphans.
	cms := make(map[string]*corev1.ConfigMap, len(plan.Spec.Groups))
	for i := range cmList.Items {
		cm := &cmList.Items[i]
		group := cm.Labels[constants.LabelNodeGroup]
		if _, ok := plan.Spec.Groups[group]; ok {
			cms[group] = cm
		} else {
			err = c.Delete(ctx, cm)
			if err != nil {
				return fmt.Errorf("failed to delete ConfigMap %s: %v", cm.Name, err)
			}
			logger.Info("Deleted obsolete ConfigMap", "name", cm.Name)
		}
	}

	// First pass: reconcile each group's resources and compute its new
	// status. Node annotation writes are collected in nodesToUpdate and
	// deferred until every group's status is known, because the dependency
	// check below needs the statuses of all groups.
	nodesToUpdate := make(map[string][]corev1.Node, len(plan.Spec.Groups))
	newGroupStatus := make(map[string]string, len(plan.Spec.Groups))

	for name, cfg := range plan.Spec.Groups {
		logger := logger.With("group", name)

		err = c.reconcileUpgradedConfigMap(ctx, plan, logger, cms[name], name)
		if err != nil {
			return fmt.Errorf("failed to reconcile ConfigMap for group %s: %v", name, err)
		}

		err = c.reconcileUpgradedDaemonSet(ctx, plan, logger, daemons[name], name, cfg)
		if err != nil {
			return fmt.Errorf("failed to reconcile DaemonSet for group %s: %v", name, err)
		}

		// Nodes are matched to the group via the label selector in the
		// group's config.
		nodeList := &corev1.NodeList{}
		err = c.List(ctx, nodeList, client.MatchingLabels(cfg.Labels))
		if err != nil {
			logger.Error("Failed to get nodes for group", "err", err)
			return err
		}

		status, update, nodes, err := c.reconcileNodes(plan.Spec.KubernetesVersion, plan.Spec.AllowDowngrade, nodeList.Items)
		if err != nil {
			logger.Error("Failed to reconcile nodes for group", "err", err)
			return err
		}

		newGroupStatus[name] = status

		if update {
			nodesToUpdate[name] = nodes
		} else if plan.Status.Groups[name] != newGroupStatus[name] {
			logger.Info("Group changed status", "status", newGroupStatus[name])
		}
	}

	// Second pass: write the pending node annotations, but only for groups
	// whose dependencies are no longer in progress; waiting groups are
	// flagged and skipped.
	for name, nodes := range nodesToUpdate {
		logger := logger.With("group", name)

		if groupWaitForDependency(plan.Spec.Groups[name].DependsOn, newGroupStatus) {
			logger.Info("Group is waiting on dependencies")
			newGroupStatus[name] = api.PlanStatusWaiting
			continue
		} else if plan.Status.Groups[name] != newGroupStatus[name] {
			logger.Info("Group changed status", "status", newGroupStatus[name])
		}

		for _, node := range nodes {
			logger.Debug("Updating node annotations", "node", node.Name)
			err = c.Update(ctx, &node)
			if err != nil {
				return fmt.Errorf("failed to update node %s: %v", node.GetName(), err)
			}
		}
	}

	plan.Status.Groups = newGroupStatus
	plan.Status.Summary = createStatusSummary(plan.Status.Groups)

	return nil
}
278

279
func (c *controller) reconcileNodes(kubeVersion string, downgrade bool, nodes []corev1.Node) (string, bool, []corev1.Node, error) {
37✔
280
        if len(nodes) == 0 {
37✔
UNCOV
281
                return api.PlanStatusUnknown, false, nil, nil
×
UNCOV
282
        }
×
283

284
        completed := 0
37✔
285
        needUpdate := false
37✔
286
        errorNodes := make([]string, 0)
37✔
287

37✔
288
        for i := range nodes {
74✔
289
                if nodes[i].Annotations == nil {
61✔
290
                        nodes[i].Annotations = make(map[string]string)
24✔
291
                }
24✔
292

293
                if !downgrade && semver.Compare(kubeVersion, nodes[i].Status.NodeInfo.KubeletVersion) < 0 {
38✔
294
                        return api.PlanStatusError, false, nil, fmt.Errorf("node %s version %s is newer than %s, but downgrade is disabled", nodes[i].GetName(), nodes[i].Status.NodeInfo.KubeletVersion, kubeVersion)
1✔
295
                }
1✔
296

297
                if nodes[i].Annotations[constants.NodeKubernetesVersion] == kubeVersion {
46✔
298
                        switch nodes[i].Annotations[constants.NodeUpgradeStatus] {
10✔
299
                        case constants.NodeUpgradeStatusCompleted:
9✔
300
                                completed++
9✔
301
                        case constants.NodeUpgradeStatusError:
1✔
302
                                errorNodes = append(errorNodes, nodes[i].GetName())
1✔
303
                        }
304
                        continue
10✔
305
                }
306

307
                nodes[i].Annotations[constants.NodeKubernetesVersion] = kubeVersion
26✔
308
                nodes[i].Annotations[constants.NodeUpgradeStatus] = constants.NodeUpgradeStatusPending
26✔
309

26✔
310
                needUpdate = true
26✔
311
        }
312

313
        var status string
36✔
314
        if len(errorNodes) > 0 {
37✔
315
                status = fmt.Sprintf("%s: The nodes %v are reporting errors", api.PlanStatusError, errorNodes)
1✔
316
        } else if len(nodes) == completed {
45✔
317
                status = api.PlanStatusComplete
9✔
318
        } else {
35✔
319
                status = fmt.Sprintf("%s: %d/%d nodes upgraded", api.PlanStatusProgressing, completed, len(nodes))
26✔
320
        }
26✔
321
        return status, needUpdate, nodes, nil
36✔
322
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc