zalando / postgres-operator, build 11632412735 (push, github, web-flow)

01 Nov 2024 04:06PM UTC coverage: 44.803% (down 0.01% from 44.813%)

Commit: fix switch over candidate retrieving (#2760)

Signed-off-by: fahed dorgaa <fahed.dorgaa.ext@corp.ovh.com>
Co-authored-by: fahed dorgaa <fahed.dorgaa.ext@corp.ovh.com>
Co-authored-by: Felix Kunde <felix-kunde@gmx.de>

12 of 12 new or added lines in 1 file covered. (100.0%)

1 existing line in 1 file now uncovered.

6737 of 15037 relevant lines covered (44.8%)

26.89 hits per line
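
For reference, the headline percentages follow directly from the line counts quoted above; the short Go sketch below merely reproduces the arithmetic (the variable names are illustrative, not part of Coveralls or the operator):

package main

import "fmt"

func main() {
        // Line counts taken from the report summary above.
        covered, relevant := 6737.0, 15037.0

        pct := covered / relevant * 100
        fmt.Printf("coverage: %.3f%%\n", pct)       // prints 44.803
        fmt.Printf("change: %+.3f%%\n", pct-44.813) // roughly -0.010 vs. the previous build
}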

Source File: /pkg/cluster/pod.go (31.06% of relevant lines covered)

Note: the one newly uncovered line flagged by this build is the final "no switchover candidate found" return in getSwitchoverCandidate below.

package cluster

import (
        "context"
        "fmt"
        "sort"
        "strconv"
        "time"

        "golang.org/x/exp/slices"

        appsv1 "k8s.io/api/apps/v1"
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"

        acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
        "github.com/zalando/postgres-operator/pkg/spec"
        "github.com/zalando/postgres-operator/pkg/util"
        "github.com/zalando/postgres-operator/pkg/util/patroni"
        "github.com/zalando/postgres-operator/pkg/util/retryutil"
)

func (c *Cluster) listPods() ([]v1.Pod, error) {
        listOptions := metav1.ListOptions{
                LabelSelector: c.labelsSet(false).String(),
        }

        pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
        if err != nil {
                return nil, fmt.Errorf("could not get list of pods: %v", err)
        }

        return pods.Items, nil
}

func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
        listOptions := metav1.ListOptions{
                LabelSelector: c.roleLabelsSet(false, role).String(),
        }

        pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
        if err != nil {
                return nil, fmt.Errorf("could not get list of pods: %v", err)
        }

        if role == Master && len(pods.Items) > 1 {
                return nil, fmt.Errorf("too many masters")
        }

        return pods.Items, nil
}

// markRollingUpdateFlagForPod sets the indicator for the rolling update requirement
// in the Pod annotation.
func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
        // no need to patch pod if annotation is already there
        if c.getRollingUpdateFlagFromPod(pod) {
                return nil
        }

        c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg)
        flag := make(map[string]string)
        flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)

        patchData, err := metaAnnotationsPatch(flag)
        if err != nil {
                return fmt.Errorf("could not form patch for pod's rolling update flag: %v", err)
        }

        err = retryutil.Retry(1*time.Second, 5*time.Second,
                func() (bool, error) {
                        _, err2 := c.KubeClient.Pods(pod.Namespace).Patch(
                                context.TODO(),
                                pod.Name,
                                types.MergePatchType,
                                []byte(patchData),
                                metav1.PatchOptions{},
                                "")
                        if err2 != nil {
                                return false, err2
                        }
                        return true, nil
                })
        if err != nil {
                return fmt.Errorf("could not patch pod rolling update flag %q: %v", patchData, err)
        }

        return nil
}

// getRollingUpdateFlagFromPod returns the value of the rollingUpdate flag from the given pod
func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
        anno := pod.GetAnnotations()
        flag = false

        stringFlag, exists := anno[rollingUpdatePodAnnotationKey]
        if exists {
                var err error
                c.logger.Debugf("found rolling update flag on pod %q", pod.Name)
                if flag, err = strconv.ParseBool(stringFlag); err != nil {
                        c.logger.Warnf("error when parsing %q annotation for the pod %q: expected boolean value, got %q\n",
                                rollingUpdatePodAnnotationKey,
                                types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name},
                                stringFlag)
                }
        }

        return flag
}

func (c *Cluster) deletePods() error {
        c.logger.Debug("deleting pods")
        pods, err := c.listPods()
        if err != nil {
                return err
        }

        for _, obj := range pods {
                podName := util.NameFromMeta(obj.ObjectMeta)

                c.logger.Debugf("deleting pod %q", podName)
                if err := c.deletePod(podName); err != nil {
                        c.logger.Errorf("could not delete pod %q: %v", podName, err)
                } else {
                        c.logger.Infof("pod %q has been deleted", podName)
                }
        }
        if len(pods) > 0 {
                c.logger.Debug("pods have been deleted")
        } else {
                c.logger.Debug("no pods to delete")
        }

        return nil
}

func (c *Cluster) deletePod(podName spec.NamespacedName) error {
        c.setProcessName("deleting pod %q", podName)
        ch := c.registerPodSubscriber(podName)
        defer c.unregisterPodSubscriber(podName)

        if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil {
                return err
        }

        return c.waitForPodDeletion(ch)
}

func (c *Cluster) unregisterPodSubscriber(podName spec.NamespacedName) {
        c.logger.Debugf("unsubscribing from pod %q events", podName)
        c.podSubscribersMu.Lock()
        defer c.podSubscribersMu.Unlock()

        ch, ok := c.podSubscribers[podName]
        if !ok {
                panic("subscriber for pod '" + podName.String() + "' is not found")
        }

        delete(c.podSubscribers, podName)
        close(ch)
}

func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan PodEvent {
        c.logger.Debugf("subscribing to pod %q", podName)
        c.podSubscribersMu.Lock()
        defer c.podSubscribersMu.Unlock()

        ch := make(chan PodEvent)
        if _, ok := c.podSubscribers[podName]; ok {
                panic("pod '" + podName.String() + "' is already subscribed")
        }
        c.podSubscribers[podName] = ch

        return ch
}

func (c *Cluster) movePodFromEndOfLifeNode(pod *v1.Pod) (*v1.Pod, error) {
        var (
                eol    bool
                err    error
                newPod *v1.Pod
        )
        podName := util.NameFromMeta(pod.ObjectMeta)

        if eol, err = c.podIsEndOfLife(pod); err != nil {
                return nil, fmt.Errorf("could not get node %q: %v", pod.Spec.NodeName, err)
        } else if !eol {
                c.logger.Infof("check failed: pod %q is already on a live node", podName)
                return pod, nil
        }

        c.setProcessName("moving pod %q out of end-of-life node %q", podName, pod.Spec.NodeName)
        c.logger.Infof("moving pod %q out of the end-of-life node %q", podName, pod.Spec.NodeName)

        if newPod, err = c.recreatePod(podName); err != nil {
                return nil, fmt.Errorf("could not move pod: %v", err)
        }

        if newPod.Spec.NodeName == pod.Spec.NodeName {
                return nil, fmt.Errorf("pod %q remained on the same node", podName)
        }

        if eol, err = c.podIsEndOfLife(newPod); err != nil {
                return nil, fmt.Errorf("could not get node %q: %v", pod.Spec.NodeName, err)
        } else if eol {
                c.logger.Warningf("pod %q moved to end-of-life node %q", podName, newPod.Spec.NodeName)
                return newPod, nil
        }

        c.logger.Infof("pod %q moved from node %q to node %q", podName, pod.Spec.NodeName, newPod.Spec.NodeName)

        return newPod, nil
}

// MigrateMasterPod migrates master pod via failover to a replica
func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
        var (
                err error
                eol bool
        )

        oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})
        if err != nil {
                return fmt.Errorf("could not get master pod: %v", err)
        }

        c.logger.Infof("starting process to migrate master pod %q", podName)
        if eol, err = c.podIsEndOfLife(oldMaster); err != nil {
                return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
        }
        if !eol {
                c.logger.Debug("no action needed: master pod is already on a live node")
                return nil
        }

        if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != Master {
                c.logger.Warningf("no action needed: pod %q is not the master (anymore)", podName)
                return nil
        }
        // we must have a statefulset in the cluster for the migration to work
        if c.Statefulset == nil {
                var sset *appsv1.StatefulSet
                if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(
                        context.TODO(),
                        c.statefulSetName(),
                        metav1.GetOptions{}); err != nil {
                        return fmt.Errorf("could not retrieve cluster statefulset: %v", err)
                }
                c.Statefulset = sset
        }
        // we may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case
        masterCandidateName := podName
        masterCandidatePod := oldMaster
        if *c.Statefulset.Spec.Replicas > 1 {
                if masterCandidateName, err = c.getSwitchoverCandidate(oldMaster); err != nil {
                        return fmt.Errorf("could not find suitable replica pod as candidate for failover: %v", err)
                }
                masterCandidatePod, err = c.KubeClient.Pods(masterCandidateName.Namespace).Get(context.TODO(), masterCandidateName.Name, metav1.GetOptions{})
                if err != nil {
                        return fmt.Errorf("could not get master candidate pod: %v", err)
                }
        } else {
                c.logger.Warningf("migrating single pod cluster %q, this will cause downtime of the Postgres cluster until pod is back", c.clusterName())
        }

        // there are two cases for each postgres cluster that has its master pod on the node to migrate from:
        // - the cluster has some replicas - migrate one of those if necessary and failover to it
        // - there are no replicas - just terminate the master and wait until it respawns
        // in both cases the result is the new master up and running on a new node.

        if masterCandidatePod == nil {
                if _, err = c.movePodFromEndOfLifeNode(oldMaster); err != nil {
                        return fmt.Errorf("could not move pod: %v", err)
                }
                return nil
        }

        if _, err = c.movePodFromEndOfLifeNode(masterCandidatePod); err != nil {
                return fmt.Errorf("could not move pod: %v", err)
        }

        err = retryutil.Retry(1*time.Minute, 5*time.Minute,
                func() (bool, error) {
                        err := c.Switchover(oldMaster, masterCandidateName)
                        if err != nil {
                                c.logger.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
                                return false, nil
                        }
                        return true, nil
                },
        )

        if err != nil {
                return fmt.Errorf("could not migrate master pod: %v", err)
        }

        return nil
}

// MigrateReplicaPod recreates pod on a new node
func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName string) error {
        replicaPod, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})
        if err != nil {
                return fmt.Errorf("could not get pod: %v", err)
        }

        c.logger.Infof("migrating replica pod %q to live node", podName)

        if replicaPod.Spec.NodeName != fromNodeName {
                c.logger.Infof("check failed: pod %q has already migrated to node %q", podName, replicaPod.Spec.NodeName)
                return nil
        }

        if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != Replica {
                return fmt.Errorf("check failed: pod %q is not a replica", podName)
        }

        _, err = c.movePodFromEndOfLifeNode(replicaPod)
        if err != nil {
                return fmt.Errorf("could not move pod: %v", err)
        }

        return nil
}

func (c *Cluster) getPatroniConfig(pod *v1.Pod) (acidv1.Patroni, map[string]string, error) {
        var (
                patroniConfig acidv1.Patroni
                pgParameters  map[string]string
        )
        podName := util.NameFromMeta(pod.ObjectMeta)
        err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
                func() (bool, error) {
                        var err error
                        patroniConfig, pgParameters, err = c.patroni.GetConfig(pod)

                        if err != nil {
                                return false, err
                        }
                        return true, nil
                },
        )

        if err != nil {
                return acidv1.Patroni{}, nil, fmt.Errorf("could not get Postgres config from pod %s: %v", podName, err)
        }

        return patroniConfig, pgParameters, nil
}

func (c *Cluster) getPatroniMemberData(pod *v1.Pod) (patroni.MemberData, error) {
        var memberData patroni.MemberData
        err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
                func() (bool, error) {
                        var err error
                        memberData, err = c.patroni.GetMemberData(pod)

                        if err != nil {
                                return false, err
                        }
                        return true, nil
                },
        )
        if err != nil {
                return patroni.MemberData{}, fmt.Errorf("could not get member data: %v", err)
        }
        if memberData.State == "creating replica" {
                return patroni.MemberData{}, fmt.Errorf("replica currently being initialized")
        }

        return memberData, nil
}

func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
        stopCh := make(chan struct{})
        ch := c.registerPodSubscriber(podName)
        defer c.unregisterPodSubscriber(podName)
        defer close(stopCh)

        err := retryutil.Retry(1*time.Second, 5*time.Second,
                func() (bool, error) {
                        err2 := c.KubeClient.Pods(podName.Namespace).Delete(
                                context.TODO(),
                                podName.Name,
                                c.deleteOptions)
                        if err2 != nil {
                                return false, err2
                        }
                        return true, nil
                })
        if err != nil {
                return nil, fmt.Errorf("could not delete pod: %v", err)
        }

        if err := c.waitForPodDeletion(ch); err != nil {
                return nil, err
        }
        pod, err := c.waitForPodLabel(ch, stopCh, nil)
        if err != nil {
                return nil, err
        }
        c.logger.Infof("pod %q has been recreated", podName)
        return pod, nil
}

func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.NamespacedName) error {
        c.setProcessName("starting to recreate pods")
        c.logger.Infof("there are %d pods in the cluster to recreate", len(pods))

        var (
                masterPod, newMasterPod *v1.Pod
        )
        replicas := switchoverCandidates

        for i, pod := range pods {
                role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])

                if role == Master {
                        masterPod = &pods[i]
                        continue
                }

                podName := util.NameFromMeta(pods[i].ObjectMeta)
                newPod, err := c.recreatePod(podName)
                if err != nil {
                        return fmt.Errorf("could not recreate replica pod %q: %v", util.NameFromMeta(pod.ObjectMeta), err)
                }

                newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
                if newRole == Replica {
                        replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
                } else if newRole == Master {
                        newMasterPod = newPod
                }
        }

        if masterPod != nil {
                // switchover if
                // 1. we have not observed a new master pod when re-creating former replicas
                // 2. we know possible switchover targets even when no replicas were recreated
                if newMasterPod == nil && len(replicas) > 0 {
                        masterCandidate, err := c.getSwitchoverCandidate(masterPod)
                        if err != nil {
                                // do not recreate master now so it will keep the update flag and switchover will be retried on next sync
                                return fmt.Errorf("skipping switchover: %v", err)
                        }
                        if err := c.Switchover(masterPod, masterCandidate); err != nil {
                                return fmt.Errorf("could not perform switch over: %v", err)
                        }
                } else if newMasterPod == nil && len(replicas) == 0 {
                        c.logger.Warningf("cannot perform switch over before re-creating the pod: no replicas")
                }
                c.logger.Infof("recreating old master pod %q", util.NameFromMeta(masterPod.ObjectMeta))

                if _, err := c.recreatePod(util.NameFromMeta(masterPod.ObjectMeta)); err != nil {
                        return fmt.Errorf("could not recreate old master pod %q: %v", util.NameFromMeta(masterPod.ObjectMeta), err)
                }
        }

        return nil
}

func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, error) {

        var members []patroni.ClusterMember
        candidates := make([]patroni.ClusterMember, 0)
        syncCandidates := make([]patroni.ClusterMember, 0)

        err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
                func() (bool, error) {
                        var err error
                        members, err = c.patroni.GetClusterMembers(master)
                        if err != nil {
                                return false, err
                        }

                        // look for SyncStandby candidates (which also implies pod is in running state)
                        for _, member := range members {
                                if PostgresRole(member.Role) == SyncStandby {
                                        syncCandidates = append(syncCandidates, member)
                                }
                                if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
                                        candidates = append(candidates, member)
                                }
                        }

                        // if synchronous mode is enabled and no SyncStandby was found
                        // return false for retry - cannot failover with no sync candidate
                        if c.Spec.Patroni.SynchronousMode && len(syncCandidates) == 0 {
                                c.logger.Warnf("no sync standby found - retrying fetching cluster members")
                                return false, nil
                        }

                        // retry also in asynchronous mode when no replica candidate was found
                        if !c.Spec.Patroni.SynchronousMode && len(candidates) == 0 {
                                c.logger.Warnf("no replica candidate found - retrying fetching cluster members")
                                return false, nil
                        }

                        return true, nil
                },
        )
        if err != nil {
                return spec.NamespacedName{}, fmt.Errorf("failed to get Patroni cluster members: %s", err)
        }

        // pick candidate with lowest lag
        if len(syncCandidates) > 0 {
                sort.Slice(syncCandidates, func(i, j int) bool {
                        return syncCandidates[i].Lag < syncCandidates[j].Lag
                })
                return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil
        }
        if len(candidates) > 0 {
                sort.Slice(candidates, func(i, j int) bool {
                        return candidates[i].Lag < candidates[j].Lag
                })
                return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil
        }

        return spec.NamespacedName{}, fmt.Errorf("no switchover candidate found")
}

func (c *Cluster) podIsEndOfLife(pod *v1.Pod) (bool, error) {
        node, err := c.KubeClient.Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
        if err != nil {
                return false, err
        }
        return node.Spec.Unschedulable || !util.MapContains(node.Labels, c.OpConfig.NodeReadinessLabel), nil

}
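
Since the commit under test ("fix switch over candidate retrieving") centres on getSwitchoverCandidate above, the following minimal, self-contained sketch illustrates its selection policy: prefer sync standbys, otherwise any non-leader member in a healthy replication state, and within a group pick the lowest replication lag. The member struct, role and state strings, and sample data are hypothetical stand-ins, not the operator's actual patroni types, and the sketch omits the retry loop the real function wraps around the Patroni call.

package main

import (
        "fmt"
        "sort"
)

// member is a hypothetical stand-in for patroni.ClusterMember, carrying
// only the fields the selection logic above relies on.
type member struct {
        Name  string
        Role  string
        State string
        Lag   uint64
}

// pickCandidate mirrors the policy of getSwitchoverCandidate: sync standbys
// are preferred; otherwise any non-leader member in a healthy state
// qualifies; within a group, the member with the lowest lag wins.
func pickCandidate(members []member, synchronousMode bool) (string, error) {
        healthy := map[string]bool{"running": true, "streaming": true, "in archive recovery": true}

        var syncCandidates, candidates []member
        for _, m := range members {
                if m.Role == "sync_standby" {
                        syncCandidates = append(syncCandidates, m)
                }
                if m.Role != "leader" && m.Role != "standby_leader" && healthy[m.State] {
                        candidates = append(candidates, m)
                }
        }

        lowestLag := func(cands []member) string {
                sort.Slice(cands, func(i, j int) bool { return cands[i].Lag < cands[j].Lag })
                return cands[0].Name
        }

        if len(syncCandidates) > 0 {
                return lowestLag(syncCandidates), nil
        }
        if !synchronousMode && len(candidates) > 0 {
                return lowestLag(candidates), nil
        }
        // the real function retries before giving up; here we fail immediately
        return "", fmt.Errorf("no switchover candidate found")
}

func main() {
        members := []member{
                {Name: "db-0", Role: "leader", State: "running", Lag: 0},
                {Name: "db-1", Role: "replica", State: "streaming", Lag: 1024},
                {Name: "db-2", Role: "replica", State: "streaming", Lag: 0},
        }
        candidate, err := pickCandidate(members, false)
        fmt.Println(candidate, err) // db-2 <nil>
}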