NVIDIA / gpu-operator / build 18786027269

24 Oct 2025 04:32 PM UTC
Coverage: 22.329% (-0.08% from 22.409%)

Pull Request #1761: Daemonsets OwnerRef cleanup labels
Commit: "node labels cleanup" by shivakunv (via github)

1 of 46 new or added lines in 2 files covered. (2.17%)

1 existing line in 1 file now uncovered.

2615 of 11711 relevant lines covered (22.33%)

0.25 hits per line

Source File

/controllers/clusterpolicy_controller.go (coverage: 0.0%)
/*
Copyright 2021.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
        "context"
        "fmt"

        "github.com/go-logr/logr"

        appsv1 "k8s.io/api/apps/v1"
        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/util/workqueue"

        "time"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/event"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/predicate"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        "sigs.k8s.io/controller-runtime/pkg/source"

        "github.com/NVIDIA/k8s-operator-libs/pkg/consts"

        gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1"
        "github.com/NVIDIA/gpu-operator/internal/conditions"
)

const (
        minDelayCR                      = 100 * time.Millisecond
        maxDelayCR                      = 3 * time.Second
        clusterPolicyControllerIndexKey = "metadata.nvidia.clusterpolicy.controller"
)

// blank assignment to verify that ReconcileClusterPolicy implements reconcile.Reconciler
var _ reconcile.Reconciler = &ClusterPolicyReconciler{}
var clusterPolicyCtrl ClusterPolicyController

// ClusterPolicyReconciler reconciles a ClusterPolicy object
type ClusterPolicyReconciler struct {
        client.Client
        Log              logr.Logger
        Scheme           *runtime.Scheme
        Namespace        string
        conditionUpdater conditions.Updater
}

// +kubebuilder:rbac:groups=nvidia.com,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=config.openshift.io,resources=clusterversions;proxies,verbs=get;list;watch
// +kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=use,resourceNames=privileged
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings;roles;rolebindings,verbs=*
// +kubebuilder:rbac:groups="",resources=namespaces;serviceaccounts;pods;pods/eviction;services;services/finalizers;endpoints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims;events;configmaps;secrets;nodes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=controllerrevisions,verbs=get;list;watch
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=scheduling.k8s.io,resources=priorityclasses,verbs=get;list;watch;create
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=image.openshift.io,resources=imagestreams,verbs=get;list;watch
// +kubebuilder:rbac:groups=node.k8s.io,resources=runtimeclasses,verbs=get;list;create;update;watch;delete
// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the ClusterPolicy object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile
func (r *ClusterPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
        _ = r.Log.WithValues("Reconciling ClusterPolicy", req.NamespacedName)

        // Fetch the ClusterPolicy instance
        instance := &gpuv1.ClusterPolicy{}
        var condErr error
        err := r.Get(ctx, req.NamespacedName, instance)
        if err != nil {
                err = fmt.Errorf("failed to get ClusterPolicy object: %v", err)
                r.Log.Error(nil, err.Error())
                clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusClusterPolicyUnavailable)
                if apierrors.IsNotFound(err) {
                        // Request object not found, could have been deleted after reconcile request.
                        // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
                        // Return and don't requeue
                        return reconcile.Result{}, nil
                }
                // Error reading the object - requeue the request.
                condErr = r.conditionUpdater.SetConditionsError(ctx, instance, conditions.ReconcileFailed, err.Error())
                if condErr != nil {
                        r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                }
                return reconcile.Result{}, err
        }

        // TODO: Handle deletion of the main ClusterPolicy and cycle to the next one.
        // We already have a main ClusterPolicy
        if clusterPolicyCtrl.singleton != nil && clusterPolicyCtrl.singleton.Name != instance.Name {
                instance.SetStatus(gpuv1.Ignored, clusterPolicyCtrl.operatorNamespace)
                // do not change `clusterPolicyCtrl.operatorMetrics.reconciliationStatus` here,
                // spurious reconciliation
                return ctrl.Result{}, err
        }

        err = clusterPolicyCtrl.addLabelsFinalizer(ctx, r, instance)
        if err != nil {
                // TODO Check if return error or continue
                r.Log.Error(nil, err.Error())
                condErr = r.conditionUpdater.SetConditionsError(ctx, instance, conditions.ReconcileFailed, err.Error())
                if condErr != nil {
                        r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                }
                return ctrl.Result{}, err
        }

        err = clusterPolicyCtrl.init(ctx, r, instance)
        if err != nil {
                err = fmt.Errorf("failed to initialize ClusterPolicy controller: %v", err)
                r.Log.Error(nil, err.Error())
                condErr = r.conditionUpdater.SetConditionsError(ctx, instance, conditions.ReconcileFailed, err.Error())
                if condErr != nil {
                        r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                }
                if clusterPolicyCtrl.operatorMetrics != nil {
                        clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusClusterPolicyUnavailable)
                }
                return ctrl.Result{}, err
        }

        if !clusterPolicyCtrl.hasNFDLabels {
                r.Log.Info("WARNING: NFD labels missing in the cluster, GPU nodes cannot be discovered.")
                clusterPolicyCtrl.operatorMetrics.reconciliationHasNFDLabels.Set(0)
        } else {
                clusterPolicyCtrl.operatorMetrics.reconciliationHasNFDLabels.Set(1)
        }
        if !clusterPolicyCtrl.hasGPUNodes {
                r.Log.Info("No GPU node can be found in the cluster.")
        }

        clusterPolicyCtrl.operatorMetrics.reconciliationTotal.Inc()
        overallStatus := gpuv1.Ready
        statesNotReady := []string{}
        for {
                status, statusError := clusterPolicyCtrl.step()
                if statusError != nil {
                        clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusNotReady)
                        clusterPolicyCtrl.operatorMetrics.reconciliationFailed.Inc()
                        updateCRState(ctx, r, req.NamespacedName, gpuv1.NotReady)
                        condErr = r.conditionUpdater.SetConditionsError(ctx, instance, conditions.ReconcileFailed, fmt.Sprintf("Failed to reconcile %s: %s", clusterPolicyCtrl.stateNames[clusterPolicyCtrl.idx], statusError.Error()))
                        if condErr != nil {
                                r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                        }
                        return ctrl.Result{RequeueAfter: time.Second * 5}, statusError
                }

                if status == gpuv1.NotReady {
                        overallStatus = gpuv1.NotReady
                        statesNotReady = append(statesNotReady, clusterPolicyCtrl.stateNames[clusterPolicyCtrl.idx-1])
                }
                r.Log.Info("ClusterPolicy step completed",
                        "state:", clusterPolicyCtrl.stateNames[clusterPolicyCtrl.idx-1],
                        "status", status)

                if clusterPolicyCtrl.last() {
                        break
                }
        }

        // if any state is not ready, requeue for reconcile after 5 seconds
        if overallStatus != gpuv1.Ready {
                clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusNotReady)
                clusterPolicyCtrl.operatorMetrics.reconciliationFailed.Inc()

                errStr := fmt.Sprintf("ClusterPolicy is not ready, states not ready: %v", statesNotReady)
                r.Log.Error(nil, errStr)
                updateCRState(ctx, r, req.NamespacedName, gpuv1.NotReady)
                condErr = r.conditionUpdater.SetConditionsError(ctx, instance, conditions.OperandNotReady, errStr)
                if condErr != nil {
                        r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                }
                return ctrl.Result{RequeueAfter: time.Second * 5}, nil
        }

        if !clusterPolicyCtrl.hasNFDLabels {
                // no NFD-labelled node in the cluster (required dependency),
                // watch periodically for the labels to appear
                var requeueAfter = time.Second * 45
                r.Log.Info("No NFD label found, polling for new nodes.",
                        "requeueAfter", requeueAfter)

                // Update CR state as ready as all states are complete
                updateCRState(ctx, r, req.NamespacedName, gpuv1.Ready)
                condErr = r.conditionUpdater.SetConditionsReady(ctx, instance, conditions.NFDLabelsMissing, "No NFD labels found")
                if condErr != nil {
                        r.Log.V(consts.LogLevelDebug).Error(nil, condErr.Error())
                }

                clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusSuccess)

                return ctrl.Result{RequeueAfter: requeueAfter}, nil
        }

        // Update CR state as ready as all states are complete
        updateCRState(ctx, r, req.NamespacedName, gpuv1.Ready)
        clusterPolicyCtrl.operatorMetrics.reconciliationStatus.Set(reconciliationStatusSuccess)
        clusterPolicyCtrl.operatorMetrics.reconciliationLastSuccess.Set(float64(time.Now().Unix()))

        var infoStr string
        if !clusterPolicyCtrl.hasGPUNodes {
                infoStr = "No GPU node found, watching for new nodes to join the cluster."
                r.Log.Info(infoStr, "hasNFDLabels", clusterPolicyCtrl.hasNFDLabels)
                if condErr = r.conditionUpdater.SetConditionsReady(ctx, instance, conditions.NoGPUNodes, infoStr); condErr != nil {
                        return ctrl.Result{}, condErr
                }
        } else {
                infoStr = "ClusterPolicy is ready as all resources have been successfully reconciled"
                r.Log.Info(infoStr)
                if condErr = r.conditionUpdater.SetConditionsReady(ctx, instance, conditions.Reconciled, infoStr); condErr != nil {
                        return ctrl.Result{}, condErr
                }
        }
        return ctrl.Result{}, nil
}

func updateCRState(ctx context.Context, r *ClusterPolicyReconciler, namespacedName types.NamespacedName, state gpuv1.State) {
        // Fetch latest instance and update state to avoid version mismatch
        instance := &gpuv1.ClusterPolicy{}
        err := r.Get(ctx, namespacedName, instance)
        if err != nil {
                r.Log.Error(err, "Failed to get ClusterPolicy instance for status update")
        }
        if instance.Status.State == state {
                // state is unchanged
                return
        }
        // Update the CR state
        instance.SetStatus(state, clusterPolicyCtrl.operatorNamespace)
        err = r.Client.Status().Update(ctx, instance)
        if err != nil {
                r.Log.Error(err, "Failed to update ClusterPolicy status")
        }
}

func addWatchNewGPUNode(r *ClusterPolicyReconciler, c controller.Controller, mgr ctrl.Manager) error {
        // Define a mapping from the Node object in the event to one or more
        // ClusterPolicy objects to Reconcile
        mapFn := func(ctx context.Context, n *corev1.Node) []reconcile.Request {
                // find all the ClusterPolicies to trigger their reconciliation
                opts := []client.ListOption{} // Namespace = "" to list across all namespaces.
                list := &gpuv1.ClusterPolicyList{}

                err := r.List(ctx, list, opts...)
                if err != nil {
                        r.Log.Error(err, "Unable to list ClusterPolicies")
                        return []reconcile.Request{}
                }

                cpToRec := []reconcile.Request{}

                for _, cp := range list.Items {
                        cpToRec = append(cpToRec, reconcile.Request{NamespacedName: types.NamespacedName{
                                Name:      cp.GetName(),
                                Namespace: cp.GetNamespace(),
                        }})
                }
                r.Log.Info("Reconciliate ClusterPolicies after node label update", "nb", len(cpToRec))

                return cpToRec
        }

        p := predicate.TypedFuncs[*corev1.Node]{
                CreateFunc: func(e event.TypedCreateEvent[*corev1.Node]) bool {
                        labels := e.Object.GetLabels()

                        return hasGPULabels(labels)
                },
                UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Node]) bool {

                        newLabels := e.ObjectNew.GetLabels()
                        oldLabels := e.ObjectOld.GetLabels()
                        nodeName := e.ObjectNew.GetName()

                        gpuCommonLabelMissing := hasGPULabels(newLabels) && !hasCommonGPULabel(newLabels)
                        gpuCommonLabelOutdated := !hasGPULabels(newLabels) && hasCommonGPULabel(newLabels)
                        migManagerLabelMissing := hasMIGCapableGPU(newLabels) && !hasMIGManagerLabel(newLabels)
                        commonOperandsLabelChanged := hasOperandsDisabled(oldLabels) != hasOperandsDisabled(newLabels)

                        oldGPUWorkloadConfig, _ := getWorkloadConfig(oldLabels, true)
                        newGPUWorkloadConfig, _ := getWorkloadConfig(newLabels, true)
                        gpuWorkloadConfigLabelChanged := oldGPUWorkloadConfig != newGPUWorkloadConfig

                        oldOSTreeLabel := oldLabels[nfdOSTreeVersionLabelKey]
                        newOSTreeLabel := newLabels[nfdOSTreeVersionLabelKey]
                        osTreeLabelChanged := oldOSTreeLabel != newOSTreeLabel

                        needsUpdate := gpuCommonLabelMissing ||
                                gpuCommonLabelOutdated ||
                                migManagerLabelMissing ||
                                commonOperandsLabelChanged ||
                                gpuWorkloadConfigLabelChanged ||
                                osTreeLabelChanged

                        if needsUpdate {
                                r.Log.Info("Node needs an update",
                                        "name", nodeName,
                                        "gpuCommonLabelMissing", gpuCommonLabelMissing,
                                        "gpuCommonLabelOutdated", gpuCommonLabelOutdated,
                                        "migManagerLabelMissing", migManagerLabelMissing,
                                        "commonOperandsLabelChanged", commonOperandsLabelChanged,
                                        "gpuWorkloadConfigLabelChanged", gpuWorkloadConfigLabelChanged,
                                        "osTreeLabelChanged", osTreeLabelChanged,
                                )
                        }

                        return needsUpdate
                },
                DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Node]) bool {
                        // if an RHCOS GPU node is deleted, trigger a
                        // reconciliation to ensure that there is no dangling
                        // OpenShift Driver-Toolkit (RHCOS version-specific)
                        // DaemonSet.
                        // NB: we cannot know here if the DriverToolkit is
                        // enabled.

                        labels := e.Object.GetLabels()

                        _, hasOSTreeLabel := labels[nfdOSTreeVersionLabelKey]

                        return hasGPULabels(labels) && hasOSTreeLabel
                },
        }

        err := c.Watch(
                source.Kind(mgr.GetCache(),
                        &corev1.Node{},
                        handler.TypedEnqueueRequestsFromMapFunc[*corev1.Node](mapFn),
                        p,
                ),
        )

        return err
}

// SetupWithManager sets up the controller with the Manager.
func (r *ClusterPolicyReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
        // Create a new controller
        c, err := controller.New("clusterpolicy-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: 1,
                RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](minDelayCR, maxDelayCR)})
        if err != nil {
                return err
        }

        // initialize condition updater
        r.conditionUpdater = conditions.NewClusterPolicyUpdater(mgr.GetClient())

        // Watch for changes to primary resource ClusterPolicy
        err = c.Watch(source.Kind(
                mgr.GetCache(),
                &gpuv1.ClusterPolicy{},
                &handler.TypedEnqueueRequestForObject[*gpuv1.ClusterPolicy]{},
                predicate.TypedGenerationChangedPredicate[*gpuv1.ClusterPolicy]{},
        ),
        )
        if err != nil {
                return err
        }

        // Watch for changes to Node labels and requeue the owner ClusterPolicy
        err = addWatchNewGPUNode(r, c, mgr)
        if err != nil {
                return err
        }

        // TODO(user): Modify this to be the types you create that are owned by the primary resource
        // Watch for changes to secondary resource Daemonsets and requeue the owner ClusterPolicy
        err = c.Watch(
                source.Kind(mgr.GetCache(),
                        &appsv1.DaemonSet{},
                        handler.TypedEnqueueRequestForOwner[*appsv1.DaemonSet](mgr.GetScheme(), mgr.GetRESTMapper(), &gpuv1.ClusterPolicy{},
                                handler.OnlyControllerOwner()),
                ),
        )
        if err != nil {
                return err
        }

        // Add an index key which allows our reconciler to quickly look up DaemonSets owned by it.
        //
        // (cdesiniotis) Ideally we could duplicate this index for all the k8s objects
        // that ClusterPolicy manages, that way, we could easily restrict the ClusterPolicy
        // controller to only update / delete objects it owns. Unfortunately, the
        // underlying implementation of the index does not support generic container types
        // (i.e. unstructured.Unstructured{}). For additional details, see the comment in
        // the last link of the below call stack:
        // IndexField(): https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/cache/informer_cache.go#L204
        //   GetInformer(): https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/cache/informer_cache.go#L168
        //     GVKForObject(): https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/client/apiutil/apimachinery.go#L113
        if err := mgr.GetFieldIndexer().IndexField(ctx, &appsv1.DaemonSet{}, clusterPolicyControllerIndexKey, func(rawObj client.Object) []string {
                ds := rawObj.(*appsv1.DaemonSet)
                owner := metav1.GetControllerOf(ds)
                if owner == nil {
                        return nil
                }
                if owner.APIVersion != gpuv1.SchemeGroupVersion.String() || owner.Kind != "ClusterPolicy" {
                        return nil
                }
                return []string{owner.Name}
        }); err != nil {
                return fmt.Errorf("failed to add index key: %w", err)
        }

        return nil
}