• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

redhat-openshift-ecosystem / openshift-preflight / 21217358652

21 Jan 2026 04:26PM UTC coverage: 84.067% (+0.4%) from 83.645%
21217358652

Pull #1352

github

web-flow
Merge 5f32ea548 into 746d55234
Pull Request #1352: Check files and hardlinks paths for subdirectory traversal during untar

13 of 14 new or added lines in 1 file covered. (92.86%)

1 existing line in 1 file now uncovered.

5118 of 6088 relevant lines covered (84.07%)

163.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.32
/internal/engine/engine.go
1
package engine
2

3
import (
4
        "archive/tar"
5
        "bytes"
6
        "context"
7
        "crypto/md5"
8
        "encoding/json"
9
        "fmt"
10
        "io"
11
        "io/fs"
12
        "maps"
13
        "net/http"
14
        "os"
15
        "os/exec"
16
        "path"
17
        "path/filepath"
18
        "regexp"
19
        "slices"
20
        "strings"
21
        "time"
22

23
        "github.com/go-logr/logr"
24

25
        "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts"
26
        "github.com/redhat-openshift-ecosystem/openshift-preflight/certification"
27
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/check"
28
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/image"
29
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/log"
30
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/openshift"
31
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/operatorsdk"
32
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/option"
33
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/policy"
34
        containerpol "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/policy/container"
35
        operatorpol "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/policy/operator"
36
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/pyxis"
37
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/rpm"
38
        "github.com/redhat-openshift-ecosystem/openshift-preflight/internal/runtime"
39

40
        "github.com/google/go-containerregistry/pkg/crane"
41
        "github.com/google/go-containerregistry/pkg/name"
42
        "github.com/google/go-containerregistry/pkg/v1/cache"
43
        "github.com/google/go-containerregistry/pkg/v1/mutate"
44
)
45

46
// New creates a new CraneEngine from the passed params
47
func New(ctx context.Context,
48
        checks []check.Check,
49
        kubeconfig []byte,
50
        cfg runtime.Config,
51
) (craneEngine, error) {
1✔
52
        return craneEngine{
1✔
53
                kubeconfig:         kubeconfig,
1✔
54
                dockerConfig:       cfg.DockerConfig,
1✔
55
                image:              cfg.Image,
1✔
56
                checks:             checks,
1✔
57
                isBundle:           cfg.Bundle,
1✔
58
                isScratch:          cfg.Scratch,
1✔
59
                platform:           cfg.Platform,
1✔
60
                insecure:           cfg.Insecure,
1✔
61
                manifestListDigest: cfg.ManifestListDigest,
1✔
62
        }, nil
1✔
63
}
1✔
64

65
// CraneEngine implements a certification.CheckEngine, and leverage crane to interact with
// the container registry and target image.
type craneEngine struct {
	// Kubeconfig is a byte slice containing a valid Kubeconfig to be used by checks.
	kubeconfig []byte
	// DockerConfig is the credential required to pull the image.
	dockerConfig string
	// Image is what is being tested, and should contain the
	// fully addressable path (including registry, namespaces, etc)
	// to the image
	image string
	// Checks is an array of all checks to be executed against
	// the image provided.
	checks []check.Check
	// Platform is the container platform to use. E.g. amd64.
	platform string

	// IsBundle is an indicator that the asset is a bundle.
	isBundle bool

	// IsScratch is an indicator that the asset is a scratch image
	isScratch bool

	// Insecure controls whether to allow an insecure connection to
	// the registry crane connects with.
	insecure bool

	// ManifestListDigest is the sha256 digest for the manifest list
	manifestListDigest string

	// imageRef holds the pulled image's details; populated by ExecuteChecks
	// and passed to each check's Validate call.
	imageRef image.ImageReference
	// results accumulates the outcome of each executed check; read back via
	// the Results method.
	results  certification.Results
}
98

99
// CranePlatform returns the container platform (e.g. amd64) the engine was
// configured with. Part of the option.CraneConfig implementation.
func (c *craneEngine) CranePlatform() string {
	return c.platform
}
6✔
102

103
// CraneDockerConfig returns the docker config credential used to pull the
// image. Part of the option.CraneConfig implementation.
func (c *craneEngine) CraneDockerConfig() string {
	return c.dockerConfig
}
6✔
106

107
// CraneInsecure reports whether insecure registry connections are allowed.
// Part of the option.CraneConfig implementation.
func (c *craneEngine) CraneInsecure() bool {
	return c.insecure
}
6✔
110

111
// Compile-time assertion that craneEngine satisfies option.CraneConfig.
var _ option.CraneConfig = &craneEngine{}
112

113
// ExecuteChecks pulls the target image, extracts its filesystem into a
// temporary directory, writes the cert-image (and, for non-scratch images,
// RPM manifest) artifacts, then runs every configured check and accumulates
// the outcomes into c.results. Returns an error only for infrastructure
// failures (pull/extract/artifact writing); individual check failures are
// recorded in c.results, not returned.
func (c *craneEngine) ExecuteChecks(ctx context.Context) error {
	logger := logr.FromContextOrDiscard(ctx)
	logger.Info("target image", "image", c.image)

	// pull the image and save to fs
	logger.V(log.DBG).Info("pulling image from target registry")
	options := option.GenerateCraneOptions(ctx, c)
	img, err := crane.Pull(c.image, options...)
	if err != nil {
		return fmt.Errorf("failed to pull remote container: %v", err)
	}

	// create tmpdir to receive extracted fs; removed via the deferred
	// RemoveAll below regardless of how this function exits.
	tmpdir, err := os.MkdirTemp(os.TempDir(), "preflight-*")
	if err != nil {
		return fmt.Errorf("failed to create temporary directory: %v", err)
	}
	logger.V(log.DBG).Info("created temporary directory", "path", tmpdir)
	defer func() {
		if err := os.RemoveAll(tmpdir); err != nil {
			logger.Error(err, "unable to clean up tmpdir", "tempDir", tmpdir)
		}
	}()

	// Layer cache directory: crane caches pulled layers here so they are
	// only fetched once.
	imageTarPath := path.Join(tmpdir, "cache")
	if err := os.Mkdir(imageTarPath, 0o755); err != nil {
		return fmt.Errorf("failed to create cache directory: %s: %v", imageTarPath, err)
	}

	img = cache.Image(img, cache.NewFilesystemCache(imageTarPath))

	containerFSPath := path.Join(tmpdir, "fs")
	if err := os.Mkdir(containerFSPath, 0o755); err != nil {
		return fmt.Errorf("failed to create container expansion directory: %s: %v", containerFSPath, err)
	}

	// Wrap this critical section in a closure so that we can close the
	// mutate.Extract reader sooner than the end of the checks
	if err := func() error {
		// export/flatten, and extract
		logger.V(log.DBG).Info("exporting and flattening image")
		fs := mutate.Extract(img)
		defer fs.Close()

		logger.V(log.DBG).Info("extracting container filesystem", "path", containerFSPath)
		if err := untar(ctx, containerFSPath, fs); err != nil {
			return fmt.Errorf("failed to extract tarball: %v", err)
		}

		// explicitly discarding from the reader for cases where there is data in the reader after it sends an EOF
		_, err = io.Copy(io.Discard, fs)
		if err != nil {
			return fmt.Errorf("failed to drain io reader: %v", err)
		}
		return nil
	}(); err != nil {
		return err
	}

	reference, err := name.ParseReference(c.image)
	if err != nil {
		return fmt.Errorf("image uri could not be parsed: %v", err)
	}

	// store the image internals in the engine image reference to pass to validations.
	c.imageRef = image.ImageReference{
		ImageURI:           c.image,
		ImageFSPath:        containerFSPath,
		ImageInfo:          img,
		ImageRegistry:      reference.Context().RegistryStr(),
		ImageRepository:    reference.Context().RepositoryStr(),
		ImageTagOrSha:      reference.Identifier(),
		ManifestListDigest: c.manifestListDigest,
	}

	if err := writeCertImage(ctx, c.imageRef); err != nil {
		return fmt.Errorf("could not write cert image: %v", err)
	}

	// Scratch images have no RPM database, so the manifest is skipped.
	if !c.isScratch {
		if err := writeRPMManifest(ctx, containerFSPath); err != nil {
			return fmt.Errorf("could not write rpm manifest: %v", err)
		}
	}

	if c.isBundle {
		// Record test cluster version. A failure here is logged but not
		// fatal; TestedOn is recorded with whatever value was returned.
		version, err := openshift.GetOpenshiftClusterVersion(ctx, c.kubeconfig)
		if err != nil {
			logger.Error(err, "could not determine test cluster version")
		}
		c.results.TestedOn = version
	} else {
		logger.V(log.DBG).Info("Container checks do not require a cluster. skipping cluster version check.")
		c.results.TestedOn = runtime.UnknownOpenshiftClusterVersion()
	}

	// execute checks
	logger.V(log.DBG).Info("executing checks")
	for _, executedCheck := range c.checks {
		logger := logger.WithValues("check", executedCheck.Name())
		ctx := logr.NewContext(ctx, logger)
		c.results.TestedImage = c.image

		logger.V(log.DBG).Info("running check")
		if executedCheck.Metadata().Level == check.LevelOptional || executedCheck.Metadata().Level == check.LevelWarn {
			logger.Info(fmt.Sprintf("Check %s is not currently being enforced.", executedCheck.Name()))
		}

		// run the validation, timing each check individually.
		checkStartTime := time.Now()
		checkPassed, err := executedCheck.Validate(ctx, c.imageRef)
		checkElapsedTime := time.Since(checkStartTime)

		// A Validate error is recorded as an Error result (unless the check
		// is optional, in which case appendUnlessOptional drops it) and does
		// not abort the remaining checks.
		if err != nil {
			logger.WithValues("result", "ERROR", "err", err.Error()).Info("check completed")
			result := certification.Result{Check: executedCheck, ElapsedTime: checkElapsedTime}
			c.results.Errors = appendUnlessOptional(c.results.Errors, *result.WithError(err))
			continue
		}

		if !checkPassed {
			// if a test doesn't pass but is of level warn include it in warning results, instead of failed results
			if executedCheck.Metadata().Level == check.LevelWarn {
				logger.WithValues("result", "WARNING").Info("check completed")
				c.results.Warned = appendUnlessOptional(c.results.Warned, certification.Result{Check: executedCheck, ElapsedTime: checkElapsedTime})
				continue
			}
			logger.WithValues("result", "FAILED").Info("check completed")
			c.results.Failed = appendUnlessOptional(c.results.Failed, certification.Result{Check: executedCheck, ElapsedTime: checkElapsedTime})
			continue
		}

		logger.WithValues("result", "PASSED").Info("check completed")
		c.results.Passed = appendUnlessOptional(c.results.Passed, certification.Result{Check: executedCheck, ElapsedTime: checkElapsedTime})
	}

	// Overall pass requires no errors and no (non-warn) failures; warnings
	// alone do not fail the run.
	if len(c.results.Errors) > 0 || len(c.results.Failed) > 0 {
		c.results.PassedOverall = false
	} else {
		c.results.PassedOverall = true
	}

	if c.isBundle { // for operators:
		// hash the contents of the bundle. A hashing error is logged but not
		// fatal; CertificationHash is recorded with whatever was returned.
		md5sum, err := generateBundleHash(ctx, c.imageRef.ImageFSPath)
		if err != nil {
			logger.Error(err, "could not generate bundle hash")
		}
		c.results.CertificationHash = md5sum
	} else { // for containers:
		// Inform the user about the sha/tag binding.

		// By this point, we should have already resolved the digest so
		// we don't handle this error, but fail safe and don't log a potentially
		// incorrect line message to the user.
		if resolvedDigest, err := c.imageRef.ImageInfo.Digest(); err == nil {
			msg, warn := tagDigestBindingInfo(c.imageRef.ImageTagOrSha, resolvedDigest.String())
			if warn {
				logger.Info(fmt.Sprintf("Warning: %s", msg))
			} else {
				logger.Info(msg)
			}
		}
	}

	return nil
}
281

282
func appendUnlessOptional(results []certification.Result, result certification.Result) []certification.Result {
28✔
283
        if result.Check.Metadata().Level == "optional" {
36✔
284
                return results
8✔
285
        }
8✔
286
        return append(results, result)
20✔
287
}
288

289
// tagDigestBindingInfo builds a user-facing message describing tag and digest
// binding semantics. providedIdentifier is the tag or digest of the image
// exactly as the user gave it at the command line; resolvedDigest is the
// digest the image resolved to. warn reports whether the message should be
// surfaced as a warning.
func tagDigestBindingInfo(providedIdentifier string, resolvedDigest string) (msg string, warn bool) {
	// A digest reference carries no tag, so certification would publish the
	// image without one; surface that case as a warning.
	if strings.HasPrefix(providedIdentifier, "sha256:") {
		msg = "You've provided an image by digest. " +
			"When submitting this image to Red Hat for certification, " +
			"no tag will be associated with this image. " +
			"If you would like to associate a tag with this image, " +
			"please rerun this tool replacing your image reference with a tag."
		return msg, true
	}

	// Tag reference: informational message only.
	msg = fmt.Sprintf(
		`This image's tag %s will be paired with digest %s `+
			`once this image has been published in accordance `+
			`with Red Hat Certification policy. `+
			`You may then add or remove any supplemental tags `+
			`through your Red Hat Connect portal as you see fit.`,
		providedIdentifier, resolvedDigest,
	)
	return msg, false
}
311

312
func generateBundleHash(ctx context.Context, bundlePath string) (string, error) {
3✔
313
        logger := logr.FromContextOrDiscard(ctx)
3✔
314
        files := make(map[string]string)
3✔
315
        fileSystem := os.DirFS(bundlePath)
3✔
316

3✔
317
        hashBuffer := bytes.Buffer{}
3✔
318

3✔
319
        _ = fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
9✔
320
                if err != nil {
6✔
321
                        return fmt.Errorf("could not read bundle directory: %s: %w", path, err)
×
322
                }
×
323
                if d.Name() == "Dockerfile" {
6✔
324
                        return nil
×
325
                }
×
326
                if d.IsDir() {
9✔
327
                        return nil
3✔
328
                }
3✔
329
                filebytes, err := fs.ReadFile(fileSystem, path)
3✔
330
                if err != nil {
4✔
331
                        return fmt.Errorf("could not read file: %s: %w", path, err)
1✔
332
                }
1✔
333
                md5sum := fmt.Sprintf("%x", md5.Sum(filebytes))
2✔
334
                files[md5sum] = fmt.Sprintf("./%s", path)
2✔
335
                return nil
2✔
336
        })
337

338
        keys := slices.Collect(maps.Keys(files))
3✔
339
        slices.Sort(keys)
3✔
340

3✔
341
        for _, k := range keys {
5✔
342
                hashBuffer.WriteString(fmt.Sprintf("%s  %s\n", k, files[k]))
2✔
343
        }
2✔
344

345
        artifactsWriter := artifacts.WriterFromContext(ctx)
3✔
346
        if artifactsWriter != nil {
6✔
347
                _, err := artifactsWriter.WriteFile("hashes.txt", &hashBuffer)
3✔
348
                if err != nil {
3✔
349
                        return "", fmt.Errorf("could not write hash file to artifacts dir: %w", err)
×
350
                }
×
351
        }
352

353
        sum := fmt.Sprintf("%x", md5.Sum(hashBuffer.Bytes()))
3✔
354

3✔
355
        logger.V(log.DBG).Info("md5 sum", "md5sum", sum)
3✔
356

3✔
357
        return sum, nil
3✔
358
}
359

360
// Results will return the results of check execution. The returned value is
// only meaningful after ExecuteChecks has run; before that it is the zero
// value. ctx is currently unused.
func (c *craneEngine) Results(ctx context.Context) certification.Results {
	return c.results
}
×
364

365
// Untar takes a destination path and a reader; a tar reader loops over the tarfile
// creating the file structure at 'dst' along the way, and writing any files. This
// function uses a pre-allocated buffer to reduce allocations and is not goroutine-safe.
// Entries (files, symlinks, and hardlinks) whose resolved paths would escape
// dst are rejected: escaping file paths are a hard error, while escaping link
// targets are logged and skipped.
func untar(ctx context.Context, dst string, r io.Reader) error {
	logger := logr.FromContextOrDiscard(ctx)
	tr := tar.NewReader(r)
	// dst doesn't change, clean it once and reuse.
	dst = filepath.Clean(dst)

	// Buffer for io.CopyBuffer operations to reduce allocations
	buf := make([]byte, 32*1024)

	for {
		header, err := tr.Next()

		switch {
		// if no more files are found return
		case err == io.EOF:
			return nil

		// return any other error
		case err != nil:
			return err

		// if the header is nil, just skip it (not sure how this happens)
		case header == nil:
			continue
		}
		// Safeguard for cases where we're trying to extract to something
		// outside of our base fs. filepath.Join also cleans the result, so
		// any ".." components in header.Name are resolved before the check.
		target := filepath.Join(dst, header.Name)

		// Check if target escapes the destination directory. Use path separator to avoid partial dirname matches.
		if target != dst && !strings.HasPrefix(target, dst+string(os.PathSeparator)) {
			return fmt.Errorf("untar error, extracted file would reach outside of extraction base directory: %s resolves to %s", header.Name, target)
		}

		// check the file type
		switch header.Typeflag {
		// if its a dir and it doesn't exist create it
		case tar.TypeDir:
			if _, err := os.Stat(target); err != nil {
				if err := os.MkdirAll(target, 0o755); err != nil {
					return err
				}
			}

		// if it's a file create it
		case tar.TypeReg:
			// If the file's parent dir doesn't exist, create it.
			dirname := filepath.Dir(target)
			if _, err := os.Stat(dirname); err != nil {
				if err := os.MkdirAll(dirname, 0o755); err != nil {
					return err
				}
			}
			// NOTE(review): opened without os.O_TRUNC, so if target already
			// exists and is longer than the new content, trailing bytes of
			// the old file would survive — confirm whether duplicate paths
			// can occur in these archives.
			f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
			if err != nil {
				return err
			}

			// copy over contents
			if _, err := io.CopyBuffer(f, tr, buf); err != nil {
				f.Close()
				return err
			}

			// manually close here after each file operation; defering would cause each file close
			// to wait until all operations have completed.
			f.Close()

		// if it's a link create it
		case tar.TypeSymlink, tar.TypeLink:
			// Resolve the link target relative to the link's own directory
			// (or to the filesystem root for top-level entries).
			nobaseLinkname, nobaseName := resolveLinkPaths(header.Linkname, header.Name)
			fullLinkname := filepath.Join(dst, nobaseLinkname)
			fullName := filepath.Join(dst, nobaseName)
			// Safeguard for cases where we're trying to link to something
			// outside of our base fs. Escaping links are skipped rather than
			// treated as fatal, unlike regular-file traversal above.
			if !strings.HasPrefix(fullLinkname, dst+string(os.PathSeparator)) {
				logger.V(log.DBG).Info("Error processing link. Link would reach outside of the image archive. Skipping this link", "link", header.Name, "linkedTo", header.Linkname, "resolvedTo", fullLinkname, "type", header.Typeflag)
				continue
			}
			// Create the new link's directory if it doesn't exist.
			dirname := filepath.Dir(fullName)
			if _, err := os.Stat(dirname); err != nil {
				if err := os.MkdirAll(dirname, 0o755); err != nil {
					return err
				}
			}

			// Hardlink by default; symlink when the header says so.
			linkFn := os.Link
			if header.Typeflag == tar.TypeSymlink {
				linkFn = os.Symlink
			}

			// Link creation failures (e.g. pre-existing targets) are logged
			// and skipped rather than aborting the extraction.
			err := linkFn(fullLinkname, fullName)
			if err != nil {
				logger.V(log.DBG).Info(fmt.Sprintf("Error creating: %s. Ignoring.", header.Name), "link", fullName, "linkedTo", fullLinkname, "type", header.Typeflag, "reason", err)
				continue
			}
		}
	}
}
468

469
// resolveLinkPaths determines whether oldname (the link target) is an
// absolute or a relative path, and when relative, resolves it against the
// directory containing newname (the link itself). It returns the resolved
// target and newname unchanged.
func resolveLinkPaths(oldname, newname string) (string, string) {
	// Absolute targets need no resolution.
	if filepath.IsAbs(oldname) {
		return oldname, newname
	}

	// Resolve relative to the directory that holds the link. When the link
	// sits at the root of the filesystem, filepath.Dir yields "." — swap it
	// for "/" so relative pathing (including "..") resolves against the base
	// of the filesystem, effectively making the target absolute.
	var base string
	if d := filepath.Dir(newname); d == "." {
		base = "/"
	} else {
		base = d
	}

	return filepath.Join(base, oldname), newname
}
488

489
// writeCertImage takes imageRef and writes it to disk as JSON representing a pyxis.CertImage
// struct. The file is written at path certification.DefaultCertImageFilename.
// It gathers the image's config, manifest, digest, raw config, and size, plus
// per-layer uncompressed sizes (measured by streaming each layer to
// io.Discard), and writes the artifact only when an artifacts writer is
// present on the context.
//
//nolint:unparam // ctx is unused. Keep for future use.
func writeCertImage(ctx context.Context, imageRef image.ImageReference) error {
	logger := logr.FromContextOrDiscard(ctx)

	config, err := imageRef.ImageInfo.ConfigFile()
	if err != nil {
		return fmt.Errorf("failed to get image config file: %w", err)
	}

	manifest, err := imageRef.ImageInfo.Manifest()
	if err != nil {
		return fmt.Errorf("failed to get image manifest: %w", err)
	}

	digest, err := imageRef.ImageInfo.Digest()
	if err != nil {
		return fmt.Errorf("failed to get image digest: %w", err)
	}

	rawConfig, err := imageRef.ImageInfo.RawConfigFile()
	if err != nil {
		return fmt.Errorf("failed to image raw config file: %w", err)
	}

	size, err := imageRef.ImageInfo.Size()
	if err != nil {
		return fmt.Errorf("failed to get image size: %w", err)
	}

	labels := convertLabels(config.Config.Labels)
	layerSizes := make([]pyxis.Layer, 0, len(config.RootFS.DiffIDs))
	for _, diffid := range config.RootFS.DiffIDs {
		layer, err := imageRef.ImageInfo.LayerByDiffID(diffid)
		if err != nil {
			return fmt.Errorf("could not get layer by diff id: %w", err)
		}

		uncompressed, err := layer.Uncompressed()
		if err != nil {
			return fmt.Errorf("could not get uncompressed layer: %w", err)
		}
		// Stream the layer to io.Discard purely to measure its
		// uncompressed size.
		written, err := io.Copy(io.Discard, uncompressed)
		if err != nil {
			return fmt.Errorf("could not copy from layer: %w", err)
		}

		pyxisLayer := pyxis.Layer{
			LayerID: diffid.String(),
			Size:    written,
		}
		layerSizes = append(layerSizes, pyxisLayer)
	}

	manifestLayers := make([]string, 0, len(manifest.Layers))

	// CertImage expects the layers to be stored in the order from base to top.
	// Index 0 is the base layer, and the last index is the top layer.
	for _, layer := range slices.Backward(manifest.Layers) {
		manifestLayers = append(manifestLayers, layer.Digest.String())
	}

	sumLayersSizeBytes := sumLayerSizeBytes(layerSizes)

	addedDate := time.Now().UTC().Format(time.RFC3339)

	tags := make([]pyxis.Tag, 0, 1)
	tags = append(tags, pyxis.Tag{
		AddedDate: addedDate,
		Name:      imageRef.ImageTagOrSha,
	})

	repositories := make([]pyxis.Repository, 0, 1)
	repositories = append(repositories, pyxis.Repository{
		PushDate:           addedDate,
		Registry:           imageRef.ImageRegistry,
		Repository:         imageRef.ImageRepository,
		Tags:               tags,
		ManifestListDigest: imageRef.ManifestListDigest,
	})

	certImage := pyxis.CertImage{
		DockerImageDigest: digest.String(),
		DockerImageID:     manifest.Config.Digest.String(),
		ImageID:           digest.String(),
		Architecture:      config.Architecture,
		ParsedData: &pyxis.ParsedData{
			Architecture:           config.Architecture,
			Command:                strings.Join(config.Config.Cmd, " "),
			Created:                config.Created.String(),
			ImageID:                digest.String(),
			Labels:                 labels,
			Layers:                 manifestLayers,
			OS:                     config.OS,
			Size:                   size,
			UncompressedLayerSizes: layerSizes,
		},
		RawConfig:         string(rawConfig),
		Repositories:      repositories,
		SumLayerSizeBytes: sumLayersSizeBytes,
		// This is an assumption that the DiffIDs are in order from base up.
		// Need more evidence that this is always the case.
		// NOTE(review): DiffIDs[0] panics if the image has zero layers —
		// confirm whether a fully-empty (scratch, no-layer) image can reach
		// this point.
		UncompressedTopLayerID: config.RootFS.DiffIDs[0].String(),
	}

	// calling MarshalIndent so the json file written to disk is human-readable when opened
	certImageJSON, err := json.MarshalIndent(certImage, "", "    ")
	if err != nil {
		return fmt.Errorf("could not marshal cert image: %w", err)
	}

	artifactWriter := artifacts.WriterFromContext(ctx)
	if artifactWriter != nil {
		fileName, err := artifactWriter.WriteFile(check.DefaultCertImageFilename, bytes.NewReader(certImageJSON))
		if err != nil {
			return fmt.Errorf("failed to save file to artifacts directory: %w", err)
		}

		logger.V(log.TRC).Info("image config written to disk", "filename", fileName)
	}

	return nil
}
614

615
// getBgName derives a source RPM's base package name from its filename by
// dropping the trailing "-<version>-<release>" segments. For example,
// "bash-5.1.8-6.el9.src.rpm" yields "bash".
func getBgName(srcrpm string) string {
	parts := strings.Split(srcrpm, "-")
	// Guard against malformed names with no '-' separator: the unguarded
	// slice expression below would panic with a negative bound. Return the
	// input unchanged in that case.
	if len(parts) < 2 {
		return srcrpm
	}
	return strings.Join(parts[0:len(parts)-2], "-")
}
3✔
619

620
func writeRPMManifest(ctx context.Context, containerFSPath string) error {
4✔
621
        logger := logr.FromContextOrDiscard(ctx)
4✔
622
        pkgList, err := rpm.GetPackageList(ctx, containerFSPath)
4✔
623
        if err != nil {
8✔
624
                logger.Error(err, "could not get rpm list, continuing without it")
4✔
625
        }
4✔
626

627
        // covert rpm struct to pxyis struct
628
        rpms := make([]pyxis.RPM, 0, len(pkgList))
4✔
629
        rpmSuffixRegexp, err := regexp.Compile("(-[0-9].*)")
4✔
630
        if err != nil {
4✔
631
                return fmt.Errorf("error while compiling regexp: %w", err)
×
632
        }
×
633
        pgpKeyIdRegexp, err := regexp.Compile(".*, Key ID (.*)")
4✔
634
        if err != nil {
4✔
635
                return fmt.Errorf("error while compiling regexp: %w", err)
×
636
        }
×
637
        for _, packageInfo := range pkgList {
4✔
638
                var bgName, endChop, srpmNevra, pgpKeyID string
×
639

×
640
                // accounting for the fact that not all packages have a source rpm
×
641
                if len(packageInfo.SourceRpm) > 0 {
×
642
                        bgName = getBgName(packageInfo.SourceRpm)
×
643
                        endChop = strings.TrimPrefix(strings.TrimSuffix(rpmSuffixRegexp.FindString(packageInfo.SourceRpm), ".rpm"), "-")
×
644

×
645
                        srpmNevra = fmt.Sprintf("%s-%d:%s", bgName, packageInfo.Epoch, endChop)
×
646
                }
×
647

648
                if len(packageInfo.PGP) > 0 {
×
649
                        matches := pgpKeyIdRegexp.FindStringSubmatch(packageInfo.PGP)
×
650
                        if matches != nil {
×
651
                                pgpKeyID = matches[1]
×
652
                        } else {
×
653
                                logger.V(log.DBG).Info("string did not match the format required", "pgp", packageInfo.PGP)
×
654
                                pgpKeyID = ""
×
655
                        }
×
656
                }
657

658
                pyxisRPM := pyxis.RPM{
×
659
                        Architecture: packageInfo.Arch,
×
660
                        Gpg:          pgpKeyID,
×
661
                        Name:         packageInfo.Name,
×
662
                        Nvra:         fmt.Sprintf("%s-%s-%s.%s", packageInfo.Name, packageInfo.Version, packageInfo.Release, packageInfo.Arch),
×
663
                        Release:      packageInfo.Release,
×
664
                        SrpmName:     bgName,
×
665
                        SrpmNevra:    srpmNevra,
×
666
                        Summary:      packageInfo.Summary,
×
667
                        Version:      packageInfo.Version,
×
668
                }
×
669

×
670
                rpms = append(rpms, pyxisRPM)
×
671
        }
672

673
        rpmManifest := pyxis.RPMManifest{
4✔
674
                RPMS: rpms,
4✔
675
        }
4✔
676

4✔
677
        // calling MarshalIndent so the json file written to disk is human-readable when opened
4✔
678
        rpmManifestJSON, err := json.MarshalIndent(rpmManifest, "", "    ")
4✔
679
        if err != nil {
4✔
680
                return fmt.Errorf("could not marshal rpm manifest: %w", err)
×
681
        }
×
682

683
        if artifactWriter := artifacts.WriterFromContext(ctx); artifactWriter != nil {
8✔
684
                fileName, err := artifactWriter.WriteFile(check.DefaultRPMManifestFilename, bytes.NewReader(rpmManifestJSON))
4✔
685
                if err != nil {
4✔
686
                        return fmt.Errorf("failed to save file to artifacts directory: %w", err)
×
687
                }
×
688

689
                logger.V(log.TRC).Info("rpm manifest written to disk", "filename", fileName)
4✔
690
        }
691

692
        return nil
4✔
693
}
694

695
func sumLayerSizeBytes(layers []pyxis.Layer) int64 {
4✔
696
        var sum int64
4✔
697
        for _, layer := range layers {
16✔
698
                sum += layer.Size
12✔
699
        }
12✔
700

701
        return sum
4✔
702
}
703

704
func convertLabels(imageLabels map[string]string) []pyxis.Label {
4✔
705
        pyxisLabels := make([]pyxis.Label, 0, len(imageLabels))
4✔
706
        for key, value := range imageLabels {
4✔
707
                label := pyxis.Label{
×
708
                        Name:  key,
×
709
                        Value: value,
×
710
                }
×
711

×
712
                pyxisLabels = append(pyxisLabels, label)
×
713
        }
×
714

715
        return pyxisLabels
4✔
716
}
717

718
// OperatorCheckConfig contains configuration relevant to an individual check's execution.
type OperatorCheckConfig struct {
	// ScorecardImage is handed to operatorsdk.New; ScorecardWaitTime,
	// ScorecardNamespace, and ScorecardServiceAccount parameterize the
	// scorecard checks (see InitializeOperatorChecks).
	ScorecardImage, ScorecardWaitTime, ScorecardNamespace, ScorecardServiceAccount string
	// IndexImage, DockerConfig, and Channel are consumed by the
	// DeployableByOLM check.
	IndexImage, DockerConfig, Channel                                              string
	// Kubeconfig holds raw kubeconfig bytes passed to the scorecard checks.
	Kubeconfig                                                                     []byte
	// CSVTimeout and SubscriptionTimeout are forwarded to the
	// DeployableByOLM check via its WithCSVTimeout / WithSubscriptionTimeout
	// options.
	CSVTimeout                                                                     time.Duration
	SubscriptionTimeout                                                            time.Duration
}
726

727
// InitializeOperatorChecks returns the operator checks for policy p given cfg.
func InitializeOperatorChecks(ctx context.Context, p policy.Policy, cfg OperatorCheckConfig) ([]check.Check, error) {
	switch p {
	case policy.PolicyOperator:
		return []check.Check{
			operatorpol.NewScorecardBasicSpecCheck(operatorsdk.New(cfg.ScorecardImage, exec.Command), cfg.ScorecardNamespace, cfg.ScorecardServiceAccount, cfg.Kubeconfig, cfg.ScorecardWaitTime),
			operatorpol.NewScorecardOlmSuiteCheck(operatorsdk.New(cfg.ScorecardImage, exec.Command), cfg.ScorecardNamespace, cfg.ScorecardServiceAccount, cfg.Kubeconfig, cfg.ScorecardWaitTime),
			operatorpol.NewDeployableByOlmCheck(cfg.IndexImage, cfg.DockerConfig, cfg.Channel, operatorpol.WithCSVTimeout(cfg.CSVTimeout), operatorpol.WithSubscriptionTimeout(cfg.SubscriptionTimeout)),
			operatorpol.NewValidateOperatorBundleCheck(),
			// The certified-images check uses the default Pyxis host with an
			// empty token and project ID.
			operatorpol.NewCertifiedImagesCheck(pyxis.NewPyxisClient(
				check.DefaultPyxisHost,
				"",
				"",
				&http.Client{Timeout: 60 * time.Second}),
			),
			operatorpol.NewSecurityContextConstraintsCheck(),
			&operatorpol.RelatedImagesCheck{},
			operatorpol.FollowsRestrictedNetworkEnablementGuidelines{},
			operatorpol.RequiredAnnotations{},
		}, nil
	}

	return nil, fmt.Errorf("provided operator policy %s is unknown", p)
}
751

752
// ContainerCheckConfig contains configuration relevant to an individual check's execution.
type ContainerCheckConfig struct {
	// DockerConfig is passed to the unique-tag check; PyxisAPIToken,
	// CertificationProjectID, and PyxisHost configure the Pyxis client used
	// by the based-on-UBI check (see InitializeContainerChecks).
	// NOTE(review): exact semantics of DockerConfig (path vs. contents)
	// are not visible here — confirm against callers.
	DockerConfig, PyxisAPIToken, CertificationProjectID, PyxisHost string
}
756

757
// InitializeContainerChecks returns the appropriate checks for policy p given cfg.
758
func InitializeContainerChecks(ctx context.Context, p policy.Policy, cfg ContainerCheckConfig) ([]check.Check, error) {
16✔
759
        switch p {
16✔
760
        case policy.PolicyContainer:
3✔
761
                return []check.Check{
3✔
762
                        &containerpol.HasLicenseCheck{},
3✔
763
                        containerpol.NewHasUniqueTagCheck(cfg.DockerConfig),
3✔
764
                        &containerpol.MaxLayersCheck{},
3✔
765
                        &containerpol.HasNoProhibitedPackagesCheck{},
3✔
766
                        &containerpol.HasRequiredLabelsCheck{},
3✔
767
                        &containerpol.HasNoProhibitedLabelsCheck{},
3✔
768
                        &containerpol.RunAsNonRootCheck{},
3✔
769
                        &containerpol.HasModifiedFilesCheck{},
3✔
770
                        containerpol.NewBasedOnUbiCheck(pyxis.NewPyxisClient(
3✔
771
                                cfg.PyxisHost,
3✔
772
                                cfg.PyxisAPIToken,
3✔
773
                                cfg.CertificationProjectID,
3✔
774
                                &http.Client{Timeout: 60 * time.Second})),
3✔
775
                        &containerpol.HasProhibitedContainerName{},
3✔
776
                }, nil
3✔
777
        case policy.PolicyRoot:
3✔
778
                return []check.Check{
3✔
779
                        &containerpol.HasLicenseCheck{},
3✔
780
                        containerpol.NewHasUniqueTagCheck(cfg.DockerConfig),
3✔
781
                        &containerpol.MaxLayersCheck{},
3✔
782
                        &containerpol.HasNoProhibitedPackagesCheck{},
3✔
783
                        &containerpol.HasRequiredLabelsCheck{},
3✔
784
                        &containerpol.HasNoProhibitedLabelsCheck{},
3✔
785
                        &containerpol.HasModifiedFilesCheck{},
3✔
786
                        containerpol.NewBasedOnUbiCheck(pyxis.NewPyxisClient(
3✔
787
                                cfg.PyxisHost,
3✔
788
                                cfg.PyxisAPIToken,
3✔
789
                                cfg.CertificationProjectID,
3✔
790
                                &http.Client{Timeout: 60 * time.Second})),
3✔
791
                        &containerpol.HasProhibitedContainerName{},
3✔
792
                }, nil
3✔
793
        case policy.PolicyScratchNonRoot:
3✔
794
                return []check.Check{
3✔
795
                        &containerpol.HasLicenseCheck{},
3✔
796
                        containerpol.NewHasUniqueTagCheck(cfg.DockerConfig),
3✔
797
                        &containerpol.MaxLayersCheck{},
3✔
798
                        &containerpol.HasRequiredLabelsCheck{},
3✔
799
                        &containerpol.HasNoProhibitedLabelsCheck{},
3✔
800
                        &containerpol.RunAsNonRootCheck{},
3✔
801
                        &containerpol.HasProhibitedContainerName{},
3✔
802
                }, nil
3✔
803
        case policy.PolicyScratchRoot:
3✔
804
                return []check.Check{
3✔
805
                        &containerpol.HasLicenseCheck{},
3✔
806
                        containerpol.NewHasUniqueTagCheck(cfg.DockerConfig),
3✔
807
                        &containerpol.MaxLayersCheck{},
3✔
808
                        &containerpol.HasRequiredLabelsCheck{},
3✔
809
                        &containerpol.HasNoProhibitedLabelsCheck{},
3✔
810
                        &containerpol.HasProhibitedContainerName{},
3✔
811
                }, nil
3✔
812
        case policy.PolicyKonflux:
3✔
813
                return []check.Check{
3✔
814
                        &containerpol.HasLicenseCheck{},
3✔
815
                        containerpol.NewHasUniqueTagCheck(cfg.DockerConfig),
3✔
816
                        &containerpol.MaxLayersCheck{},
3✔
817
                        &containerpol.HasNoProhibitedPackagesCheck{},
3✔
818
                        &containerpol.HasRequiredLabelsCheck{},
3✔
819
                        &containerpol.RunAsNonRootCheck{},
3✔
820
                        &containerpol.HasModifiedFilesCheck{},
3✔
821
                        containerpol.NewBasedOnUbiCheck(pyxis.NewPyxisClient(
3✔
822
                                cfg.PyxisHost,
3✔
823
                                cfg.PyxisAPIToken,
3✔
824
                                cfg.CertificationProjectID,
3✔
825
                                &http.Client{Timeout: 60 * time.Second})),
3✔
826
                }, nil
3✔
827
        }
828

829
        return nil, fmt.Errorf("provided container policy %s is unknown", p)
1✔
830
}
831

832
// makeCheckList returns a list of check names.
833
func makeCheckList(checks []check.Check) []string {
12✔
834
        checkNames := make([]string, len(checks))
12✔
835

12✔
836
        for i, check := range checks {
110✔
837
                checkNames[i] = check.Name()
98✔
838
        }
98✔
839

840
        return checkNames
12✔
841
}
842

843
// checkNamesFor produces a slice of names for checks in the requested policy.
844
func checkNamesFor(ctx context.Context, p policy.Policy) []string {
13✔
845
        var c []check.Check
13✔
846
        switch p {
13✔
847
        case policy.PolicyContainer, policy.PolicyRoot, policy.PolicyScratchNonRoot, policy.PolicyScratchRoot, policy.PolicyKonflux:
10✔
848
                c, _ = InitializeContainerChecks(ctx, p, ContainerCheckConfig{})
10✔
849
        case policy.PolicyOperator:
2✔
850
                c, _ = InitializeOperatorChecks(ctx, p, OperatorCheckConfig{})
2✔
851
        default:
1✔
852
                return []string{}
1✔
853
        }
854

855
        return makeCheckList(c)
12✔
856
}
857

858
// OperatorPolicy returns the names of checks in the operator policy.
859
func OperatorPolicy(ctx context.Context) []string {
2✔
860
        return checkNamesFor(ctx, policy.PolicyOperator)
2✔
861
}
2✔
862

863
// ContainerPolicy returns the names of checks in the container policy.
864
func ContainerPolicy(ctx context.Context) []string {
2✔
865
        return checkNamesFor(ctx, policy.PolicyContainer)
2✔
866
}
2✔
867

868
// ScratchNonRootContainerPolicy returns the names of checks in the
869
// container policy with scratch exception.
870
func ScratchNonRootContainerPolicy(ctx context.Context) []string {
2✔
871
        return checkNamesFor(ctx, policy.PolicyScratchNonRoot)
2✔
872
}
2✔
873

874
// ScratchRootContainerPolicy returns the names of checks in the
875
// container policy with scratch and root exception.
876
func ScratchRootContainerPolicy(ctx context.Context) []string {
2✔
877
        return checkNamesFor(ctx, policy.PolicyScratchRoot)
2✔
878
}
2✔
879

880
// RootExceptionContainerPolicy returns the names of checks in the
881
// container policy with root exception.
882
func RootExceptionContainerPolicy(ctx context.Context) []string {
2✔
883
        return checkNamesFor(ctx, policy.PolicyRoot)
2✔
884
}
2✔
885

886
// KonfluxContainerPolicy returns the names of checks to be used in
887
// a konflux pipeline
888
func KonfluxContainerPolicy(ctx context.Context) []string {
2✔
889
        return checkNamesFor(ctx, policy.PolicyKonflux)
2✔
890
}
2✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc