kubevirt / containerized-data-importer / #5485

16 Jul 2025 07:43AM UTC coverage: 59.534% (+0.03%) from 59.502%
Build #5485 on travis-ci
Pull Request #3840: [WIP] Testing network-policies for CDI
Commit: dnm, testing network-policies for CDI (Acedus)
Signed-off-by: Adi Aloni <aaloni@redhat.com>

17110 of 28740 relevant lines covered (59.53%)
0.66 hits per line

Source File: /pkg/importer/http-datasource.go (75.53% of lines covered)
/*
Copyright 2018 The CDI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package importer

import (
        "context"
        "crypto/tls"
        "crypto/x509"
        "encoding/json"
        "fmt"
        "io"
        "io/fs"
        "net/http"
        "net/url"
        "os"
        "path"
        "path/filepath"
        "strconv"
        "strings"
        "sync"
        "time"

        "github.com/pkg/errors"

        "k8s.io/klog/v2"

        cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
        "kubevirt.io/containerized-data-importer/pkg/common"
        "kubevirt.io/containerized-data-importer/pkg/image"
        "kubevirt.io/containerized-data-importer/pkg/util"
)

const (
        tempFile          = "tmpimage"
        nbdkitPid         = "/tmp/nbdkit.pid"
        nbdkitSocket      = "/tmp/nbdkit.sock"
        defaultUserAgent  = "cdi-golang-importer"
        httpContentType   = "Content-Type"
        httpContentLength = "Content-Length"
)

// HTTPDataSource is the data provider for http(s) endpoints.
// Sequence of phases:
// 1a. Info -> Convert (In Info phase the format readers are configured), if the source Reader image is not archived, and no custom CA is used, and can be converted by QEMU-IMG (RAW/QCOW2).
// 1b. Info -> TransferArchive if the content type is archive.
// 1c. Info -> ValidatePreScratch if image size validation using nbdkit prior to Transfer is possible.
// 1d. Info -> Transfer in all other cases.
// 2.  ValidatePreScratch -> TransferScratch.
// 3a. Transfer -> Convert if content type is kubevirt.
// 3b. Transfer -> Complete if content type is archive (Transfer is called with the target instead of the scratch space). Non block PVCs only.
type HTTPDataSource struct {
        httpReader io.ReadCloser
        ctx        context.Context
        cancel     context.CancelFunc
        cancelLock sync.Mutex
        // content type expected to live on the endpoint.
        contentType cdiv1.DataVolumeContentType
        // stack of readers
        readers *FormatReaders
        // endpoint the http endpoint to retrieve the data from.
        endpoint *url.URL
        // url the url to report to the caller of GetURL, could be the endpoint, or a file in scratch space.
        url *url.URL
        // path to the custom CA. Empty if not used
        customCA string
        // true if we know `qemu-img` will fail to download this
        brokenForQemuImg bool
        // the content length reported by the http server.
        contentLength uint64

        n image.NbdkitOperation
}

var createNbdkitCurl = image.NewNbdkitCurl

// NewHTTPDataSource creates a new instance of the http data provider.
func NewHTTPDataSource(endpoint, accessKey, secKey, certDir string, contentType cdiv1.DataVolumeContentType) (*HTTPDataSource, error) {
        ep, err := ParseEndpoint(endpoint)
        if err != nil {
                return nil, errors.Wrapf(err, "unable to parse endpoint %q", endpoint)
        }
        ctx, cancel := context.WithCancel(context.Background())

        extraHeaders, secretExtraHeaders, err := getExtraHeaders()
        if err != nil {
                cancel()
                return nil, errors.Wrap(err, "Error getting extra headers for HTTP client")
        }

        httpReader, contentLength, brokenForQemuImg, err := createHTTPReader(ctx, ep, accessKey, secKey, certDir, extraHeaders, secretExtraHeaders, contentType)
        if err != nil {
                cancel()
                return nil, err
        }

        httpSource := &HTTPDataSource{
                ctx:              ctx,
                cancel:           cancel,
                httpReader:       httpReader,
                contentType:      contentType,
                endpoint:         ep,
                customCA:         certDir,
                brokenForQemuImg: brokenForQemuImg,
                contentLength:    contentLength,
        }
        httpSource.n, err = createNbdkitCurl(nbdkitPid, accessKey, secKey, certDir, nbdkitSocket, extraHeaders, secretExtraHeaders)
        if err != nil {
                cancel()
                return nil, err
        }
        // We know this is a counting reader, so no need to check.
        countingReader := httpReader.(*util.CountingReader)
        go httpSource.pollProgress(countingReader, 10*time.Minute, time.Second)
        return httpSource, nil
}

// Info is called to get initial information about the data.
func (hs *HTTPDataSource) Info() (ProcessingPhase, error) {
        var err error
        hs.readers, err = NewFormatReaders(hs.httpReader, hs.contentLength)
        if err != nil {
                klog.Errorf("Error creating readers: %v", err)
                return ProcessingPhaseError, err
        }
        if hs.contentType == cdiv1.DataVolumeArchive {
                return ProcessingPhaseTransferDataDir, nil
        }
        if pullMethod, _ := util.ParseEnvVar(common.ImporterPullMethod, false); pullMethod == string(cdiv1.RegistryPullNode) {
                if err := hs.startNbdKit(); err != nil {
                        return ProcessingPhaseError, err
                }
                return ProcessingPhaseConvert, nil
        }
        if err := hs.startNbdKit(); err == nil && !hs.brokenForQemuImg {
                // Validate that target volume size is sufficient early.
                return ProcessingPhaseValidatePreScratch, nil
        }
        hs.url = nil
        return ProcessingPhaseTransferScratch, nil
}

// Transfer is called to transfer the data from the source to a scratch location.
func (hs *HTTPDataSource) Transfer(path string, preallocation bool) (ProcessingPhase, error) {
        if hs.contentType == cdiv1.DataVolumeKubeVirt {
                file := filepath.Join(path, tempFile)
                if err := CleanAll(file); err != nil {
                        return ProcessingPhaseError, err
                }
                size, err := GetAvailableSpace(path)
                if err != nil || size <= 0 {
                        return ProcessingPhaseError, ErrInvalidPath
                }
                // Validate that scratch space size is sufficient early.
                // This check is best effort as qemu-img info can't parse compressed images properly.
                if hs.url != nil {
                        // Other errors such as qemu-img or nbdkit-curl failures are irrelevant as this
                        // is a best effort attempt and should not fail the import.
                        // HTTPS proxy CA is currently unsupported until nbdkit adds support for the relevant flags:
                        // https://gitlab.com/nbdkit/nbdkit/-/merge_requests/87
                        if err = qemuOperations.Validate(hs.url, size); errors.Is(err, image.ErrLargerPVCRequired) {
                                return ProcessingPhaseError, err
                        }
                }
                hs.readers.StartProgressUpdate()
                _, _, err = StreamDataToFile(hs.readers.TopReader(), file, preallocation)
                if err != nil {
                        return ProcessingPhaseError, err
                }
                // If we successfully wrote to the file, then the parse will succeed.
                hs.url, _ = url.Parse(file)
                return ProcessingPhaseConvert, nil
        } else if hs.contentType == cdiv1.DataVolumeArchive {
                if err := util.UnArchiveTar(hs.readers.TopReader(), path); err != nil {
                        return ProcessingPhaseError, errors.Wrap(err, "unable to untar files from endpoint")
                }
                hs.url = nil
                return ProcessingPhaseComplete, nil
        }
        return ProcessingPhaseError, errors.Errorf("Unknown content type: %s", hs.contentType)
}

// TransferFile is called to transfer the data from the source to the passed in file.
func (hs *HTTPDataSource) TransferFile(fileName string, preallocation bool) (ProcessingPhase, error) {
        if err := CleanAll(fileName); err != nil {
                return ProcessingPhaseError, err
        }
        hs.readers.StartProgressUpdate()
        _, _, err := StreamDataToFile(hs.readers.TopReader(), fileName, preallocation)
        if err != nil {
                return ProcessingPhaseError, err
        }
        return ProcessingPhaseResize, nil
}

// GetURL returns the URI that the data processor can use when converting the data.
func (hs *HTTPDataSource) GetURL() *url.URL {
        return hs.url
}

// GetTerminationMessage returns data to be serialized and used as the termination message of the importer.
func (hs *HTTPDataSource) GetTerminationMessage() *common.TerminationMessage {
        if pullMethod, _ := util.ParseEnvVar(common.ImporterPullMethod, false); pullMethod != string(cdiv1.RegistryPullNode) {
                return nil
        }

        info, err := getServerInfo(hs.ctx, fmt.Sprintf("%s://%s/info", hs.endpoint.Scheme, hs.endpoint.Host))
        if err != nil {
                klog.Errorf("%+v", err)
                return nil
        }

        return &common.TerminationMessage{
                Labels: envsToLabels(info.Env),
        }
}

// Close all readers.
func (hs *HTTPDataSource) Close() error {
        var err error
        if hs.readers != nil {
                err = hs.readers.Close()
        }
        hs.cancelLock.Lock()
        if hs.cancel != nil {
                hs.cancel()
                hs.cancel = nil
        }
        hs.cancelLock.Unlock()
        return err
}

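// createCertPool builds a certificate pool from the system roots, any proxy CA
// certificates mounted under common.ImporterProxyCertDir, and the server CA
// certificates found in certDir.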
func createCertPool(certDir string) (*x509.CertPool, error) {
        // let's get system certs as well
        certPool, err := x509.SystemCertPool()
        if err != nil {
                return nil, errors.Wrap(err, "Error getting system certs")
        }

        // append the user-provided trusted CA certificates bundle when making egress connections using proxy
        if files, err := os.ReadDir(common.ImporterProxyCertDir); err == nil {
                for _, file := range files {
                        if file.IsDir() || file.Name()[0] == '.' {
                                continue
                        }
                        fp := path.Join(common.ImporterProxyCertDir, file.Name())
                        if certs, err := os.ReadFile(fp); err == nil {
                                certPool.AppendCertsFromPEM(certs)
                        }
                }
        }

        // append server CA certificates
        files, err := os.ReadDir(certDir)
        if err != nil {
                return nil, errors.Wrapf(err, "Error listing files in %s", certDir)
        }

        for _, file := range files {
                if file.IsDir() || file.Name()[0] == '.' {
                        continue
                }

                fp := path.Join(certDir, file.Name())

                klog.Infof("Attempting to get certs from %s", fp)

                certs, err := os.ReadFile(fp)
                if err != nil {
                        return nil, errors.Wrapf(err, "Error reading file %s", fp)
                }

                if ok := certPool.AppendCertsFromPEM(certs); !ok {
                        klog.Warningf("No certs in %s", fp)
                }
        }

        return certPool, nil
}

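// createHTTPClient returns an HTTP client with no absolute timeout (progress is
// monitored separately). When certDir is set, the default transport is cloned and
// configured to trust the CAs from createCertPool, requiring at least TLS 1.2.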
func createHTTPClient(certDir string) (*http.Client, error) {
        client := &http.Client{
                // Don't set timeout here, since that will be an absolute timeout, we need a relative to last progress timeout.
        }

        if certDir == "" {
                return client, nil
        }

        certPool, err := createCertPool(certDir)
        if err != nil {
                return nil, err
        }

        // the default transport contains Proxy configurations to use environment variables and default timeouts
        transport := http.DefaultTransport.(*http.Transport).Clone()
        transport.TLSClientConfig = &tls.Config{
                RootCAs:    certPool,
                MinVersion: tls.VersionTLS12,
        }
        transport.GetProxyConnectHeader = func(ctx context.Context, proxyURL *url.URL, target string) (http.Header, error) {
                h := http.Header{}
                h.Add("User-Agent", defaultUserAgent)
                return h, nil
        }
        client.Transport = transport

        return client, nil
}

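// addExtraheaders adds each "Key: Value" entry in extraHeaders to the request,
// along with the importer User-Agent header.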
func addExtraheaders(req *http.Request, extraHeaders []string) {
        for _, header := range extraHeaders {
                parts := strings.SplitN(header, ":", 2)
                if len(parts) > 1 {
                        req.Header.Add(parts[0], parts[1])
                }
        }
        req.Header.Add("User-Agent", defaultUserAgent)
}

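// createHTTPReader issues the GET request for the endpoint and returns a counting
// reader over the response body, the content length reported by the server, and
// whether qemu-img is expected to have trouble with this endpoint (the HEAD request
// failed or byte ranges are not supported).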
func createHTTPReader(ctx context.Context, ep *url.URL, accessKey, secKey, certDir string, extraHeaders, secretExtraHeaders []string, contentType cdiv1.DataVolumeContentType) (io.ReadCloser, uint64, bool, error) {
        var brokenForQemuImg bool
        client, err := createHTTPClient(certDir)
        if err != nil {
                return nil, uint64(0), false, errors.Wrap(err, "Error creating http client")
        }

        allExtraHeaders := append(extraHeaders, secretExtraHeaders...)

        client.CheckRedirect = func(r *http.Request, via []*http.Request) error {
                if len(accessKey) > 0 && len(secKey) > 0 {
                        r.SetBasicAuth(accessKey, secKey) // Redirects will lose basic auth, so reset them manually
                }
                addExtraheaders(r, allExtraHeaders)
                return nil
        }

        total, err := getContentLength(client, ep, accessKey, secKey, allExtraHeaders)
        if err != nil {
                brokenForQemuImg = true
        }
        // http.NewRequest can only return error on invalid METHOD, or invalid url. Here the METHOD is always GET, and the url is always valid, thus error cannot happen.
        req, _ := http.NewRequest(http.MethodGet, ep.String(), nil)

        addExtraheaders(req, allExtraHeaders)

        req = req.WithContext(ctx)
        if len(accessKey) > 0 && len(secKey) > 0 {
                req.SetBasicAuth(accessKey, secKey)
        }
        klog.V(2).Infof("Attempting to get object %q via http client\n", ep.String())
        resp, err := client.Do(req)
        if err != nil {
                return nil, uint64(0), true, errors.Wrap(err, "HTTP request errored")
        }
        if want := http.StatusOK; resp.StatusCode != want {
                klog.Errorf("http: expected status code %d, got %d", want, resp.StatusCode)
                return nil, uint64(0), true, errors.Errorf("expected status code %d, got %d. Status: %s", want, resp.StatusCode, resp.Status)
        }

        if contentType == cdiv1.DataVolumeKubeVirt {
                // Check the content-type if we are expecting a KubeVirt img.
                if val, ok := resp.Header[httpContentType]; ok {
                        if strings.HasPrefix(val[0], "text/") {
                                // We will continue with the import nonetheless, but content might be unexpected.
                                klog.Warningf("Unexpected content type '%s'. Content might not be a KubeVirt image.", val[0])
                        }
                }
        }

        acceptRanges, ok := resp.Header["Accept-Ranges"]
        if !ok || acceptRanges[0] == "none" {
                klog.V(2).Infof("Accept-Ranges isn't bytes, avoiding qemu-img")
                brokenForQemuImg = true
        }

        if total == 0 {
                // The total seems bogus. Let's try the GET Content-Length header
                total = parseHTTPHeader(resp)
        }
        countingReader := &util.CountingReader{
                Reader:  resp.Body,
                Current: 0,
        }
        return countingReader, total, brokenForQemuImg, nil
}

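// pollProgress cancels the HTTP request context when the counting reader shows no
// progress for idleTime, checking every pollInterval, and returns once the context
// is done.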
func (hs *HTTPDataSource) pollProgress(reader *util.CountingReader, idleTime, pollInterval time.Duration) {
        count := reader.Current
        lastUpdate := time.Now()
        for {
                if count < reader.Current {
                        // Some progress was made, reset now.
                        lastUpdate = time.Now()
                        count = reader.Current
                }

                if time.Until(lastUpdate.Add(idleTime)).Nanoseconds() < 0 {
                        hs.cancelLock.Lock()
                        if hs.cancel != nil {
                                // No progress for the idle time, cancel http client.
                                hs.cancel() // This will trigger dp.ctx.Done()
                        }
                        hs.cancelLock.Unlock()
                }
                select {
                case <-time.After(pollInterval):
                        continue
                case <-hs.ctx.Done():
                        return // Don't leak, once the transfer is cancelled or completed this is called.
                }
        }
}

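// getContentLength issues a HEAD request against the endpoint and returns the size
// advertised in its Content-Length header.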
func getContentLength(client *http.Client, ep *url.URL, accessKey, secKey string, extraHeaders []string) (uint64, error) {
        req, err := http.NewRequest(http.MethodHead, ep.String(), nil)
        if err != nil {
                return uint64(0), errors.Wrap(err, "could not create HTTP request")
        }
        if len(accessKey) > 0 && len(secKey) > 0 {
                req.SetBasicAuth(accessKey, secKey)
        }

        addExtraheaders(req, extraHeaders)

        klog.V(2).Infof("Attempting to HEAD %q via http client\n", ep.String())
        resp, err := client.Do(req)
        if err != nil {
                return uint64(0), errors.Wrap(err, "HTTP request errored")
        }

        if want := http.StatusOK; resp.StatusCode != want {
                klog.Errorf("http: expected status code %d, got %d", want, resp.StatusCode)
                return uint64(0), errors.Errorf("expected status code %d, got %d. Status: %s", want, resp.StatusCode, resp.Status)
        }

        for k, v := range resp.Header {
                klog.V(3).Infof("GO CLIENT: key: %s, value: %s\n", k, v)
        }

        total := parseHTTPHeader(resp)

        err = resp.Body.Close()
        if err != nil {
                return uint64(0), errors.Wrap(err, "could not close head read")
        }
        return total, nil
}

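// parseHTTPHeader returns the response Content-Length as an unsigned integer, or 0
// if the header is missing or cannot be parsed.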
func parseHTTPHeader(resp *http.Response) uint64 {
        var err error
        total := uint64(0)
        if val, ok := resp.Header[httpContentLength]; ok {
                total, err = strconv.ParseUint(val[0], 10, 64)
                if err != nil {
                        klog.Errorf("could not convert content length, got %v", err)
                }
                klog.V(3).Infof("Content length: %d\n", total)
        }

        return total
}

// Check for any extra headers to pass along. Return secret headers separately so callers can suppress logging them.
func getExtraHeaders() ([]string, []string, error) {
        extraHeaders := getExtraHeadersFromEnvironment()
        secretExtraHeaders, err := getExtraHeadersFromSecrets()
        return extraHeaders, secretExtraHeaders, err
}

// Check for extra headers from environment variables.
func getExtraHeadersFromEnvironment() []string {
        var extraHeaders []string

        for _, value := range os.Environ() {
                if strings.HasPrefix(value, common.ImporterExtraHeader) {
                        env := strings.SplitN(value, "=", 2)
                        if len(env) > 1 {
                                extraHeaders = append(extraHeaders, env[1])
                        }
                }
        }

        return extraHeaders
}

// Check for extra headers from mounted secrets.
func getExtraHeadersFromSecrets() ([]string, error) {
        var secretExtraHeaders []string
        var err error

        secretDir := common.ImporterSecretExtraHeadersDir
        err = filepath.Walk(secretDir, func(filePath string, info fs.FileInfo, err error) error {
                if err != nil {
                        if os.IsNotExist(err) {
                                return nil
                        }
                        return errors.Wrapf(err, "Error listing directories under %s", secretDir)
                }

                // Skip directories like ..data and ..2021_11_09_17_20_16.253260263
                if info.IsDir() && info.Name()[0] == '.' {
                        return fs.SkipDir
                }

                // Don't try to read directories, or files that start with dots
                if info.IsDir() || info.Name()[0] == '.' {
                        return nil
                }

                header, err := os.ReadFile(filePath)
                if err != nil {
                        return errors.Wrapf(err, "Error reading headers from %s", filePath)
                }
                secretExtraHeaders = append(secretExtraHeaders, string(header))

                return err
        })

        return secretExtraHeaders, err
}

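// getServerInfo fetches and unmarshals the containerimage-server info document
// served at infoURL.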
func getServerInfo(ctx context.Context, infoURL string) (*common.ServerInfo, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, infoURL, nil)
        if err != nil {
                return nil, errors.Wrap(err, "failed to construct request for containerimage-server info")
        }

        client := &http.Client{}
        resp, err := client.Do(req)
        if err != nil {
                return nil, errors.Wrap(err, "failed request containerimage-server info")
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusOK {
                return nil, fmt.Errorf("failed request containerimage-server info: expected status code 200, got %d", resp.StatusCode)
        }

        body, err := io.ReadAll(resp.Body)
        if err != nil {
                return nil, errors.Wrap(err, "failed to read body of containerimage-server info request")
        }

        info := &common.ServerInfo{}
        if err := json.Unmarshal(body, info); err != nil {
                return nil, errors.Wrap(err, "failed to unmarshal body of containerimage-server info request")
        }

        return info, nil
}

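// startNbdKit starts nbdkit against the HTTP endpoint and points hs.url at the
// local NBD unix socket so later phases can consume the image through it.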
func (hs *HTTPDataSource) startNbdKit() error {
        hs.url, _ = url.Parse(fmt.Sprintf("nbd+unix:///?socket=%s", nbdkitSocket))
        if err := hs.n.StartNbdkit(hs.endpoint.String()); err != nil {
                return err
        }
        return nil
}