
kubevirt / containerized-data-importer / #5251

24 Apr 2025 09:09AM UTC coverage: 59.199% (+0.03%) from 59.172%
Build #5251 · Pull #3711 · travis-ci · arnongilboa

Allow increasing PVC size to the minimum supported by its storage class

Supports both DataVolume PVC rendering and PVC mutating webhook
rendering, so it covers the small TPM PVC created on the KubeVirt side
as well as any other PVC created by a DataVolume or independently.
Independently created PVCs should be labeled with
cdi.kubevirt.io/applyStorageProfile: "true", and the CDI feature gate
WebhookPvcRendering should be enabled. A storage class can be annotated
with its minimum supported volume size, e.g.:
    cdi.kubevirt.io/minimumSupportedPvcSize: 4Gi

Signed-off-by: Arnon Gilboa <agilboa@redhat.com>
Pull Request #3711: Allow increasing PVC size to the minimum supported by its storage class
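
To illustrate the rendering rule described in the commit message, the sketch below shows the idea in Go: a requested PVC size is raised to the minimum advertised by the storage class annotation. This is a minimal, hypothetical sketch; renderRequestedSize is not a function from this pull request, and only the annotation key cdi.kubevirt.io/minimumSupportedPvcSize is taken from the description above.

package main

import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
)

// renderRequestedSize is a hypothetical helper: if the storage class advertises
// a minimum supported PVC size via the cdi.kubevirt.io/minimumSupportedPvcSize
// annotation, a smaller request is raised to that minimum; otherwise the
// request is returned unchanged.
func renderRequestedSize(requested resource.Quantity, scAnnotations map[string]string) resource.Quantity {
        minStr, ok := scAnnotations["cdi.kubevirt.io/minimumSupportedPvcSize"]
        if !ok {
                return requested
        }
        minQty, err := resource.ParseQuantity(minStr)
        if err != nil {
                // Malformed annotation: keep the original request.
                return requested
        }
        if requested.Cmp(minQty) < 0 {
                return minQty
        }
        return requested
}

func main() {
        sc := map[string]string{"cdi.kubevirt.io/minimumSupportedPvcSize": "4Gi"}
        rendered := renderRequestedSize(resource.MustParse("1Gi"), sc)
        fmt.Println(rendered.String()) // prints "4Gi": a 1Gi request is raised to the class minimum
}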

5 of 11 new or added lines in 1 file covered. (45.45%)

4 existing lines in 1 file now uncovered.

16829 of 28428 relevant lines covered (59.2%)

0.65 hits per line

Source File: /pkg/importer/http-datasource.go (74.58% covered)

/*
Copyright 2018 The CDI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package importer

import (
        "context"
        "crypto/tls"
        "crypto/x509"
        "encoding/json"
        "fmt"
        "io"
        "io/fs"
        "net/http"
        "net/url"
        "os"
        "path"
        "path/filepath"
        "strconv"
        "strings"
        "sync"
        "time"

        "github.com/pkg/errors"

        "k8s.io/klog/v2"

        cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
        "kubevirt.io/containerized-data-importer/pkg/common"
        "kubevirt.io/containerized-data-importer/pkg/image"
        "kubevirt.io/containerized-data-importer/pkg/util"
)

const (
        tempFile          = "tmpimage"
        nbdkitPid         = "/tmp/nbdkit.pid"
        nbdkitSocket      = "/tmp/nbdkit.sock"
        defaultUserAgent  = "cdi-golang-importer"
        httpContentType   = "Content-Type"
        httpContentLength = "Content-Length"
)

// HTTPDataSource is the data provider for http(s) endpoints.
// Sequence of phases:
// 1a. Info -> Convert (In Info phase the format readers are configured), if the source Reader image is not archived, and no custom CA is used, and can be converted by QEMU-IMG (RAW/QCOW2)
// 1b. Info -> TransferArchive if the content type is archive
// 1c. Info -> Transfer in all other cases.
// 2a. Transfer -> Convert if content type is kube virt
// 2b. Transfer -> Complete if content type is archive (Transfer is called with the target instead of the scratch space). Non block PVCs only.
type HTTPDataSource struct {
        httpReader io.ReadCloser
        ctx        context.Context
        cancel     context.CancelFunc
        cancelLock sync.Mutex
        // content type expected by the to live on the endpoint.
        contentType cdiv1.DataVolumeContentType
        // stack of readers
        readers *FormatReaders
        // endpoint the http endpoint to retrieve the data from.
        endpoint *url.URL
        // url the url to report to the caller of getURL, could be the endpoint, or a file in scratch space.
        url *url.URL
        // path to the custom CA. Empty if not used
        customCA string
        // true if we know `qemu-img` will fail to download this
        brokenForQemuImg bool
        // the content length reported by the http server.
        contentLength uint64

        n image.NbdkitOperation
}

var createNbdkitCurl = image.NewNbdkitCurl

// NewHTTPDataSource creates a new instance of the http data provider.
func NewHTTPDataSource(endpoint, accessKey, secKey, certDir string, contentType cdiv1.DataVolumeContentType) (*HTTPDataSource, error) {
        ep, err := ParseEndpoint(endpoint)
        if err != nil {
                return nil, errors.Wrapf(err, "unable to parse endpoint %q", endpoint)
        }
        ctx, cancel := context.WithCancel(context.Background())

        extraHeaders, secretExtraHeaders, err := getExtraHeaders()
        if err != nil {
                cancel()
                return nil, errors.Wrap(err, "Error getting extra headers for HTTP client")
        }

        httpReader, contentLength, brokenForQemuImg, err := createHTTPReader(ctx, ep, accessKey, secKey, certDir, extraHeaders, secretExtraHeaders, contentType)
        if err != nil {
                cancel()
                return nil, err
        }

        httpSource := &HTTPDataSource{
                ctx:              ctx,
                cancel:           cancel,
                httpReader:       httpReader,
                contentType:      contentType,
                endpoint:         ep,
                customCA:         certDir,
                brokenForQemuImg: brokenForQemuImg,
                contentLength:    contentLength,
        }
        httpSource.n, err = createNbdkitCurl(nbdkitPid, accessKey, secKey, certDir, nbdkitSocket, extraHeaders, secretExtraHeaders)
        if err != nil {
                cancel()
                return nil, err
        }
        // We know this is a counting reader, so no need to check.
        countingReader := httpReader.(*util.CountingReader)
        go httpSource.pollProgress(countingReader, 10*time.Minute, time.Second)
        return httpSource, nil
}

// Info is called to get initial information about the data.
func (hs *HTTPDataSource) Info() (ProcessingPhase, error) {
        var err error
        hs.readers, err = NewFormatReaders(hs.httpReader, hs.contentLength)
        if err != nil {
                klog.Errorf("Error creating readers: %v", err)
                return ProcessingPhaseError, err
        }
        if hs.contentType == cdiv1.DataVolumeArchive {
                return ProcessingPhaseTransferDataDir, nil
        }
        if pullMethod, _ := util.ParseEnvVar(common.ImporterPullMethod, false); pullMethod == string(cdiv1.RegistryPullNode) {
                hs.url, _ = url.Parse(fmt.Sprintf("nbd+unix:///?socket=%s", nbdkitSocket))
                if err = hs.n.StartNbdkit(hs.endpoint.String()); err != nil {
                        return ProcessingPhaseError, err
                }
                return ProcessingPhaseConvert, nil
        }
        return ProcessingPhaseTransferScratch, nil
}

// Transfer is called to transfer the data from the source to a scratch location.
func (hs *HTTPDataSource) Transfer(path string) (ProcessingPhase, error) {
        if hs.contentType == cdiv1.DataVolumeKubeVirt {
                file := filepath.Join(path, tempFile)
                if err := CleanAll(file); err != nil {
                        return ProcessingPhaseError, err
                }
                size, err := util.GetAvailableSpace(path)
                if err != nil || size <= 0 {
                        return ProcessingPhaseError, ErrInvalidPath
                }
                hs.readers.StartProgressUpdate()
                err = streamDataToFile(hs.readers.TopReader(), file)
                if err != nil {
                        return ProcessingPhaseError, err
                }
                // If we successfully wrote to the file, then the parse will succeed.
                hs.url, _ = url.Parse(file)
                return ProcessingPhaseConvert, nil
        } else if hs.contentType == cdiv1.DataVolumeArchive {
                if err := util.UnArchiveTar(hs.readers.TopReader(), path); err != nil {
                        return ProcessingPhaseError, errors.Wrap(err, "unable to untar files from endpoint")
                }
                hs.url = nil
                return ProcessingPhaseComplete, nil
        }
        return ProcessingPhaseError, errors.Errorf("Unknown content type: %s", hs.contentType)
}

// TransferFile is called to transfer the data from the source to the passed in file.
func (hs *HTTPDataSource) TransferFile(fileName string) (ProcessingPhase, error) {
        if err := CleanAll(fileName); err != nil {
                return ProcessingPhaseError, err
        }
        hs.readers.StartProgressUpdate()
        err := streamDataToFile(hs.readers.TopReader(), fileName)
        if err != nil {
                return ProcessingPhaseError, err
        }
        return ProcessingPhaseResize, nil
}

// GetURL returns the URI that the data processor can use when converting the data.
func (hs *HTTPDataSource) GetURL() *url.URL {
        return hs.url
}

// GetTerminationMessage returns data to be serialized and used as the termination message of the importer.
func (hs *HTTPDataSource) GetTerminationMessage() *common.TerminationMessage {
        if pullMethod, _ := util.ParseEnvVar(common.ImporterPullMethod, false); pullMethod != string(cdiv1.RegistryPullNode) {
                return nil
        }

        info, err := getServerInfo(hs.ctx, fmt.Sprintf("%s://%s/info", hs.endpoint.Scheme, hs.endpoint.Host))
        if err != nil {
                klog.Errorf("%+v", err)
                return nil
        }

        return &common.TerminationMessage{
                Labels: envsToLabels(info.Env),
        }
}

// Close all readers.
func (hs *HTTPDataSource) Close() error {
        var err error
        if hs.readers != nil {
                err = hs.readers.Close()
        }
        hs.cancelLock.Lock()
        if hs.cancel != nil {
                hs.cancel()
                hs.cancel = nil
        }
        hs.cancelLock.Unlock()
        return err
}

func createCertPool(certDir string) (*x509.CertPool, error) {
        // let's get system certs as well
        certPool, err := x509.SystemCertPool()
        if err != nil {
                return nil, errors.Wrap(err, "Error getting system certs")
        }

        // append the user-provided trusted CA certificates bundle when making egress connections using proxy
        if files, err := os.ReadDir(common.ImporterProxyCertDir); err == nil {
                for _, file := range files {
                        if file.IsDir() || file.Name()[0] == '.' {
                                continue
                        }
                        fp := path.Join(common.ImporterProxyCertDir, file.Name())
                        if certs, err := os.ReadFile(fp); err == nil {
                                certPool.AppendCertsFromPEM(certs)
                        }
                }
        }

        // append server CA certificates
        files, err := os.ReadDir(certDir)
        if err != nil {
                return nil, errors.Wrapf(err, "Error listing files in %s", certDir)
        }

        for _, file := range files {
                if file.IsDir() || file.Name()[0] == '.' {
                        continue
                }

                fp := path.Join(certDir, file.Name())

                klog.Infof("Attempting to get certs from %s", fp)

                certs, err := os.ReadFile(fp)
                if err != nil {
                        return nil, errors.Wrapf(err, "Error reading file %s", fp)
                }

                if ok := certPool.AppendCertsFromPEM(certs); !ok {
                        klog.Warningf("No certs in %s", fp)
                }
        }

        return certPool, nil
}

func createHTTPClient(certDir string) (*http.Client, error) {
        client := &http.Client{
                // Don't set timeout here, since that will be an absolute timeout, we need a relative to last progress timeout.
        }

        if certDir == "" {
                return client, nil
        }

        certPool, err := createCertPool(certDir)
        if err != nil {
                return nil, err
        }

        // the default transport contains Proxy configurations to use environment variables and default timeouts
        transport := http.DefaultTransport.(*http.Transport).Clone()
        transport.TLSClientConfig = &tls.Config{
                RootCAs:    certPool,
                MinVersion: tls.VersionTLS12,
        }
        transport.GetProxyConnectHeader = func(ctx context.Context, proxyURL *url.URL, target string) (http.Header, error) {
                h := http.Header{}
                h.Add("User-Agent", defaultUserAgent)
                return h, nil
        }
        client.Transport = transport

        return client, nil
}

func addExtraheaders(req *http.Request, extraHeaders []string) {
        for _, header := range extraHeaders {
                parts := strings.SplitN(header, ":", 2)
                if len(parts) > 1 {
                        req.Header.Add(parts[0], parts[1])
                }
        }
        req.Header.Add("User-Agent", defaultUserAgent)
}

func createHTTPReader(ctx context.Context, ep *url.URL, accessKey, secKey, certDir string, extraHeaders, secretExtraHeaders []string, contentType cdiv1.DataVolumeContentType) (io.ReadCloser, uint64, bool, error) {
        var brokenForQemuImg bool
        client, err := createHTTPClient(certDir)
        if err != nil {
                return nil, uint64(0), false, errors.Wrap(err, "Error creating http client")
        }

        allExtraHeaders := append(extraHeaders, secretExtraHeaders...)

        client.CheckRedirect = func(r *http.Request, via []*http.Request) error {
                if len(accessKey) > 0 && len(secKey) > 0 {
                        r.SetBasicAuth(accessKey, secKey) // Redirects will lose basic auth, so reset them manually
                }
                addExtraheaders(r, allExtraHeaders)
                return nil
        }

        total, err := getContentLength(client, ep, accessKey, secKey, allExtraHeaders)
        if err != nil {
                brokenForQemuImg = true
        }
        // http.NewRequest can only return error on invalid METHOD, or invalid url. Here the METHOD is always GET, and the url is always valid, thus error cannot happen.
        req, _ := http.NewRequest(http.MethodGet, ep.String(), nil)

        addExtraheaders(req, allExtraHeaders)

        req = req.WithContext(ctx)
        if len(accessKey) > 0 && len(secKey) > 0 {
                req.SetBasicAuth(accessKey, secKey)
        }
        klog.V(2).Infof("Attempting to get object %q via http client\n", ep.String())
        resp, err := client.Do(req)
        if err != nil {
                return nil, uint64(0), true, errors.Wrap(err, "HTTP request errored")
        }
        if want := http.StatusOK; resp.StatusCode != want {
                klog.Errorf("http: expected status code %d, got %d", want, resp.StatusCode)
                return nil, uint64(0), true, errors.Errorf("expected status code %d, got %d. Status: %s", want, resp.StatusCode, resp.Status)
        }

        if contentType == cdiv1.DataVolumeKubeVirt {
                // Check the content-type if we are expecting a KubeVirt img.
                if val, ok := resp.Header[httpContentType]; ok {
                        if strings.HasPrefix(val[0], "text/") {
                                // We will continue with the import nonetheless, but content might be unexpected.
                                klog.Warningf("Unexpected content type '%s'. Content might not be a KubeVirt image.", val[0])
                        }
                }
        }

        acceptRanges, ok := resp.Header["Accept-Ranges"]
        if !ok || acceptRanges[0] == "none" {
                klog.V(2).Infof("Accept-Ranges isn't bytes, avoiding qemu-img")
                brokenForQemuImg = true
        }

        if total == 0 {
                // The total seems bogus. Let's try the GET Content-Length header
                total = parseHTTPHeader(resp)
        }
        countingReader := &util.CountingReader{
                Reader:  resp.Body,
                Current: 0,
        }
        return countingReader, total, brokenForQemuImg, nil
}

func (hs *HTTPDataSource) pollProgress(reader *util.CountingReader, idleTime, pollInterval time.Duration) {
        count := reader.Current
        lastUpdate := time.Now()
        for {
                if count < reader.Current {
                        // Some progress was made, reset now.
                        lastUpdate = time.Now()
                        count = reader.Current
                }

                if time.Until(lastUpdate.Add(idleTime)).Nanoseconds() < 0 {
                        hs.cancelLock.Lock()
                        if hs.cancel != nil {
                                // No progress for the idle time, cancel http client.
                                hs.cancel() // This will trigger dp.ctx.Done()
                        }
                        hs.cancelLock.Unlock()
                }
                select {
                case <-time.After(pollInterval):
                        continue
                case <-hs.ctx.Done():
                        return // Don't leak, once the transfer is cancelled or completed this is called.
                }
        }
}

func getContentLength(client *http.Client, ep *url.URL, accessKey, secKey string, extraHeaders []string) (uint64, error) {
        req, err := http.NewRequest(http.MethodHead, ep.String(), nil)
        if err != nil {
                return uint64(0), errors.Wrap(err, "could not create HTTP request")
        }
        if len(accessKey) > 0 && len(secKey) > 0 {
                req.SetBasicAuth(accessKey, secKey)
        }

        addExtraheaders(req, extraHeaders)

        klog.V(2).Infof("Attempting to HEAD %q via http client\n", ep.String())
        resp, err := client.Do(req)
        if err != nil {
                return uint64(0), errors.Wrap(err, "HTTP request errored")
        }

        if want := http.StatusOK; resp.StatusCode != want {
                klog.Errorf("http: expected status code %d, got %d", want, resp.StatusCode)
                return uint64(0), errors.Errorf("expected status code %d, got %d. Status: %s", want, resp.StatusCode, resp.Status)
        }

        for k, v := range resp.Header {
                klog.V(3).Infof("GO CLIENT: key: %s, value: %s\n", k, v)
        }

        total := parseHTTPHeader(resp)

        err = resp.Body.Close()
        if err != nil {
                return uint64(0), errors.Wrap(err, "could not close head read")
        }
        return total, nil
}

func parseHTTPHeader(resp *http.Response) uint64 {
        var err error
        total := uint64(0)
        if val, ok := resp.Header[httpContentLength]; ok {
                total, err = strconv.ParseUint(val[0], 10, 64)
                if err != nil {
                        klog.Errorf("could not convert content length, got %v", err)
                }
                klog.V(3).Infof("Content length: %d\n", total)
        }

        return total
}

// Check for any extra headers to pass along. Return secret headers separately so callers can suppress logging them.
func getExtraHeaders() ([]string, []string, error) {
        extraHeaders := getExtraHeadersFromEnvironment()
        secretExtraHeaders, err := getExtraHeadersFromSecrets()
        return extraHeaders, secretExtraHeaders, err
}

// Check for extra headers from environment variables.
func getExtraHeadersFromEnvironment() []string {
        var extraHeaders []string

        for _, value := range os.Environ() {
                if strings.HasPrefix(value, common.ImporterExtraHeader) {
                        env := strings.SplitN(value, "=", 2)
                        if len(env) > 1 {
                                extraHeaders = append(extraHeaders, env[1])
                        }
                }
        }

        return extraHeaders
}

// Check for extra headers from mounted secrets.
func getExtraHeadersFromSecrets() ([]string, error) {
        var secretExtraHeaders []string
        var err error

        secretDir := common.ImporterSecretExtraHeadersDir
        err = filepath.Walk(secretDir, func(filePath string, info fs.FileInfo, err error) error {
                if err != nil {
                        if os.IsNotExist(err) {
                                return nil
                        }
                        return errors.Wrapf(err, "Error listing directories under %s", secretDir)
                }

                // Skip directories like ..data and ..2021_11_09_17_20_16.253260263
                if info.IsDir() && info.Name()[0] == '.' {
                        return fs.SkipDir
                }

                // Don't try to read directories, or files that start with dots
                if info.IsDir() || info.Name()[0] == '.' {
                        return nil
                }

                header, err := os.ReadFile(filePath)
                if err != nil {
                        return errors.Wrapf(err, "Error reading headers from %s", filePath)
                }
                secretExtraHeaders = append(secretExtraHeaders, string(header))

                return err
        })

        return secretExtraHeaders, err
}

func getServerInfo(ctx context.Context, infoURL string) (*common.ServerInfo, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, infoURL, nil)
        if err != nil {
                return nil, errors.Wrap(err, "failed to construct request for containerimage-server info")
        }

        client := &http.Client{}
        resp, err := client.Do(req)
        if err != nil {
                return nil, errors.Wrap(err, "failed request containerimage-server info")
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusOK {
                return nil, fmt.Errorf("failed request containerimage-server info: expected status code 200, got %d", resp.StatusCode)
        }

        body, err := io.ReadAll(resp.Body)
        if err != nil {
                return nil, errors.Wrap(err, "failed to read body of containerimage-server info request")
        }

        info := &common.ServerInfo{}
        if err := json.Unmarshal(body, info); err != nil {
                return nil, errors.Wrap(err, "failed to unmarshal body of containerimage-server info request")
        }

        return info, nil
}
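
For orientation, the sketch below shows how a caller might drive the phase sequence documented on HTTPDataSource above (Info, then Transfer or Convert). It is an illustrative assumption, not the importer's actual data-processing loop: importFromHTTP and scratchDir are hypothetical names, and the code is assumed to live in the same importer package so it can use the types and constants shown in this file.

// importFromHTTP is a hypothetical, simplified driver for HTTPDataSource.
func importFromHTTP(endpoint, scratchDir string) error {
        ds, err := NewHTTPDataSource(endpoint, "", "", "", cdiv1.DataVolumeKubeVirt)
        if err != nil {
                return err
        }
        defer ds.Close()

        phase, err := ds.Info()
        if err != nil {
                return err
        }
        switch phase {
        case ProcessingPhaseTransferScratch:
                // Download into scratch space; on success Transfer reports Convert and
                // GetURL points at the temporary file that would be handed to qemu-img.
                if _, err := ds.Transfer(scratchDir); err != nil {
                        return err
                }
        case ProcessingPhaseConvert:
                // Node pull method: GetURL already points at the nbdkit socket URL.
        }
        klog.V(1).Infof("data available at %s", ds.GetURL())
        return nil
}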