• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

codenotary / immudb / 25364743170

05 May 2026 08:00AM UTC coverage: 85.038% (-0.2%) from 85.255%
25364743170

push

gh-ci

vchaindz
chore(deps): bump jackc/pgx/v5 v5.9.1 -> v5.9.2 (CVE-2026-41889)

Patches a low-severity SQL-injection edge case in pgx's simple-protocol
codepath when a dollar-quoted string literal embeds attacker-controllable
placeholder text (GHSA-j88v-2chj-qfwx).

Not exploitable in immudb: pgx is a test-only dependency used by
pkg/pgsql/server/pgsql_{hardened,compat_integration,integration}_test.go
to drive the wire-compat layer with a real Postgres client. It is not
in the production package graph (`go list -deps ./cmd/... ./pkg/...
./embedded/...` returns 0 for jackc/pgx). Bumped purely to keep the
Dependabot alert clean.

Verified:
  go build ./...                                ok
  go test -count=1 ./pkg/pgsql/server/          ok

45157 of 53102 relevant lines covered (85.04%)

126482.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

64.59
/embedded/appendable/remoteapp/remote_storage_reader.go
1
package remoteapp
2

3
import (
4
        "bytes"
5
        "compress/flate"
6
        "compress/gzip"
7
        "compress/lzw"
8
        "compress/zlib"
9
        "context"
10
        "encoding/binary"
11
        "io"
12
        "io/ioutil"
13
        "sync"
14

15
        "github.com/codenotary/immudb/embedded/appendable"
16
        "github.com/codenotary/immudb/embedded/appendable/singleapp"
17
        "github.com/codenotary/immudb/embedded/remotestorage"
18
        "github.com/prometheus/client_golang/prometheus"
19
)
20

21
// DefaultReaderRangeCacheSize is the per-window size used by the
// remote-storage reader's LRU cache. Sized to match immudb's default
// multiapp chunk size (`WithFileSize(1<<20)` = 1 MiB), so a sequential
// full-chunk read on a default-configured deployment does **one** S3
// GET on open — same as the pre-Wave-1 reader — while a partial read
// of a large chunk still pays only the windows it actually touches.
//
// Tunable via WithReaderRangeCacheSize on the remoteapp options.
const DefaultReaderRangeCacheSize = 1 * 1024 * 1024

// openHeaderSlack is the extra bytes the open-time GET pulls beyond
// `rangeCacheSize` so the first cached window covers a full aligned
// payload region even after the variable-length metadata header is
// stripped. Without this, baseOffset (4 + metadata length) eats into
// the first window, the payload portion ends before the next aligned
// window boundary, and the next read past it forces an extra GET to
// fetch a window we'd otherwise have ended on. 4 KiB is assumed large
// enough for any plausible appendable metadata while staying tiny
// relative to the cache window. NOTE(review): openRemoteStorageReader
// rejects chunks whose metadata exceeds this slack as corrupted —
// confirm no producer writes larger metadata headers.
const openHeaderSlack = 4096

// DefaultReaderCacheWindows is the LRU depth of the per-reader cache.
// Each cached window is up to DefaultReaderRangeCacheSize bytes, so
// the worst-case resident memory per reader is
// DefaultReaderCacheWindows * DefaultReaderRangeCacheSize (~4 MiB).
//
// Depth 4 trades ~4 MiB per reader for an order-of-magnitude better
// hit rate on sparse / random access patterns: revisiting a window
// touched within the last 3 misses is a memcpy instead of an S3 GET.
const DefaultReaderCacheWindows = 4
51

52
// pendingFetch represents a single in-flight read-ahead Get. The
// fetcher goroutine sets data/err and then closes done. Foreground
// ReadAt callers find the matching pendingFetch (by offs) and wait on
// done to overlap the per-window RTT with whatever they were doing
// while consuming the previous window.
//
// Synchronization contract: data and err are written only by the
// fetcher goroutine before it closes done, and read only after a
// receive from done — the close establishes the happens-before edge,
// so no additional locking is needed on these two fields.
type pendingFetch struct {
	// offs is the payload offset of the window being fetched.
	offs int64
	// done is closed by the fetcher goroutine once data/err are set.
	done chan struct{}

	data []byte
	err  error
}
64

65
// cachedWindow is one entry in the LRU. data holds payload bytes for
// offsets [offs, offs+len(data)). Offsets are payload-relative, i.e.
// they exclude the chunk's metadata header (baseOffset).
type cachedWindow struct {
	offs int64
	data []byte
}
71

72
// remoteStorageReader is a read-only appendable.Appendable backed by a
// single object in remote storage. It serves ReadAt through a small
// LRU of fixed-size range windows plus at most one background
// prefetch; all mutable state is guarded by mu.
type remoteStorageReader struct {
	r    remotestorage.Storage
	name string
	// baseOffset is where the payload starts inside the remote object:
	// 4 bytes of metadata-length prefix plus the metadata itself.
	baseOffset     int64
	rangeCacheSize int
	maxWindows     int

	// compressionFormat is parsed from the chunk's metadata header
	// at open time. When non-zero (i.e. anything other than
	// appendable.NoCompression), ReadAt reproduces the per-entry
	// length-prefix + decompress protocol that singleapp's writer
	// uses, on top of the same range cache that handles raw byte
	// fetches. compressionLevel is parsed alongside but only used
	// for symmetry — the readers infer level from the compressed
	// stream.
	compressionFormat int
	compressionLevel  int

	mu          sync.Mutex
	windows     []cachedWindow // MRU at index 0, LRU at the tail
	sizeKnown   bool
	payloadSize int64 // valid only when sizeKnown

	pf *pendingFetch // at most one prefetch in flight, guarded by mu

	// prefetchCtx/prefetchCancel bound the lifetime of background
	// prefetch Gets; Close cancels them without waiting.
	prefetchCtx    context.Context
	prefetchCancel context.CancelFunc
}
100

101
// openRemoteStorageReader fetches the leading bytes of the named
// remote object, parses the metadata header (length prefix + TLV
// metadata), seeds the range cache with the initial payload window,
// and returns a ready-to-read remoteStorageReader.
//
// rangeCacheSize <= 0 selects DefaultReaderRangeCacheSize. Errors:
// any storage Get/read error is returned as-is; a header shorter than
// its own length prefix (or shorter than 4 bytes) yields
// ErrCorruptedMetadata. Note that a metadata header larger than
// openHeaderSlack would also trip the corruption check — see the
// hedged note on openHeaderSlack.
func openRemoteStorageReader(r remotestorage.Storage, name string, rangeCacheSize int) (*remoteStorageReader, error) {
	defer prometheus.NewTimer(metricsOpenTime).ObserveDuration()

	if rangeCacheSize <= 0 {
		rangeCacheSize = DefaultReaderRangeCacheSize
	}

	ctx := context.Background()

	// Read header + as much of the leading payload as fits in one
	// window. A short response (len < rangeCacheSize) means we've
	// seen the whole object, in which case pin payloadSize so
	// out-of-range ReadAt calls don't issue a wasted GET.
	openFetchSize := int64(rangeCacheSize) + openHeaderSlack
	reader, err := r.Get(ctx, name, 0, openFetchSize)
	if err != nil {
		metricsUncachedReadErrors.Inc()
		return nil, err
	}
	data, err := ioutil.ReadAll(reader)
	reader.Close()
	if err != nil {
		metricsUncachedReadErrors.Inc()
		return nil, err
	}
	metricsUncachedReads.Inc()
	metricsUncachedReadBytes.Add(float64(len(data)))
	if len(data) < 4 {
		// Too short to even hold the metadata-length prefix.
		metricsCorruptedMetadata.Inc()
		return nil, ErrCorruptedMetadata
	}

	// baseOffset = 4-byte length prefix + declared metadata length.
	baseOffset := int64(4 + binary.BigEndian.Uint32(data[:4]))
	if baseOffset > int64(len(data)) {
		metricsCorruptedMetadata.Inc()
		return nil, ErrCorruptedMetadata
	}

	// Parse the chunk metadata to find out whether the upstream
	// writer used singleapp's per-entry compression. If yes, ReadAt
	// reproduces the same length-prefix + decompress protocol;
	// otherwise we serve raw bytes. Metadata key matches
	// singleapp.MetaCompressionFormat (the on-disk format key).
	//
	// Defensive: NewMetadata panics on bytes that don't follow the
	// expected TLV layout (some test fixtures + any non-singleapp
	// producer). Recover and default to NoCompression rather than
	// failing the whole open.
	compressionFormat := appendable.NoCompression
	if baseOffset > 4 {
		compressionFormat = parseCompressionFormat(data[4:baseOffset])
	}

	pCtx, pCancel := context.WithCancel(context.Background())
	sr := &remoteStorageReader{
		r:                 r,
		name:              name,
		baseOffset:        baseOffset,
		rangeCacheSize:    rangeCacheSize,
		maxWindows:        DefaultReaderCacheWindows,
		compressionFormat: compressionFormat,
		windows: []cachedWindow{
			{offs: 0, data: data[baseOffset:]},
		},
		prefetchCtx:    pCtx,
		prefetchCancel: pCancel,
	}
	if int64(len(data)) < openFetchSize {
		// Short response: we hold the entire payload already.
		sr.sizeKnown = true
		sr.payloadSize = int64(len(sr.windows[0].data))
	}
	// Wave 3: do NOT start a prefetch on open. If the consumer never
	// issues a ReadAt past the initial window, no further GET fires.
	// Prefetch is started lazily from fetchAndCopyLocked once we've
	// observed an actual cache miss — that's evidence the consumer is
	// reading sequentially past the cache and will benefit from the
	// next window arriving in parallel.
	return sr, nil
}
180

181
// Metadata is part of the appendable.Appendable interface. The remote
// reader does not expose the raw metadata bytes; callers are not
// expected to reach this path.
func (r *remoteStorageReader) Metadata() []byte {
	panic("unimplemented")
}
184

185
// Size is part of the appendable.Appendable interface; not supported
// by the remote reader.
func (r *remoteStorageReader) Size() (int64, error) {
	panic("unimplemented")
}
188

189
// Offset is part of the appendable.Appendable interface; not supported
// by the remote reader.
func (r *remoteStorageReader) Offset() int64 {
	panic("unimplemented")
}
192

193
// SetOffset is part of the appendable.Appendable interface; not
// supported by the read-only remote reader.
func (r *remoteStorageReader) SetOffset(off int64) error {
	panic("unimplemented")
}
196

197
// DiscardUpto is part of the appendable.Appendable interface; not
// supported by the read-only remote reader.
func (r *remoteStorageReader) DiscardUpto(off int64) error {
	panic("unimplemented")
}
200

201
// Append is part of the appendable.Appendable interface; the remote
// reader is strictly read-only, so writes are a programmer error.
func (r *remoteStorageReader) Append(bs []byte) (off int64, n int, err error) {
	panic("unimplemented")
}
204

205
// CompressionFormat returns the per-entry compression format parsed
// from the chunk's metadata at open time (appendable.NoCompression
// when absent or unparsable).
func (r *remoteStorageReader) CompressionFormat() int {
	return r.compressionFormat
}
1✔
208

209
// CompressionLevel returns the compression level field. NOTE(review):
// nothing in this file assigns compressionLevel, so this currently
// always returns the zero value — the decoders infer level from the
// stream, so this appears intentional; confirm with callers.
func (r *remoteStorageReader) CompressionLevel() int {
	return r.compressionLevel
}
1✔
212

213
// Flush is a no-op: the remote reader never buffers writes.
func (r *remoteStorageReader) Flush() error {
	return nil
}
1✔
216

217
// Sync is a no-op: there is no local state to persist.
func (r *remoteStorageReader) Sync() error {
	return nil
}
1✔
220

221
// SwitchToReadOnlyMode is a no-op: the remote reader is always
// read-only.
func (r *remoteStorageReader) SwitchToReadOnlyMode() error {
	return nil
}
1✔
224

225
// ReadAt is the public entry. For uncompressed chunks it delegates
226
// straight to the raw range-cache path. For compressed chunks it
227
// reproduces singleapp's per-entry frame protocol on top of the raw
228
// reader: read the 4-byte length prefix at off, fetch that many
229
// compressed bytes at off+4, decompress in memory, then copy.
230
func (r *remoteStorageReader) ReadAt(bs []byte, off int64) (int, error) {
19✔
231
        if off < 0 {
20✔
232
                return 0, ErrIllegalArguments
1✔
233
        }
1✔
234
        if len(bs) == 0 {
18✔
235
                return 0, nil
×
236
        }
×
237

238
        if r.compressionFormat == appendable.NoCompression {
36✔
239
                return r.readAtRaw(bs, off)
18✔
240
        }
18✔
241
        return r.readAtCompressedFrame(bs, off)
×
242
}
243

244
// readAtRaw is the uncompressed range-cache + prefetch path. Also
// used by readAtCompressedFrame to fetch the underlying compressed
// bytes (length prefix + compressed payload) before decompressing.
//
// Flow: known-EOF short-circuit → LRU lookup → wait on a matching
// in-flight prefetch (dropping r.mu while blocked) → direct aligned
// fetch. On full success it returns (len(bs), nil); a read that runs
// off the end of the object returns the bytes copied plus io.EOF.
func (r *remoteStorageReader) readAtRaw(bs []byte, off int64) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.sizeKnown && off >= r.payloadSize {
		return 0, io.EOF
	}

	// Cache hit: any of the LRU windows covers this offset?
	if w, ok := r.lookupWindowLocked(off); ok {
		return r.serveFromWindowLocked(bs, off, w)
	}

	// Cache miss. If an in-flight prefetch is fetching the
	// window-sized region containing this offset, wait on it instead
	// of issuing a fresh GET. The fetcher goroutine doesn't take
	// r.mu, so dropping the lock while we wait is safe.
	if r.pf != nil && off >= r.pf.offs && off < r.pf.offs+int64(r.rangeCacheSize) {
		pf := r.pf
		r.mu.Unlock()
		// Closed by the fetcher once pf.data/pf.err are final.
		<-pf.done
		r.mu.Lock()

		// Adopt the prefetched data (if it's still the current pf —
		// another ReadAt may have replaced it while we were blocked).
		if r.pf == pf {
			r.pf = nil
			if pf.err == nil {
				r.insertWindowLocked(pf.offs, pf.data)
				if int64(len(pf.data)) < int64(r.rangeCacheSize) {
					// Short window ⇒ object ends inside it.
					r.sizeKnown = true
					r.payloadSize = pf.offs + int64(len(pf.data))
				}
				// Keep the pipeline primed for the next window — without
				// this, every adoption would stall the next sequential
				// miss for a full RTT instead of overlapping.
				r.maybeStartPrefetchLocked()
			}
		}

		if w, ok := r.lookupWindowLocked(off); ok {
			return r.serveFromWindowLocked(bs, off, w)
		}
		// Prefetch errored or was preempted; fall through to direct fetch.
	}

	return r.fetchAndCopyLocked(bs, off)
}
294

295
// lookupWindowLocked returns the window covering off, moving it to
296
// MRU position on hit. Caller must hold r.mu.
297
func (r *remoteStorageReader) lookupWindowLocked(off int64) (cachedWindow, bool) {
16✔
298
        for i, w := range r.windows {
32✔
299
                if off >= w.offs && off < w.offs+int64(len(w.data)) {
31✔
300
                        if i > 0 {
15✔
301
                                // Move w to MRU (index 0); shift the prefix right.
×
302
                                copy(r.windows[1:i+1], r.windows[0:i])
×
303
                                r.windows[0] = w
×
304
                        }
×
305
                        return w, true
15✔
306
                }
307
        }
308
        return cachedWindow{}, false
1✔
309
}
310

311
// insertWindowLocked installs a freshly-fetched window at MRU,
312
// evicting the LRU entry if we're at capacity. Caller must hold r.mu.
313
func (r *remoteStorageReader) insertWindowLocked(offs int64, data []byte) {
1✔
314
        nw := cachedWindow{offs: offs, data: data}
1✔
315
        if len(r.windows) < r.maxWindows {
2✔
316
                // Grow by one and shift everything right by one slot.
1✔
317
                r.windows = append(r.windows, cachedWindow{})
1✔
318
        }
1✔
319
        copy(r.windows[1:], r.windows[:len(r.windows)-1])
1✔
320
        r.windows[0] = nw
1✔
321
}
322

323
// serveFromWindowLocked copies from `w` into bs and handles partial
// reads (cache window boundary or end of object). On a partial read
// it either returns io.EOF (object exhausted) or recursively fetches
// the next window via fetchAndCopyLocked. Caller must hold r.mu.
func (r *remoteStorageReader) serveFromWindowLocked(bs []byte, off int64, w cachedWindow) (int, error) {
	// rel is off relative to the window start; the caller guarantees
	// (via lookupWindowLocked) that off lies inside w.
	rel := off - w.offs
	n := copy(bs, w.data[rel:])
	metricsReads.Inc()
	metricsReadBytes.Add(float64(n))

	if n == len(bs) {
		return n, nil
	}
	// Short copy: either the object ends here (known size) or the
	// request straddles into the next window.
	if r.sizeKnown && off+int64(n) >= r.payloadSize {
		return n, io.EOF
	}
	more, err := r.fetchAndCopyLocked(bs[n:], off+int64(n))
	return n + more, err
}
342

343
// fetchAndCopyLocked issues a Range GET aligned to a fixed window
// boundary that contains `off`, inserts the result as the new MRU
// cache window, and copies into bs. Aligning to fixed boundaries
// (rather than starting the GET at the random caller offset) means
// subsequent reads near the same region land in the same cached
// window — without alignment, two sparse reads that fall in the
// "same logical 1 MiB region" but at different offsets would each
// fetch their own overlapping window and the cache would thrash.
//
// If the request straddles two windows, this fetches the first one
// and recurses for the remainder. The GET is issued while holding
// r.mu, so concurrent readers serialize behind it. Caller must hold
// r.mu.
func (r *remoteStorageReader) fetchAndCopyLocked(bs []byte, off int64) (int, error) {
	windowSize := int64(r.rangeCacheSize)
	// Round off down to the containing aligned window.
	windowOffs := (off / windowSize) * windowSize
	fetchLen := windowSize

	ctx := context.Background()
	// baseOffset translates payload offsets into raw object offsets
	// (skipping the metadata header).
	reader, err := r.r.Get(ctx, r.name, windowOffs+r.baseOffset, fetchLen)
	if err != nil {
		metricsUncachedReadErrors.Inc()
		return 0, err
	}
	data, err := ioutil.ReadAll(reader)
	reader.Close()
	if err != nil {
		metricsUncachedReadErrors.Inc()
		return 0, err
	}
	metricsUncachedReads.Inc()
	metricsUncachedReadBytes.Add(float64(len(data)))

	r.insertWindowLocked(windowOffs, data)
	if int64(len(data)) < fetchLen {
		// Short response ⇒ the object ends inside this window.
		r.sizeKnown = true
		r.payloadSize = windowOffs + int64(len(data))
	}

	rel := off - windowOffs
	if rel >= int64(len(data)) {
		// Window doesn't actually contain off (object ended inside
		// the aligned window before this offset). EOF.
		return 0, io.EOF
	}
	n := copy(bs, data[rel:])
	metricsReads.Inc()
	metricsReadBytes.Add(float64(n))
	if n < len(bs) {
		// Request straddles a window boundary or extends past EOF.
		if r.sizeKnown && off+int64(n) >= r.payloadSize {
			return n, io.EOF
		}
		more, err := r.fetchAndCopyLocked(bs[n:], off+int64(n))
		return n + more, err
	}
	// Wave 3: only kick a prefetch from the cache-miss path. Cache
	// hits don't fire one because every hit on a random pattern would
	// otherwise queue a wasted GET — and on a sequential pattern the
	// next miss will fire it just as effectively.
	r.maybeStartPrefetchLocked()
	return n, nil
}
404

405
// maybeStartPrefetchLocked spawns one background Get for the window
// immediately after the current MRU cache entry, if and only if (a)
// no useful prefetch is already in flight and (b) we don't already
// know the payload ends inside the cached prefix. A completed but
// stale prefetch (offs no longer matches the next-after-MRU we want)
// is dropped first so a new one can start. Caller must hold r.mu.
//
// The spawned goroutine never takes r.mu: it only touches pf (whose
// data/err fields it owns until close(pf.done)) and immutable reader
// fields (name, baseOffset, rangeCacheSize). Its Get uses
// prefetchCtx, so Close cancels it.
func (r *remoteStorageReader) maybeStartPrefetchLocked() {
	if r.sizeKnown {
		// Whole payload size is pinned; nothing past it to fetch.
		return
	}
	if len(r.windows) == 0 {
		return
	}
	mru := r.windows[0]
	nextOffs := mru.offs + int64(len(mru.data))
	if nextOffs <= 0 {
		// Defensive: an empty first window yields nextOffs == 0.
		return
	}

	// Existing prefetch already lined up correctly? Leave it.
	if r.pf != nil && r.pf.offs == nextOffs {
		return
	}
	// Existing prefetch in flight for a different offset? Don't start
	// a second concurrent fetch; let the in-flight one drain first.
	// If it's already completed but stale, drop it so we can issue
	// the right one.
	if r.pf != nil {
		select {
		case <-r.pf.done:
			r.pf = nil
		default:
			return
		}
	}

	pf := &pendingFetch{
		offs: nextOffs,
		done: make(chan struct{}),
	}
	r.pf = pf

	go func() {
		// close(done) is the publication point for pf.data/pf.err.
		defer close(pf.done)
		reader, err := r.r.Get(r.prefetchCtx, r.name, nextOffs+r.baseOffset, int64(r.rangeCacheSize))
		if err != nil {
			pf.err = err
			return
		}
		data, errRead := ioutil.ReadAll(reader)
		reader.Close()
		if errRead != nil {
			pf.err = errRead
			return
		}
		pf.data = data
	}()
}
463

464
// readAtCompressedFrame implements singleapp's per-Append frame
// protocol on top of readAtRaw:
//
//	[ 4-byte big-endian length N ][ N bytes of compressed payload ]
//
// The frame at `off` decompresses to one Append's payload. We copy
// up to len(bs) bytes of that payload into bs and return io.EOF if
// the decoded payload was shorter than the request.
//
// NOTE(review): clen comes straight off the wire and is used to size
// an allocation (up to ~4 GiB for a corrupted prefix) — consider
// bounding it against a sane max frame size; confirm against writer
// limits.
func (r *remoteStorageReader) readAtCompressedFrame(bs []byte, off int64) (int, error) {
	var lenBuf [4]byte
	// readAtRaw either fills lenBuf completely or returns a non-nil
	// error (io.EOF on a short read), so the count can be ignored.
	if _, err := r.readAtRaw(lenBuf[:], off); err != nil {
		return 0, err
	}
	clen := binary.BigEndian.Uint32(lenBuf[:])
	if clen == 0 {
		return 0, io.EOF
	}

	cBs := make([]byte, clen)
	if _, err := r.readAtRaw(cBs, off+4); err != nil {
		return 0, err
	}

	dec, err := newDecompressReader(r.compressionFormat, bytes.NewReader(cBs))
	if err != nil {
		return 0, err
	}
	defer dec.Close()

	// Decompress the whole frame in memory; frames correspond to
	// single Append payloads.
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(dec); err != nil {
		return 0, err
	}
	rbs := buf.Bytes()

	n := copy(bs, rbs)
	if n < len(bs) {
		return n, io.EOF
	}
	return n, nil
}
505

506
// parseCompressionFormat extracts singleapp's MetaCompressionFormat
// from a chunk's TLV metadata bytes. Returns NoCompression on any
// parse failure — the metadata is owned by the writer and may legally
// be in formats this package doesn't understand.
//
// The named result is required so the deferred recover can override
// the return value when NewMetadata panics on malformed bytes.
func parseCompressionFormat(metaBytes []byte) (cf int) {
	defer func() {
		if r := recover(); r != nil {
			cf = appendable.NoCompression
		}
	}()
	md := appendable.NewMetadata(metaBytes)
	if v, ok := md.GetInt(singleapp.MetaCompressionFormat); ok {
		return v
	}
	return appendable.NoCompression
}
522

523
// newDecompressReader returns a decoder for the given compression
// format. Mirrors singleapp.AppendableFile.reader so the on-disk
// frame format stays identical between local and remote chunks.
// An unrecognized format yields ErrCorruptedMetadata.
func newDecompressReader(format int, src io.Reader) (io.ReadCloser, error) {
	switch format {
	case appendable.FlateCompression:
		return flate.NewReader(src), nil
	case appendable.GZipCompression:
		// gzip.NewReader reads the header eagerly, hence the error.
		return gzip.NewReader(src)
	case appendable.LZWCompression:
		// MSB/8 matches the writer's lzw.NewWriter parameters.
		return lzw.NewReader(src, lzw.MSB, 8), nil
	case appendable.ZLibCompression:
		return zlib.NewReader(src)
	default:
		return nil, ErrCorruptedMetadata
	}
}
540

541
func (r *remoteStorageReader) Close() error {
14✔
542
        // Cancel any in-flight prefetch; the goroutine drains itself
14✔
543
        // promptly because the underlying Storage.Get respects the
14✔
544
        // context. We don't wait — Close is called from the cache
14✔
545
        // eviction path and shouldn't block on network I/O.
14✔
546
        if r.prefetchCancel != nil {
28✔
547
                r.prefetchCancel()
14✔
548
        }
14✔
549
        return nil
14✔
550
}
551

552
// Copy is part of the appendable.Appendable interface; not supported
// by the remote reader.
func (r *remoteStorageReader) Copy(dstPath string) error {
	panic("unimplemented")
}
555

556
// Compile-time check that remoteStorageReader satisfies
// appendable.Appendable.
var _ appendable.Appendable = (*remoteStorageReader)(nil)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc