• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mixxxdj / mixxx / 4725386255

pending completion
4725386255

push

github

GitHub
<a href="https://github.com/mixxxdj/mixxx/commit/a91914ee8">Merge pull request #11487 from mixxxdj/dependabot/github_actions/actions/labeler-</a><a class="double-link" href="https://github.com/mixxxdj/mixxx/commit/9471598e3">9471598e3</a>

26195 of 81430 relevant lines covered (32.17%)

61547.41 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

72.54
/src/engine/cachingreader/cachingreader.cpp
1
#include "engine/cachingreader/cachingreader.h"
2

3
#include <QFileInfo>
4
#include <QtDebug>
5

6
#include "control/controlobject.h"
7
#include "moc_cachingreader.cpp"
8
#include "track/track.h"
9
#include "util/assert.h"
10
#include "util/compatibility/qatomic.h"
11
#include "util/counter.h"
12
#include "util/logger.h"
13
#include "util/math.h"
14
#include "util/sample.h"
15

16
namespace {

mixxx::Logger kLogger("CachingReader");

// This is the default hint frameCount that is adopted in case a
// Hint::kFrameCountForward or Hint::kFrameCountBackward count is
// provided. It matches 23 ms @ 44.1 kHz.
// TODO() Do we suffer cache misses if we use an audio buffer of above 23 ms?
constexpr SINT kDefaultHintFrames = 1024;

// With CachingReaderChunk::kFrames = 8192 each chunk consumes
// 8192 frames * 2 channels/frame * 4-bytes per sample = 65 kB.
//
//     80 chunks ->  5120 KB =  5 MB
//
// Each deck (including sample decks) will use their own CachingReader.
// Consequently the total memory required for all allocated chunks depends
// on the number of decks. The amount of memory reserved for a single
// CachingReader must be multiplied by the number of decks to calculate
// the total amount!
//
// NOTE(uklotzde, 2019-09-05): Reduce this number to just few chunks
// (kNumberOfCachedChunksInMemory = 1, 2, 3, ...) for testing purposes
// to verify that the MRU/LRU cache works as expected. Even though
// massive drop outs are expected to occur Mixxx should run reliably!
constexpr SINT kNumberOfCachedChunksInMemory = 80;

} // anonymous namespace
43

44
// Constructs the reader for one deck: allocates the fixed pool of sample
// chunks up front, wires the request/status FIFOs to the worker, and starts
// the worker thread at high priority.
CachingReader::CachingReader(const QString& group,
        UserSettingsPointer config)
        : m_pConfig(config),
          // Limit the number of in-flight requests to the worker. This should
          // prevent to overload the worker when it is not able to fetch those
          // requests from the FIFO timely. Otherwise outdated requests pile up
          // in the FIFO and it would take a long time to process them, just to
          // discard the results that most likely have already become obsolete.
          // TODO(XXX): Ideally the request FIFO would be implemented as a ring
          // buffer, where new requests replace old requests when full. Those
          // old requests need to be returned immediately to the CachingReader
          // that must take ownership and free them!!!
          m_chunkReadRequestFIFO(kNumberOfCachedChunksInMemory / 4),
          // The capacity of the back channel must be equal to the number of
          // allocated chunks, because the worker uses writeBlocking(). Otherwise
          // the worker could get stuck in a hot loop!!!
          m_readerStatusUpdateFIFO(kNumberOfCachedChunksInMemory),
          m_state(STATE_IDLE),
          m_mruCachingReaderChunk(nullptr),
          m_lruCachingReaderChunk(nullptr),
          // Single contiguous allocation backing the sample data of all chunks.
          m_sampleBuffer(CachingReaderChunk::kSamples * kNumberOfCachedChunksInMemory),
          m_worker(group, &m_chunkReadRequestFIFO, &m_readerStatusUpdateFIFO) {
    m_allocatedCachingReaderChunks.reserve(kNumberOfCachedChunksInMemory);
    // Divide up the allocated raw memory buffer into total_chunks
    // chunks. Initialize each chunk to hold nothing and add it to the free
    // list.
    for (SINT i = 0; i < kNumberOfCachedChunksInMemory; ++i) {
        CachingReaderChunkForOwner* c =
                new CachingReaderChunkForOwner(
                        mixxx::SampleBuffer::WritableSlice(
                                m_sampleBuffer,
                                CachingReaderChunk::kSamples * i,
                                CachingReaderChunk::kSamples));
        m_chunks.push_back(c);
        m_freeChunks.push_back(c);
    }

    // Forward signals from worker
    connect(&m_worker, &CachingReaderWorker::trackLoading,
            this, &CachingReader::trackLoading,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoaded,
            this, &CachingReader::trackLoaded,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoadFailed,
            this, &CachingReader::trackLoadFailed,
            Qt::DirectConnection);

    m_worker.start(QThread::HighPriority);
}
94

95
CachingReader::~CachingReader() {
    // Stop the worker thread first so that no chunk is still referenced
    // by an in-flight read when its memory goes away.
    m_worker.quitWait();
    // Destroy the chunk objects; the backing sample memory itself is
    // released by the m_sampleBuffer member destructor.
    for (CachingReaderChunkForOwner* pChunk : qAsConst(m_chunks)) {
        delete pChunk;
    }
}
99

100
// Detach a chunk from the MRU/LRU list, mark it as free, and return it
// to the free list for reuse. Does NOT touch the allocation table
// (m_allocatedCachingReaderChunks) — see freeChunk() for that.
void CachingReader::freeChunkFromList(CachingReaderChunkForOwner* pChunk) {
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);
    pChunk->free();
    m_freeChunks.push_back(pChunk);
}
107

108
// Release a single chunk: remove it from the allocation table (if present)
// and hand it back to the free list via freeChunkFromList().
void CachingReader::freeChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    // A chunk currently handed over to the worker must never be freed
    // while its read request is still pending.
    DEBUG_ASSERT(pChunk->getState() != CachingReaderChunkForOwner::READ_PENDING);

    const int removed = m_allocatedCachingReaderChunks.remove(pChunk->getIndex());
    Q_UNUSED(removed); // only used in DEBUG_ASSERT
    // We'll tolerate not being in allocatedCachingReaderChunks,
    // because sometimes you free a chunk right after you allocated it.
    DEBUG_ASSERT(removed <= 1);

    freeChunkFromList(pChunk);
}
120

121
// Return every cached chunk to the free list, except those with reads
// still pending in the worker; those are freed individually when their
// results come back through process().
void CachingReader::freeAllChunks() {
    for (const auto& pChunk: qAsConst(m_chunks)) {
        // We will receive CHUNK_READ_INVALID for all pending chunk reads
        // which should free the chunks individually.
        if (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING) {
            continue;
        }

        if (pChunk->getState() != CachingReaderChunkForOwner::FREE) {
            freeChunkFromList(pChunk);
        }
    }
    // After the loop the MRU/LRU list must be empty.
    DEBUG_ASSERT(!m_mruCachingReaderChunk);
    DEBUG_ASSERT(!m_lruCachingReaderChunk);

    m_allocatedCachingReaderChunks.clear();
}
138

139
// Take a chunk from the free list and bind it to the given index.
// Returns nullptr when the free list is exhausted (caller may then
// evict via allocateChunkExpireLRU()).
CachingReaderChunkForOwner* CachingReader::allocateChunk(SINT chunkIndex) {
    if (m_freeChunks.empty()) {
        return nullptr;
    }
    // Pop the first available chunk off the free list.
    CachingReaderChunkForOwner* const pFreeChunk = m_freeChunks.front();
    m_freeChunks.pop_front();

    // Bind it to the requested index and register it in the allocation
    // table so lookupChunk() can find it.
    pFreeChunk->init(chunkIndex);
    m_allocatedCachingReaderChunks.insert(chunkIndex, pFreeChunk);

    return pFreeChunk;
}
152

153
// Allocate a chunk for the given index, evicting the least recently
// used chunk and retrying once if the free list is empty.
CachingReaderChunkForOwner* CachingReader::allocateChunkExpireLRU(SINT chunkIndex) {
    auto* pAllocated = allocateChunk(chunkIndex);
    if (pAllocated == nullptr) {
        // Free list exhausted: evict the LRU chunk if one exists.
        if (m_lruCachingReaderChunk == nullptr) {
            kLogger.warning() << "No cached LRU chunk available for freeing";
        } else {
            freeChunk(m_lruCachingReaderChunk);
            pAllocated = allocateChunk(chunkIndex);
        }
    }
    if (kLogger.traceEnabled()) {
        kLogger.trace() << "allocateChunkExpireLRU" << chunkIndex << pAllocated;
    }
    return pAllocated;
}
168

169
// Look up the chunk for the given index in the allocation table.
// Returns nullptr if no chunk is currently allocated for that index.
CachingReaderChunkForOwner* CachingReader::lookupChunk(SINT chunkIndex) {
    // QHash::value() yields the supplied default (nullptr) on a miss.
    CachingReaderChunkForOwner* const pCachedChunk =
            m_allocatedCachingReaderChunks.value(chunkIndex, nullptr);
    // A found chunk must always be bound to the index it was stored under.
    DEBUG_ASSERT(!pCachedChunk || pCachedChunk->getIndex() == chunkIndex);
    return pCachedChunk;
}
175

176
// Move a READY chunk to the head of the MRU/LRU list, marking it as the
// most recently used chunk so it is evicted last.
void CachingReader::freshenChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() == CachingReaderChunkForOwner::READY);
    if (kLogger.traceEnabled()) {
        kLogger.trace()
                << "freshenChunk()"
                << pChunk->getIndex()
                << pChunk;
    }

    // Remove the chunk from the MRU/LRU list
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);

    // Reinsert as new head of MRU list
    pChunk->insertIntoListBefore(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk,
            m_mruCachingReaderChunk);
}
197

198
// Look up a chunk by index and, if it already carries readable data,
// promote it to the front of the MRU/LRU list. Returns the chunk (in
// any state) or nullptr when not allocated.
CachingReaderChunkForOwner* CachingReader::lookupChunkAndFreshen(SINT chunkIndex) {
    auto* pFoundChunk = lookupChunk(chunkIndex);
    if (!pFoundChunk) {
        return nullptr;
    }
    // Only READY chunks participate in the MRU/LRU list; chunks with a
    // pending read are still owned by the worker.
    if (pFoundChunk->getState() == CachingReaderChunkForOwner::READY) {
        freshenChunk(pFoundChunk);
    }
    return pFoundChunk;
}
205

206
// Invoked from the UI thread!!
// Hands the (possibly null) track over to the worker. A null pointer
// requests unloading of the current track.
void CachingReader::newTrack(TrackPointer pTrack) {
    auto newState = pTrack ? STATE_TRACK_LOADING : STATE_TRACK_UNLOADING;
    // Atomically publish the new state and fetch the previous one.
    auto oldState = m_state.fetchAndStoreAcquire(newState);

    // TODO():
    // BaseTrackPlayerImpl::slotLoadTrack() distributes the new track via
    // emit loadingTrack(pNewTrack, pOldTrack);
    // but the newTrack may change if we load a new track while the previous one
    // is still loading. This leads to inconsistent states for example a different
    // track in the Mixxx Title and the Deck label.
    if (oldState == STATE_TRACK_LOADING &&
            newState == STATE_TRACK_LOADING) {
        kLogger.warning()
                << "Loading a new track while loading a track may lead to inconsistent states";
    }
    m_worker.newTrack(std::move(pTrack));
}
224

225
// Called from the engine thread
// Drains all status updates queued by the worker since the last call:
// chunk read results are inserted into (or discarded from) the cache,
// and track load/unload notifications drive the state machine.
void CachingReader::process() {
    ReaderStatusUpdate update;
    while (m_readerStatusUpdateFIFO.read(&update, 1) == 1) {
        auto* pChunk = update.takeFromWorker();
        if (pChunk) {
            // Result of a read request (with a chunk)
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) != STATE_IDLE);
            DEBUG_ASSERT(
                    update.status == CHUNK_READ_SUCCESS ||
                    update.status == CHUNK_READ_EOF ||
                    update.status == CHUNK_READ_INVALID ||
                    update.status == CHUNK_READ_DISCARDED);
            if (m_state.loadAcquire() == STATE_TRACK_LOADING) {
                // Discard all results from pending read requests for the
                // previous track before the next track has been loaded.
                freeChunk(pChunk);
                continue;
            }
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED);
            if (update.status == CHUNK_READ_SUCCESS) {
                // Insert or freshen the chunk in the MRU/LRU list after
                // obtaining ownership from the worker.
                freshenChunk(pChunk);
            } else {
                // Discard chunks that don't carry any data
                freeChunk(pChunk);
            }
            // Adjust the readable frame index range (if available)
            if (update.status != CHUNK_READ_DISCARDED) {
                m_readableFrameIndexRange = intersect(
                        m_readableFrameIndexRange,
                        update.readableFrameIndexRange());
            }
        } else {
            // State update (without a chunk)
            if (update.status == TRACK_LOADED) {
                // We have a new Track ready to go.
                // Assert that we either have had STATE_TRACK_LOADING before and all
                // chunks in the m_readerStatusUpdateFIFO have been discarded.
                // or the cache has been already cleared.
                // In case of two consecutive load events, we receive two consecutive
                // TRACK_LOADED without a chunk in between, assert this here.
                DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                        (atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED &&
                                !m_mruCachingReaderChunk && !m_lruCachingReaderChunk));
                // now purge also the recently used chunk list from the old track.
                if (m_mruCachingReaderChunk || m_lruCachingReaderChunk) {
                    DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING);
                    freeAllChunks();
                }
                // Reset the readable frame index range
                m_readableFrameIndexRange = update.readableFrameIndexRange();
                m_state.storeRelease(STATE_TRACK_LOADED);
            } else {
                DEBUG_ASSERT(update.status == TRACK_UNLOADED);
                // This message could be processed later when a new
                // track is already loading! In this case the TRACK_LOADED will
                // be the very next status update.
                if (!m_state.testAndSetRelease(STATE_TRACK_UNLOADING, STATE_IDLE)) {
                    DEBUG_ASSERT(
                            atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                            atomicLoadRelaxed(m_state) == STATE_IDLE);
                }
            }
        }
    }
}
293

294
// Reads numSamples samples starting at startSample into buffer, walking
// the cached chunks in forward order. When reverse is true the frames
// are written into buffer back-to-front. Regions that cannot be read
// (preroll before the track, unreadable audio, cache misses after the
// first chunk) are filled with silence and reported as
// PARTIALLY_AVAILABLE; a cache miss on the very first chunk returns
// UNAVAILABLE without touching the buffer.
CachingReader::ReadResult CachingReader::read(SINT startSample, SINT numSamples, bool reverse, CSAMPLE* buffer) {
    // Check for bad inputs
    VERIFY_OR_DEBUG_ASSERT(
            // Refuse to read from an invalid position
            (startSample % CachingReaderChunk::kChannels == 0) &&
            // Refuse to read from an invalid number of samples
            (numSamples % CachingReaderChunk::kChannels == 0) && (numSamples >= 0)) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "startSample =" << startSample
                << "numSamples =" << numSamples
                << "reverse =" << reverse;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(buffer) {
        return ReadResult::UNAVAILABLE;
    }

    // If no track is loaded, don't do anything.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return ReadResult::UNAVAILABLE;
    }

    // If asked to read 0 samples, don't do anything. (this is a perfectly
    // reasonable request that happens sometimes.)
    if (numSamples == 0) {
        return ReadResult::AVAILABLE; // nothing to do
    }

    // the samples are always read in forward direction
    // If reverse = true, the frames are copied in reverse order to the
    // destination buffer
    SINT sample = startSample;
    if (reverse) {
        // Start with the last sample in buffer
        sample -= numSamples;
    }

    SINT samplesRemaining = numSamples;

    // Process new messages from the reader thread before looking up
    // the first chunk and to update m_readableFrameIndexRange
    process();

    auto remainingFrameIndexRange =
            mixxx::IndexRange::forward(
                    CachingReaderChunk::samples2frames(sample),
                    CachingReaderChunk::samples2frames(numSamples));
    DEBUG_ASSERT(!remainingFrameIndexRange.empty());

    auto result = ReadResult::AVAILABLE;
    if (!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty()) {
        // Fill the buffer up to the first readable sample with
        // silence. This may happen when the engine is in preroll,
        // i.e. if the frame index points a region before the first
        // track sample.
        if (remainingFrameIndexRange.start() < m_readableFrameIndexRange.start()) {
            const auto prerollFrameIndexRange =
                    mixxx::IndexRange::between(
                            remainingFrameIndexRange.start(),
                            m_readableFrameIndexRange.start());
            DEBUG_ASSERT(prerollFrameIndexRange.length() <= remainingFrameIndexRange.length());
            if (kLogger.debugEnabled()) {
                kLogger.debug()
                        << "Preroll: Filling the first"
                        << prerollFrameIndexRange.length()
                        << "sample frames in"
                        << remainingFrameIndexRange
                        << "with silence. Audio signal starts at"
                        << m_readableFrameIndexRange.start();
            }
            const SINT prerollFrames = prerollFrameIndexRange.length();
            const SINT prerollSamples = CachingReaderChunk::frames2samples(prerollFrames);
            DEBUG_ASSERT(samplesRemaining >= prerollSamples);
            if (reverse) {
                // Reverse output: silence belongs at the tail of the buffer.
                SampleUtil::clear(&buffer[samplesRemaining - prerollSamples], prerollSamples);
            } else {
                SampleUtil::clear(buffer, prerollSamples);
                buffer += prerollSamples;
            }
            samplesRemaining -= prerollSamples;
            remainingFrameIndexRange.shrinkFront(prerollFrames);
            result = ReadResult::PARTIALLY_AVAILABLE;
        }

        // Read the actual samples from the audio source into the
        // buffer. The buffer will be filled with silence for every
        // unreadable sample or samples outside of the track region
        // later at the end of this function.
        if (!remainingFrameIndexRange.empty()) {
            // The intersection between the readable samples from the track
            // and the requested samples is not empty, so start reading.
            DEBUG_ASSERT(!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty());
            DEBUG_ASSERT(remainingFrameIndexRange.start() >= m_readableFrameIndexRange.start());

            const SINT firstChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.start());
            SINT lastChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
            for (SINT chunkIndex = firstChunkIndex;
                    chunkIndex <= lastChunkIndex;
                    ++chunkIndex) {

                // Process new messages from the reader thread before looking up
                // the next chunk
                process();

                // m_readableFrameIndexRange might change with every read operation!
                // On a cache miss audio data will be read from the audio source in
                // process() and the readable frame index range might get adjusted
                // if decoding errors occur.
                remainingFrameIndexRange =
                        intersect(
                                remainingFrameIndexRange,
                                m_readableFrameIndexRange);

                if (remainingFrameIndexRange.empty()) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Failed to read more sample data";
                    break;
                }
                // The range may have shrunk; recompute the last chunk index.
                lastChunkIndex =
                        CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
                if (lastChunkIndex < chunkIndex) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Abort reading of sample data";
                    break;
                }

                mixxx::IndexRange bufferedFrameIndexRange;
                const CachingReaderChunkForOwner* const pChunk = lookupChunkAndFreshen(chunkIndex);
                if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
                    if (reverse) {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFramesReverse(
                                        &buffer[samplesRemaining],
                                        remainingFrameIndexRange);
                    } else {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFrames(
                                        buffer,
                                        remainingFrameIndexRange);
                    }
                } else {
                    // This will happen regularly when jumping to a new position
                    // within the file and decoding of the audio data is still
                    // pending.
                    DEBUG_ASSERT(!pChunk ||
                            (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING));
                    Counter("CachingReader::read(): Failed to read chunk on cache miss")++;
                    if (kLogger.traceEnabled()) {
                        kLogger.trace()
                                << "Cache miss for chunk with index"
                                << chunkIndex
                                << "- abort reading";
                    }
                    // Abort reading (see below)
                    DEBUG_ASSERT(bufferedFrameIndexRange.empty());
                }
                if (bufferedFrameIndexRange.empty()) {
                    if (samplesRemaining == numSamples) {
                        DEBUG_ASSERT(chunkIndex == firstChunkIndex);
                        // We have not read a single frame caused by a cache miss of
                        // the first required chunk. Inform the calling code that no
                        // data has been written into the buffer and to handle this
                        // situation appropriately.
                        return ReadResult::UNAVAILABLE;
                    }
                    // No more readable data available. Exit the loop and
                    // finally fill the remaining buffer with silence.
                    break;
                }
                DEBUG_ASSERT(bufferedFrameIndexRange.isSubrangeOf(remainingFrameIndexRange));
                // The chunk may have started later than requested; pad the
                // gap in front with silence.
                if (remainingFrameIndexRange.start() < bufferedFrameIndexRange.start()) {
                    const auto paddingFrameIndexRange =
                            mixxx::IndexRange::between(
                                    remainingFrameIndexRange.start(),
                                    bufferedFrameIndexRange.start());
                    kLogger.warning()
                            << "Inserting"
                            << paddingFrameIndexRange.length()
                            << "frames of silence for unreadable audio data";
                    SINT paddingSamples = CachingReaderChunk::frames2samples(paddingFrameIndexRange.length());
                    DEBUG_ASSERT(samplesRemaining >= paddingSamples);
                    if (reverse) {
                        SampleUtil::clear(&buffer[samplesRemaining - paddingSamples], paddingSamples);
                    } else {
                        SampleUtil::clear(buffer, paddingSamples);
                        buffer += paddingSamples;
                    }
                    samplesRemaining -= paddingSamples;
                    remainingFrameIndexRange.shrinkFront(paddingFrameIndexRange.length());
                    result = ReadResult::PARTIALLY_AVAILABLE;
                }
                const SINT chunkSamples =
                        CachingReaderChunk::frames2samples(bufferedFrameIndexRange.length());
                DEBUG_ASSERT(chunkSamples > 0);
                if (!reverse) {
                    buffer += chunkSamples;
                }
                DEBUG_ASSERT(samplesRemaining >= chunkSamples);
                samplesRemaining -= chunkSamples;
                remainingFrameIndexRange.shrinkFront(bufferedFrameIndexRange.length());
            }
        }
    }
    // Finally fill the remaining buffer with silence
    DEBUG_ASSERT(samplesRemaining >= 0);
    if (samplesRemaining > 0) {
        SampleUtil::clear(buffer, samplesRemaining);
        result = ReadResult::PARTIALLY_AVAILABLE;
    }
    return result;
}
510

511
// Pre-fetches the chunks covered by the given hints: allocates missing
// chunks, submits read requests for them to the worker, and wakes the
// worker if any request was queued. Chunks already cached are freshened.
void CachingReader::hintAndMaybeWake(const HintVector& hintList) {
    // If no file is loaded, skip.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return;
    }

    // For every chunk that the hints indicated, check if it is in the cache. If
    // any are not, then wake.
    bool shouldWake = false;

    for (const auto& hint: hintList) {
        SINT hintFrame = hint.frame;
        SINT hintFrameCount = hint.frameCount;

        // Handle some special length values
        if (hintFrameCount == Hint::kFrameCountForward) {
            hintFrameCount = kDefaultHintFrames;
        } else if (hintFrameCount == Hint::kFrameCountBackward) {
            // Extend the hinted region backwards from the given frame.
            hintFrame -= kDefaultHintFrames;
            hintFrameCount = kDefaultHintFrames;
            if (hintFrame < 0) {
                // Clamp the hinted region at the start of the track.
                hintFrameCount += hintFrame;
                if (hintFrameCount <= 0) {
                    continue;
                }
                hintFrame = 0;
            }
        }

        VERIFY_OR_DEBUG_ASSERT(hintFrameCount >= 0) {
            kLogger.warning() << "CachingReader: Ignoring negative hint length.";
            continue;
        }

        // Only hint within the currently readable part of the track.
        const auto readableFrameIndexRange = intersect(
                m_readableFrameIndexRange,
                mixxx::IndexRange::forward(hintFrame, hintFrameCount));
        if (readableFrameIndexRange.empty()) {
            continue;
        }

        const int firstChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.start());
        const int lastChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.end() - 1);
        for (int chunkIndex = firstChunkIndex; chunkIndex <= lastChunkIndex; ++chunkIndex) {
            CachingReaderChunkForOwner* pChunk = lookupChunk(chunkIndex);
            if (!pChunk) {
                shouldWake = true;
                pChunk = allocateChunkExpireLRU(chunkIndex);
                if (!pChunk) {
                    kLogger.warning()
                            << "Failed to allocate chunk"
                            << chunkIndex
                            << "for read request";
                    continue;
                }
                // Do not insert the allocated chunk into the MRU/LRU list,
                // because it will be handed over to the worker immediately
                CachingReaderChunkReadRequest request;
                request.giveToWorker(pChunk);
                if (kLogger.traceEnabled()) {
                    kLogger.trace()
                            << "Requesting read of chunk"
                            << request.chunk;
                }
                if (m_chunkReadRequestFIFO.write(&request, 1) != 1) {
                    kLogger.warning()
                            << "Failed to submit read request for chunk"
                            << chunkIndex;
                    // Revoke the chunk from the worker and free it
                    pChunk->takeFromWorker();
                    freeChunk(pChunk);
                }
            } else if (pChunk->getState() == CachingReaderChunkForOwner::READY) {
                // This will cause the chunk to be 'freshened' in the cache. The
                // chunk will be moved to the end of the LRU list.
                freshenChunk(pChunk);
            }
        }
    }

    // If there are chunks to be read, wake up.
    if (shouldWake) {
        m_worker.workReady();
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc