mixxxdj / mixxx, build 9930284157 (push via github)
14 Jul 2024 07:42PM UTC. Coverage: 40.502% (-0.002% from 40.504%)

Commit by daschuer: Pull latest translations from https://www.transifex.com/mixxx-dj-software/mixxxdj/mixxx2-4/. Compile QM files out of TS files that are used by the localized app.

31256 of 77171 relevant lines covered (40.5%), 63524.16 hits per line.

Source file: /src/engine/cachingreader/cachingreader.cpp (73.56% of lines covered)

#include "engine/cachingreader/cachingreader.h"

#include <QtDebug>

#include "moc_cachingreader.cpp"
#include "util/assert.h"
#include "util/compatibility/qatomic.h"
#include "util/counter.h"
#include "util/logger.h"
#include "util/sample.h"

namespace {

mixxx::Logger kLogger("CachingReader");

// This is the default hint frameCount that is used when
// Hint::kFrameCountForward or Hint::kFrameCountBackward is provided.
// It matches 23 ms @ 44.1 kHz.
// TODO() Do we suffer cache misses if we use an audio buffer of above 23 ms?
constexpr SINT kDefaultHintFrames = 1024;

// With CachingReaderChunk::kFrames = 8192 each chunk consumes
// 8192 frames * 2 channels/frame * 4 bytes per sample = 64 KiB.
//
//     80 chunks -> 5120 KiB = 5 MiB
//
// Each deck (including sample decks) uses its own CachingReader.
// Consequently the total memory required for all allocated chunks depends
// on the number of decks. The amount of memory reserved for a single
// CachingReader must be multiplied by the number of decks to calculate
// the total amount!
//
// NOTE(uklotzde, 2019-09-05): Reduce this number to just a few chunks
// (kNumberOfCachedChunksInMemory = 1, 2, 3, ...) for testing purposes
// to verify that the MRU/LRU cache works as expected. Even though
// massive drop outs are expected to occur Mixxx should run reliably!
constexpr SINT kNumberOfCachedChunksInMemory = 80;

} // anonymous namespace

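// Note on the cache structure: allocated chunks are tracked in a hash map
// keyed by chunk index for O(1) lookup and are additionally linked into a
// doubly-linked MRU/LRU list. Recently used chunks move to the MRU head
// (see freshenChunk()); when no free chunk is available the chunk at the
// LRU tail is evicted and reused (see allocateChunkExpireLRU()).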
CachingReader::CachingReader(const QString& group,
        UserSettingsPointer config)
        : m_pConfig(config),
          // Limit the number of in-flight requests to the worker. This should
          // prevent overloading the worker when it is not able to fetch those
          // requests from the FIFO in time. Otherwise outdated requests pile up
          // in the FIFO and it would take a long time to process them, just to
          // discard the results that most likely have already become obsolete.
          // TODO(XXX): Ideally the request FIFO would be implemented as a ring
          // buffer, where new requests replace old requests when full. Those
          // old requests need to be returned immediately to the CachingReader
          // that must take ownership and free them!!!
          m_chunkReadRequestFIFO(kNumberOfCachedChunksInMemory / 4),
          // The capacity of the back channel must be equal to the number of
          // allocated chunks, because the worker uses writeBlocking(). Otherwise
          // the worker could get stuck in a hot loop!!!
          m_readerStatusUpdateFIFO(kNumberOfCachedChunksInMemory),
          m_state(STATE_IDLE),
          m_mruCachingReaderChunk(nullptr),
          m_lruCachingReaderChunk(nullptr),
          m_sampleBuffer(CachingReaderChunk::kSamples * kNumberOfCachedChunksInMemory),
          m_worker(group, &m_chunkReadRequestFIFO, &m_readerStatusUpdateFIFO) {
    m_allocatedCachingReaderChunks.reserve(kNumberOfCachedChunksInMemory);
    // Divide up the allocated raw memory buffer into total_chunks
    // chunks. Initialize each chunk to hold nothing and add it to the free
    // list.
    for (SINT i = 0; i < kNumberOfCachedChunksInMemory; ++i) {
        CachingReaderChunkForOwner* c =
                new CachingReaderChunkForOwner(
                        mixxx::SampleBuffer::WritableSlice(
                                m_sampleBuffer,
                                CachingReaderChunk::kSamples * i,
                                CachingReaderChunk::kSamples));
        m_chunks.push_back(c);
        m_freeChunks.push_back(c);
    }

    // Forward signals from worker
    connect(&m_worker, &CachingReaderWorker::trackLoading,
            this, &CachingReader::trackLoading,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoaded,
            this, &CachingReader::trackLoaded,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoadFailed,
            this, &CachingReader::trackLoadFailed,
            Qt::DirectConnection);

    m_worker.start(QThread::HighPriority);
}

CachingReader::~CachingReader() {
    m_worker.quitWait();
    qDeleteAll(m_chunks);
}

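// Unlinks the chunk from the MRU/LRU list, marks it as free, and returns
// it to the free list for reuse.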
void CachingReader::freeChunkFromList(CachingReaderChunkForOwner* pChunk) {
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);
    pChunk->free();
    m_freeChunks.push_back(pChunk);
}

void CachingReader::freeChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() != CachingReaderChunkForOwner::READ_PENDING);

    const int removed = m_allocatedCachingReaderChunks.remove(pChunk->getIndex());
    Q_UNUSED(removed); // only used in DEBUG_ASSERT
    // We'll tolerate the chunk not being in allocatedCachingReaderChunks,
    // because sometimes a chunk is freed right after it was allocated.
    DEBUG_ASSERT(removed <= 1);

    freeChunkFromList(pChunk);
}

void CachingReader::freeAllChunks() {
    for (const auto& pChunk : std::as_const(m_chunks)) {
        // We will receive CHUNK_READ_INVALID for all pending chunk reads
        // which should free the chunks individually.
        if (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING) {
            continue;
        }

        if (pChunk->getState() != CachingReaderChunkForOwner::FREE) {
            freeChunkFromList(pChunk);
        }
    }
    DEBUG_ASSERT(!m_mruCachingReaderChunk);
    DEBUG_ASSERT(!m_lruCachingReaderChunk);

    m_allocatedCachingReaderChunks.clear();
}

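// Takes a chunk from the free list, initializes it for the given chunk
// index, and registers it in the allocation hash. Returns nullptr if no
// free chunk is available.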
CachingReaderChunkForOwner* CachingReader::allocateChunk(SINT chunkIndex) {
    if (m_freeChunks.empty()) {
        return nullptr;
    }
    CachingReaderChunkForOwner* pChunk = m_freeChunks.front();
    m_freeChunks.pop_front();

    pChunk->init(chunkIndex);

    m_allocatedCachingReaderChunks.insert(chunkIndex, pChunk);

    return pChunk;
}

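// Like allocateChunk(), but when the free list is empty the least
// recently used chunk is evicted and reused for the new chunk index.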
CachingReaderChunkForOwner* CachingReader::allocateChunkExpireLRU(SINT chunkIndex) {
    auto* pChunk = allocateChunk(chunkIndex);
    if (!pChunk) {
        if (m_lruCachingReaderChunk) {
            freeChunk(m_lruCachingReaderChunk);
            pChunk = allocateChunk(chunkIndex);
        } else {
            kLogger.warning() << "No cached LRU chunk available for freeing";
        }
    }
    if (kLogger.traceEnabled()) {
        kLogger.trace() << "allocateChunkExpireLRU" << chunkIndex << pChunk;
    }
    return pChunk;
}

CachingReaderChunkForOwner* CachingReader::lookupChunk(SINT chunkIndex) {
    // Defaults to nullptr if it's not in the hash.
    auto* pChunk = m_allocatedCachingReaderChunks.value(chunkIndex, nullptr);
    DEBUG_ASSERT(!pChunk || pChunk->getIndex() == chunkIndex);
    return pChunk;
}

void CachingReader::freshenChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() == CachingReaderChunkForOwner::READY);
    if (kLogger.traceEnabled()) {
        kLogger.trace()
                << "freshenChunk()"
                << pChunk->getIndex()
                << pChunk;
    }

    // Remove the chunk from the MRU/LRU list
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);

    // Reinsert as the new head of the MRU list
    pChunk->insertIntoListBefore(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk,
            m_mruCachingReaderChunk);
}

CachingReaderChunkForOwner* CachingReader::lookupChunkAndFreshen(SINT chunkIndex) {
    auto* pChunk = lookupChunk(chunkIndex);
    if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
        freshenChunk(pChunk);
    }
    return pChunk;
}

// Invoked from the UI thread!!
void CachingReader::newTrack(TrackPointer pTrack) {
    auto newState = pTrack ? STATE_TRACK_LOADING : STATE_TRACK_UNLOADING;
    auto oldState = m_state.fetchAndStoreAcquire(newState);

    // TODO():
    // BaseTrackPlayerImpl::slotLoadTrack() distributes the new track via
    // emit loadingTrack(pNewTrack, pOldTrack);
    // but the new track may change if we load another track while the previous
    // one is still loading. This leads to inconsistent states, for example a
    // different track in the Mixxx title and in the deck label.
    if (oldState == STATE_TRACK_LOADING &&
            newState == STATE_TRACK_LOADING) {
        kLogger.warning()
                << "Loading a new track while loading a track may lead to inconsistent states";
    }
    m_worker.newTrack(std::move(pTrack));
}

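// State transitions (as implemented here and in newTrack() above):
// newTrack() switches to STATE_TRACK_LOADING or STATE_TRACK_UNLOADING;
// process() advances to STATE_TRACK_LOADED on a TRACK_LOADED status
// update and back to STATE_IDLE on TRACK_UNLOADED.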
// Called from the engine thread
void CachingReader::process() {
    ReaderStatusUpdate update;
    while (m_readerStatusUpdateFIFO.read(&update, 1) == 1) {
        auto* pChunk = update.takeFromWorker();
        if (pChunk) {
            // Result of a read request (with a chunk)
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) != STATE_IDLE);
            DEBUG_ASSERT(
                    update.status == CHUNK_READ_SUCCESS ||
                    update.status == CHUNK_READ_EOF ||
                    update.status == CHUNK_READ_INVALID ||
                    update.status == CHUNK_READ_DISCARDED);
            if (m_state.loadAcquire() == STATE_TRACK_LOADING) {
                // Discard all results from pending read requests for the
                // previous track before the next track has been loaded.
                freeChunk(pChunk);
                continue;
            }
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED);
            if (update.status == CHUNK_READ_SUCCESS) {
                // Insert or freshen the chunk in the MRU/LRU list after
                // obtaining ownership from the worker.
                freshenChunk(pChunk);
            } else {
                // Discard chunks that don't carry any data
                freeChunk(pChunk);
            }
            // Adjust the readable frame index range (if available)
            if (update.status != CHUNK_READ_DISCARDED) {
                m_readableFrameIndexRange = intersect(
                        m_readableFrameIndexRange,
                        update.readableFrameIndexRange());
            }
        } else {
            // State update (without a chunk)
            if (update.status == TRACK_LOADED) {
                // We have a new track ready to go.
                // Assert that we either had STATE_TRACK_LOADING before and all
                // chunks in the m_readerStatusUpdateFIFO have been discarded,
                // or that the cache has already been cleared.
                // In case of two consecutive load events we receive two
                // consecutive TRACK_LOADED updates without a chunk in between;
                // assert this here.
                DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                        (atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED &&
                                !m_mruCachingReaderChunk && !m_lruCachingReaderChunk));
                // Now also purge the recently used chunk list from the old track.
                if (m_mruCachingReaderChunk || m_lruCachingReaderChunk) {
                    DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING);
                    freeAllChunks();
                }
                // Reset the readable frame index range
                m_readableFrameIndexRange = update.readableFrameIndexRange();
                m_state.storeRelease(STATE_TRACK_LOADED);
            } else {
                DEBUG_ASSERT(update.status == TRACK_UNLOADED);
                // This message could be processed later when a new
                // track is already loading! In this case the TRACK_LOADED will
                // be the very next status update.
                if (!m_state.testAndSetRelease(STATE_TRACK_UNLOADING, STATE_IDLE)) {
                    DEBUG_ASSERT(
                            atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                            atomicLoadRelaxed(m_state) == STATE_IDLE);
                }
            }
        }
    }
}

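// Usage sketch (hypothetical caller on the engine thread): request a block
// of samples into a preallocated buffer and handle the case where no data
// could be delivered, e.g.
//
//     CSAMPLE buffer[kBufferSize]; // kBufferSize: caller-defined, even
//     if (pReader->read(startSample, kBufferSize, false, buffer) ==
//             CachingReader::ReadResult::UNAVAILABLE) {
//         SampleUtil::clear(buffer, kBufferSize); // output silence
//     }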
CachingReader::ReadResult CachingReader::read(SINT startSample, SINT numSamples, bool reverse, CSAMPLE* buffer) {
    // Check for bad inputs
    // Refuse to read from an invalid position
    VERIFY_OR_DEBUG_ASSERT(startSample % CachingReaderChunk::kChannels == 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "startSample =" << startSample;
        return ReadResult::UNAVAILABLE;
    }
    // Refuse to read an invalid number of samples
    VERIFY_OR_DEBUG_ASSERT(numSamples % CachingReaderChunk::kChannels == 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "numSamples =" << numSamples;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(numSamples >= 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "numSamples =" << numSamples;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(buffer) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "buffer =" << buffer;
        return ReadResult::UNAVAILABLE;
    }

    // If no track is loaded, don't do anything.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return ReadResult::UNAVAILABLE;
    }

    // If asked to read 0 samples, don't do anything. (This is a perfectly
    // reasonable request that happens sometimes.)
    if (numSamples == 0) {
        return ReadResult::AVAILABLE; // nothing to do
    }

    // The samples are always read in forward direction.
    // If reverse = true, the frames are copied in reverse order to the
    // destination buffer.
    SINT sample = startSample;
    if (reverse) {
        // Start with the last sample in buffer
        sample -= numSamples;
    }

    SINT samplesRemaining = numSamples;

    // Process new messages from the reader thread before looking up
    // the first chunk and to update m_readableFrameIndexRange
    process();

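    // Note: the bookkeeping below is done in frame indices, while the
    // caller's buffer is addressed in samples
    // (CachingReaderChunk::kChannels samples per frame).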
    auto remainingFrameIndexRange =
            mixxx::IndexRange::forward(
                    CachingReaderChunk::samples2frames(sample),
                    CachingReaderChunk::samples2frames(numSamples));
    DEBUG_ASSERT(!remainingFrameIndexRange.empty());

    auto result = ReadResult::AVAILABLE;
    if (!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty()) {
        // Fill the buffer up to the first readable sample with
        // silence. This may happen when the engine is in preroll,
        // i.e. if the frame index points to a region before the first
        // track sample.
        if (remainingFrameIndexRange.start() < m_readableFrameIndexRange.start()) {
            const auto prerollFrameIndexRange =
                    mixxx::IndexRange::between(
                            remainingFrameIndexRange.start(),
                            m_readableFrameIndexRange.start());
            DEBUG_ASSERT(prerollFrameIndexRange.length() <= remainingFrameIndexRange.length());
            if (kLogger.debugEnabled()) {
                kLogger.debug()
                        << "Preroll: Filling the first"
                        << prerollFrameIndexRange.length()
                        << "sample frames in"
                        << remainingFrameIndexRange
                        << "with silence. Audio signal starts at"
                        << m_readableFrameIndexRange.start();
            }
            const SINT prerollFrames = prerollFrameIndexRange.length();
            const SINT prerollSamples = CachingReaderChunk::frames2samples(prerollFrames);
            DEBUG_ASSERT(samplesRemaining >= prerollSamples);
            if (reverse) {
                SampleUtil::clear(&buffer[samplesRemaining - prerollSamples], prerollSamples);
            } else {
                SampleUtil::clear(buffer, prerollSamples);
                buffer += prerollSamples;
            }
            samplesRemaining -= prerollSamples;
            remainingFrameIndexRange.shrinkFront(prerollFrames);
            result = ReadResult::PARTIALLY_AVAILABLE;
        }

        // Read the actual samples from the audio source into the
        // buffer. The buffer will be filled with silence for every
        // unreadable sample or samples outside of the track region
        // later at the end of this function.
        if (!remainingFrameIndexRange.empty()) {
            // The intersection between the readable samples from the track
            // and the requested samples is not empty, so start reading.
            DEBUG_ASSERT(!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty());
            DEBUG_ASSERT(remainingFrameIndexRange.start() >= m_readableFrameIndexRange.start());

            const SINT firstChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.start());
            SINT lastChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
            for (SINT chunkIndex = firstChunkIndex;
                    chunkIndex <= lastChunkIndex;
                    ++chunkIndex) {

                // Process new messages from the reader thread before looking up
                // the next chunk
                process();

                // m_readableFrameIndexRange might change with every read operation!
                // On a cache miss audio data will be read from the audio source in
                // process() and the readable frame index range might get adjusted
                // if decoding errors occur.
                remainingFrameIndexRange =
                        intersect(
                                remainingFrameIndexRange,
                                m_readableFrameIndexRange);

                if (remainingFrameIndexRange.empty()) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Failed to read more sample data";
                    break;
                }
                lastChunkIndex =
                        CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
                if (lastChunkIndex < chunkIndex) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Abort reading of sample data";
                    break;
                }

                mixxx::IndexRange bufferedFrameIndexRange;
                const CachingReaderChunkForOwner* const pChunk = lookupChunkAndFreshen(chunkIndex);
                if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
                    if (reverse) {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFramesReverse(
                                        &buffer[samplesRemaining],
                                        remainingFrameIndexRange);
                    } else {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFrames(
                                        buffer,
                                        remainingFrameIndexRange);
                    }
                } else {
                    // This will happen regularly when jumping to a new position
                    // within the file and decoding of the audio data is still
                    // pending.
                    DEBUG_ASSERT(!pChunk ||
                            (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING));
                    Counter("CachingReader::read(): Failed to read chunk on cache miss")++;
                    if (kLogger.traceEnabled()) {
                        kLogger.trace()
                                << "Cache miss for chunk with index"
                                << chunkIndex
                                << "- abort reading";
                    }
                    // Abort reading (see below)
                    DEBUG_ASSERT(bufferedFrameIndexRange.empty());
                }
                if (bufferedFrameIndexRange.empty()) {
                    if (samplesRemaining == numSamples) {
                        DEBUG_ASSERT(chunkIndex == firstChunkIndex);
                        // We have not read a single frame due to a cache miss on
                        // the first required chunk. Inform the calling code that
                        // no data has been written into the buffer and that it
                        // has to handle this situation appropriately.
                        return ReadResult::UNAVAILABLE;
                    }
                    // No more readable data available. Exit the loop and
                    // finally fill the remaining buffer with silence.
                    break;
                }
                DEBUG_ASSERT(bufferedFrameIndexRange.isSubrangeOf(remainingFrameIndexRange));
                if (remainingFrameIndexRange.start() < bufferedFrameIndexRange.start()) {
                    const auto paddingFrameIndexRange =
                            mixxx::IndexRange::between(
                                    remainingFrameIndexRange.start(),
                                    bufferedFrameIndexRange.start());
                    kLogger.warning()
                            << "Inserting"
                            << paddingFrameIndexRange.length()
                            << "frames of silence for unreadable audio data";
                    SINT paddingSamples = CachingReaderChunk::frames2samples(paddingFrameIndexRange.length());
                    DEBUG_ASSERT(samplesRemaining >= paddingSamples);
                    if (reverse) {
                        SampleUtil::clear(&buffer[samplesRemaining - paddingSamples], paddingSamples);
                    } else {
                        SampleUtil::clear(buffer, paddingSamples);
                        buffer += paddingSamples;
                    }
                    samplesRemaining -= paddingSamples;
                    remainingFrameIndexRange.shrinkFront(paddingFrameIndexRange.length());
                    result = ReadResult::PARTIALLY_AVAILABLE;
                }
                const SINT chunkSamples =
                        CachingReaderChunk::frames2samples(bufferedFrameIndexRange.length());
                DEBUG_ASSERT(chunkSamples > 0);
                if (!reverse) {
                    buffer += chunkSamples;
                }
                DEBUG_ASSERT(samplesRemaining >= chunkSamples);
                samplesRemaining -= chunkSamples;
                remainingFrameIndexRange.shrinkFront(bufferedFrameIndexRange.length());
            }
        }
    }
    // Finally fill the remaining buffer with silence
    DEBUG_ASSERT(samplesRemaining >= 0);
    if (samplesRemaining > 0) {
        SampleUtil::clear(buffer, samplesRemaining);
        result = ReadResult::PARTIALLY_AVAILABLE;
    }
    return result;
}

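// Hint::kFrameCountForward and Hint::kFrameCountBackward are sentinel
// frameCount values that request a default-sized window (kDefaultHintFrames)
// ahead of or behind the hinted frame, respectively.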
void CachingReader::hintAndMaybeWake(const HintVector& hintList) {
    // If no file is loaded, skip.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return;
    }

    // For every chunk that the hints indicate, check if it is in the cache.
    // If any is not, then wake the worker.
    bool shouldWake = false;

    for (const auto& hint : hintList) {
        SINT hintFrame = hint.frame;
        SINT hintFrameCount = hint.frameCount;

        // Handle some special length values
        if (hintFrameCount == Hint::kFrameCountForward) {
            hintFrameCount = kDefaultHintFrames;
        } else if (hintFrameCount == Hint::kFrameCountBackward) {
            hintFrame -= kDefaultHintFrames;
            hintFrameCount = kDefaultHintFrames;
            if (hintFrame < 0) {
                hintFrameCount += hintFrame;
                if (hintFrameCount <= 0) {
                    continue;
                }
                hintFrame = 0;
            }
        }

        VERIFY_OR_DEBUG_ASSERT(hintFrameCount >= 0) {
            kLogger.warning() << "CachingReader: Ignoring negative hint length.";
            continue;
        }

        const auto readableFrameIndexRange = intersect(
                m_readableFrameIndexRange,
                mixxx::IndexRange::forward(hintFrame, hintFrameCount));
        if (readableFrameIndexRange.empty()) {
            continue;
        }

        const int firstChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.start());
        const int lastChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.end() - 1);
        for (int chunkIndex = firstChunkIndex; chunkIndex <= lastChunkIndex; ++chunkIndex) {
            CachingReaderChunkForOwner* pChunk = lookupChunk(chunkIndex);
            if (!pChunk) {
                shouldWake = true;
                pChunk = allocateChunkExpireLRU(chunkIndex);
                if (!pChunk) {
                    kLogger.warning()
                            << "Failed to allocate chunk"
                            << chunkIndex
                            << "for read request";
                    continue;
                }
                // Do not insert the allocated chunk into the MRU/LRU list,
                // because it will be handed over to the worker immediately
                CachingReaderChunkReadRequest request;
                request.giveToWorker(pChunk);
                if (kLogger.traceEnabled()) {
                    kLogger.trace()
                            << "Requesting read of chunk"
                            << request.chunk;
                }
                if (m_chunkReadRequestFIFO.write(&request, 1) != 1) {
                    kLogger.warning()
                            << "Failed to submit read request for chunk"
                            << chunkIndex;
                    // Revoke the chunk from the worker and free it
                    pChunk->takeFromWorker();
                    freeChunk(pChunk);
                }
            } else if (pChunk->getState() == CachingReaderChunkForOwner::READY) {
                // This will cause the chunk to be 'freshened' in the cache. The
                // chunk will be moved to the head of the MRU list.
                freshenChunk(pChunk);
            }
        }
    }

    // If there are chunks to be read, wake up.
    if (shouldWake) {
        m_worker.workReady();
    }
}