• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mixxxdj / mixxx / 9882284892

10 Jul 2024 10:22PM CUT coverage: 31.828% (-0.01%) from 31.838%
9882284892

push

github

web-flow
Merge pull request #13458 from daschuer/hash_clean_up

hash clean up

0 of 7 new or added lines in 2 files covered. (0.0%)

10 existing lines in 2 files now uncovered.

32859 of 103240 relevant lines covered (31.83%)

47670.02 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

70.61
/src/engine/cachingreader/cachingreader.cpp
1
#include "engine/cachingreader/cachingreader.h"
2

3
#include <QtDebug>
4

5
#include "moc_cachingreader.cpp"
6
#include "util/assert.h"
7
#include "util/compatibility/qatomic.h"
8
#include "util/counter.h"
9
#include "util/logger.h"
10
#include "util/sample.h"
11

12
namespace {

mixxx::Logger kLogger("CachingReader");

// Default hint frame count, used when a hint specifies the special values
// Hint::kFrameCountForward or Hint::kFrameCountBackward instead of an
// explicit count. It matches 23 ms @ 44.1 kHz.
// TODO() Do we suffer cache misses if we use an audio buffer of above 23 ms?
constexpr SINT kDefaultHintFrames = 1024;

// With CachingReaderChunk::kFrames = 8192 each chunk consumes
// 8192 frames * 2 channels/frame * 4 bytes per sample = 65 kB for a
// stereo chunk.
//
//     80 chunks ->  5120 KB =  5 MB
//
// Each deck (including sample decks) will use its own CachingReader.
// Consequently the total memory required for all allocated chunks depends
// on the number of decks. The amount of memory reserved for a single
// CachingReader must be multiplied by the number of decks to calculate
// the total amount!
//
// NOTE(uklotzde, 2019-09-05): Reduce this number to just a few chunks
// (kNumberOfCachedChunksInMemory = 1, 2, 3, ...) for testing purposes
// to verify that the MRU/LRU cache works as expected. Even though
// massive drop outs are expected to occur Mixxx should run reliably!
constexpr SINT kNumberOfCachedChunksInMemory = 80;

} // anonymous namespace
39

40
// Constructs the caching reader for a single deck identified by `group`.
// Pre-allocates one contiguous sample buffer, carves it into
// kNumberOfCachedChunksInMemory equally sized chunks that initially all
// sit on the free list, wires up the worker's signals, and starts the
// worker thread with high priority.
CachingReader::CachingReader(const QString& group,
        UserSettingsPointer config,
        mixxx::audio::ChannelCount maxSupportedChannel)
        : m_pConfig(config),
          // Limit the number of in-flight requests to the worker. This should
          // prevent to overload the worker when it is not able to fetch those
          // requests from the FIFO timely. Otherwise outdated requests pile up
          // in the FIFO and it would take a long time to process them, just to
          // discard the results that most likely have already become obsolete.
          // TODO(XXX): Ideally the request FIFO would be implemented as a ring
          // buffer, where new requests replace old requests when full. Those
          // old requests need to be returned immediately to the CachingReader
          // that must take ownership and free them!!!
          m_chunkReadRequestFIFO(kNumberOfCachedChunksInMemory / 4),
          // The capacity of the back channel must be equal to the number of
          // allocated chunks, because the worker use writeBlocking(). Otherwise
          // the worker could get stuck in a hot loop!!!
          m_readerStatusUpdateFIFO(kNumberOfCachedChunksInMemory),
          m_state(STATE_IDLE),
          m_mruCachingReaderChunk(nullptr),
          m_lruCachingReaderChunk(nullptr),
          // One backing buffer for all chunks: frames * channels * chunk count.
          m_sampleBuffer(CachingReaderChunk::kFrames * maxSupportedChannel *
                  kNumberOfCachedChunksInMemory),
          m_worker(group,
                  &m_chunkReadRequestFIFO,
                  &m_readerStatusUpdateFIFO,
                  maxSupportedChannel) {
    m_allocatedCachingReaderChunks.reserve(kNumberOfCachedChunksInMemory);
    // Divide up the allocated raw memory buffer into total_chunks
    // chunks. Initialize each chunk to hold nothing and add it to the free
    // list.
    for (SINT i = 0; i < kNumberOfCachedChunksInMemory; ++i) {
        CachingReaderChunkForOwner* c =
                new CachingReaderChunkForOwner(
                        mixxx::SampleBuffer::WritableSlice(
                                m_sampleBuffer,
                                CachingReaderChunk::kFrames * maxSupportedChannel * i,
                                CachingReaderChunk::kFrames * maxSupportedChannel));
        m_chunks.push_back(c);
        m_freeChunks.push_back(c);
    }

    // Forward signals from worker. Qt::DirectConnection means the
    // forwarded signal is emitted synchronously from the worker's thread.
    connect(&m_worker, &CachingReaderWorker::trackLoading,
            this, &CachingReader::trackLoading,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoaded,
            this, &CachingReader::trackLoaded,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoadFailed,
            this, &CachingReader::trackLoadFailed,
            Qt::DirectConnection);

    m_worker.start(QThread::HighPriority);
}
95

96
CachingReader::~CachingReader() {
    // Shut the worker thread down first so that no chunk memory is
    // touched while it is being released.
    m_worker.quitWait();
    // All chunks were allocated with new in the constructor.
    for (const auto& pChunk : m_chunks) {
        delete pChunk;
    }
}
100

101
// Unlinks pChunk from the MRU/LRU double-linked list, releases its
// contents via free(), and appends it to the free list for reuse.
// The order of these three steps is deliberate: the chunk must be
// unlinked before it is freed and only then offered for reuse.
void CachingReader::freeChunkFromList(CachingReaderChunkForOwner* pChunk) {
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);
    pChunk->free();
    m_freeChunks.push_back(pChunk);
}
108

UNCOV
109
// Frees a single chunk: drops it from the index hash (if present) and
// returns it to the free list. Must not be called for chunks that are
// still owned by the worker (READ_PENDING).
void CachingReader::freeChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() != CachingReaderChunkForOwner::READ_PENDING);

    const int removed = m_allocatedCachingReaderChunks.remove(pChunk->getIndex());
    Q_UNUSED(removed); // only used in DEBUG_ASSERT
    // We'll tolerate not being in allocatedCachingReaderChunks,
    // because sometimes you free a chunk right after you allocated it.
    DEBUG_ASSERT(removed <= 1);

    freeChunkFromList(pChunk);
}
121

122
void CachingReader::freeAllChunks() {
1✔
123
    for (const auto& pChunk : std::as_const(m_chunks)) {
81✔
124
        // We will receive CHUNK_READ_INVALID for all pending chunk reads
125
        // which should free the chunks individually.
126
        if (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING) {
80✔
127
            continue;
×
128
        }
129

130
        if (pChunk->getState() != CachingReaderChunkForOwner::FREE) {
80✔
131
            freeChunkFromList(pChunk);
3✔
132
        }
133
    }
134
    DEBUG_ASSERT(!m_mruCachingReaderChunk);
1✔
135
    DEBUG_ASSERT(!m_lruCachingReaderChunk);
1✔
136

137
    m_allocatedCachingReaderChunks.clear();
1✔
138
}
1✔
139

140
// Takes a chunk from the free list, binds it to chunkIndex, and
// registers it in the index hash. Returns nullptr when no free chunk
// is available.
CachingReaderChunkForOwner* CachingReader::allocateChunk(SINT chunkIndex) {
    if (m_freeChunks.empty()) {
        return nullptr;
    }
    auto* const pUnusedChunk = m_freeChunks.front();
    m_freeChunks.pop_front();
    pUnusedChunk->init(chunkIndex);
    m_allocatedCachingReaderChunks.insert(chunkIndex, pUnusedChunk);
    return pUnusedChunk;
}
153

154
// Like allocateChunk(), but when the free list is exhausted the least
// recently used chunk is evicted and the allocation retried once.
// Returns nullptr only if even eviction is impossible.
CachingReaderChunkForOwner* CachingReader::allocateChunkExpireLRU(SINT chunkIndex) {
    auto* pAllocated = allocateChunk(chunkIndex);
    if (pAllocated == nullptr) {
        if (!m_lruCachingReaderChunk) {
            kLogger.warning() << "No cached LRU chunk available for freeing";
        } else {
            // Evict the tail of the LRU list and retry.
            freeChunk(m_lruCachingReaderChunk);
            pAllocated = allocateChunk(chunkIndex);
        }
    }
    if (kLogger.traceEnabled()) {
        kLogger.trace() << "allocateChunkExpireLRU" << chunkIndex << pAllocated;
    }
    return pAllocated;
}
169

170
// Looks up the allocated chunk for chunkIndex in the index hash.
// Returns nullptr when the index is not cached.
CachingReaderChunkForOwner* CachingReader::lookupChunk(SINT chunkIndex) {
    CachingReaderChunkForOwner* pCachedChunk =
            m_allocatedCachingReaderChunks.value(chunkIndex, nullptr);
    DEBUG_ASSERT(!pCachedChunk || pCachedChunk->getIndex() == chunkIndex);
    return pCachedChunk;
}
176

177
// Moves a READY chunk to the head of the MRU list so it becomes the
// last candidate for LRU eviction.
void CachingReader::freshenChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() == CachingReaderChunkForOwner::READY);
    if (kLogger.traceEnabled()) {
        kLogger.trace() << "freshenChunk()" << pChunk->getIndex() << pChunk;
    }

    // Unlink the chunk from wherever it currently sits in the
    // MRU/LRU list ...
    pChunk->removeFromList(&m_mruCachingReaderChunk, &m_lruCachingReaderChunk);

    // ... and relink it as the new head of the MRU list.
    pChunk->insertIntoListBefore(&m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk,
            m_mruCachingReaderChunk);
}
198

199
// Looks up a chunk and, if it already carries decoded data (READY),
// promotes it to the head of the MRU list. Returns the chunk in any
// state, or nullptr when not cached.
CachingReaderChunkForOwner* CachingReader::lookupChunkAndFreshen(SINT chunkIndex) {
    auto* pCachedChunk = lookupChunk(chunkIndex);
    if (!pCachedChunk) {
        return nullptr;
    }
    if (pCachedChunk->getState() == CachingReaderChunkForOwner::READY) {
        freshenChunk(pCachedChunk);
    }
    return pCachedChunk;
}
206

207
// Invoked from the UI thread!!
208
void CachingReader::newTrack(TrackPointer pTrack) {
97✔
209
    auto newState = pTrack ? STATE_TRACK_LOADING : STATE_TRACK_UNLOADING;
97✔
210
    auto oldState = m_state.fetchAndStoreAcquire(newState);
97✔
211

212
    // TODO():
213
    // BaseTrackPlayerImpl::slotLoadTrack() distributes the new track via
214
    // emit loadingTrack(pNewTrack, pOldTrack);
215
    // but the newTrack may change if we load a new track while the previous one
216
    // is still loading. This leads to inconsistent states for example a different
217
    // track in the Mixxx Title and the Deck label.
218
    if (oldState == STATE_TRACK_LOADING &&
97✔
219
            newState == STATE_TRACK_LOADING) {
220
        kLogger.warning()
4✔
221
                << "Loading a new track while loading a track may lead to inconsistent states";
2✔
222
    }
223
    m_worker.newTrack(std::move(pTrack));
97✔
224
}
97✔
225

226
// Called from the engine thread
// Drains all pending status updates from the worker's back-channel FIFO
// and applies them to the cache: successfully read chunks are inserted
// into (or freshened in) the MRU/LRU list, failed reads are freed, and
// TRACK_LOADED/TRACK_UNLOADED messages transition m_state and reset
// m_readableFrameIndexRange.
void CachingReader::process() {
    ReaderStatusUpdate update;
    while (m_readerStatusUpdateFIFO.read(&update, 1) == 1) {
        auto* pChunk = update.takeFromWorker();
        if (pChunk) {
            // Result of a read request (with a chunk)
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) != STATE_IDLE);
            DEBUG_ASSERT(
                    update.status == CHUNK_READ_SUCCESS ||
                    update.status == CHUNK_READ_EOF ||
                    update.status == CHUNK_READ_INVALID ||
                    update.status == CHUNK_READ_DISCARDED);
            if (m_state.loadAcquire() == STATE_TRACK_LOADING) {
                // Discard all results from pending read requests for the
                // previous track before the next track has been loaded.
                freeChunk(pChunk);
                continue;
            }
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED);
            if (update.status == CHUNK_READ_SUCCESS) {
                // Insert or freshen the chunk in the MRU/LRU list after
                // obtaining ownership from the worker.
                freshenChunk(pChunk);
            } else {
                // Discard chunks that don't carry any data
                freeChunk(pChunk);
            }
            // Adjust the readable frame index range (if available)
            if (update.status != CHUNK_READ_DISCARDED) {
                m_readableFrameIndexRange = intersect(
                        m_readableFrameIndexRange,
                        update.readableFrameIndexRange());
            }
        } else {
            // State update (without a chunk)
            if (update.status == TRACK_LOADED) {
                // We have a new Track ready to go.
                // Assert that we either have had STATE_TRACK_LOADING before and all
                // chunks in the m_readerStatusUpdateFIFO have been discarded.
                // or the cache has been already cleared.
                // In case of two consecutive load events, we receive two consecutive
                // TRACK_LOADED without a chunk in between, assert this here.
                DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                        (atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED &&
                                !m_mruCachingReaderChunk && !m_lruCachingReaderChunk));
                // now purge also the recently used chunk list from the old track.
                if (m_mruCachingReaderChunk || m_lruCachingReaderChunk) {
                    DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING);
                    freeAllChunks();
                }
                // Reset the readable frame index range
                m_readableFrameIndexRange = update.readableFrameIndexRange();
                m_state.storeRelease(STATE_TRACK_LOADED);
            } else {
                DEBUG_ASSERT(update.status == TRACK_UNLOADED);
                // This message could be processed later when a new
                // track is already loading! In this case the TRACK_LOADED will
                // be the very next status update.
                if (!m_state.testAndSetRelease(STATE_TRACK_UNLOADING, STATE_IDLE)) {
                    DEBUG_ASSERT(
                            atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                            atomicLoadRelaxed(m_state) == STATE_IDLE);
                }
            }
        }
    }
}
294

295
// Reads numSamples samples starting at startSample from the cache into
// buffer. When reverse is true the sample frames are written into the
// destination buffer in reverse order; the read through the track data
// itself always proceeds forward. Returns:
//   - AVAILABLE when the entire request was served with audio data,
//   - PARTIALLY_AVAILABLE when parts of the buffer were filled with
//     silence (preroll, unreadable data, or end of readable range),
//   - UNAVAILABLE when nothing could be read (invalid arguments, no
//     track loaded, or a cache miss on the very first chunk).
CachingReader::ReadResult CachingReader::read(SINT startSample,
        SINT numSamples,
        bool reverse,
        CSAMPLE* buffer,
        mixxx::audio::ChannelCount channelCount) {
    // Check for bad inputs
    // Refuse to read from an invalid position
    VERIFY_OR_DEBUG_ASSERT(startSample % channelCount == 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "startSample =" << startSample;
        return ReadResult::UNAVAILABLE;
    }
    // Refuse to read an invalid number of samples
    VERIFY_OR_DEBUG_ASSERT(numSamples % channelCount == 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "numSamples =" << numSamples;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(numSamples >= 0) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "numSamples =" << numSamples;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(buffer) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "buffer =" << buffer;
        return ReadResult::UNAVAILABLE;
    }

    // If no track is loaded, don't do anything.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return ReadResult::UNAVAILABLE;
    }

    // If asked to read 0 samples, don't do anything. (This is a perfectly
    // reasonable request that happens sometimes.)
    if (numSamples == 0) {
        return ReadResult::AVAILABLE; // nothing to do
    }

    // The samples are always read in forward direction.
    // If reverse = true, the frames are copied in reverse order to the
    // destination buffer.
    SINT sample = startSample;
    if (reverse) {
        // Start with the last sample in buffer
        sample -= numSamples;
    }

    SINT samplesRemaining = numSamples;

    // Process new messages from the reader thread before looking up
    // the first chunk and to update m_readableFrameIndexRange
    process();

    auto remainingFrameIndexRange =
            mixxx::IndexRange::forward(
                    CachingReaderChunk::samples2frames(sample, channelCount),
                    CachingReaderChunk::samples2frames(numSamples, channelCount));
    DEBUG_ASSERT(!remainingFrameIndexRange.empty());

    auto result = ReadResult::AVAILABLE;
    if (!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty()) {
        // Fill the buffer up to the first readable sample with
        // silence. This may happen when the engine is in preroll,
        // i.e. if the frame index points a region before the first
        // track sample.
        if (remainingFrameIndexRange.start() < m_readableFrameIndexRange.start()) {
            const auto prerollFrameIndexRange =
                    mixxx::IndexRange::between(
                            remainingFrameIndexRange.start(),
                            m_readableFrameIndexRange.start());
            DEBUG_ASSERT(prerollFrameIndexRange.length() <= remainingFrameIndexRange.length());
            if (kLogger.debugEnabled()) {
                kLogger.debug()
                        << "Preroll: Filling the first"
                        << prerollFrameIndexRange.length()
                        << "sample frames in"
                        << remainingFrameIndexRange
                        << "with silence. Audio signal starts at"
                        << m_readableFrameIndexRange.start();
            }
            const SINT prerollFrames = prerollFrameIndexRange.length();
            const SINT prerollSamples = CachingReaderChunk::frames2samples(
                    prerollFrames, channelCount);
            DEBUG_ASSERT(samplesRemaining >= prerollSamples);
            // In reverse mode the preroll region is the TAIL of the
            // destination buffer, because the output is mirrored.
            if (reverse) {
                SampleUtil::clear(&buffer[samplesRemaining - prerollSamples], prerollSamples);
            } else {
                SampleUtil::clear(buffer, prerollSamples);
                buffer += prerollSamples;
            }
            samplesRemaining -= prerollSamples;
            remainingFrameIndexRange.shrinkFront(prerollFrames);
            result = ReadResult::PARTIALLY_AVAILABLE;
        }

        // Read the actual samples from the audio source into the
        // buffer. The buffer will be filled with silence for every
        // unreadable sample or samples outside of the track region
        // later at the end of this function.
        if (!remainingFrameIndexRange.empty()) {
            // The intersection between the readable samples from the track
            // and the requested samples is not empty, so start reading.
            DEBUG_ASSERT(!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty());
            DEBUG_ASSERT(remainingFrameIndexRange.start() >= m_readableFrameIndexRange.start());

            const SINT firstChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.start());
            SINT lastChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
            for (SINT chunkIndex = firstChunkIndex;
                    chunkIndex <= lastChunkIndex;
                    ++chunkIndex) {

                // Process new messages from the reader thread before looking up
                // the next chunk
                process();

                // m_readableFrameIndexRange might change with every read operation!
                // On a cache miss audio data will be read from the audio source in
                // process() and the readable frame index range might get adjusted
                // if decoding errors occur.
                remainingFrameIndexRange =
                        intersect(
                                remainingFrameIndexRange,
                                m_readableFrameIndexRange);

                if (remainingFrameIndexRange.empty()) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Failed to read more sample data";
                    break;
                }
                // The last chunk index may shrink after the intersection above.
                lastChunkIndex =
                        CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
                if (lastChunkIndex < chunkIndex) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Abort reading of sample data";
                    break;
                }

                mixxx::IndexRange bufferedFrameIndexRange;
                const CachingReaderChunkForOwner* const pChunk = lookupChunkAndFreshen(chunkIndex);
                if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
                    if (reverse) {
                        // Reverse mode fills the destination buffer from the
                        // back, so pass the current tail position.
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFramesReverse(
                                        &buffer[samplesRemaining],
                                        channelCount,
                                        remainingFrameIndexRange);
                    } else {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFrames(
                                        buffer,
                                        channelCount,
                                        remainingFrameIndexRange);
                    }
                } else {
                    // This will happen regularly when jumping to a new position
                    // within the file and decoding of the audio data is still
                    // pending.
                    DEBUG_ASSERT(!pChunk ||
                            (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING));
                    Counter("CachingReader::read(): Failed to read chunk on cache miss")++;
                    if (kLogger.traceEnabled()) {
                        kLogger.trace()
                                << "Cache miss for chunk with index"
                                << chunkIndex
                                << "- abort reading";
                    }
                    // Abort reading (see below)
                    DEBUG_ASSERT(bufferedFrameIndexRange.empty());
                }
                if (bufferedFrameIndexRange.empty()) {
                    if (samplesRemaining == numSamples) {
                        DEBUG_ASSERT(chunkIndex == firstChunkIndex);
                        // We have not read a single frame caused by a cache miss of
                        // the first required chunk. Inform the calling code that no
                        // data has been written into the buffer and to handle this
                        // situation appropriately.
                        return ReadResult::UNAVAILABLE;
                    }
                    // No more readable data available. Exit the loop and
                    // finally fill the remaining buffer with silence.
                    break;
                }
                DEBUG_ASSERT(bufferedFrameIndexRange.isSubrangeOf(remainingFrameIndexRange));
                // If the chunk delivered data starting later than requested,
                // pad the gap with silence before consuming the delivered part.
                if (remainingFrameIndexRange.start() < bufferedFrameIndexRange.start()) {
                    const auto paddingFrameIndexRange =
                            mixxx::IndexRange::between(
                                    remainingFrameIndexRange.start(),
                                    bufferedFrameIndexRange.start());
                    kLogger.warning()
                            << "Inserting"
                            << paddingFrameIndexRange.length()
                            << "frames of silence for unreadable audio data";
                    SINT paddingSamples = CachingReaderChunk::frames2samples(
                            paddingFrameIndexRange.length(), channelCount);
                    DEBUG_ASSERT(samplesRemaining >= paddingSamples);
                    if (reverse) {
                        SampleUtil::clear(&buffer[samplesRemaining - paddingSamples], paddingSamples);
                    } else {
                        SampleUtil::clear(buffer, paddingSamples);
                        buffer += paddingSamples;
                    }
                    samplesRemaining -= paddingSamples;
                    remainingFrameIndexRange.shrinkFront(paddingFrameIndexRange.length());
                    result = ReadResult::PARTIALLY_AVAILABLE;
                }
                const SINT chunkSamples = CachingReaderChunk::frames2samples(
                        bufferedFrameIndexRange.length(), channelCount);
                DEBUG_ASSERT(chunkSamples > 0);
                if (!reverse) {
                    buffer += chunkSamples;
                }
                DEBUG_ASSERT(samplesRemaining >= chunkSamples);
                samplesRemaining -= chunkSamples;
                remainingFrameIndexRange.shrinkFront(bufferedFrameIndexRange.length());
            }
        }
    }
    // Finally fill the remaining buffer with silence
    DEBUG_ASSERT(samplesRemaining >= 0);
    if (samplesRemaining > 0) {
        SampleUtil::clear(buffer, samplesRemaining);
        result = ReadResult::PARTIALLY_AVAILABLE;
    }
    return result;
}
530

531
// Ensures that every chunk overlapping the hinted frame ranges is either
// already cached or gets scheduled for reading by the worker, and wakes
// the worker if any new read request was submitted. No-op while no
// track is loaded.
void CachingReader::hintAndMaybeWake(const HintVector& hintList) {
    // If no file is loaded, skip.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return;
    }

    // For every chunk that the hints indicated, check if it is in the cache. If
    // any are not, then wake.
    bool shouldWake = false;

    for (const auto& hint: hintList) {
        SINT hintFrame = hint.frame;
        SINT hintFrameCount = hint.frameCount;

        // Handle some special length values
        if (hintFrameCount == Hint::kFrameCountForward) {
            // Read kDefaultHintFrames ahead of hintFrame.
            hintFrameCount = kDefaultHintFrames;
        } else if (hintFrameCount == Hint::kFrameCountBackward) {
            // Read kDefaultHintFrames behind hintFrame, clamped at the
            // start of the track.
            hintFrame -= kDefaultHintFrames;
            hintFrameCount = kDefaultHintFrames;
            if (hintFrame < 0) {
                hintFrameCount += hintFrame;
                if (hintFrameCount <= 0) {
                    continue;
                }
                hintFrame = 0;
            }
        }

        VERIFY_OR_DEBUG_ASSERT(hintFrameCount >= 0) {
            kLogger.warning() << "CachingReader: Ignoring negative hint length.";
            continue;
        }

        // Restrict the hinted range to the frames that are actually readable.
        const auto readableFrameIndexRange = intersect(
                m_readableFrameIndexRange,
                mixxx::IndexRange::forward(hintFrame, hintFrameCount));
        if (readableFrameIndexRange.empty()) {
            continue;
        }

        const int firstChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.start());
        const int lastChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.end() - 1);
        for (int chunkIndex = firstChunkIndex; chunkIndex <= lastChunkIndex; ++chunkIndex) {
            CachingReaderChunkForOwner* pChunk = lookupChunk(chunkIndex);
            if (!pChunk) {
                shouldWake = true;
                pChunk = allocateChunkExpireLRU(chunkIndex);
                if (!pChunk) {
                    kLogger.warning()
                            << "Failed to allocate chunk"
                            << chunkIndex
                            << "for read request";
                    continue;
                }
                // Do not insert the allocated chunk into the MRU/LRU list,
                // because it will be handed over to the worker immediately
                CachingReaderChunkReadRequest request;
                request.giveToWorker(pChunk);
                if (kLogger.traceEnabled()) {
                    kLogger.trace()
                            << "Requesting read of chunk"
                            << request.chunk;
                }
                if (m_chunkReadRequestFIFO.write(&request, 1) != 1) {
                    kLogger.warning()
                            << "Failed to submit read request for chunk"
                            << chunkIndex;
                    // Revoke the chunk from the worker and free it
                    pChunk->takeFromWorker();
                    freeChunk(pChunk);
                }
            } else if (pChunk->getState() == CachingReaderChunkForOwner::READY) {
                // This will cause the chunk to be 'freshened' in the cache. The
                // chunk will be moved to the end of the LRU list.
                freshenChunk(pChunk);
            }
        }
    }

    // If there are chunks to be read, wake up.
    if (shouldWake) {
        m_worker.workReady();
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc