#include "engine/cachingreader/cachingreader.h"

#include <QFileInfo>
#include <QtDebug>

#include "control/controlobject.h"
#include "moc_cachingreader.cpp"
#include "track/track.h"
#include "util/assert.h"
#include "util/compatibility.h"
#include "util/counter.h"
#include "util/logger.h"
#include "util/math.h"
#include "util/sample.h"

namespace {

mixxx::Logger kLogger("CachingReader");

// This is the default hint frameCount that is used when Hint::kFrameCountForward
// or Hint::kFrameCountBackward is given as the hint's frame count. It matches
// 23 ms @ 44.1 kHz.
// TODO() Do we suffer cache misses if we use an audio buffer of above 23 ms?
const SINT kDefaultHintFrames = 1024;

// With CachingReaderChunk::kFrames = 8192 each chunk consumes
// 8192 frames * 2 channels/frame * 4 bytes/sample = 64 KiB.
//
// 80 chunks -> 5120 KiB = 5 MiB
//
// Each deck (including sample decks) uses its own CachingReader.
// Consequently the total memory required for all allocated chunks depends
// on the number of decks. The amount of memory reserved for a single
// CachingReader must be multiplied by the number of decks to calculate
// the total amount!
//
// NOTE(uklotzde, 2019-09-05): Reduce this number to just a few chunks
// (kNumberOfCachedChunksInMemory = 1, 2, 3, ...) for testing purposes
// to verify that the MRU/LRU cache works as expected. Even though
// massive drop outs are expected to occur Mixxx should run reliably!
const SINT kNumberOfCachedChunksInMemory = 80;

} // anonymous namespace

CachingReader::CachingReader(const QString& group,
        UserSettingsPointer config)
        : m_pConfig(config),
          // Limit the number of in-flight requests to the worker. This should
          // prevent overloading the worker when it is not able to fetch those
          // requests from the FIFO in time. Otherwise outdated requests pile up
          // in the FIFO and it would take a long time to process them, only to
          // discard results that have most likely already become obsolete.
          // TODO(XXX): Ideally the request FIFO would be implemented as a ring
          // buffer, where new requests replace old requests when full. Those
          // old requests need to be returned immediately to the CachingReader,
          // which must take ownership and free them!!!
          m_chunkReadRequestFIFO(kNumberOfCachedChunksInMemory / 4),
          // The capacity of the back channel must be equal to the number of
          // allocated chunks, because the worker uses writeBlocking(). Otherwise
          // the worker could get stuck in a hot loop!!!
          m_readerStatusUpdateFIFO(kNumberOfCachedChunksInMemory),
          m_state(STATE_IDLE),
          m_mruCachingReaderChunk(nullptr),
          m_lruCachingReaderChunk(nullptr),
          m_sampleBuffer(CachingReaderChunk::kSamples * kNumberOfCachedChunksInMemory),
          m_worker(group, &m_chunkReadRequestFIFO, &m_readerStatusUpdateFIFO) {
    m_allocatedCachingReaderChunks.reserve(kNumberOfCachedChunksInMemory);
    // Divide up the allocated raw memory buffer into
    // kNumberOfCachedChunksInMemory chunks. Initialize each chunk to hold
    // nothing and add it to the free list.
    for (SINT i = 0; i < kNumberOfCachedChunksInMemory; ++i) {
        CachingReaderChunkForOwner* c =
                new CachingReaderChunkForOwner(
                        mixxx::SampleBuffer::WritableSlice(
                                m_sampleBuffer,
                                CachingReaderChunk::kSamples * i,
                                CachingReaderChunk::kSamples));
        m_chunks.push_back(c);
        m_freeChunks.push_back(c);
    }

    // Forward signals from worker
    connect(&m_worker, &CachingReaderWorker::trackLoading,
            this, &CachingReader::trackLoading,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoaded,
            this, &CachingReader::trackLoaded,
            Qt::DirectConnection);
    connect(&m_worker, &CachingReaderWorker::trackLoadFailed,
            this, &CachingReader::trackLoadFailed,
            Qt::DirectConnection);

    m_worker.start(QThread::HighPriority);
}

CachingReader::~CachingReader() {
    m_worker.quitWait();
    qDeleteAll(m_chunks);
}

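// Unlinks the chunk from the MRU/LRU list, resets its contents, and
// returns it to the free list.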
void CachingReader::freeChunkFromList(CachingReaderChunkForOwner* pChunk) {
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);
    pChunk->free();
    m_freeChunks.push_back(pChunk);
}

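// Removes the chunk from the index of allocated chunks and returns it to
// the free list. Must not be called for chunks with a pending read request,
// i.e. chunks that are currently owned by the worker.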
void CachingReader::freeChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() != CachingReaderChunkForOwner::READ_PENDING);

    const int removed = m_allocatedCachingReaderChunks.remove(pChunk->getIndex());
    Q_UNUSED(removed); // only used in DEBUG_ASSERT
    // We'll tolerate not being in allocatedCachingReaderChunks,
    // because sometimes you free a chunk right after you allocated it.
    DEBUG_ASSERT(removed <= 1);

    freeChunkFromList(pChunk);
}

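// Frees all chunks that are not currently owned by the worker and clears
// the index of allocated chunks. Chunks with a pending read request are
// skipped; they are freed individually when their read results arrive.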
void CachingReader::freeAllChunks() {
    for (const auto& pChunk: qAsConst(m_chunks)) {
        // We will receive CHUNK_READ_INVALID for all pending chunk reads
        // which should free the chunks individually.
        if (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING) {
            continue;
        }

        if (pChunk->getState() != CachingReaderChunkForOwner::FREE) {
            freeChunkFromList(pChunk);
        }
    }
    DEBUG_ASSERT(!m_mruCachingReaderChunk);
    DEBUG_ASSERT(!m_lruCachingReaderChunk);

    m_allocatedCachingReaderChunks.clear();
}

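// Takes a chunk from the free list, initializes it for the given chunk
// index, and registers it in the index of allocated chunks. Returns
// nullptr if no free chunk is available.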
CachingReaderChunkForOwner* CachingReader::allocateChunk(SINT chunkIndex) {
    if (m_freeChunks.empty()) {
        return nullptr;
    }
    CachingReaderChunkForOwner* pChunk = m_freeChunks.front();
    m_freeChunks.pop_front();

    pChunk->init(chunkIndex);

    m_allocatedCachingReaderChunks.insert(chunkIndex, pChunk);

    return pChunk;
}

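// Like allocateChunk(), but evicts the least recently used chunk if the
// free list is empty. Returns nullptr if no chunk could be allocated.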
CachingReaderChunkForOwner* CachingReader::allocateChunkExpireLRU(SINT chunkIndex) {
    auto* pChunk = allocateChunk(chunkIndex);
    if (!pChunk) {
        if (m_lruCachingReaderChunk) {
            freeChunk(m_lruCachingReaderChunk);
            pChunk = allocateChunk(chunkIndex);
        } else {
            kLogger.warning() << "No cached LRU chunk available for freeing";
        }
    }
    if (kLogger.traceEnabled()) {
        kLogger.trace() << "allocateChunkExpireLRU" << chunkIndex << pChunk;
    }
    return pChunk;
}

CachingReaderChunkForOwner* CachingReader::lookupChunk(SINT chunkIndex) {
    // Defaults to nullptr if it's not in the hash.
    auto* pChunk = m_allocatedCachingReaderChunks.value(chunkIndex, nullptr);
    DEBUG_ASSERT(!pChunk || pChunk->getIndex() == chunkIndex);
    return pChunk;
}

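// Moves a READY chunk to the head of the MRU list so that it is evicted
// as late as possible.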
void CachingReader::freshenChunk(CachingReaderChunkForOwner* pChunk) {
    DEBUG_ASSERT(pChunk);
    DEBUG_ASSERT(pChunk->getState() == CachingReaderChunkForOwner::READY);
    if (kLogger.traceEnabled()) {
        kLogger.trace()
                << "freshenChunk()"
                << pChunk->getIndex()
                << pChunk;
    }

    // Remove the chunk from the MRU/LRU list
    pChunk->removeFromList(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk);

    // Reinsert as the new head of the MRU list
    pChunk->insertIntoListBefore(
            &m_mruCachingReaderChunk,
            &m_lruCachingReaderChunk,
            m_mruCachingReaderChunk);
}

CachingReaderChunkForOwner* CachingReader::lookupChunkAndFreshen(SINT chunkIndex) {
    auto* pChunk = lookupChunk(chunkIndex);
    if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
        freshenChunk(pChunk);
    }
    return pChunk;
}

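// Requests the worker to load a new track, or to unload the current track
// if pTrack is null.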
// Invoked from the UI thread!!
void CachingReader::newTrack(TrackPointer pTrack) {
    auto newState = pTrack ? STATE_TRACK_LOADING : STATE_TRACK_UNLOADING;
    auto oldState = m_state.fetchAndStoreAcquire(newState);

    // TODO():
    // BaseTrackPlayerImpl::slotLoadTrack() distributes the new track via
    // emit loadingTrack(pNewTrack, pOldTrack);
    // but the new track may change if we load another track while the previous one
    // is still loading. This leads to inconsistent states, for example a different
    // track in the Mixxx title and the deck label.
    if (oldState == STATE_TRACK_LOADING &&
            newState == STATE_TRACK_LOADING) {
        kLogger.warning()
                << "Loading a new track while loading a track may lead to inconsistent states";
    }
    m_worker.newTrack(std::move(pTrack));
}

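// Drains the status update FIFO from the worker thread: finished chunks
// are taken back into ownership and either freshened in the MRU/LRU list
// or freed, the readable frame index range is adjusted, and track
// load/unload state transitions are applied.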
void CachingReader::process() {
    ReaderStatusUpdate update;
    while (m_readerStatusUpdateFIFO.read(&update, 1) == 1) {
        auto* pChunk = update.takeFromWorker();
        if (pChunk) {
            // Result of a read request (with a chunk)
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) != STATE_IDLE);
            DEBUG_ASSERT(
                    update.status == CHUNK_READ_SUCCESS ||
                    update.status == CHUNK_READ_EOF ||
                    update.status == CHUNK_READ_INVALID ||
                    update.status == CHUNK_READ_DISCARDED);
            if (atomicLoadAcquire(m_state) == STATE_TRACK_LOADING) {
                // Discard all results from pending read requests for the
                // previous track before the next track has been loaded.
                freeChunk(pChunk);
                continue;
            }
            DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED);
            if (update.status == CHUNK_READ_SUCCESS) {
                // Insert or freshen the chunk in the MRU/LRU list after
                // obtaining ownership from the worker.
                freshenChunk(pChunk);
            } else {
                // Discard chunks that don't carry any data
                freeChunk(pChunk);
            }
            // Adjust the readable frame index range (if available)
            if (update.status != CHUNK_READ_DISCARDED) {
                m_readableFrameIndexRange = intersect(
                        m_readableFrameIndexRange,
                        update.readableFrameIndexRange());
            }
        } else {
            // State update (without a chunk)
            if (update.status == TRACK_LOADED) {
                // We have a new track ready to go.
                // Assert that either we were in STATE_TRACK_LOADING before and
                // all chunks in the m_readerStatusUpdateFIFO have been discarded,
                // or the cache has already been cleared.
                // In case of two consecutive load events we receive two
                // consecutive TRACK_LOADED updates without a chunk in between;
                // assert this here.
                DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING ||
                        (atomicLoadRelaxed(m_state) == STATE_TRACK_LOADED &&
                                !m_mruCachingReaderChunk && !m_lruCachingReaderChunk));
                // Now also purge the recently used chunk list from the old track.
                if (m_mruCachingReaderChunk || m_lruCachingReaderChunk) {
                    DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING);
                    freeAllChunks();
                }
                // Reset the readable frame index range
                m_readableFrameIndexRange = update.readableFrameIndexRange();
                m_state.storeRelease(STATE_TRACK_LOADED);
            } else {
                DEBUG_ASSERT(update.status == TRACK_UNLOADED);
                // This message could be processed later when a new
                // track is already loading! In this case the TRACK_LOADED will
                // be the very next status update.
                if (!m_state.testAndSetRelease(STATE_TRACK_UNLOADING, STATE_IDLE)) {
                    DEBUG_ASSERT(atomicLoadRelaxed(m_state) == STATE_TRACK_LOADING);
                }
            }
        }
    }
}

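// Reads numSamples samples starting at startSample from the cache into
// buffer. Unreadable regions (preroll or missing chunks) are filled with
// silence. Returns AVAILABLE, PARTIALLY_AVAILABLE, or UNAVAILABLE
// depending on how much of the requested range could be read.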
CachingReader::ReadResult CachingReader::read(SINT startSample, SINT numSamples, bool reverse, CSAMPLE* buffer) {
    // Check for bad inputs
    VERIFY_OR_DEBUG_ASSERT(
            // Refuse to read from an invalid position
            (startSample % CachingReaderChunk::kChannels == 0) &&
            // Refuse to read an invalid number of samples
            (numSamples % CachingReaderChunk::kChannels == 0) && (numSamples >= 0)) {
        kLogger.critical()
                << "Invalid arguments for read():"
                << "startSample =" << startSample
                << "numSamples =" << numSamples
                << "reverse =" << reverse;
        return ReadResult::UNAVAILABLE;
    }
    VERIFY_OR_DEBUG_ASSERT(buffer) {
        return ReadResult::UNAVAILABLE;
    }

    // If no track is loaded, don't do anything.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return ReadResult::UNAVAILABLE;
    }

    // If asked to read 0 samples, don't do anything. (This is a perfectly
    // reasonable request that happens sometimes.)
    if (numSamples == 0) {
        return ReadResult::AVAILABLE; // nothing to do
    }

    // The samples are always read in forward direction.
    // If reverse = true, the frames are copied in reverse order into the
    // destination buffer.
    SINT sample = startSample;
    if (reverse) {
        // Start with the last sample in buffer
        sample -= numSamples;
    }

    SINT samplesRemaining = numSamples;

    // Process new messages from the reader thread before looking up
    // the first chunk, in order to update m_readableFrameIndexRange
    process();

    auto remainingFrameIndexRange =
            mixxx::IndexRange::forward(
                    CachingReaderChunk::samples2frames(sample),
                    CachingReaderChunk::samples2frames(numSamples));
    DEBUG_ASSERT(!remainingFrameIndexRange.empty());

    auto result = ReadResult::AVAILABLE;
    if (!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty()) {
        // Fill the buffer up to the first readable sample with
        // silence. This may happen when the engine is in preroll,
        // i.e. if the frame index points to a region before the first
        // track sample.
        if (remainingFrameIndexRange.start() < m_readableFrameIndexRange.start()) {
            const auto prerollFrameIndexRange =
                    mixxx::IndexRange::between(
                            remainingFrameIndexRange.start(),
                            m_readableFrameIndexRange.start());
            DEBUG_ASSERT(prerollFrameIndexRange.length() <= remainingFrameIndexRange.length());
            if (kLogger.debugEnabled()) {
                kLogger.debug()
                        << "Preroll: Filling the first"
                        << prerollFrameIndexRange.length()
                        << "sample frames in"
                        << remainingFrameIndexRange
                        << "with silence. Audio signal starts at"
                        << m_readableFrameIndexRange.start();
            }
            const SINT prerollFrames = prerollFrameIndexRange.length();
            const SINT prerollSamples = CachingReaderChunk::frames2samples(prerollFrames);
            DEBUG_ASSERT(samplesRemaining >= prerollSamples);
            if (reverse) {
                SampleUtil::clear(&buffer[samplesRemaining - prerollSamples], prerollSamples);
            } else {
                SampleUtil::clear(buffer, prerollSamples);
                buffer += prerollSamples;
            }
            samplesRemaining -= prerollSamples;
            remainingFrameIndexRange.shrinkFront(prerollFrames);
            result = ReadResult::PARTIALLY_AVAILABLE;
        }

        // Read the actual samples from the audio source into the
        // buffer. The buffer will be filled with silence for every
        // unreadable sample or samples outside of the track region
        // later at the end of this function.
        if (!remainingFrameIndexRange.empty()) {
            // The intersection between the readable samples from the track
            // and the requested samples is not empty, so start reading.
            DEBUG_ASSERT(!intersect(remainingFrameIndexRange, m_readableFrameIndexRange).empty());
            DEBUG_ASSERT(remainingFrameIndexRange.start() >= m_readableFrameIndexRange.start());

            const SINT firstChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.start());
            SINT lastChunkIndex =
                    CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
            for (SINT chunkIndex = firstChunkIndex;
                    chunkIndex <= lastChunkIndex;
                    ++chunkIndex) {

                // Process new messages from the reader thread before looking up
                // the next chunk
                process();

                // m_readableFrameIndexRange might change with every read operation!
                // On a cache miss audio data will be read from the audio source in
                // process() and the readable frame index range might get adjusted
                // if decoding errors occur.
                remainingFrameIndexRange =
                        intersect(
                                remainingFrameIndexRange,
                                m_readableFrameIndexRange);

                if (remainingFrameIndexRange.empty()) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Failed to read more sample data";
                    break;
                }
                lastChunkIndex =
                        CachingReaderChunk::indexForFrame(remainingFrameIndexRange.end() - 1);
                if (lastChunkIndex < chunkIndex) {
                    // No more readable data available. Exit the loop and
                    // fill the remaining buffer with silence.
                    kLogger.warning() << "Abort reading of sample data";
                    break;
                }

                mixxx::IndexRange bufferedFrameIndexRange;
                const CachingReaderChunkForOwner* const pChunk = lookupChunkAndFreshen(chunkIndex);
                if (pChunk && (pChunk->getState() == CachingReaderChunkForOwner::READY)) {
                    if (reverse) {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFramesReverse(
                                        &buffer[samplesRemaining],
                                        remainingFrameIndexRange);
                    } else {
                        bufferedFrameIndexRange =
                                pChunk->readBufferedSampleFrames(
                                        buffer,
                                        remainingFrameIndexRange);
                    }
                } else {
                    // This will happen regularly when jumping to a new position
                    // within the file and decoding of the audio data is still
                    // pending.
                    DEBUG_ASSERT(!pChunk ||
                            (pChunk->getState() == CachingReaderChunkForOwner::READ_PENDING));
                    Counter("CachingReader::read(): Failed to read chunk on cache miss")++;
                    if (kLogger.traceEnabled()) {
                        kLogger.trace()
                                << "Cache miss for chunk with index"
                                << chunkIndex
                                << "- abort reading";
                    }
                    // Abort reading (see below)
                    DEBUG_ASSERT(bufferedFrameIndexRange.empty());
                }
                if (bufferedFrameIndexRange.empty()) {
                    if (samplesRemaining == numSamples) {
                        DEBUG_ASSERT(chunkIndex == firstChunkIndex);
                        // We have not read a single frame due to a cache miss of
                        // the first required chunk. Inform the calling code that
                        // no data has been written into the buffer so it can
                        // handle this situation appropriately.
                        return ReadResult::UNAVAILABLE;
                    }
                    // No more readable data available. Exit the loop and
                    // finally fill the remaining buffer with silence.
                    break;
                }
                DEBUG_ASSERT(bufferedFrameIndexRange.isSubrangeOf(remainingFrameIndexRange));
                if (remainingFrameIndexRange.start() < bufferedFrameIndexRange.start()) {
                    const auto paddingFrameIndexRange =
                            mixxx::IndexRange::between(
                                    remainingFrameIndexRange.start(),
                                    bufferedFrameIndexRange.start());
                    kLogger.warning()
                            << "Inserting"
                            << paddingFrameIndexRange.length()
                            << "frames of silence for unreadable audio data";
                    SINT paddingSamples = CachingReaderChunk::frames2samples(paddingFrameIndexRange.length());
                    DEBUG_ASSERT(samplesRemaining >= paddingSamples);
                    if (reverse) {
                        SampleUtil::clear(&buffer[samplesRemaining - paddingSamples], paddingSamples);
                    } else {
                        SampleUtil::clear(buffer, paddingSamples);
                        buffer += paddingSamples;
                    }
                    samplesRemaining -= paddingSamples;
                    remainingFrameIndexRange.shrinkFront(paddingFrameIndexRange.length());
                    result = ReadResult::PARTIALLY_AVAILABLE;
                }
                const SINT chunkSamples =
                        CachingReaderChunk::frames2samples(bufferedFrameIndexRange.length());
                DEBUG_ASSERT(chunkSamples > 0);
                if (!reverse) {
                    buffer += chunkSamples;
                }
                DEBUG_ASSERT(samplesRemaining >= chunkSamples);
                samplesRemaining -= chunkSamples;
                remainingFrameIndexRange.shrinkFront(bufferedFrameIndexRange.length());
            }
        }
    }
    // Finally fill the remaining buffer with silence
    DEBUG_ASSERT(samplesRemaining >= 0);
    if (samplesRemaining > 0) {
        SampleUtil::clear(buffer, samplesRemaining);
        result = ReadResult::PARTIALLY_AVAILABLE;
    }
    return result;
}

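// Pre-fetch hint handling: allocates chunks and submits read requests for
// all chunks covered by the given hints, and wakes the worker if any of
// them were not yet cached.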
void CachingReader::hintAndMaybeWake(const HintVector& hintList) {
    // If no file is loaded, skip.
    if (atomicLoadRelaxed(m_state) != STATE_TRACK_LOADED) {
        return;
    }

    // For every chunk that the hints indicated, check if it is in the cache. If
    // any are not, then wake.
    bool shouldWake = false;

    for (const auto& hint: hintList) {
        SINT hintFrame = hint.frame;
        SINT hintFrameCount = hint.frameCount;

        // Handle some special length values
        if (hintFrameCount == Hint::kFrameCountForward) {
            hintFrameCount = kDefaultHintFrames;
        } else if (hintFrameCount == Hint::kFrameCountBackward) {
            hintFrame -= kDefaultHintFrames;
            hintFrameCount = kDefaultHintFrames;
            if (hintFrame < 0) {
                hintFrameCount += hintFrame;
                hintFrame = 0;
            }
        }

        VERIFY_OR_DEBUG_ASSERT(hintFrameCount >= 0) {
            kLogger.warning() << "CachingReader: Ignoring negative hint length.";
            continue;
        }

        const auto readableFrameIndexRange = intersect(
                m_readableFrameIndexRange,
                mixxx::IndexRange::forward(hintFrame, hintFrameCount));
        if (readableFrameIndexRange.empty()) {
            continue;
        }

        const int firstChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.start());
        const int lastChunkIndex = CachingReaderChunk::indexForFrame(readableFrameIndexRange.end() - 1);
        for (int chunkIndex = firstChunkIndex; chunkIndex <= lastChunkIndex; ++chunkIndex) {
            CachingReaderChunkForOwner* pChunk = lookupChunk(chunkIndex);
            if (!pChunk) {
                shouldWake = true;
                pChunk = allocateChunkExpireLRU(chunkIndex);
                if (!pChunk) {
                    kLogger.warning()
                            << "Failed to allocate chunk"
                            << chunkIndex
                            << "for read request";
                    continue;
                }
                // Do not insert the allocated chunk into the MRU/LRU list,
                // because it will be handed over to the worker immediately
                CachingReaderChunkReadRequest request;
                request.giveToWorker(pChunk);
                if (kLogger.traceEnabled()) {
                    kLogger.trace()
                            << "Requesting read of chunk"
                            << request.chunk;
                }
                if (m_chunkReadRequestFIFO.write(&request, 1) != 1) {
                    kLogger.warning()
                            << "Failed to submit read request for chunk"
                            << chunkIndex;
                    // Revoke the chunk from the worker and free it
                    pChunk->takeFromWorker();
                    freeChunk(pChunk);
                }
            } else if (pChunk->getState() == CachingReaderChunkForOwner::READY) {
                // This will cause the chunk to be 'freshened' in the cache, i.e.
                // the chunk will be moved to the head of the MRU list.
                freshenChunk(pChunk);
            }
        }
    }

    // If there are chunks to be read, wake up.
    if (shouldWake) {
        m_worker.workReady();
    }
}