/* ScummVM - Graphic Adventure Engine
 *
 * ScummVM is the legal property of its developers, whose names
 * are too numerous to list here. Please refer to the COPYRIGHT
 * file distributed with this source distribution.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

#include "common/debug.h"
#include "common/util.h"
#include "common/memstream.h"
#include "common/stream.h"
#include "common/textconsole.h"

#include "audio/decoders/codec.h"
#include "audio/decoders/quicktime.h"
#include "audio/decoders/quicktime_intern.h"

// Codecs
#include "audio/decoders/aac.h"
#include "audio/decoders/adpcm.h"
#include "audio/decoders/qdm2.h"
#include "audio/decoders/raw.h"

namespace Audio {

/**
 * An AudioStream that just returns silent samples and runs infinitely.
 * It is used to fill in the "empty edits" in the track queue, which are
 * simply periods where no sound should be playing.
 */
class SilentAudioStream : public AudioStream {
public:
	SilentAudioStream(int rate, bool stereo) : _rate(rate), _isStereo(stereo) {}

	int readBuffer(int16 *buffer, const int numSamples) {
		memset(buffer, 0, numSamples * 2);
		return numSamples;
	}

	bool endOfData() const { return false; } // it never ends!
	bool isStereo() const { return _isStereo; }
	int getRate() const { return _rate; }

private:
	int _rate;
	bool _isStereo;
};

/**
 * An AudioStream wrapper that forces audio to be played in mono.
 * It currently just ignores the right channel if stereo.
 */
class ForcedMonoAudioStream : public AudioStream {
public:
	ForcedMonoAudioStream(AudioStream *parentStream, DisposeAfterUse::Flag disposeAfterUse = DisposeAfterUse::YES) :
			_parentStream(parentStream), _disposeAfterUse(disposeAfterUse) {}

	~ForcedMonoAudioStream() {
		if (_disposeAfterUse == DisposeAfterUse::YES)
			delete _parentStream;
	}

	int readBuffer(int16 *buffer, const int numSamples) {
		if (!_parentStream->isStereo())
			return _parentStream->readBuffer(buffer, numSamples);

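		// Otherwise, read one stereo frame at a time from the parent stream
		// and keep only the left (first) channel sample.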
		int16 temp[2];
		int samples = 0;

		while (samples < numSamples && !endOfData()) {
			_parentStream->readBuffer(temp, 2);
			*buffer++ = temp[0];
			samples++;
		}

		return samples;
	}

	bool endOfData() const { return _parentStream->endOfData(); }
	bool isStereo() const { return false; }
	int getRate() const { return _parentStream->getRate(); }

private:
	AudioStream *_parentStream;
	DisposeAfterUse::Flag _disposeAfterUse;
};

QuickTimeAudioDecoder::QuickTimeAudioDecoder() : Common::QuickTimeParser() {
}

QuickTimeAudioDecoder::~QuickTimeAudioDecoder() {
	for (uint32 i = 0; i < _audioTracks.size(); i++)
		delete _audioTracks[i];
}

bool QuickTimeAudioDecoder::loadAudioFile(const Common::String &filename) {
	if (!Common::QuickTimeParser::parseFile(filename))
		return false;

	init();
	return true;
}

bool QuickTimeAudioDecoder::loadAudioStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
	if (!Common::QuickTimeParser::parseStream(stream, disposeFileHandle))
		return false;

	init();
	return true;
}

void QuickTimeAudioDecoder::init() {
	Common::QuickTimeParser::init();

	// Initialize all the audio streams
	// But ignore any streams we don't support
	for (uint32 i = 0; i < _tracks.size(); i++)
		if (_tracks[i]->codecType == CODEC_TYPE_AUDIO && ((AudioSampleDesc *)_tracks[i]->sampleDescs[0])->isAudioCodecSupported())
			_audioTracks.push_back(new QuickTimeAudioTrack(this, _tracks[i]));
}

Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(Track *track, uint32 format, uint32 descSize) {
	if (track->codecType == CODEC_TYPE_AUDIO) {
		debug(0, "Audio Codec FourCC: \'%s\'", tag2str(format));

		AudioSampleDesc *entry = new AudioSampleDesc(track, format);

		uint16 stsdVersion = _fd->readUint16BE();
		_fd->readUint16BE(); // revision level
		_fd->readUint32BE(); // vendor

		entry->_channels = _fd->readUint16BE();      // channel count
		entry->_bitsPerSample = _fd->readUint16BE(); // sample size

		_fd->readUint16BE(); // compression id = 0
		_fd->readUint16BE(); // packet size = 0

		entry->_sampleRate = (_fd->readUint32BE() >> 16);

		debug(0, "stsd version = %d", stsdVersion);
		if (stsdVersion == 0) {
			// Not used, except in special cases. See below.
			entry->_samplesPerFrame = entry->_bytesPerFrame = 0;
		} else if (stsdVersion == 1) {
			// Read QT version 1 fields. In version 0 these don't exist.
			entry->_samplesPerFrame = _fd->readUint32BE();
			debug(0, "stsd samples_per_frame = %d", entry->_samplesPerFrame);
			_fd->readUint32BE(); // bytes per packet
			entry->_bytesPerFrame = _fd->readUint32BE();
			debug(0, "stsd bytes_per_frame = %d", entry->_bytesPerFrame);
			_fd->readUint32BE(); // bytes per sample
		} else {
			warning("Unsupported QuickTime STSD audio version %d", stsdVersion);
			delete entry;
			return 0;
		}

		// Version 0 files don't have some of these fields set, so fill them in here
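		// (IMA4 packs 64 samples into each 34-byte block per channel: a 2-byte
		// predictor/step-index preamble followed by 32 bytes of 4-bit codes.)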
		if (format == MKTAG('i', 'm', 'a', '4')) {
			entry->_samplesPerFrame = 64;
			entry->_bytesPerFrame = 34 * entry->_channels;
		}

		if (entry->_sampleRate == 0 && track->timeScale > 1)
			entry->_sampleRate = track->timeScale;

		return entry;
	}

	return 0;
}

QuickTimeAudioDecoder::QuickTimeAudioTrack::QuickTimeAudioTrack(QuickTimeAudioDecoder *decoder, Common::QuickTimeParser::Track *parentTrack) {
	_decoder = decoder;
	_parentTrack = parentTrack;
	_queue = createStream();
	_samplesQueued = 0;

	AudioSampleDesc *entry = (AudioSampleDesc *)_parentTrack->sampleDescs[0];

	if (entry->getCodecTag() == MKTAG('r', 'a', 'w', ' ') || entry->getCodecTag() == MKTAG('t', 'w', 'o', 's'))
		_parentTrack->sampleSize = (entry->_bitsPerSample / 8) * entry->_channels;

	// Initialize our edit parser too
	_curEdit = 0;
	enterNewEdit(Timestamp());

	// If the edit doesn't start on a nice boundary, set us up to skip some samples
	Timestamp editStartTime(0, _parentTrack->editList[_curEdit].mediaTime, _parentTrack->timeScale);
	Timestamp trackPosition = getCurrentTrackTime();
	if (_parentTrack->editList[_curEdit].mediaTime != -1 && trackPosition != editStartTime)
		_skipSamples = editStartTime.convertToFramerate(getRate()) - trackPosition;
}

QuickTimeAudioDecoder::QuickTimeAudioTrack::~QuickTimeAudioTrack() {
	delete _queue;
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::queueAudio(const Timestamp &length) {
	if (allDataRead() || (length.totalNumberOfFrames() != 0 && Timestamp(0, _samplesQueued, getRate()) >= length))
		return;

	do {
		Timestamp nextEditTime(0, _parentTrack->editList[_curEdit].timeOffset + _parentTrack->editList[_curEdit].trackDuration, _decoder->_timeScale);

		if (_parentTrack->editList[_curEdit].mediaTime == -1) {
			// We've got an empty edit, so fill it with silence
			Timestamp editLength(0, _parentTrack->editList[_curEdit].trackDuration, _decoder->_timeScale);

			// If we seek into the middle of an empty edit, we need to adjust
			if (_skipSamples != Timestamp()) {
				editLength = editLength - _skipSamples;
				_skipSamples = Timestamp();
			}

			queueStream(makeLimitingAudioStream(new SilentAudioStream(getRate(), isStereo()), editLength), editLength);
			_curEdit++;
			enterNewEdit(nextEditTime);
		} else {
			// Normal audio
			AudioStream *stream = readAudioChunk(_curChunk);
			Timestamp chunkLength = getChunkLength(_curChunk, _skipAACPrimer);
			_skipAACPrimer = false;
			_curChunk++;

			// If we have any samples that we need to skip (i.e. we seeked into
			// the middle of a chunk), skip them here.
			if (_skipSamples != Timestamp()) {
				if (_skipSamples > chunkLength) {
					// If the amount we need to skip is greater than the size
					// of the chunk, just skip it altogether.
					_curMediaPos = _curMediaPos + chunkLength;
					_skipSamples = _skipSamples - chunkLength;
					delete stream;
					continue;
				}

				skipSamples(_skipSamples, stream);
				_curMediaPos = _curMediaPos + _skipSamples;
				chunkLength = chunkLength - _skipSamples;
				_skipSamples = Timestamp();
			}

			// Calculate our overall position within the media
			Timestamp trackPosition = getCurrentTrackTime() + chunkLength;

			// If we have reached the end of this edit (or have no more media to read),
			// we move on to the next edit
			if (trackPosition >= nextEditTime || _curChunk >= _parentTrack->chunkCount) {
				chunkLength = nextEditTime.convertToFramerate(getRate()) - getCurrentTrackTime();
				stream = makeLimitingAudioStream(stream, chunkLength);
				_curEdit++;
				enterNewEdit(nextEditTime);

				// Next time around, we'll know how much to skip
				trackPosition = getCurrentTrackTime();
				if (!allDataRead() && _parentTrack->editList[_curEdit].mediaTime != -1 && nextEditTime != trackPosition)
					_skipSamples = nextEditTime.convertToFramerate(getRate()) - trackPosition;
			} else {
				_curMediaPos = _curMediaPos + chunkLength.convertToFramerate(_curMediaPos.framerate());
			}

			queueStream(stream, chunkLength);
		}
	} while (!allDataRead() && Timestamp(0, _samplesQueued, getRate()) < length);
}

Timestamp QuickTimeAudioDecoder::QuickTimeAudioTrack::getCurrentTrackTime() const {
	if (allDataRead())
		return getLength().convertToFramerate(getRate());

	return Timestamp(0, _parentTrack->editList[_curEdit].timeOffset, _decoder->_timeScale).convertToFramerate(getRate())
			+ _curMediaPos - Timestamp(0, _parentTrack->editList[_curEdit].mediaTime, _parentTrack->timeScale).convertToFramerate(getRate());
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::queueRemainingAudio() {
	queueAudio(getLength());
}

int QuickTimeAudioDecoder::QuickTimeAudioTrack::readBuffer(int16 *buffer, const int numSamples) {
	int samplesRead = _queue->readBuffer(buffer, numSamples);
	_samplesQueued -= samplesRead / (isStereo() ? 2 : 1);
	return samplesRead;
}

bool QuickTimeAudioDecoder::QuickTimeAudioTrack::allDataRead() const {
	return _curEdit == _parentTrack->editCount;
}

bool QuickTimeAudioDecoder::QuickTimeAudioTrack::endOfData() const {
	return allDataRead() && _queue->endOfData();
}

bool QuickTimeAudioDecoder::QuickTimeAudioTrack::seek(const Timestamp &where) {
	// Recreate the queue
	delete _queue;
	_queue = createStream();
	_samplesQueued = 0;

	if (where >= getLength()) {
		// We're done
		_curEdit = _parentTrack->editCount;
		return true;
	}

	// Find where we are in the stream
	findEdit(where);

	// Now queue up some audio and skip whatever we need to skip
	Timestamp samplesToSkip = where.convertToFramerate(getRate()) - getCurrentTrackTime();
	queueAudio();
	if (_parentTrack->editList[_curEdit].mediaTime != -1)
		skipSamples(samplesToSkip, _queue);

	return true;
}

Timestamp QuickTimeAudioDecoder::QuickTimeAudioTrack::getLength() const {
	return Timestamp(0, _parentTrack->duration, _decoder->_timeScale);
}

QueuingAudioStream *QuickTimeAudioDecoder::QuickTimeAudioTrack::createStream() const {
	AudioSampleDesc *entry = (AudioSampleDesc *)_parentTrack->sampleDescs[0];
	return makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
}

bool QuickTimeAudioDecoder::QuickTimeAudioTrack::isOldDemuxing() const {
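	// "Old-style" demuxing applies when the time-to-sample table has a single
	// entry with a duration of 1, i.e. media time is counted directly in audio
	// frames. Compressed formats such as AAC instead store one entry per packet
	// with a multi-frame duration and are handled by the new-style path.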
	return _parentTrack->timeToSampleCount == 1 && _parentTrack->timeToSample[0].duration == 1;
}

AudioStream *QuickTimeAudioDecoder::QuickTimeAudioTrack::readAudioChunk(uint chunk) {
	AudioSampleDesc *entry = (AudioSampleDesc *)_parentTrack->sampleDescs[0];
	Common::MemoryWriteStreamDynamic *wStream = new Common::MemoryWriteStreamDynamic();

	_decoder->_fd->seek(_parentTrack->chunkOffsets[chunk]);

	// First, we have to get the sample count
	uint32 sampleCount = getAudioChunkSampleCount(chunk);
	assert(sampleCount != 0);

	if (isOldDemuxing()) {
		// Old-style audio demuxing

		// Then calculate the right sizes
		while (sampleCount > 0) {
			uint32 samples = 0, size = 0;

			if (entry->_samplesPerFrame >= 160) {
				samples = entry->_samplesPerFrame;
				size = entry->_bytesPerFrame;
			} else if (entry->_samplesPerFrame > 1) {
				samples = MIN<uint32>((1024 / entry->_samplesPerFrame) * entry->_samplesPerFrame, sampleCount);
				size = (samples / entry->_samplesPerFrame) * entry->_bytesPerFrame;
			} else {
				samples = MIN<uint32>(1024, sampleCount);
				size = samples * _parentTrack->sampleSize;
			}

			// Now read in the data for this frame and write it out
			byte *data = (byte *)malloc(size);
			_decoder->_fd->read(data, size);
			wStream->write(data, size);
			free(data);
			sampleCount -= samples;
		}
	} else {
		// New-style audio demuxing

		// Find our starting sample
		uint32 startSample = 0;
		for (uint32 i = 0; i < chunk; i++)
			startSample += getAudioChunkSampleCount(i);

		for (uint32 i = 0; i < sampleCount; i++) {
			uint32 size = (_parentTrack->sampleSize != 0) ? _parentTrack->sampleSize : _parentTrack->sampleSizes[i + startSample];

			// Now read in the data for this sample and write it out
			byte *data = (byte *)malloc(size);
			_decoder->_fd->read(data, size);
			wStream->write(data, size);
			free(data);
		}
	}

	AudioStream *audioStream = entry->createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES));
	delete wStream;

	return audioStream;
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::skipSamples(const Timestamp &length, AudioStream *stream) {
	int32 sampleCount = length.convertToFramerate(getRate()).totalNumberOfFrames();

	if (sampleCount <= 0)
		return;

	if (isStereo())
		sampleCount *= 2;

	int16 *tempBuffer = new int16[sampleCount];
	uint32 result = stream->readBuffer(tempBuffer, sampleCount);
	delete[] tempBuffer;

	// If this is the queue, make sure we subtract this number from the
	// amount queued
	if (stream == _queue)
		_samplesQueued -= result / (isStereo() ? 2 : 1);
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::findEdit(const Timestamp &position) {
	// Go through the edits looking for the one that contains the position. As long
	// as the position is >= the edit's start time, it is considered to be in that
	// edit. seek() already figured out if we reached the last edit, so we don't need
	// to handle that case here.
	for (_curEdit = 0; _curEdit < _parentTrack->editCount - 1; _curEdit++) {
		Timestamp nextEditTime(0, _parentTrack->editList[_curEdit + 1].timeOffset, _decoder->_timeScale);
		if (position < nextEditTime)
			break;
	}

	enterNewEdit(position);
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::enterNewEdit(const Timestamp &position) {
	_skipSamples = Timestamp(); // make sure our skip variable doesn't remain around

	// If we're at the end of the edit list, there's nothing else for us to do here
	if (allDataRead())
		return;

	// For an empty edit, we may need to adjust the start time
	if (_parentTrack->editList[_curEdit].mediaTime == -1) {
		// Just invalidate the current media position (and make sure the scale
		// is in terms of our rate so it simplifies things later)
		_curMediaPos = Timestamp(0, 0, getRate());

		// Also handle shortening of the empty edit if needed
		if (position != Timestamp())
			_skipSamples = position.convertToFramerate(_decoder->_timeScale) - Timestamp(0, _parentTrack->editList[_curEdit].timeOffset, _decoder->_timeScale);
		return;
	}

	// I really hope I never need to implement this :P
	// But, I'll throw in this error just to make sure I catch anything with this...
	if (_parentTrack->editList[_curEdit].mediaRate != 1)
		error("Unhandled QuickTime audio rate change");

	// Reinitialize the codec
	((AudioSampleDesc *)_parentTrack->sampleDescs[0])->initCodec();
	_skipAACPrimer = true;

	// First, we need to track down which audio sample we need. Convert the
	// edit-relative position into media time: the edit's media start time plus
	// our offset into the edit (position minus the edit's start time).
	Timestamp curAudioTime = Timestamp(0, _parentTrack->editList[_curEdit].mediaTime, _parentTrack->timeScale)
		+ position.convertToFramerate(_parentTrack->timeScale)
		- Timestamp(0, _parentTrack->editList[_curEdit].timeOffset, _decoder->_timeScale).convertToFramerate(_parentTrack->timeScale);

	uint32 sample = curAudioTime.totalNumberOfFrames();
	uint32 seekSample = sample;

	if (!isOldDemuxing()) {
		// For MPEG-4 style demuxing, we need to track down the sample based on the time.
		// The old style demuxing doesn't require this because each "sample"'s duration
		// is just 1.
		uint32 curSample = 0;
		seekSample = 0;

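		// For example, an AAC track typically has a single time-to-sample entry
		// such as { count = numPackets, duration = 1024 }, so a media time of
		// t samples maps to packet t / 1024.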
		for (int32 i = 0; i < _parentTrack->timeToSampleCount; i++) {
			uint32 sampleCount = _parentTrack->timeToSample[i].count * _parentTrack->timeToSample[i].duration;

			if (sample < curSample + sampleCount) {
				seekSample += (sample - curSample) / _parentTrack->timeToSample[i].duration;
				break;
			}

			seekSample += _parentTrack->timeToSample[i].count;
			curSample += sampleCount;
		}
	}

	// Now to track down what chunk it's in
	uint32 totalSamples = 0;
	_curChunk = 0;
	for (uint32 i = 0; i < _parentTrack->chunkCount; i++, _curChunk++) {
		uint32 chunkSampleCount = getAudioChunkSampleCount(i);

		if (seekSample < totalSamples + chunkSampleCount)
			break;

		totalSamples += chunkSampleCount;
	}

	// Now we get to have fun and convert *back* to an actual time
	// We don't want the sample count to be modified at this point, though
	if (!isOldDemuxing())
		totalSamples = getAACSampleTime(totalSamples);

	_curMediaPos = Timestamp(0, totalSamples, getRate());
}

void QuickTimeAudioDecoder::QuickTimeAudioTrack::queueStream(AudioStream *stream, const Timestamp &length) {
	// If the samples are stereo and the container is mono, force the samples
	// to be mono.
	if (stream->isStereo() && !isStereo())
		_queue->queueAudioStream(new ForcedMonoAudioStream(stream, DisposeAfterUse::YES), DisposeAfterUse::YES);
	else
		_queue->queueAudioStream(stream, DisposeAfterUse::YES);

	_samplesQueued += length.convertToFramerate(getRate()).totalNumberOfFrames();
}

uint32 QuickTimeAudioDecoder::QuickTimeAudioTrack::getAudioChunkSampleCount(uint chunk) const {
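	// The sample-to-chunk (stsc) table stores runs: each entry gives the first
	// chunk index at which a new samples-per-chunk value takes effect. Walk the
	// table and keep the count from the last entry that starts at or before
	// this chunk.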
	uint32 sampleCount = 0;

	for (uint32 i = 0; i < _parentTrack->sampleToChunkCount; i++)
		if (chunk >= _parentTrack->sampleToChunk[i].first)
			sampleCount = _parentTrack->sampleToChunk[i].count;

	return sampleCount;
}

Timestamp QuickTimeAudioDecoder::QuickTimeAudioTrack::getChunkLength(uint chunk, bool skipAACPrimer) const {
	uint32 chunkSampleCount = getAudioChunkSampleCount(chunk);

	if (isOldDemuxing())
		return Timestamp(0, chunkSampleCount, getRate());

	// AAC needs some extra handling, of course
	return Timestamp(0, getAACSampleTime(chunkSampleCount, skipAACPrimer), getRate());
}

uint32 QuickTimeAudioDecoder::QuickTimeAudioTrack::getAACSampleTime(uint32 totalSampleCount, bool skipAACPrimer) const {
	uint32 curSample = 0;
	uint32 time = 0;

	for (int32 i = 0; i < _parentTrack->timeToSampleCount; i++) {
		uint32 sampleCount = _parentTrack->timeToSample[i].count;

		if (totalSampleCount < curSample + sampleCount) {
			time += (totalSampleCount - curSample) * _parentTrack->timeToSample[i].duration;
			break;
		}

		time += _parentTrack->timeToSample[i].count * _parentTrack->timeToSample[i].duration;
		curSample += sampleCount;
	}

	// The first chunk of AAC contains "duration" samples that are used as a primer.
	// We need to subtract that number from the duration for the first chunk. See:
	// http://developer.apple.com/library/mac/#documentation/QuickTime/QTFF/QTFFAppenG/QTFFAppenG.html#//apple_ref/doc/uid/TP40000939-CH2-SW1
	// The skipping of both the primer and the remainder is handled by the AAC code,
	// whereas the timing of the remainder is already handled by this time-to-sample
	// code.
	// We have to do this after each time we reinitialize the codec.
	if (skipAACPrimer) {
		assert(_parentTrack->timeToSampleCount > 0);
		time -= _parentTrack->timeToSample[0].duration;
	}

	return time;
}

QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
	_channels = 0;
	_sampleRate = 0;
	_samplesPerFrame = 0;
	_bytesPerFrame = 0;
	_bitsPerSample = 0;
	_codec = 0;
}

QuickTimeAudioDecoder::AudioSampleDesc::~AudioSampleDesc() {
	delete _codec;
}

bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
	// Check if the codec is a supported codec
	if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ') || _codecTag == MKTAG('i', 'm', 'a', '4'))
		return true;

#ifdef AUDIO_QDM2_H
	if (_codecTag == MKTAG('Q', 'D', 'M', '2'))
		return true;
#endif

	if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
		Common::String audioType;
		switch (_objectTypeMP4) {
		case 0x40: // AAC
#ifdef USE_FAAD
			return true;
#else
			audioType = "AAC";
			break;
#endif
		default:
			audioType = "Unknown";
			break;
		}
		warning("No MPEG-4 audio (%s) support", audioType.c_str());
	} else {
		warning("Audio Codec Not Supported: \'%s\'", tag2str(_codecTag));
	}

	return false;
}

AudioStream *QuickTimeAudioDecoder::AudioSampleDesc::createAudioStream(Common::SeekableReadStream *stream) const {
	if (!stream)
		return 0;

	if (_codec) {
		// If we've loaded a codec, make sure we use it first
		AudioStream *audioStream = _codec->decodeFrame(*stream);
		delete stream;
		return audioStream;
	} else if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ')) {
		// Fortunately, most of the audio used in Myst videos is raw...
		uint16 flags = 0;
		if (_codecTag == MKTAG('r', 'a', 'w', ' '))
			flags |= FLAG_UNSIGNED;
		if (_channels == 2)
			flags |= FLAG_STEREO;
		if (_bitsPerSample == 16)
			flags |= FLAG_16BITS;
		uint32 dataSize = stream->size();
		byte *data = (byte *)malloc(dataSize);
		stream->read(data, dataSize);
		delete stream;
		return makeRawStream(data, dataSize, _sampleRate, flags);
	} else if (_codecTag == MKTAG('i', 'm', 'a', '4')) {
		// Riven uses this codec (as do some Myst ME videos)
		return makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), kADPCMApple, _sampleRate, _channels, 34);
	}

	error("Unsupported audio codec");
	return NULL;
}

void QuickTimeAudioDecoder::AudioSampleDesc::initCodec() {
	delete _codec; _codec = 0;

	switch (_codecTag) {
	case MKTAG('Q', 'D', 'M', '2'):
#ifdef AUDIO_QDM2_H
		_codec = makeQDM2Decoder(_extraData);
#endif
		break;
	case MKTAG('m', 'p', '4', 'a'):
#ifdef USE_FAAD
		if (_objectTypeMP4 == 0x40)
			_codec = makeAACDecoder(_extraData);
#endif
		break;
	default:
		break;
	}
}

/**
 * A wrapper around QuickTimeAudioDecoder that implements the SeekableAudioStream API
 */
class QuickTimeAudioStream : public SeekableAudioStream, public QuickTimeAudioDecoder {
public:
	QuickTimeAudioStream() {}
	~QuickTimeAudioStream() {}

	bool openFromFile(const Common::String &filename) {
		return QuickTimeAudioDecoder::loadAudioFile(filename) && !_audioTracks.empty();
	}

	bool openFromStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
		return QuickTimeAudioDecoder::loadAudioStream(stream, disposeFileHandle) && !_audioTracks.empty();
	}

	// AudioStream API
	int readBuffer(int16 *buffer, const int numSamples) {
		int samples = 0;

		while (samples < numSamples && !endOfData()) {
			if (!_audioTracks[0]->hasDataInQueue())
				_audioTracks[0]->queueAudio();
			samples += _audioTracks[0]->readBuffer(buffer + samples, numSamples - samples);
		}

		return samples;
	}

	bool isStereo() const { return _audioTracks[0]->isStereo(); }
	int getRate() const { return _audioTracks[0]->getRate(); }
	bool endOfData() const { return _audioTracks[0]->endOfData(); }

	// SeekableAudioStream API
	bool seek(const Timestamp &where) { return _audioTracks[0]->seek(where); }
	Timestamp getLength() const { return _audioTracks[0]->getLength(); }
};

SeekableAudioStream *makeQuickTimeStream(const Common::String &filename) {
	QuickTimeAudioStream *audioStream = new QuickTimeAudioStream();

	if (!audioStream->openFromFile(filename)) {
		delete audioStream;
		return 0;
	}

	return audioStream;
}

SeekableAudioStream *makeQuickTimeStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeAfterUse) {
	QuickTimeAudioStream *audioStream = new QuickTimeAudioStream();

	if (!audioStream->openFromStream(stream, disposeAfterUse)) {
		delete audioStream;
		return 0;
	}

	return audioStream;
}
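
// Example usage (an illustrative sketch only; the file name "intro.mov" and the
// sound handle are hypothetical, and error handling is omitted):
//
//   Audio::SeekableAudioStream *qtStream = Audio::makeQuickTimeStream("intro.mov");
//   if (qtStream) {
//       Audio::SoundHandle handle;
//       g_system->getMixer()->playStream(Audio::Mixer::kMusicSoundType, &handle, qtStream);
//   }
//
// By default the mixer takes ownership of the stream and disposes of it once
// playback has finished.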

} // End of namespace Audio