1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
12
13 #include <assert.h>
14 #include <stdlib.h>
15 #include <vector>
16
17 #include "webrtc/base/checks.h"
18 #include "webrtc/base/safe_conversions.h"
19 #include "webrtc/engine_configurations.h"
20 #include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
21 #include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
22 #include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
23 #include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
24 #include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
25 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
26 #include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
27 #include "webrtc/system_wrappers/interface/trace.h"
28 #include "webrtc/typedefs.h"
29
30 namespace webrtc {
31
32 namespace acm2 {
33
// End-of-tone marker.
enum {
  kACMToneEnd = 999
};

// Maximum number of bytes in one packet (PCM16B, 20 ms packets, stereo).
enum {
  kMaxPacketSize = 2560
};

// Maximum number of payloads that can be packed in one RED packet. For
// regular RED, we only pack two payloads. In case of dual-streaming, in the
// worst case we might pack 3 payloads in one RED packet.
enum {
  kNumRedFragmentationVectors = 2,
  kMaxNumFragmentationVectors = 3
};

// When packet N arrives, all packets prior to N - |kNackThresholdPackets|
// which are not received are considered lost, and appear in the NACK list.
enum {
  kNackThresholdPackets = 2
};
56
57 namespace {
58
59 // TODO(turajs): the same functionality is used in NetEq. If both classes
60 // need them, make it a static function in ACMCodecDB.
IsCodecRED(const CodecInst * codec)61 bool IsCodecRED(const CodecInst* codec) {
62 return (STR_CASE_CMP(codec->plname, "RED") == 0);
63 }
64
IsCodecRED(int index)65 bool IsCodecRED(int index) {
66 return (IsCodecRED(&ACMCodecDB::database_[index]));
67 }
68
IsCodecCN(const CodecInst * codec)69 bool IsCodecCN(const CodecInst* codec) {
70 return (STR_CASE_CMP(codec->plname, "CN") == 0);
71 }
72
IsCodecCN(int index)73 bool IsCodecCN(int index) {
74 return (IsCodecCN(&ACMCodecDB::database_[index]));
75 }
76
77 // Stereo-to-mono can be used as in-place.
DownMix(const AudioFrame & frame,int length_out_buff,int16_t * out_buff)78 int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
79 if (length_out_buff < frame.samples_per_channel_) {
80 return -1;
81 }
82 for (int n = 0; n < frame.samples_per_channel_; ++n)
83 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
84 return 0;
85 }
86
87 // Mono-to-stereo can be used as in-place.
UpMix(const AudioFrame & frame,int length_out_buff,int16_t * out_buff)88 int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
89 if (length_out_buff < frame.samples_per_channel_) {
90 return -1;
91 }
92 for (int n = frame.samples_per_channel_ - 1; n >= 0; --n) {
93 out_buff[2 * n + 1] = frame.data_[n];
94 out_buff[2 * n] = frame.data_[n];
95 }
96 return 0;
97 }
98
ConvertEncodedInfoToFragmentationHeader(const AudioEncoder::EncodedInfo & info,RTPFragmentationHeader * frag)99 void ConvertEncodedInfoToFragmentationHeader(
100 const AudioEncoder::EncodedInfo& info,
101 RTPFragmentationHeader* frag) {
102 if (info.redundant.empty()) {
103 frag->fragmentationVectorSize = 0;
104 return;
105 }
106
107 frag->VerifyAndAllocateFragmentationHeader(
108 static_cast<uint16_t>(info.redundant.size()));
109 frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
110 size_t offset = 0;
111 for (size_t i = 0; i < info.redundant.size(); ++i) {
112 frag->fragmentationOffset[i] = offset;
113 offset += info.redundant[i].encoded_bytes;
114 frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
115 frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
116 info.encoded_timestamp - info.redundant[i].encoded_timestamp);
117 frag->fragmentationPlType[i] = info.redundant[i].payload_type;
118 }
119 }
120 } // namespace
121
AudioCodingModuleImpl::AudioCodingModuleImpl(
    const AudioCodingModule::Config& config)
    : acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      id_(config.id),
      // Arbitrary start value; both trackers are re-seeded from the first
      // input frame in PreprocessToAddData().
      expected_codec_ts_(0xD87F3F9F),
      expected_in_ts_(0xD87F3F9F),
      receiver_(config),
      codec_manager_(this),
      // 255 is outside the valid RTP payload type range (0-127), marking
      // "no payload sent yet".
      previous_pltype_(255),
      // Lazily allocated by IncomingPayload().
      aux_rtp_header_(NULL),
      receiver_initialized_(false),
      first_10ms_data_(false),
      first_frame_(true),
      callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      packetization_callback_(NULL),
      vad_callback_(NULL) {
  // Set up the receive side (registers RED/CN payload types, resets delays).
  // Failure is logged but not propagated, since a constructor cannot return
  // an error.
  if (InitializeReceiverSafe() < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot initialize receiver");
  }
  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
}
144
~AudioCodingModuleImpl()145 AudioCodingModuleImpl::~AudioCodingModuleImpl() {
146 if (aux_rtp_header_ != NULL) {
147 delete aux_rtp_header_;
148 aux_rtp_header_ = NULL;
149 }
150
151 delete callback_crit_sect_;
152 callback_crit_sect_ = NULL;
153
154 delete acm_crit_sect_;
155 acm_crit_sect_ = NULL;
156 WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_,
157 "Destroyed");
158 }
159
// Encodes the 10 ms of audio described by |input_data| with the current send
// codec and delivers any resulting packet to the registered packetization
// callback. Returns the number of encoded bytes (0 when the encoder is still
// accumulating data), or -1 if no encoder is registered.
int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
  uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];  // Make room for 1 RED payload.
  AudioEncoder::EncodedInfo encoded_info;
  uint8_t previous_pltype;

  // Keep the scope of the ACM critical section limited.
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Check if there is an encoder before.
    if (!HaveValidEncoder("Process")) {
      return -1;
    }

    AudioEncoder* audio_encoder =
        codec_manager_.current_encoder()->GetAudioEncoder();
    // Scale the timestamp to the codec's RTP timestamp rate. For the first
    // frame the input timestamp is used as-is; afterwards the RTP timestamp
    // advances by the elapsed input timestamps divided by the ratio of the
    // encoder's sample rate to its RTP timestamp rate.
    uint32_t rtp_timestamp =
        first_frame_ ? input_data.input_timestamp
                     : last_rtp_timestamp_ +
                           rtc::CheckedDivExact(
                               input_data.input_timestamp - last_timestamp_,
                               static_cast<uint32_t>(rtc::CheckedDivExact(
                                   audio_encoder->SampleRateHz(),
                                   audio_encoder->RtpTimestampRateHz())));
    last_timestamp_ = input_data.input_timestamp;
    last_rtp_timestamp_ = rtp_timestamp;
    first_frame_ = false;

    encoded_info = audio_encoder->Encode(rtp_timestamp, input_data.audio,
                                         input_data.length_per_channel,
                                         sizeof(stream), stream);
    if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
      // Not enough data.
      return 0;
    }
    previous_pltype = previous_pltype_;  // Read it while we have the critsect.
  }

  // Build the RED fragmentation header (empty if there is no redundancy).
  RTPFragmentationHeader my_fragmentation;
  ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
  FrameType frame_type;
  if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
    // Empty frames reuse the previously sent payload type.
    frame_type = kFrameEmpty;
    encoded_info.payload_type = previous_pltype;
  } else {
    DCHECK_GT(encoded_info.encoded_bytes, 0u);
    frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
  }

  // Deliver the payload and the VAD decision under the callback lock only;
  // the ACM lock is deliberately not held across user callbacks.
  {
    CriticalSectionScoped lock(callback_crit_sect_);
    if (packetization_callback_) {
      packetization_callback_->SendData(
          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
          stream, encoded_info.encoded_bytes,
          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                       : nullptr);
    }

    if (vad_callback_) {
      // Callback with VAD decision.
      vad_callback_->InFrameType(frame_type);
    }
  }
  // Re-acquire the ACM lock briefly to record the payload type just sent.
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    previous_pltype_ = encoded_info.payload_type;
  }
  return static_cast<int32_t>(encoded_info.encoded_bytes);
}
230
231 /////////////////////////////////////////
232 // Sender
233 //
234
235 // TODO(henrik.lundin): Remove this method; only used in tests.
ResetEncoder()236 int AudioCodingModuleImpl::ResetEncoder() {
237 CriticalSectionScoped lock(acm_crit_sect_);
238 if (!HaveValidEncoder("ResetEncoder")) {
239 return -1;
240 }
241 return 0;
242 }
243
244 // Can be called multiple times for Codec, CNG, RED.
RegisterSendCodec(const CodecInst & send_codec)245 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
246 CriticalSectionScoped lock(acm_crit_sect_);
247 return codec_manager_.RegisterSendCodec(send_codec);
248 }
249
250 // Get current send codec.
SendCodec(CodecInst * current_codec) const251 int AudioCodingModuleImpl::SendCodec(CodecInst* current_codec) const {
252 CriticalSectionScoped lock(acm_crit_sect_);
253 return codec_manager_.SendCodec(current_codec);
254 }
255
256 // Get current send frequency.
SendFrequency() const257 int AudioCodingModuleImpl::SendFrequency() const {
258 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
259 "SendFrequency()");
260 CriticalSectionScoped lock(acm_crit_sect_);
261
262 if (!codec_manager_.current_encoder()) {
263 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
264 "SendFrequency Failed, no codec is registered");
265 return -1;
266 }
267
268 return codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz();
269 }
270
271 // Get encode bitrate.
272 // Adaptive rate codecs return their current encode target rate, while other
273 // codecs return there longterm avarage or their fixed rate.
274 // TODO(henrik.lundin): Remove; not used.
SendBitrate() const275 int AudioCodingModuleImpl::SendBitrate() const {
276 CriticalSectionScoped lock(acm_crit_sect_);
277
278 if (!codec_manager_.current_encoder()) {
279 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
280 "SendBitrate Failed, no codec is registered");
281 return -1;
282 }
283
284 WebRtcACMCodecParams encoder_param;
285 codec_manager_.current_encoder()->EncoderParams(&encoder_param);
286
287 return encoder_param.codec_inst.rate;
288 }
289
290 // Set available bandwidth, inform the encoder about the estimated bandwidth
291 // received from the remote party.
292 // TODO(henrik.lundin): Remove; not used.
int AudioCodingModuleImpl::SetReceivedEstimatedBandwidth(int bw) {
  CriticalSectionScoped lock(acm_crit_sect_);
  // This path is believed to be unreachable; crash deliberately if it is
  // ever hit so the assumption gets revisited.
  FATAL() << "Dead code?";
  return -1;
  // return codecs_[current_send_codec_idx_]->SetEstimatedBandwidth(bw);
}
299
300 // Register a transport callback which will be called to deliver
301 // the encoded buffers.
RegisterTransportCallback(AudioPacketizationCallback * transport)302 int AudioCodingModuleImpl::RegisterTransportCallback(
303 AudioPacketizationCallback* transport) {
304 CriticalSectionScoped lock(callback_crit_sect_);
305 packetization_callback_ = transport;
306 return 0;
307 }
308
309 // Add 10MS of raw (PCM) audio data to the encoder.
Add10MsData(const AudioFrame & audio_frame)310 int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
311 InputData input_data;
312 int r = Add10MsDataInternal(audio_frame, &input_data);
313 return r < 0 ? r : Encode(input_data);
314 }
315
// Validates |audio_frame| (exactly 10 ms of PCM, mono or stereo, at most
// 48 kHz), runs it through PreprocessToAddData() (resampling and/or
// down-mix), re-mixes if the channel count still differs from the encoder's,
// and fills |input_data| with the pointers and metadata Encode() needs.
// Returns 0 on success, -1 on any validation or processing failure.
int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
                                               InputData* input_data) {
  if (audio_frame.samples_per_channel_ <= 0) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, payload length is negative or "
                 "zero");
    return -1;
  }

  if (audio_frame.sample_rate_hz_ > 48000) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency not valid");
    return -1;
  }

  // The frame must contain exactly 10 ms of audio: sample rate / 100
  // samples per channel. We currently just support raw PCM.
  if ((audio_frame.sample_rate_hz_ / 100)
      != audio_frame.samples_per_channel_) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency and length doesn't"
                 " match");
    return -1;
  }

  if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, invalid number of channels.");
    return -1;
  }

  CriticalSectionScoped lock(acm_crit_sect_);
  // Do we have a codec registered?
  if (!HaveValidEncoder("Add10MsData")) {
    return -1;
  }

  const AudioFrame* ptr_frame;
  // Perform a resampling, also down-mix if it is required and can be
  // performed before resampling (a down mix prior to resampling will take
  // place if both primary and secondary encoders are mono and input is in
  // stereo).
  if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
    return -1;
  }

  // Check whether we need an up-mix or down-mix?
  bool remix =
      ptr_frame->num_channels_ !=
      codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels();

  if (remix) {
    // Re-mix into |input_data->buffer| so the channel count matches the
    // encoder's expectation.
    if (ptr_frame->num_channels_ == 1) {
      if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
        return -1;
    } else {
      if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
        return -1;
    }
  }

  // When adding data to encoders this pointer is pointing to an audio buffer
  // with correct number of channels.
  const int16_t* ptr_audio = ptr_frame->data_;

  // For pushing data to primary, point the |ptr_audio| to correct buffer.
  if (codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels() !=
      ptr_frame->num_channels_)
    ptr_audio = input_data->buffer;

  input_data->input_timestamp = ptr_frame->timestamp_;
  input_data->audio = ptr_audio;
  input_data->length_per_channel = ptr_frame->samples_per_channel_;
  input_data->audio_channel =
      codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels();

  return 0;
}
395
396 // Perform a resampling and down-mix if required. We down-mix only if
397 // encoder is mono and input is stereo. In case of dual-streaming, both
398 // encoders has to be mono for down-mix to take place.
399 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
400 // is required, |*ptr_out| points to |in_frame|.
// Perform a resampling and down-mix if required. We down-mix only if
// encoder is mono and input is stereo. In case of dual-streaming, both
// encoders has to be mono for down-mix to take place.
// |*ptr_out| will point to the pre-processed audio-frame. If no
// pre-processing is required, |*ptr_out| points to |in_frame|. Also
// maintains |expected_in_ts_| / |expected_codec_ts_| so that output
// timestamps stay continuous in the codec's sample-rate domain even when
// input timestamps jump. Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                               const AudioFrame** ptr_out) {
  bool resample =
      (in_frame.sample_rate_hz_ !=
       codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz());

  // This variable is true if primary codec and secondary codec (if exists)
  // are both mono and input is stereo.
  bool down_mix =
      (in_frame.num_channels_ == 2) &&
      (codec_manager_.current_encoder()->GetAudioEncoder()->NumChannels() == 1);

  if (!first_10ms_data_) {
    // Seed the timestamp trackers from the very first frame.
    expected_in_ts_ = in_frame.timestamp_;
    expected_codec_ts_ = in_frame.timestamp_;
    first_10ms_data_ = true;
  } else if (in_frame.timestamp_ != expected_in_ts_) {
    // TODO(turajs): Do we need a warning here.
    // The input timestamp jumped; advance the codec-domain timestamp by the
    // same gap, scaled from the input rate to the codec rate.
    expected_codec_ts_ +=
        (in_frame.timestamp_ - expected_in_ts_) *
        static_cast<uint32_t>(
            (static_cast<double>(codec_manager_.current_encoder()
                                     ->GetAudioEncoder()
                                     ->SampleRateHz()) /
             static_cast<double>(in_frame.sample_rate_hz_)));
    expected_in_ts_ = in_frame.timestamp_;
  }


  if (!down_mix && !resample) {
    // No pre-processing is required.
    expected_in_ts_ += in_frame.samples_per_channel_;
    expected_codec_ts_ += in_frame.samples_per_channel_;
    *ptr_out = &in_frame;
    return 0;
  }

  *ptr_out = &preprocess_frame_;
  preprocess_frame_.num_channels_ = in_frame.num_channels_;
  int16_t audio[WEBRTC_10MS_PCM_AUDIO];
  const int16_t* src_ptr_audio = in_frame.data_;
  int16_t* dest_ptr_audio = preprocess_frame_.data_;
  if (down_mix) {
    // If a resampling is required the output of a down-mix is written into a
    // local buffer, otherwise, it will be written to the output frame.
    if (resample)
      dest_ptr_audio = audio;
    if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
      return -1;
    preprocess_frame_.num_channels_ = 1;
    // Set the input of the resampler is the down-mixed signal.
    src_ptr_audio = audio;
  }

  preprocess_frame_.timestamp_ = expected_codec_ts_;
  preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
  preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
  // If it is required, we have to do a resampling.
  if (resample) {
    // The result of the resampler is written to output frame.
    dest_ptr_audio = preprocess_frame_.data_;

    preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
        src_ptr_audio, in_frame.sample_rate_hz_,
        codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz(),
        preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
        dest_ptr_audio);

    if (preprocess_frame_.samples_per_channel_ < 0) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Cannot add 10 ms audio, resampling failed");
      return -1;
    }
    preprocess_frame_.sample_rate_hz_ =
        codec_manager_.current_encoder()->GetAudioEncoder()->SampleRateHz();
  }

  // Advance the trackers: codec-domain by the (possibly resampled) output
  // length, input-domain by the input length.
  expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
  expected_in_ts_ += in_frame.samples_per_channel_;

  return 0;
}
483
484 /////////////////////////////////////////
485 // (RED) Redundant Coding
486 //
487
REDStatus() const488 bool AudioCodingModuleImpl::REDStatus() const {
489 CriticalSectionScoped lock(acm_crit_sect_);
490 return codec_manager_.red_enabled();
491 }
492
493 // Configure RED status i.e on/off.
int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
  CriticalSectionScoped lock(acm_crit_sect_);
#ifdef WEBRTC_CODEC_RED
  // Delegate to the codec manager; returns -1 if RED cannot be toggled with
  // the current configuration.
  return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
#else
  // RED support was compiled out; the request cannot be honored.
  WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
               " WEBRTC_CODEC_RED is undefined");
  return -1;
#endif
}
504
505 /////////////////////////////////////////
506 // (FEC) Forward Error Correction (codec internal)
507 //
508
CodecFEC() const509 bool AudioCodingModuleImpl::CodecFEC() const {
510 CriticalSectionScoped lock(acm_crit_sect_);
511 return codec_manager_.codec_fec_enabled();
512 }
513
SetCodecFEC(bool enable_codec_fec)514 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
515 CriticalSectionScoped lock(acm_crit_sect_);
516 return codec_manager_.SetCodecFEC(enable_codec_fec);
517 }
518
SetPacketLossRate(int loss_rate)519 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
520 CriticalSectionScoped lock(acm_crit_sect_);
521 if (HaveValidEncoder("SetPacketLossRate") &&
522 codec_manager_.current_encoder()->SetPacketLossRate(loss_rate) < 0) {
523 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
524 "Set packet loss rate failed.");
525 return -1;
526 }
527 return 0;
528 }
529
530 /////////////////////////////////////////
531 // (VAD) Voice Activity Detection
532 //
SetVAD(bool enable_dtx,bool enable_vad,ACMVADMode mode)533 int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
534 bool enable_vad,
535 ACMVADMode mode) {
536 CriticalSectionScoped lock(acm_crit_sect_);
537 return codec_manager_.SetVAD(enable_dtx, enable_vad, mode);
538 }
539
540 // Get VAD/DTX settings.
VAD(bool * dtx_enabled,bool * vad_enabled,ACMVADMode * mode) const541 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
542 ACMVADMode* mode) const {
543 CriticalSectionScoped lock(acm_crit_sect_);
544 codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
545 return 0;
546 }
547
548 /////////////////////////////////////////
549 // Receiver
550 //
551
InitializeReceiver()552 int AudioCodingModuleImpl::InitializeReceiver() {
553 CriticalSectionScoped lock(acm_crit_sect_);
554 return InitializeReceiverSafe();
555 }
556
557 // Initialize receiver, resets codec database etc.
InitializeReceiverSafe()558 int AudioCodingModuleImpl::InitializeReceiverSafe() {
559 // If the receiver is already initialized then we want to destroy any
560 // existing decoders. After a call to this function, we should have a clean
561 // start-up.
562 if (receiver_initialized_) {
563 if (receiver_.RemoveAllCodecs() < 0)
564 return -1;
565 }
566 receiver_.set_id(id_);
567 receiver_.ResetInitialDelay();
568 receiver_.SetMinimumDelay(0);
569 receiver_.SetMaximumDelay(0);
570 receiver_.FlushBuffers();
571
572 // Register RED and CN.
573 for (int i = 0; i < ACMCodecDB::kNumCodecs; i++) {
574 if (IsCodecRED(i) || IsCodecCN(i)) {
575 uint8_t pl_type = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
576 if (receiver_.AddCodec(i, pl_type, 1, NULL) < 0) {
577 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
578 "Cannot register master codec.");
579 return -1;
580 }
581 }
582 }
583 receiver_initialized_ = true;
584 return 0;
585 }
586
587 // TODO(turajs): If NetEq opens an API for reseting the state of decoders then
588 // implement this method. Otherwise it should be removed. I might be that by
589 // removing and registering a decoder we can achieve the effect of resetting.
590 // Reset the decoder state.
591 // TODO(henrik.lundin): Remove; only used in one test, and does nothing.
int AudioCodingModuleImpl::ResetDecoder() {
  // Intentionally a no-op; see the TODOs above this function.
  return 0;
}
595
596 // Get current receive frequency.
ReceiveFrequency() const597 int AudioCodingModuleImpl::ReceiveFrequency() const {
598 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
599 "ReceiveFrequency()");
600
601 CriticalSectionScoped lock(acm_crit_sect_);
602
603 int codec_id = receiver_.last_audio_codec_id();
604
605 return codec_id < 0 ? receiver_.current_sample_rate_hz() :
606 ACMCodecDB::database_[codec_id].plfreq;
607 }
608
609 // Get current playout frequency.
PlayoutFrequency() const610 int AudioCodingModuleImpl::PlayoutFrequency() const {
611 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
612 "PlayoutFrequency()");
613
614 CriticalSectionScoped lock(acm_crit_sect_);
615
616 return receiver_.current_sample_rate_hz();
617 }
618
619 // Register possible receive codecs, can be called multiple times,
620 // for codecs, CNG (NB, WB and SWB), DTMF, RED.
RegisterReceiveCodec(const CodecInst & codec)621 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
622 CriticalSectionScoped lock(acm_crit_sect_);
623 DCHECK(receiver_initialized_);
624 return codec_manager_.RegisterReceiveCodec(codec);
625 }
626
627 // Get current received codec.
ReceiveCodec(CodecInst * current_codec) const628 int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
629 CriticalSectionScoped lock(acm_crit_sect_);
630 return receiver_.LastAudioCodec(current_codec);
631 }
632
RegisterDecoder(int acm_codec_id,uint8_t payload_type,int channels,AudioDecoder * audio_decoder)633 int AudioCodingModuleImpl::RegisterDecoder(int acm_codec_id,
634 uint8_t payload_type,
635 int channels,
636 AudioDecoder* audio_decoder) {
637 return receiver_.AddCodec(acm_codec_id, payload_type, channels,
638 audio_decoder);
639 }
640
641 // Incoming packet from network parsed and ready for decode.
IncomingPacket(const uint8_t * incoming_payload,const size_t payload_length,const WebRtcRTPHeader & rtp_header)642 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
643 const size_t payload_length,
644 const WebRtcRTPHeader& rtp_header) {
645 return receiver_.InsertPacket(rtp_header, incoming_payload, payload_length);
646 }
647
648 // Minimum playout delay (Used for lip-sync).
SetMinimumPlayoutDelay(int time_ms)649 int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) {
650 if ((time_ms < 0) || (time_ms > 10000)) {
651 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
652 "Delay must be in the range of 0-1000 milliseconds.");
653 return -1;
654 }
655 return receiver_.SetMinimumDelay(time_ms);
656 }
657
SetMaximumPlayoutDelay(int time_ms)658 int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
659 if ((time_ms < 0) || (time_ms > 10000)) {
660 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
661 "Delay must be in the range of 0-1000 milliseconds.");
662 return -1;
663 }
664 return receiver_.SetMaximumDelay(time_ms);
665 }
666
667 // Estimate the Bandwidth based on the incoming stream, needed for one way
668 // audio where the RTCP send the BW estimate.
669 // This is also done in the RTP module.
DecoderEstimatedBandwidth() const670 int AudioCodingModuleImpl::DecoderEstimatedBandwidth() const {
671 // We can estimate far-end to near-end bandwidth if the iSAC are sent. Check
672 // if the last received packets were iSAC packet then retrieve the bandwidth.
673 int last_audio_codec_id = receiver_.last_audio_codec_id();
674 if (last_audio_codec_id >= 0 &&
675 STR_CASE_CMP("ISAC", ACMCodecDB::database_[last_audio_codec_id].plname)) {
676 CriticalSectionScoped lock(acm_crit_sect_);
677 FATAL() << "Dead code?";
678 // return codecs_[last_audio_codec_id]->GetEstimatedBandwidth();
679 }
680 return -1;
681 }
682
683 // Set playout mode for: voice, fax, streaming or off.
SetPlayoutMode(AudioPlayoutMode mode)684 int AudioCodingModuleImpl::SetPlayoutMode(AudioPlayoutMode mode) {
685 receiver_.SetPlayoutMode(mode);
686 return 0; // TODO(turajs): return value is for backward compatibility.
687 }
688
689 // Get playout mode voice, fax, streaming or off.
PlayoutMode() const690 AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const {
691 return receiver_.PlayoutMode();
692 }
693
694 // Get 10 milliseconds of raw audio data to play out.
695 // Automatic resample to the requested frequency.
PlayoutData10Ms(int desired_freq_hz,AudioFrame * audio_frame)696 int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
697 AudioFrame* audio_frame) {
698 // GetAudio always returns 10 ms, at the requested sample rate.
699 if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
700 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
701 "PlayoutData failed, RecOut Failed");
702 return -1;
703 }
704
705 audio_frame->id_ = id_;
706 return 0;
707 }
708
709 /////////////////////////////////////////
710 // Statistics
711 //
712
713 // TODO(turajs) change the return value to void. Also change the corresponding
714 // NetEq function.
GetNetworkStatistics(NetworkStatistics * statistics)715 int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
716 receiver_.GetNetworkStatistics(statistics);
717 return 0;
718 }
719
RegisterVADCallback(ACMVADCallback * vad_callback)720 int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
721 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
722 "RegisterVADCallback()");
723 CriticalSectionScoped lock(callback_crit_sect_);
724 vad_callback_ = vad_callback;
725 return 0;
726 }
727
728 // TODO(tlegrand): Modify this function to work for stereo, and add tests.
// Feeds a bare payload (no RTP header) into the receiver by synthesizing an
// RTP header around it. The header is created on first use and its sequence
// number is advanced after each call so consecutive payloads look like a
// contiguous stream.
int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
                                           size_t payload_length,
                                           uint8_t payload_type,
                                           uint32_t timestamp) {
  // We are not acquiring any lock when interacting with |aux_rtp_header_|;
  // no other method uses this member variable.
  if (aux_rtp_header_ == NULL) {
    // This is the first time that we are using |aux_rtp_header_|
    // so we have to create it.
    aux_rtp_header_ = new WebRtcRTPHeader;
    aux_rtp_header_->header.payloadType = payload_type;
    // Don't matter in this case.
    aux_rtp_header_->header.ssrc = 0;
    aux_rtp_header_->header.markerBit = false;
    // Start with random numbers.
    aux_rtp_header_->header.sequenceNumber = 0x1234;  // Arbitrary.
    aux_rtp_header_->type.Audio.channel = 1;
  }

  aux_rtp_header_->header.timestamp = timestamp;
  IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_);
  // Get ready for the next payload.
  aux_rtp_header_->header.sequenceNumber++;
  return 0;
}
754
// Requests that codec-internal DTX be replaced by WebRTC DTX (or vice
// versa). The actual replacement logic is dead code below; the call only
// verifies that a send codec is registered and then returns success.
int AudioCodingModuleImpl::ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx) {
  CriticalSectionScoped lock(acm_crit_sect_);

  if (!HaveValidEncoder("ReplaceInternalDTXWithWebRtc")) {
    WEBRTC_TRACE(
        webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
        "Cannot replace codec internal DTX when no send codec is registered.");
    return -1;
  }

  // This path is believed to be unreachable; crash deliberately if it is
  // ever hit so the assumption gets revisited.
  FATAL() << "Dead code?";
  // int res = codecs_[current_send_codec_idx_]->ReplaceInternalDTX(
  //     use_webrtc_dtx);
  // Check if VAD is turned on, or if there is any error.
  // if (res == 1) {
  //   vad_enabled_ = true;
  // } else if (res < 0) {
  //   WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
  //                "Failed to set ReplaceInternalDTXWithWebRtc(%d)",
  //                use_webrtc_dtx);
  //   return res;
  // }

  return 0;
}
780
IsInternalDTXReplacedWithWebRtc(bool * uses_webrtc_dtx)781 int AudioCodingModuleImpl::IsInternalDTXReplacedWithWebRtc(
782 bool* uses_webrtc_dtx) {
783 *uses_webrtc_dtx = true;
784 return 0;
785 }
786
787 // TODO(henrik.lundin): Remove? Only used in tests. Deprecated in VoiceEngine.
SetISACMaxRate(int max_bit_per_sec)788 int AudioCodingModuleImpl::SetISACMaxRate(int max_bit_per_sec) {
789 CriticalSectionScoped lock(acm_crit_sect_);
790
791 if (!HaveValidEncoder("SetISACMaxRate")) {
792 return -1;
793 }
794
795 return codec_manager_.current_encoder()->SetISACMaxRate(max_bit_per_sec);
796 }
797
798 // TODO(henrik.lundin): Remove? Only used in tests. Deprecated in VoiceEngine.
SetISACMaxPayloadSize(int max_size_bytes)799 int AudioCodingModuleImpl::SetISACMaxPayloadSize(int max_size_bytes) {
800 CriticalSectionScoped lock(acm_crit_sect_);
801
802 if (!HaveValidEncoder("SetISACMaxPayloadSize")) {
803 return -1;
804 }
805
806 return codec_manager_.current_encoder()->SetISACMaxPayloadSize(
807 max_size_bytes);
808 }
809
810 // TODO(henrik.lundin): Remove? Only used in tests.
ConfigISACBandwidthEstimator(int frame_size_ms,int rate_bit_per_sec,bool enforce_frame_size)811 int AudioCodingModuleImpl::ConfigISACBandwidthEstimator(
812 int frame_size_ms,
813 int rate_bit_per_sec,
814 bool enforce_frame_size) {
815 CriticalSectionScoped lock(acm_crit_sect_);
816
817 if (!HaveValidEncoder("ConfigISACBandwidthEstimator")) {
818 return -1;
819 }
820
821 FATAL() << "Dead code?";
822 return -1;
823 // return codecs_[current_send_codec_idx_]->ConfigISACBandwidthEstimator(
824 // frame_size_ms, rate_bit_per_sec, enforce_frame_size);
825 }
826
SetOpusApplication(OpusApplicationMode application,bool disable_dtx_if_needed)827 int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application,
828 bool disable_dtx_if_needed) {
829 CriticalSectionScoped lock(acm_crit_sect_);
830 if (!HaveValidEncoder("SetOpusApplication")) {
831 return -1;
832 }
833 return codec_manager_.current_encoder()->SetOpusApplication(
834 application, disable_dtx_if_needed);
835 }
836
837 // Informs Opus encoder of the maximum playback rate the receiver will render.
SetOpusMaxPlaybackRate(int frequency_hz)838 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
839 CriticalSectionScoped lock(acm_crit_sect_);
840 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
841 return -1;
842 }
843 return codec_manager_.current_encoder()->SetOpusMaxPlaybackRate(frequency_hz);
844 }
845
EnableOpusDtx(bool force_voip)846 int AudioCodingModuleImpl::EnableOpusDtx(bool force_voip) {
847 CriticalSectionScoped lock(acm_crit_sect_);
848 if (!HaveValidEncoder("EnableOpusDtx")) {
849 return -1;
850 }
851 return codec_manager_.current_encoder()->EnableOpusDtx(force_voip);
852 }
853
DisableOpusDtx()854 int AudioCodingModuleImpl::DisableOpusDtx() {
855 CriticalSectionScoped lock(acm_crit_sect_);
856 if (!HaveValidEncoder("DisableOpusDtx")) {
857 return -1;
858 }
859 return codec_manager_.current_encoder()->DisableOpusDtx();
860 }
861
PlayoutTimestamp(uint32_t * timestamp)862 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
863 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1;
864 }
865
HaveValidEncoder(const char * caller_name) const866 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
867 if (!codec_manager_.current_encoder()) {
868 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
869 "%s failed: No send codec is registered.", caller_name);
870 return false;
871 }
872 return true;
873 }
874
UnregisterReceiveCodec(uint8_t payload_type)875 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
876 return receiver_.RemoveCodec(payload_type);
877 }
878
879 // TODO(turajs): correct the type of |length_bytes| when it is corrected in
880 // GenericCodec.
// Intended to produce an iSAC payload for RED packing, but the actual codec
// call is commented out below and the body hits FATAL(). Kept only for
// interface compatibility.
int AudioCodingModuleImpl::REDPayloadISAC(int isac_rate,
                                          int isac_bw_estimate,
                                          uint8_t* payload,
                                          int16_t* length_bytes) {
  CriticalSectionScoped lock(acm_crit_sect_);
  // NOTE(review): the caller name passed to the trace is the legacy
  // "EncodeData", not "REDPayloadISAC".
  if (!HaveValidEncoder("EncodeData")) {
    return -1;
  }
  // FATAL() presumably aborts; the return below is defensive only.
  FATAL() << "Dead code?";
  return -1;
  // int status;
  // status = codecs_[current_send_codec_idx_]->REDPayloadISAC(isac_rate,
  //                                                           isac_bw_estimate,
  //                                                           payload,
  //                                                           length_bytes);
  // return status;
}
898
// Sets an initial playout delay of |delay_ms| in the receiver. The receiver
// is initialized first if needed, because initializing it later would reset
// the delay.
int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
  {
    // Scoped block: the ACM lock is deliberately released before calling
    // receiver_.SetInitialDelay() below.
    CriticalSectionScoped lock(acm_crit_sect_);
    // Initialize receiver, if it is not initialized. Otherwise, initial delay
    // is reset upon initialization of the receiver.
    if (!receiver_initialized_)
      InitializeReceiverSafe();
  }
  return receiver_.SetInitialDelay(delay_ms);
}
909
EnableNack(size_t max_nack_list_size)910 int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
911 return receiver_.EnableNack(max_nack_list_size);
912 }
913
// Turns NACK off; forwards to the receiver, which owns all NACK state.
void AudioCodingModuleImpl::DisableNack() {
  receiver_.DisableNack();
}
917
GetNackList(int64_t round_trip_time_ms) const918 std::vector<uint16_t> AudioCodingModuleImpl::GetNackList(
919 int64_t round_trip_time_ms) const {
920 return receiver_.GetNackList(round_trip_time_ms);
921 }
922
LeastRequiredDelayMs() const923 int AudioCodingModuleImpl::LeastRequiredDelayMs() const {
924 return receiver_.LeastRequiredDelayMs();
925 }
926
// Copies the receiver's accumulated decoding-call statistics into
// |call_stats|.
void AudioCodingModuleImpl::GetDecodingCallStatistics(
    AudioDecodingCallStats* call_stats) const {
  receiver_.GetDecodingCallStatistics(call_stats);
}
931
932 } // namespace acm2
933
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::RegisterSendCodec(AudioEncoder* send_codec) {
  FATAL() << "Not implemented yet.";
  return false;
}
938
RegisterSendCodec(int encoder_type,uint8_t payload_type,int frame_size_samples)939 bool AudioCodingImpl::RegisterSendCodec(int encoder_type,
940 uint8_t payload_type,
941 int frame_size_samples) {
942 std::string codec_name;
943 int sample_rate_hz;
944 int channels;
945 if (!MapCodecTypeToParameters(
946 encoder_type, &codec_name, &sample_rate_hz, &channels)) {
947 return false;
948 }
949 webrtc::CodecInst codec;
950 AudioCodingModule::Codec(
951 codec_name.c_str(), &codec, sample_rate_hz, channels);
952 codec.pltype = payload_type;
953 if (frame_size_samples > 0) {
954 codec.pacsize = frame_size_samples;
955 }
956 return acm_old_->RegisterSendCodec(codec) == 0;
957 }
958
// Not implemented. FATAL() presumably aborts, so the NULL return is never
// produced in practice.
const AudioEncoder* AudioCodingImpl::GetSenderInfo() const {
  FATAL() << "Not implemented yet.";
  return NULL;
}
963
GetSenderCodecInst()964 const CodecInst* AudioCodingImpl::GetSenderCodecInst() {
965 if (acm_old_->SendCodec(¤t_send_codec_) != 0) {
966 return NULL;
967 }
968 return ¤t_send_codec_;
969 }
970
Add10MsAudio(const AudioFrame & audio_frame)971 int AudioCodingImpl::Add10MsAudio(const AudioFrame& audio_frame) {
972 acm2::AudioCodingModuleImpl::InputData input_data;
973 if (acm_old_->Add10MsDataInternal(audio_frame, &input_data) != 0)
974 return -1;
975 return acm_old_->Encode(input_data);
976 }
977
// Not implemented. FATAL() presumably aborts, so the NULL return is never
// produced in practice.
const ReceiverInfo* AudioCodingImpl::GetReceiverInfo() const {
  FATAL() << "Not implemented yet.";
  return NULL;
}
982
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::RegisterReceiveCodec(AudioDecoder* receive_codec) {
  FATAL() << "Not implemented yet.";
  return false;
}
987
RegisterReceiveCodec(int decoder_type,uint8_t payload_type)988 bool AudioCodingImpl::RegisterReceiveCodec(int decoder_type,
989 uint8_t payload_type) {
990 std::string codec_name;
991 int sample_rate_hz;
992 int channels;
993 if (!MapCodecTypeToParameters(
994 decoder_type, &codec_name, &sample_rate_hz, &channels)) {
995 return false;
996 }
997 webrtc::CodecInst codec;
998 AudioCodingModule::Codec(
999 codec_name.c_str(), &codec, sample_rate_hz, channels);
1000 codec.pltype = payload_type;
1001 return acm_old_->RegisterReceiveCodec(codec) == 0;
1002 }
1003
InsertPacket(const uint8_t * incoming_payload,size_t payload_len_bytes,const WebRtcRTPHeader & rtp_info)1004 bool AudioCodingImpl::InsertPacket(const uint8_t* incoming_payload,
1005 size_t payload_len_bytes,
1006 const WebRtcRTPHeader& rtp_info) {
1007 return acm_old_->IncomingPacket(
1008 incoming_payload, payload_len_bytes, rtp_info) == 0;
1009 }
1010
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::InsertPayload(const uint8_t* incoming_payload,
                                    size_t payload_len_byte,
                                    uint8_t payload_type,
                                    uint32_t timestamp) {
  FATAL() << "Not implemented yet.";
  return false;
}
1018
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::SetMinimumPlayoutDelay(int time_ms) {
  FATAL() << "Not implemented yet.";
  return false;
}
1023
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::SetMaximumPlayoutDelay(int time_ms) {
  FATAL() << "Not implemented yet.";
  return false;
}
1028
// Not implemented. FATAL() presumably aborts, so the -1 return is never
// produced in practice.
int AudioCodingImpl::LeastRequiredDelayMs() const {
  FATAL() << "Not implemented yet.";
  return -1;
}
1033
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::PlayoutTimestamp(uint32_t* timestamp) {
  FATAL() << "Not implemented yet.";
  return false;
}
1038
Get10MsAudio(AudioFrame * audio_frame)1039 bool AudioCodingImpl::Get10MsAudio(AudioFrame* audio_frame) {
1040 return acm_old_->PlayoutData10Ms(playout_frequency_hz_, audio_frame) == 0;
1041 }
1042
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::GetNetworkStatistics(
    NetworkStatistics* network_statistics) {
  FATAL() << "Not implemented yet.";
  return false;
}
1048
// Not implemented. FATAL() presumably aborts, so the return value is never
// produced in practice.
bool AudioCodingImpl::EnableNack(size_t max_nack_list_size) {
  FATAL() << "Not implemented yet.";
  return false;
}
1053
// Not implemented; deliberately an empty body instead of FATAL() because of
// the toolchain issue described below.
void AudioCodingImpl::DisableNack() {
  // A bug in the linker of Visual Studio 2013 Update 3 prevents us from using
  // FATAL() here; if we do so then the linker hangs when WPO is turned on.
  // TODO(sebmarchand): Re-evaluate this when we upgrade the toolchain.
}
1059
SetVad(bool enable_dtx,bool enable_vad,ACMVADMode vad_mode)1060 bool AudioCodingImpl::SetVad(bool enable_dtx,
1061 bool enable_vad,
1062 ACMVADMode vad_mode) {
1063 return acm_old_->SetVAD(enable_dtx, enable_vad, vad_mode) == 0;
1064 }
1065
GetNackList(int round_trip_time_ms) const1066 std::vector<uint16_t> AudioCodingImpl::GetNackList(
1067 int round_trip_time_ms) const {
1068 return acm_old_->GetNackList(round_trip_time_ms);
1069 }
1070
// Copies the legacy module's accumulated decoding-call statistics into
// |call_stats|.
void AudioCodingImpl::GetDecodingCallStatistics(
    AudioDecodingCallStats* call_stats) const {
  acm_old_->GetDecodingCallStatistics(call_stats);
}
1075
// Maps an ACMCodecDB codec-type enum to the parameters (RTP codec name,
// sample rate in Hz, channel count) needed to build a CodecInst via
// AudioCodingModule::Codec().
//
// Returns true for every codec type handled below. An unknown |codec_type| —
// including any type whose support was compiled out by the #ifdefs — falls
// into the default branch and hits FATAL(), which presumably aborts.
// NOTE(review): as written the function never actually returns false, even
// though callers check for it.
bool AudioCodingImpl::MapCodecTypeToParameters(int codec_type,
                                               std::string* codec_name,
                                               int* sample_rate_hz,
                                               int* channels) {
  switch (codec_type) {
#ifdef WEBRTC_CODEC_PCM16
    // Linear PCM (L16) in mono and stereo at 8/16/32 kHz.
    case acm2::ACMCodecDB::kPCM16B:
      *codec_name = "L16";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCM16Bwb:
      *codec_name = "L16";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCM16Bswb32kHz:
      *codec_name = "L16";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCM16B_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCM16Bwb_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 16000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCM16Bswb32kHz_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 32000;
      *channels = 2;
      break;
#endif
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
    // iSAC wideband is available in both the floating-point and fixed-point
    // builds; the 32/48 kHz variants only in the floating-point build below.
    case acm2::ACMCodecDB::kISAC:
      *codec_name = "ISAC";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
#endif
#ifdef WEBRTC_CODEC_ISAC
    case acm2::ACMCodecDB::kISACSWB:
      *codec_name = "ISAC";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kISACFB:
      *codec_name = "ISAC";
      *sample_rate_hz = 48000;
      *channels = 1;
      break;
#endif
#ifdef WEBRTC_CODEC_ILBC
    case acm2::ACMCodecDB::kILBC:
      *codec_name = "ILBC";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#endif
    // G.711 A-law/mu-law are always available (no #ifdef guard).
    case acm2::ACMCodecDB::kPCMA:
      *codec_name = "PCMA";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCMA_2ch:
      *codec_name = "PCMA";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCMU:
      *codec_name = "PCMU";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCMU_2ch:
      *codec_name = "PCMU";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
#ifdef WEBRTC_CODEC_G722
    case acm2::ACMCodecDB::kG722:
      *codec_name = "G722";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kG722_2ch:
      *codec_name = "G722";
      *sample_rate_hz = 16000;
      *channels = 2;
      break;
#endif
#ifdef WEBRTC_CODEC_OPUS
    case acm2::ACMCodecDB::kOpus:
      *codec_name = "opus";
      *sample_rate_hz = 48000;
      *channels = 2;
      break;
#endif
    // Comfort noise (CN) at narrow/wide/super-wideband rates.
    case acm2::ACMCodecDB::kCNNB:
      *codec_name = "CN";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kCNWB:
      *codec_name = "CN";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kCNSWB:
      *codec_name = "CN";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kRED:
      *codec_name = "red";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#ifdef WEBRTC_CODEC_AVT
    case acm2::ACMCodecDB::kAVT:
      *codec_name = "telephone-event";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#endif
    default:
      // FATAL() presumably aborts here; unknown types do not reach the
      // return statement below.
      FATAL() << "Codec type " << codec_type << " not supported.";
  }
  return true;
}
1210
1211 } // namespace webrtc
1212