1 /*
2  *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
12 
13 #include <algorithm>
14 #include <map>
15 #include <memory>
16 #include <vector>
17 
18 #include "api/video/encoded_image.h"
19 #include "api/video_codecs/sdp_video_format.h"
20 #include "api/video_codecs/video_encoder.h"
21 #include "common_video/libyuv/include/webrtc_libyuv.h"
22 #include "modules/video_coding/include/video_codec_interface.h"
23 #include "modules/video_coding/include/video_coding_defines.h"
24 #include "rtc_base/checks.h"
25 #include "test/gtest.h"
26 
27 using ::testing::_;
28 using ::testing::AllOf;
29 using ::testing::Field;
30 using ::testing::Return;
31 
32 namespace webrtc {
33 namespace test {
34 
35 namespace {
36 
// Geometry of the highest-resolution simulcast stream; the lower streams
// use half and quarter of these dimensions (see DefaultSettings()).
const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
// Solid color the synthetic test frames are filled with;
// TestDecodedImageCallback checks decoded pixels against these values.
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
// Per-stream rate settings, in kbps (SetRates() multiplies by 1000).
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const float kMaxFramerates[kNumberOfSimulcastStreams] = {30, 30, 30};
// Temporal-layer count per simulcast stream. The "no profile" variant
// uses 0, i.e. ConfigureStream() writes 0 layers for every stream.
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
const int kNoTemporalLayerProfile[3] = {0, 0, 0};

// Shared encoder settings for all tests. NOTE(review): arguments are
// presumably (capabilities, number_of_cores, max_payload_size) -- confirm
// against the VideoEncoder::Settings constructor.
const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
52 
// Copies the three given values into |expected_values|[0..2].
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  const T values[] = {value0, value1, value2};
  std::copy(std::begin(values), std::end(values), expected_values);
}
59 
// Indices of the Y/U/V planes in an I420 buffer; used to index the
// |plane_colors| argument of CreateImage().
enum PlaneType {
  kYPlane = 0,
  kUPlane = 1,
  kVPlane = 2,
  kNumOfPlanes = 3,
};
66 
67 }  // namespace
68 
// EncodedImageCallback that records, per simulcast stream, the temporal
// layer id and layer-sync flag of the last encoded frame, and keeps a
// copy of the last key frame and the last delta frame of the base stream.
class SimulcastTestFixtureImpl::TestEncodedImageCallback
    : public EncodedImageCallback {
 public:
  TestEncodedImageCallback() {
    // -1 means "no frame seen yet"; memset fills every byte of the int
    // array with 0xff, which reads back as -1 in two's complement.
    memset(temporal_layer_, -1, sizeof(temporal_layer_));
    memset(layer_sync_, false, sizeof(layer_sync_));
  }

  Result OnEncodedImage(const EncodedImage& encoded_image,
                        const CodecSpecificInfo* codec_specific_info) override {
    bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
    bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264);
    // Only store the base layer.
    if (encoded_image.SpatialIndex().value_or(0) == 0) {
      if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
        // Deep-copy the payload: the encoder may reuse its output buffer.
        encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
            encoded_image.data(), encoded_image.size()));
        encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
      } else {
        encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
            encoded_image.data(), encoded_image.size()));
      }
    }
    // Temporal-layer info lives in codec-specific fields; only VP8 and
    // H264 carry it here. Other codec types leave the arrays untouched.
    if (is_vp8) {
      layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.VP8.layerSync;
      temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.VP8.temporalIdx;
    } else if (is_h264) {
      layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.H264.base_layer_sync;
      temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.H264.temporal_idx;
    }
    return Result(Result::OK, encoded_image.Timestamp());
  }
  // Returns the recorded temporal layer id and sync flag for |stream|.
  // Only meaningful for codecs whose OnEncodedImage branch records them
  // (VP8 and H264 above).
  void GetLastEncodedFrameInfo(int* temporal_layer,
                               bool* layer_sync,
                               int stream) {
    *temporal_layer = temporal_layer_[stream];
    *layer_sync = layer_sync_[stream];
  }
  // Copies out the last base-stream key frame.
  void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
    *encoded_key_frame = encoded_key_frame_;
  }
  // Copies out the last base-stream delta frame.
  void GetLastEncodedFrame(EncodedImage* encoded_frame) {
    *encoded_frame = encoded_frame_;
  }

 private:
  EncodedImage encoded_key_frame_;
  EncodedImage encoded_frame_;
  // Indexed by simulcast stream (SpatialIndex of the encoded image).
  int temporal_layer_[kNumberOfSimulcastStreams];
  bool layer_sync_[kNumberOfSimulcastStreams];
};
125 
// DecodedImageCallback that sanity-checks decoded pixels against the
// solid color the test frames were filled with, and counts the number of
// decoded frames.
class SimulcastTestFixtureImpl::TestDecodedImageCallback
    : public DecodedImageCallback {
 public:
  TestDecodedImageCallback() : decoded_frames_(0) {}
  int32_t Decoded(VideoFrame& decoded_image) override {
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        decoded_image.video_frame_buffer()->ToI420();
    // Only the first row of each plane is inspected; tolerance of +/-1
    // allows for codec rounding on luma.
    for (int i = 0; i < decoded_image.width(); ++i) {
      EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
    }

    // TODO(mikhal): Verify the difference between U,V and the original.
    for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
      EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
      EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
    }
    decoded_frames_++;
    return 0;
  }
  // Not expected to be called by the decoders under test.
  int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
    RTC_NOTREACHED();
    return -1;
  }
  // Forwards to the single-argument overload; decode time and qp are
  // ignored by these tests.
  void Decoded(VideoFrame& decoded_image,
               absl::optional<int32_t> decode_time_ms,
               absl::optional<uint8_t> qp) override {
    Decoded(decoded_image);
  }
  // Number of frames delivered to Decoded() so far.
  int DecodedFrames() { return decoded_frames_; }

 private:
  int decoded_frames_;
};
159 
160 namespace {
161 
// Fills the |width| leftmost columns of each of |height| rows with
// |value| and zeroes the remaining stride padding. Zeroing the padding
// makes it easy to distinguish image size from allocated (strided) size.
void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
  uint8_t* row = data;
  for (int y = 0; y < height; ++y, row += stride) {
    std::fill_n(row, width, value);
    std::fill(row + width, row + stride, static_cast<uint8_t>(0));
  }
}
171 
// Fills in an I420Buffer from |plane_colors|, one solid color per plane
// (indexed as kYPlane/kUPlane/kVPlane). Stride padding is zeroed by
// SetPlane().
void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
                 int plane_colors[kNumOfPlanes]) {
  SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
           buffer->height(), buffer->StrideY());

  SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideU());

  SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideV());
}
184 
ConfigureStream(int width,int height,int max_bitrate,int min_bitrate,int target_bitrate,float max_framerate,SpatialLayer * stream,int num_temporal_layers)185 void ConfigureStream(int width,
186                      int height,
187                      int max_bitrate,
188                      int min_bitrate,
189                      int target_bitrate,
190                      float max_framerate,
191                      SpatialLayer* stream,
192                      int num_temporal_layers) {
193   assert(stream);
194   stream->width = width;
195   stream->height = height;
196   stream->maxBitrate = max_bitrate;
197   stream->minBitrate = min_bitrate;
198   stream->targetBitrate = target_bitrate;
199   stream->maxFramerate = max_framerate;
200   if (num_temporal_layers >= 0) {
201     stream->numberOfTemporalLayers = num_temporal_layers;
202   }
203   stream->qpMax = 45;
204   stream->active = true;
205 }
206 
207 }  // namespace
208 
// Fills |settings| with the canonical three-stream simulcast
// configuration used by these tests: quarter-, half- and full-resolution
// streams with rates taken from kMin/kTarget/kMaxBitrates and temporal
// layer counts from |temporal_layer_profile|. If |reverse_layer_order|
// is set, the low- and high-resolution streams swap places in the
// simulcastStream array (the middle stream stays at index 1).
void SimulcastTestFixtureImpl::DefaultSettings(
    VideoCodec* settings,
    const int* temporal_layer_profile,
    VideoCodecType codec_type,
    bool reverse_layer_order) {
  RTC_CHECK(settings);
  *settings = {};
  settings->codecType = codec_type;
  settings->startBitrate = 300;
  settings->minBitrate = 30;
  settings->maxBitrate = 0;
  settings->maxFramerate = 30;
  settings->width = kDefaultWidth;
  settings->height = kDefaultHeight;
  settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
  settings->active = true;
  // The layer_order table below is hard-coded for exactly three streams.
  ASSERT_EQ(3, kNumberOfSimulcastStreams);
  int layer_order[3] = {0, 1, 2};
  if (reverse_layer_order) {
    layer_order[0] = 2;
    layer_order[2] = 0;
  }
  settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                       kDefaultOutlierFrameSizePercent};
  ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                  kMinBitrates[0], kTargetBitrates[0], kMaxFramerates[0],
                  &settings->simulcastStream[layer_order[0]],
                  temporal_layer_profile[0]);
  ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                  kMinBitrates[1], kTargetBitrates[1], kMaxFramerates[1],
                  &settings->simulcastStream[layer_order[1]],
                  temporal_layer_profile[1]);
  ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                  kMinBitrates[2], kTargetBitrates[2], kMaxFramerates[2],
                  &settings->simulcastStream[layer_order[2]],
                  temporal_layer_profile[2]);
  // Codec-specific knobs; anything that is not VP8 is treated as H264.
  if (codec_type == kVideoCodecVP8) {
    settings->VP8()->denoisingOn = true;
    settings->VP8()->automaticResizeOn = false;
    settings->VP8()->frameDroppingOn = true;
    settings->VP8()->keyFrameInterval = 3000;
  } else {
    settings->H264()->frameDroppingOn = true;
    settings->H264()->keyFrameInterval = 3000;
  }
}
255 
// Creates encoder and decoder from the given factories for |video_format|
// and initializes them with the default simulcast settings. VP8 and H264
// get the default 3-temporal-layer profile; other codecs get none.
SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
    std::unique_ptr<VideoEncoderFactory> encoder_factory,
    std::unique_ptr<VideoDecoderFactory> decoder_factory,
    SdpVideoFormat video_format)
    : codec_type_(PayloadStringToCodecType(video_format.name)) {
  encoder_ = encoder_factory->CreateVideoEncoder(video_format);
  decoder_ = decoder_factory->CreateVideoDecoder(video_format);
  SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
                 ? kDefaultTemporalLayerProfile
                 : kNoTemporalLayerProfile);
}
267 
// Releases codec resources; the unique_ptrs then delete the instances.
SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
  encoder_->Release();
  decoder_->Release();
}
272 
// Wires up encode/decode callbacks, applies the default settings with the
// given per-stream temporal-layer profile, initializes both codecs
// (EXPECT-failing on error) and prepares a default-resolution input frame.
void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
  input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
}
289 
SetUpRateAllocator()290 void SimulcastTestFixtureImpl::SetUpRateAllocator() {
291   rate_allocator_.reset(new SimulcastRateAllocator(settings_));
292 }
293 
// Distributes |bitrate_kbps| (note: kilobits) across the simulcast
// streams via the rate allocator and pushes the result, together with
// |fps|, to the encoder.
void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
  encoder_->SetRates(VideoEncoder::RateControlParameters(
      rate_allocator_->Allocate(
          VideoBitrateAllocationParameters(bitrate_kbps * 1000, fps)),
      static_cast<double>(fps)));
}
300 
// Re-initializes the encoder with the given per-stream active flags, then
// encodes one key frame and one delta frame, expecting output only from
// the active streams.
void SimulcastTestFixtureImpl::RunActiveStreamsTest(
    const std::vector<bool> active_streams) {
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  UpdateActiveStreams(active_streams);
  // Set sufficient bitrate for all streams so we can test active without
  // bitrate being an issue.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);

  // First frame after InitEncode: expect a key frame from active streams.
  ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
318 
// Copies |active_streams| into |settings_| and re-initializes the rate
// allocator and encoder with the new configuration.
void SimulcastTestFixtureImpl::UpdateActiveStreams(
    const std::vector<bool> active_streams) {
  ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
  for (size_t i = 0; i < active_streams.size(); ++i) {
    settings_.simulcastStream[i].active = active_streams[i];
  }
  // Re initialize the allocator and encoder with the new settings.
  // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
  // reconfiguration of the allocator and encoder. When the video bitrate
  // allocator has support for updating active streams without a
  // reinitialization, we can just call that here instead.
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
}
333 
// Sets gmock expectations on |encoder_callback_|: each active stream must
// deliver exactly one encoded image of |frame_type| at that stream's
// resolution (quarter, half or full of the default size, by index).
// Inactive streams get no expectation, so output from them would be
// flagged by gmock's default behavior for the mock.
void SimulcastTestFixtureImpl::ExpectStreams(
    VideoFrameType frame_type,
    const std::vector<bool> expected_streams_active) {
  ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
            kNumberOfSimulcastStreams);
  if (expected_streams_active[0]) {
    EXPECT_CALL(
        encoder_callback_,
        OnEncodedImage(
            AllOf(Field(&EncodedImage::_frameType, frame_type),
                  Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                  Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
            _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
  if (expected_streams_active[1]) {
    EXPECT_CALL(
        encoder_callback_,
        OnEncodedImage(
            AllOf(Field(&EncodedImage::_frameType, frame_type),
                  Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                  Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
            _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
  if (expected_streams_active[2]) {
    EXPECT_CALL(encoder_callback_,
                OnEncodedImage(
                    AllOf(Field(&EncodedImage::_frameType, frame_type),
                          Field(&EncodedImage::_encodedWidth, kDefaultWidth),
                          Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
                    _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
}
375 
ExpectStreams(VideoFrameType frame_type,int expected_video_streams)376 void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
377                                              int expected_video_streams) {
378   ASSERT_GE(expected_video_streams, 0);
379   ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
380   std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
381   for (int i = 0; i < expected_video_streams; ++i) {
382     expected_streams_active[i] = true;
383   }
384   ExpectStreams(frame_type, expected_streams_active);
385 }
386 
// Checks that, for each of the first |num_spatial_layers| streams, the
// callback recorded the expected temporal layer id and layer-sync flag
// for the most recently encoded frame.
void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
    TestEncodedImageCallback* encoder_callback,
    const int* expected_temporal_idx,
    const bool* expected_layer_sync,
    int num_spatial_layers) {
  int temporal_layer = -1;
  bool layer_sync = false;
  for (int i = 0; i < num_spatial_layers; i++) {
    encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
    EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
    EXPECT_EQ(expected_layer_sync[i], layer_sync);
  }
}
400 
401 // We currently expect all active streams to generate a key frame even though
402 // a key frame was only requested for some of them.
// Requests a key frame on one stream at a time and verifies that all
// three streams produce a key frame each time (see comment above).
void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  // First frame after InitEncode is always a key frame on all streams.
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame request on stream 0 only.
  frame_types[0] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame request on stream 1 only.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[1] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame request on stream 2 only.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[2] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // No key frame request: back to delta frames everywhere.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
439 
// With a bitrate below even the base stream's minimum, only the base
// stream should be encoded (key frame, then delta frame).
void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
  // We should always encode the base layer.
  SetRates(kMinBitrates[0] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
452 
// With exactly the base stream's minimum bitrate, only the base stream
// should be encoded; the upper two are padded.
void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
  // We have just enough to get only the first stream and padding for two.
  SetRates(kMinBitrates[0], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
465 
// Just below the threshold for enabling the second stream: only the base
// stream is encoded (maxed out); the other two are padded.
void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
  // We are just below limit of sending second stream, so we should get
  // the first stream maxed out (at |maxBitrate|), and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
479 
// Enough bitrate for the two lower streams: both are encoded, the top
// stream is padded.
void SimulcastTestFixtureImpl::TestPaddingOneStream() {
  // We have just enough to send two streams, so padding for one stream.
  SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
492 
// Just below the threshold for enabling the third stream: the two lower
// streams are encoded (maxed out); the top stream is padded.
void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
  // We are just below limit of sending third stream, so we should get
  // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
506 
// Minimum bitrate at which all three streams are encoded.
void SimulcastTestFixtureImpl::TestSendAllStreams() {
  // We have just enough to send all streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
519 
// Steps the available bitrate down and back up, verifying that upper
// streams are dropped as bitrate shrinks and re-enabled (with a key
// frame) as it grows.
void SimulcastTestFixtureImpl::TestDisablingStreams() {
  // We should get three media streams.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get the first stream and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We don't have enough bitrate for the thumbnail stream, but we should get
  // it anyway with current configuration.
  SetRates(kTargetBitrates[0] - 1, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should get all three streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
565 
TestActiveStreams()566 void SimulcastTestFixtureImpl::TestActiveStreams() {
567   // All streams on.
568   RunActiveStreamsTest({true, true, true});
569   // All streams off.
570   RunActiveStreamsTest({false, false, false});
571   // Low stream off.
572   RunActiveStreamsTest({false, true, true});
573   // Middle stream off.
574   RunActiveStreamsTest({true, false, true});
575   // High stream off.
576   RunActiveStreamsTest({true, true, false});
577   // Only low stream turned on.
578   RunActiveStreamsTest({true, false, false});
579   // Only middle stream turned on.
580   RunActiveStreamsTest({false, true, false});
581   // Only high stream turned on.
582   RunActiveStreamsTest({false, false, true});
583 }
584 
// Reconfigures the fixture to effectively a single stream of
// |width|x|height| at 100 kbps (the way GTP switches to screenshare
// mode), encodes and verifies one key frame at that resolution, then
// switches back to the default three-stream configuration and encodes
// one more frame expecting only the base stream.
void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
  const int* temporal_layer_profile = nullptr;
  // Disable all streams except the last and set the bitrate of the last to
  // 100 kbps. This verifies the way GTP switches to screenshare mode.
  if (codec_type_ == kVideoCodecVP8) {
    settings_.VP8()->numberOfTemporalLayers = 1;
    temporal_layer_profile = kDefaultTemporalLayerProfile;
  } else {
    temporal_layer_profile = kNoTemporalLayerProfile;
  }
  settings_.maxBitrate = 100;
  settings_.startBitrate = 100;
  settings_.width = width;
  settings_.height = height;
  // Lower streams: zero max bitrate (disabled) but full resolution.
  for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
    settings_.simulcastStream[i].maxBitrate = 0;
    settings_.simulcastStream[i].width = settings_.width;
    settings_.simulcastStream[i].height = settings_.height;
    settings_.simulcastStream[i].numberOfTemporalLayers = 1;
  }
  // Setting input image to new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();

  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());

  // The for loop above did not set the bitrate of the highest layer.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
      0;
  // The highest layer has to correspond to the non-simulcast resolution.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
      settings_.width;
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
      settings_.height;
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));

  // Encode one frame and verify.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  // Exactly one key frame at the full (non-simulcast) resolution.
  EXPECT_CALL(
      encoder_callback_,
      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
                                 VideoFrameType::kVideoFrameKey),
                           Field(&EncodedImage::_encodedWidth, width),
                           Field(&EncodedImage::_encodedHeight, height)),
                     _))
      .Times(1)
      .WillRepeatedly(Return(
          EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Switch back.
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  // Start at the lowest bitrate for enabling base stream.
  settings_.startBitrate = kMinBitrates[0];
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  SetRates(settings_.startBitrate, 30);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  // Resize |input_frame_| to the new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
662 
TestSwitchingToOneStream()663 void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
664   SwitchingToOneStream(1024, 768);
665 }
666 
TestSwitchingToOneOddStream()667 void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
668   SwitchingToOneStream(1023, 769);
669 }
670 
TestSwitchingToOneSmallStream()671 void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
672   SwitchingToOneStream(4, 4);
673 }
674 
675 // Test the layer pattern and sync flag for various spatial-temporal patterns.
676 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
677 // temporal_layer id and layer_sync is expected for all streams.
TestSpatioTemporalLayers333PatternEncoder()678 void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
679   bool is_h264 = codec_type_ == kVideoCodecH264;
680   TestEncodedImageCallback encoder_callback;
681   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
682   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
683 
684   int expected_temporal_idx[3] = {-1, -1, -1};
685   bool expected_layer_sync[3] = {false, false, false};
686 
687   // First frame: #0.
688   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
689   SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
690   SetExpectedValues3<bool>(!is_h264, !is_h264, !is_h264, expected_layer_sync);
691   VerifyTemporalIdxAndSyncForAllSpatialLayers(
692       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
693 
694   // Next frame: #1.
695   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
696   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
697   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
698   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
699   VerifyTemporalIdxAndSyncForAllSpatialLayers(
700       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
701 
702   // Next frame: #2.
703   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
704   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
705   SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
706   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
707   VerifyTemporalIdxAndSyncForAllSpatialLayers(
708       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
709 
710   // Next frame: #3.
711   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
712   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
713   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
714   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
715   VerifyTemporalIdxAndSyncForAllSpatialLayers(
716       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
717 
718   // Next frame: #4.
719   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
720   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
721   SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
722   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
723   VerifyTemporalIdxAndSyncForAllSpatialLayers(
724       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
725 
726   // Next frame: #5.
727   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
728   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
729   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
730   SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);
731   VerifyTemporalIdxAndSyncForAllSpatialLayers(
732       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
733 }
734 
735 // Test the layer pattern and sync flag for various spatial-temporal patterns.
736 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
737 // 1 temporal layer for highest resolution.
738 // For this profile, we expect the temporal index pattern to be:
739 // 1st stream: 0, 2, 1, 2, ....
740 // 2nd stream: 0, 1, 0, 1, ...
741 // 3rd stream: -1, -1, -1, -1, ....
742 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
743 // should always have temporal layer idx set to kNoTemporalIdx = -1.
744 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
745 // TODO(marpan): Although this seems safe for now, we should fix this.
TestSpatioTemporalLayers321PatternEncoder()746 void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
747   EXPECT_EQ(codec_type_, kVideoCodecVP8);
748   int temporal_layer_profile[3] = {3, 2, 1};
749   SetUpCodec(temporal_layer_profile);
750   TestEncodedImageCallback encoder_callback;
751   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
752   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
753 
754   int expected_temporal_idx[3] = {-1, -1, -1};
755   bool expected_layer_sync[3] = {false, false, false};
756 
757   // First frame: #0.
758   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
759   SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
760   SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
761   VerifyTemporalIdxAndSyncForAllSpatialLayers(
762       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
763 
764   // Next frame: #1.
765   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
766   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
767   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
768   SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
769   VerifyTemporalIdxAndSyncForAllSpatialLayers(
770       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
771 
772   // Next frame: #2.
773   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
774   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
775   SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
776   SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
777   VerifyTemporalIdxAndSyncForAllSpatialLayers(
778       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
779 
780   // Next frame: #3.
781   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
782   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
783   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
784   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
785   VerifyTemporalIdxAndSyncForAllSpatialLayers(
786       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
787 
788   // Next frame: #4.
789   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
790   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
791   SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
792   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
793   VerifyTemporalIdxAndSyncForAllSpatialLayers(
794       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
795 
796   // Next frame: #5.
797   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
798   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
799   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
800   SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
801   VerifyTemporalIdxAndSyncForAllSpatialLayers(
802       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
803 }
804 
TestStrideEncodeDecode()805 void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
806   TestEncodedImageCallback encoder_callback;
807   TestDecodedImageCallback decoder_callback;
808   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
809   decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
810 
811   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
812   // Setting two (possibly) problematic use cases for stride:
813   // 1. stride > width 2. stride_y != stride_uv/2
814   int stride_y = kDefaultWidth + 20;
815   int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
816   input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
817                                      stride_uv, stride_uv);
818   input_frame_ = std::make_unique<webrtc::VideoFrame>(
819       webrtc::VideoFrame::Builder()
820           .set_video_frame_buffer(input_buffer_)
821           .set_rotation(webrtc::kVideoRotation_0)
822           .set_timestamp_us(0)
823           .build());
824 
825   // Set color.
826   int plane_offset[kNumOfPlanes];
827   plane_offset[kYPlane] = kColorY;
828   plane_offset[kUPlane] = kColorU;
829   plane_offset[kVPlane] = kColorV;
830   CreateImage(input_buffer_, plane_offset);
831 
832   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
833 
834   // Change color.
835   plane_offset[kYPlane] += 1;
836   plane_offset[kUPlane] += 1;
837   plane_offset[kVPlane] += 1;
838   CreateImage(input_buffer_, plane_offset);
839   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
840   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
841 
842   EncodedImage encoded_frame;
843   // Only encoding one frame - so will be a key frame.
844   encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
845   EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
846   encoder_callback.GetLastEncodedFrame(&encoded_frame);
847   decoder_->Decode(encoded_frame, false, 0);
848   EXPECT_EQ(2, decoder_callback.DecodedFrames());
849 }
850 
TestDecodeWidthHeightSet()851 void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
852   MockEncodedImageCallback encoder_callback;
853   MockDecodedImageCallback decoder_callback;
854 
855   EncodedImage encoded_frame[3];
856   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
857   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
858   decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
859 
860   EXPECT_CALL(encoder_callback, OnEncodedImage(_, _))
861       .Times(3)
862       .WillRepeatedly(
863           ::testing::Invoke([&](const EncodedImage& encoded_image,
864                                 const CodecSpecificInfo* codec_specific_info) {
865             EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
866 
867             size_t index = encoded_image.SpatialIndex().value_or(0);
868             encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
869                 encoded_image.data(), encoded_image.size()));
870             encoded_frame[index]._frameType = encoded_image._frameType;
871             return EncodedImageCallback::Result(
872                 EncodedImageCallback::Result::OK, 0);
873           }));
874   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
875 
876   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
877       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
878                                      absl::optional<int32_t> decode_time_ms,
879                                      absl::optional<uint8_t> qp) {
880         EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
881         EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
882       }));
883   EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));
884 
885   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
886       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
887                                      absl::optional<int32_t> decode_time_ms,
888                                      absl::optional<uint8_t> qp) {
889         EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
890         EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
891       }));
892   EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));
893 
894   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
895       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
896                                      absl::optional<int32_t> decode_time_ms,
897                                      absl::optional<uint8_t> qp) {
898         EXPECT_EQ(decodedImage.width(), kDefaultWidth);
899         EXPECT_EQ(decodedImage.height(), kDefaultHeight);
900       }));
901   EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
902 }
903 
904 void SimulcastTestFixtureImpl::
TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation()905     TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() {
906   VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo();
907   EXPECT_EQ(encoder_info.fps_allocation[0].size(),
908             static_cast<size_t>(kDefaultTemporalLayerProfile[0]));
909   EXPECT_EQ(encoder_info.fps_allocation[1].size(),
910             static_cast<size_t>(kDefaultTemporalLayerProfile[1]));
911   EXPECT_EQ(encoder_info.fps_allocation[2].size(),
912             static_cast<size_t>(kDefaultTemporalLayerProfile[2]));
913 }
914 }  // namespace test
915 }  // namespace webrtc
916