1 /*
2 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "video/rtp_video_stream_receiver2.h"
12
13 #include <memory>
14 #include <utility>
15
16 #include "api/video/video_codec_type.h"
17 #include "api/video/video_frame_type.h"
18 #include "common_video/h264/h264_common.h"
19 #include "media/base/media_constants.h"
20 #include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
21 #include "modules/rtp_rtcp/source/rtp_format.h"
22 #include "modules/rtp_rtcp/source/rtp_format_vp9.h"
23 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
24 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
25 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
26 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
27 #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
28 #include "modules/utility/include/process_thread.h"
29 #include "modules/video_coding/frame_object.h"
30 #include "modules/video_coding/include/video_coding_defines.h"
31 #include "modules/video_coding/rtp_frame_reference_finder.h"
32 #include "rtc_base/byte_buffer.h"
33 #include "rtc_base/logging.h"
34 #include "system_wrappers/include/clock.h"
35 #include "system_wrappers/include/field_trial.h"
36 #include "test/field_trial.h"
37 #include "test/gmock.h"
38 #include "test/gtest.h"
39 #include "test/mock_frame_transformer.h"
40 #include "test/time_controller/simulated_task_queue.h"
41
42 using ::testing::_;
43 using ::testing::ElementsAre;
44 using ::testing::Invoke;
45 using ::testing::SizeIs;
46 using ::testing::Values;
47
48 namespace webrtc {
49
50 namespace {
51
// 4-byte Annex B start code that the receiver prepends to each H264 NALU.
const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
53
GetAbsoluteCaptureTimestamps(const video_coding::EncodedFrame * frame)54 std::vector<uint64_t> GetAbsoluteCaptureTimestamps(
55 const video_coding::EncodedFrame* frame) {
56 std::vector<uint64_t> result;
57 for (const auto& packet_info : frame->PacketInfos()) {
58 if (packet_info.absolute_capture_time()) {
59 result.push_back(
60 packet_info.absolute_capture_time()->absolute_capture_timestamp);
61 }
62 }
63 return result;
64 }
65
GetGenericVideoHeader(VideoFrameType frame_type)66 RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
67 RTPVideoHeader video_header;
68 video_header.is_first_packet_in_frame = true;
69 video_header.is_last_packet_in_frame = true;
70 video_header.codec = kVideoCodecGeneric;
71 video_header.frame_type = frame_type;
72 return video_header;
73 }
74
// GoogleMock stub for the outgoing transport; lets tests observe (or ignore)
// RTP/RTCP packets the receiver tries to send, e.g. RTCP feedback.
class MockTransport : public Transport {
 public:
  MOCK_METHOD(bool,
              SendRtp,
              (const uint8_t*, size_t length, const PacketOptions& options),
              (override));
  MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
};
83
// GoogleMock stub capturing NACK requests the receiver issues for missing
// sequence numbers.
class MockNackSender : public NackSender {
 public:
  MOCK_METHOD(void,
              SendNack,
              (const std::vector<uint16_t>& sequence_numbers,
               bool buffering_allowed),
              (override));
};
92
// GoogleMock stub capturing key frame requests (e.g. when the first frame is
// a delta frame or the packet buffer overflows).
class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 public:
  MOCK_METHOD(void, RequestKeyFrame, (), (override));
};
97
98 class MockOnCompleteFrameCallback
99 : public video_coding::OnCompleteFrameCallback {
100 public:
101 MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
102 MOCK_METHOD(void,
103 DoOnCompleteFrameFailNullptr,
104 (video_coding::EncodedFrame*),
105 ());
106 MOCK_METHOD(void,
107 DoOnCompleteFrameFailLength,
108 (video_coding::EncodedFrame*),
109 ());
110 MOCK_METHOD(void,
111 DoOnCompleteFrameFailBitstream,
112 (video_coding::EncodedFrame*),
113 ());
OnCompleteFrame(std::unique_ptr<video_coding::EncodedFrame> frame)114 void OnCompleteFrame(
115 std::unique_ptr<video_coding::EncodedFrame> frame) override {
116 if (!frame) {
117 DoOnCompleteFrameFailNullptr(nullptr);
118 return;
119 }
120 EXPECT_EQ(buffer_.Length(), frame->size());
121 if (buffer_.Length() != frame->size()) {
122 DoOnCompleteFrameFailLength(frame.get());
123 return;
124 }
125 if (frame->size() != buffer_.Length() ||
126 memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
127 DoOnCompleteFrameFailBitstream(frame.get());
128 return;
129 }
130 DoOnCompleteFrame(frame.get());
131 }
132
ClearExpectedBitstream()133 void ClearExpectedBitstream() { buffer_.Clear(); }
134
AppendExpectedBitstream(const uint8_t data[],size_t size_in_bytes)135 void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
136 // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
137 buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
138 }
139 rtc::ByteBufferWriter buffer_;
140 };
141
// GoogleMock secondary RTP packet sink; used to verify that registered sinks
// receive (and removed sinks do not receive) packet notifications.
class MockRtpPacketSink : public RtpPacketSinkInterface {
 public:
  MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
};
146
// Shared identifiers used by the packets the tests inject.
constexpr uint32_t kSsrc = 111;
constexpr uint16_t kSequenceNumber = 222;
// Payload type the fixture registers for the generic codec.
constexpr int kPayloadType = 100;
// Payload type configured as RED in the fixture's receive config.
constexpr int kRedPayloadType = 125;
151
CreateRtpPacketReceived()152 std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
153 auto packet = std::make_unique<RtpPacketReceived>();
154 packet->SetSsrc(kSsrc);
155 packet->SetSequenceNumber(kSequenceNumber);
156 packet->SetPayloadType(kPayloadType);
157 return packet;
158 }
159
// Matches an RtpPacketReceived whose SSRC and sequence number equal those of
// `other`; all other header fields are intentionally ignored.
MATCHER_P(SamePacketAs, other, "") {
  return arg.Ssrc() == other.Ssrc() &&
         arg.SequenceNumber() == other.SequenceNumber();
}
164
165 } // namespace
166
// Test fixture wiring an RtpVideoStreamReceiver2 to mock transport, NACK,
// key-frame-request and complete-frame callbacks. A generic codec is
// registered for kPayloadType so simple payloads can be injected directly.
class RtpVideoStreamReceiver2Test : public ::testing::Test {
 public:
  RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {}
  // `field_trials` lets parameterized subclasses toggle field-trial-guarded
  // behavior (see RtpVideoStreamReceiver2TestH264).
  explicit RtpVideoStreamReceiver2Test(std::string field_trials)
      : override_field_trials_(field_trials),
        config_(CreateConfig()),
        process_thread_(ProcessThread::Create("TestThread")) {
    rtp_receive_statistics_ =
        ReceiveStatistics::Create(Clock::GetRealTimeClock());
    rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver2>(
        TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
        nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
        nullptr, process_thread_.get(), &mock_nack_sender_,
        &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_,
        nullptr, nullptr);
    VideoCodec codec;
    codec.codecType = kVideoCodecGeneric;
    rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {},
                                                /*raw_payload=*/false);
  }

  // Returns an H264 video header with an empty H264-specific sub-header,
  // ready for AddSps/AddPps/AddIdr below.
  RTPVideoHeader GetDefaultH264VideoHeader() {
    RTPVideoHeader video_header;
    video_header.codec = kVideoCodecH264;
    video_header.video_type_header.emplace<RTPVideoHeaderH264>();
    return video_header;
  }

  // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
  // code.

  // Appends a two-byte SPS NALU (type byte + sps_id) to `data` and records
  // the corresponding NaluInfo in `video_header`.
  void AddSps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kSps;
    info.sps_id = sps_id;
    info.pps_id = -1;
    data->AppendData<uint8_t, 2>({H264::NaluType::kSps, sps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Appends a two-byte PPS NALU referencing `sps_id` to `data` and records
  // the corresponding NaluInfo in `video_header`.
  void AddPps(RTPVideoHeader* video_header,
              uint8_t sps_id,
              uint8_t pps_id,
              rtc::CopyOnWriteBuffer* data) {
    NaluInfo info;
    info.type = H264::NaluType::kPps;
    info.sps_id = sps_id;
    info.pps_id = pps_id;
    data->AppendData<uint8_t, 2>({H264::NaluType::kPps, pps_id});
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

  // Records an IDR NaluInfo referencing `pps_id` in `video_header`; unlike
  // AddSps/AddPps, the IDR payload bytes are appended by the caller.
  void AddIdr(RTPVideoHeader* video_header, int pps_id) {
    NaluInfo info;
    info.type = H264::NaluType::kIdr;
    info.sps_id = -1;
    info.pps_id = pps_id;
    auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
    h264.nalus[h264.nalus_length++] = info;
  }

 protected:
  // Receive-stream config shared by all tests; RED is enabled so the RED
  // unpacking paths can be exercised.
  static VideoReceiveStream::Config CreateConfig() {
    VideoReceiveStream::Config config(nullptr);
    config.rtp.remote_ssrc = 1111;
    config.rtp.local_ssrc = 2222;
    config.rtp.red_payload_type = kRedPayloadType;
    return config;
  }

  // Makes TaskQueueBase::Current() valid during the receiver's construction
  // and callbacks (the receiver is handed the current task queue above).
  TokenTaskQueue task_queue_;
  TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_{&task_queue_};

  const webrtc::test::ScopedFieldTrials override_field_trials_;
  VideoReceiveStream::Config config_;
  MockNackSender mock_nack_sender_;
  MockKeyFrameRequestSender mock_key_frame_request_sender_;
  MockTransport mock_transport_;
  MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
  std::unique_ptr<ProcessThread> process_thread_;
  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
  std::unique_ptr<RtpVideoStreamReceiver2> rtp_video_stream_receiver_;
};
253
TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) {
  // Test that color space is cached from the last packet of a key frame and
  // that it's not reset by padding packets without color space.
  constexpr int kVp9PayloadType = 99;
  const ColorSpace kColorSpace(
      ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
      ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
  const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
                                                 6, 7, 8, 9, 10};
  const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};

  // Anonymous helper class that generates received packets.
  class {
   public:
    // Packetizes `payload` as VP9 for the given frame type; the reduced max
    // payload length below forces the key frame payload into two packets.
    void SetPayload(const std::vector<uint8_t>& payload,
                    VideoFrameType video_frame_type) {
      video_frame_type_ = video_frame_type;
      RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
      // Reduce max payload length to make sure the key frame generates two
      // packets.
      pay_load_size_limits.max_payload_len = 8;
      RTPVideoHeaderVP9 rtp_video_header_vp9;
      rtp_video_header_vp9.InitRTPVideoHeaderVP9();
      rtp_video_header_vp9.inter_pic_predicted =
          (video_frame_type == VideoFrameType::kVideoFrameDelta);
      rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
          payload, pay_load_size_limits, rtp_video_header_vp9);
    }

    // Packets remaining for the current payload.
    size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
    // Color space to attach to the last packet of each key frame.
    void SetColorSpace(const ColorSpace& color_space) {
      color_space_ = color_space;
    }

    // Produces the next packet. The color space extension is written only
    // when this is the last remaining packet of a key frame.
    RtpPacketReceived NextPacket() {
      RtpHeaderExtensionMap extension_map;
      extension_map.Register<ColorSpaceExtension>(1);
      RtpPacketToSend packet_to_send(&extension_map);
      packet_to_send.SetSequenceNumber(sequence_number_++);
      packet_to_send.SetSsrc(kSsrc);
      packet_to_send.SetPayloadType(kVp9PayloadType);
      bool include_color_space =
          (rtp_packetizer_->NumPackets() == 1u &&
           video_frame_type_ == VideoFrameType::kVideoFrameKey);
      if (include_color_space) {
        EXPECT_TRUE(
            packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
      }
      rtp_packetizer_->NextPacket(&packet_to_send);

      // Round-trip through the wire format to get a received packet.
      RtpPacketReceived received_packet(&extension_map);
      received_packet.Parse(packet_to_send.data(), packet_to_send.size());
      return received_packet;
    }

   private:
    uint16_t sequence_number_ = 0;
    VideoFrameType video_frame_type_;
    ColorSpace color_space_;
    std::unique_ptr<RtpPacketizer> rtp_packetizer_;
  } received_packet_generator;
  received_packet_generator.SetColorSpace(kColorSpace);

  // Prepare the receiver for VP9.
  VideoCodec codec;
  codec.codecType = kVideoCodecVP9;
  std::map<std::string, std::string> codec_params;
  rtp_video_stream_receiver_->AddReceiveCodec(kVp9PayloadType, codec,
                                              codec_params,
                                              /*raw_payload=*/false);

  // Generate key frame packets.
  received_packet_generator.SetPayload(kKeyFramePayload,
                                       VideoFrameType::kVideoFrameKey);
  EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
  RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
  RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();

  // Generate delta frame packet.
  received_packet_generator.SetPayload(kDeltaFramePayload,
                                       VideoFrameType::kVideoFrameDelta);
  EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
  RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();

  rtp_video_stream_receiver_->StartReceive();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kKeyFramePayload.data(), kKeyFramePayload.size());

  // Send the key frame and expect a callback with color space information.
  EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
  EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
  // Resend the first key frame packet to simulate padding for example.
  rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);

  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kDeltaFramePayload.data(), kDeltaFramePayload.size());

  // Expect delta frame to have color space set even though color space not
  // included in the RTP packet.
  EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
        ASSERT_TRUE(frame->EncodedImage().ColorSpace());
        EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
      }));
  rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
}
369
TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
  // A single-packet generic key frame must be delivered to the complete-frame
  // callback with its payload intact.
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
383
TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
  // The absolute capture time carried in the RTP header extension must end up
  // in the PacketInfos() of the completed frame.
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetTimestamp(1);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  // The single-packet frame should carry exactly the one injected timestamp.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke(
          [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) {
            EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
                        ElementsAre(kAbsoluteCaptureTimestamp));
          }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
413
TEST_F(RtpVideoStreamReceiver2Test,
       MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
  constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
  constexpr int kId0 = 1;

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);

  // First frame: carries an explicit absolute capture time.
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  uint16_t sequence_number = 1;
  uint32_t rtp_timestamp = 1;
  rtp_packet.SetSequenceNumber(sequence_number);
  rtp_packet.SetTimestamp(rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);
  rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
      AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
                          /*estimated_capture_clock_offset=*/absl::nullopt});

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Rtp packet without absolute capture time.
  rtp_packet = RtpPacketReceived(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(++sequence_number);
  rtp_packet.SetTimestamp(++rtp_timestamp);
  rtp_packet.SetSsrc(kSsrc);

  // There is no absolute capture time in the second packet.
  // Expect rtp video stream receiver to extrapolate it for the resulting video
  // frame using absolute capture time from the previous packet.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce(Invoke([](video_coding::EncodedFrame* frame) {
        EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
      }));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
459
TEST_F(RtpVideoStreamReceiver2Test,
       NoInfiniteRecursionOnEncapsulatedRedPacket) {
  // A RED packet whose inner payload type is again RED could make a naive
  // implementation unpack itself forever. The test passes if OnRtpPacket
  // returns (no crash / stack overflow).
  const std::vector<uint8_t> data({
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      kRedPayloadType,   // RED header.
      0, 0, 0, 0, 0      // Don't care.
  });
  RtpPacketReceived packet;
  EXPECT_TRUE(packet.Parse(data.data(), data.size()));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
}
475
TEST_F(RtpVideoStreamReceiver2Test,
       DropsPacketWithRedPayloadTypeAndEmptyPayload) {
  // NOTE(review): this local shadows the namespace-level kRedPayloadType
  // (same value, 125) and CreateConfig() already sets it on config_.
  const uint8_t kRedPayloadType = 125;
  config_.rtp.red_payload_type = kRedPayloadType;
  // NOTE(review): the fixture does not override SetUp(), so this call is a
  // no-op; the receiver from the constructor is reused — confirm intent.
  SetUp();  // re-create rtp_video_stream_receiver with red payload type.
  // clang-format off
  const uint8_t data[] = {
      0x80,              // RTP version.
      kRedPayloadType,   // Payload type.
      0, 0, 0, 0, 0, 0,  // Don't care.
      0, 0, 0x4, 0x57,   // SSRC
      // Empty rtp payload.
  };
  // clang-format on
  RtpPacketReceived packet;
  // Manually convert to CopyOnWriteBuffer to be sure capacity == size
  // and asan bot can catch read buffer overflow.
  EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
  rtp_video_stream_receiver_->StartReceive();
  rtp_video_stream_receiver_->OnRtpPacket(packet);
  // Expect asan doesn't find anything.
}
498
TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
  // The received payload differs from the expected bitstream in the last
  // byte, so the callback must report a bitstream mismatch.
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  constexpr uint8_t kExpectedBitstream[] = {1, 2, 3, 0xff};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kExpectedBitstream, sizeof(kExpectedBitstream));
  EXPECT_CALL(mock_on_complete_frame_callback_,
              DoOnCompleteFrameFailBitstream(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
514
// H264 variant of the fixture, parameterized on a field-trial string so each
// test runs both with and without WebRTC-SpsPpsIdrIsH264Keyframe enabled.
class RtpVideoStreamReceiver2TestH264
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<std::string> {
 protected:
  RtpVideoStreamReceiver2TestH264() : RtpVideoStreamReceiver2Test(GetParam()) {}
};

INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
                         RtpVideoStreamReceiver2TestH264,
                         Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
525
TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
  // SPS and PPS sent in-band (as their own packets) followed by an IDR must
  // yield a complete key frame; each NALU gets a start code prepended.
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  // PPS referencing sps_id 0.
  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  // IDR referencing pps_id 1; this completes the key frame.
  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}
572
TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
  // SPS/PPS provided out-of-band via the fmtp sprop-parameter-sets line must
  // be prepended (with start codes) to the first key frame's bitstream.
  constexpr int kPayloadType = 99;  // Shadows the file-level kPayloadType.
  VideoCodec codec;
  std::map<std::string, std::string> codec_params;
  // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
  // .
  codec_params.insert(
      {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params,
                                              /*raw_payload=*/false);
  // Decoded form of the base64 "Z0IACpZTBYmI" SPS above.
  const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
                                0x53, 0x05, 0x89, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
                                                           sizeof(binary_sps));
  // Decoded form of the base64 "aMljiA==" PPS above.
  const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
                                                           sizeof(binary_pps));

  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  AddIdr(&video_header, 0);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecH264;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
613
TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
  // With SpsPpsIdrIsKeyframe forced (via field trial or codec param), only an
  // IDR preceded by SPS+PPS counts as a key frame; a later bare IDR does not.
  constexpr int kPayloadType = 99;  // Shadows the file-level kPayloadType.
  VideoCodec codec;
  std::map<std::string, std::string> codec_params;
  if (GetParam() ==
      "") {  // Forcing can be done either with field trial or codec_params.
    codec_params.insert({cricket::kH264FmtpSpsPpsIdrInKeyframe, ""});
  }
  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params,
                                              /*raw_payload=*/false);
  rtc::CopyOnWriteBuffer sps_data;
  RtpPacketReceived rtp_packet;
  RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
  AddSps(&sps_video_header, 0, &sps_data);
  rtp_packet.SetSequenceNumber(0);
  rtp_packet.SetPayloadType(kPayloadType);
  sps_video_header.is_first_packet_in_frame = true;
  sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
                                                           sps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
                                                    sps_video_header);

  rtc::CopyOnWriteBuffer pps_data;
  RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
  AddPps(&pps_video_header, 0, 1, &pps_data);
  rtp_packet.SetSequenceNumber(1);
  pps_video_header.is_first_packet_in_frame = true;
  pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
                                                           pps_data.size());
  rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
                                                    pps_video_header);

  // First IDR arrives right after SPS+PPS: expected to be a key frame.
  rtc::CopyOnWriteBuffer idr_data;
  RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
  AddIdr(&idr_video_header, 1);
  rtp_packet.SetSequenceNumber(2);
  idr_video_header.is_first_packet_in_frame = true;
  idr_video_header.is_last_packet_in_frame = true;
  idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
  const uint8_t idr[] = {0x65, 1, 2, 3};
  idr_data.AppendData(idr);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_TRUE(frame->is_keyframe());
      });
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
  mock_on_complete_frame_callback_.ClearExpectedBitstream();
  mock_on_complete_frame_callback_.AppendExpectedBitstream(
      kH264StartCode, sizeof(kH264StartCode));
  mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
                                                           idr_data.size());
  // Second IDR without fresh SPS/PPS: must be downgraded to a delta frame.
  rtp_packet.SetSequenceNumber(3);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_FALSE(frame->is_keyframe());
      });
  rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
                                                    idr_video_header);
}
684
TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
  // Empty payloads (padding) interleaved with media must not break frame
  // assembly, and a padding packet filling a sequence-number gap must let the
  // frame after the gap complete.
  RtpPacketReceived rtp_packet;
  RTPVideoHeader video_header = GetDefaultH264VideoHeader();
  rtc::CopyOnWriteBuffer data({1, 2, 3});
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetSequenceNumber(2);
  video_header.is_first_packet_in_frame = true;
  video_header.is_last_packet_in_frame = true;
  video_header.codec = kVideoCodecGeneric;
  video_header.frame_type = VideoFrameType::kVideoFrameKey;
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                           data.size());

  // Seq 2: key frame completes immediately.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Seq 3: padding (empty payload) — no frame expected.
  rtp_packet.SetSequenceNumber(3);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);

  // Seq 4: delta frame completes.
  rtp_packet.SetSequenceNumber(4);
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  video_header.frame_type = VideoFrameType::kVideoFrameDelta;
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Seq 6: delta frame held back by the gap at seq 5.
  rtp_packet.SetSequenceNumber(6);
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);

  // Seq 5: padding fills the gap, releasing the seq-6 frame.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
  rtp_packet.SetSequenceNumber(5);
  rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
                                                    video_header);
}
721
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
  // A delta frame arriving before any key frame cannot be decoded, so the
  // receiver should immediately ask the sender for a key frame.
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived rtp_packet;
  rtp_packet.SetSequenceNumber(1);
  rtp_packet.SetPayloadType(kPayloadType);
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(payload, rtp_packet,
                                                    video_header);
}
733
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
  // Matches the receiver's internal packet buffer capacity; once exceeded the
  // receiver should request a key frame to recover.
  constexpr int kPacketBufferMaxSize = 2048;

  RtpPacketReceived rtp_packet;
  rtp_packet.SetPayloadType(kPayloadType);
  rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
  // Incomplete frames so that the packet buffer is filling up.
  video_header.is_last_packet_in_frame = false;
  uint16_t start_sequence_number = 1234;
  rtp_packet.SetSequenceNumber(start_sequence_number);
  // Sequence numbers advance by 2, so no frame ever completes; the uint16_t
  // difference below is wraparound-safe for this range.
  while (rtp_packet.SequenceNumber() - start_sequence_number <
         kPacketBufferMaxSize) {
    rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                      video_header);
    rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
  }

  // The next insert overflows the buffer and must trigger a key frame request.
  EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
  rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
                                                    video_header);
}
757
// Packets delivered to a started receiver are forwarded to every registered
// secondary sink.
TEST_F(RtpVideoStreamReceiver2Test, SecondarySinksGetRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink sink_a;
  MockRtpPacketSink sink_b;
  rtp_video_stream_receiver_->AddSecondarySink(&sink_a);
  rtp_video_stream_receiver_->AddSecondarySink(&sink_b);

  auto packet = CreateRtpPacketReceived();
  EXPECT_CALL(sink_a, OnRtpPacket(SamePacketAs(*packet)));
  EXPECT_CALL(sink_b, OnRtpPacket(SamePacketAs(*packet)));
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Tear down: stop the receiver and detach both sinks.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink_a);
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink_b);
}
778
// A sink that was added and then removed must not be notified of packets.
TEST_F(RtpVideoStreamReceiver2Test,
       RemovedSecondarySinksGetNoRtpNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink sink;
  rtp_video_stream_receiver_->AddSecondarySink(&sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink);

  EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
  auto packet = CreateRtpPacketReceived();
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Tear down.
  rtp_video_stream_receiver_->StopReceive();
}
797
// Removing one sink must not affect delivery to the sinks that remain.
TEST_F(RtpVideoStreamReceiver2Test,
       OnlyRemovedSecondarySinksExcludedFromNotifications) {
  rtp_video_stream_receiver_->StartReceive();

  MockRtpPacketSink kept_sink;
  MockRtpPacketSink removed_sink;
  rtp_video_stream_receiver_->AddSecondarySink(&kept_sink);
  rtp_video_stream_receiver_->AddSecondarySink(&removed_sink);
  rtp_video_stream_receiver_->RemoveSecondarySink(&removed_sink);

  // Only the sink that is still attached gets the packet.
  auto packet = CreateRtpPacketReceived();
  EXPECT_CALL(kept_sink, OnRtpPacket(SamePacketAs(*packet)));
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Tear down.
  rtp_video_stream_receiver_->StopReceive();
  rtp_video_stream_receiver_->RemoveSecondarySink(&kept_sink);
}
818
// Sinks attached to a stopped stream receive nothing.
TEST_F(RtpVideoStreamReceiver2Test,
       SecondariesOfNonStartedStreamGetNoNotifications) {
  // Explicitly force the |stopped| state, regardless of whether streams
  // start out |started| or |stopped|.
  rtp_video_stream_receiver_->StopReceive();

  MockRtpPacketSink sink;
  rtp_video_stream_receiver_->AddSecondarySink(&sink);

  EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
  auto packet = CreateRtpPacketReceived();
  rtp_video_stream_receiver_->OnRtpPacket(*packet);

  // Tear down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink);
}
836
// A single packet carrying a generic frame descriptor should produce one
// complete frame whose references, spatial index and packet-info count come
// from the descriptor.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
  const std::vector<uint8_t> payload = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetPayloadType(kPayloadType);
  rtp_packet.SetMarker(true);
  rtp_packet.SetSequenceNumber(1);

  RtpGenericFrameDescriptor descriptor;
  descriptor.SetFirstPacketInSubFrame(true);
  descriptor.SetLastPacketInSubFrame(true);
  descriptor.SetFrameId(100);
  descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  descriptor.AddFrameDependencyDiff(90);
  descriptor.AddFrameDependencyDiff(80);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      descriptor));

  memcpy(rtp_packet.SetPayloadSize(payload.size()), payload.data(),
         payload.size());
  // The first byte is the payload header, so only the tail of |payload| is
  // expected in the reassembled bitstream.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data() + 1,
                                                           payload.size() - 1);

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 2U);
        EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
        EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
      });

  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}
879
// A frame split across two packets: the complete frame is emitted only after
// the second (marker) packet, carries resolution and spatial index from the
// first packet's descriptor, and reports both packet infos.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  const int kSpatialIndex = 1;

  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived first_packet(&extension_map);

  // First packet of the sub-frame: carries frame id, spatial layer and
  // resolution; not the last packet, so no frame completes yet.
  RtpGenericFrameDescriptor first_packet_descriptor;
  first_packet_descriptor.SetFirstPacketInSubFrame(true);
  first_packet_descriptor.SetLastPacketInSubFrame(false);
  first_packet_descriptor.SetFrameId(100);
  first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
  first_packet_descriptor.SetResolution(480, 360);
  ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      first_packet_descriptor));

  uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
  memcpy(first_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  first_packet.SetPayloadType(kPayloadType);
  first_packet.SetSequenceNumber(1);
  rtp_video_stream_receiver_->OnRtpPacket(first_packet);

  // Second packet: only marks the end of the sub-frame (marker bit set).
  RtpPacketReceived second_packet(&extension_map);
  RtpGenericFrameDescriptor second_packet_descriptor;
  second_packet_descriptor.SetFirstPacketInSubFrame(false);
  second_packet_descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      second_packet_descriptor));

  second_packet.SetMarker(true);
  second_packet.SetPayloadType(kPayloadType);
  second_packet.SetSequenceNumber(2);

  uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
  memcpy(second_packet_payload, data.data(), data.size());
  // The first byte is the header, so we ignore the first byte of |data|.
  mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
                                                           data.size() - 1);

  // The completed frame should reflect the first packet's descriptor and
  // contain infos for both packets.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->num_references, 0U);
        EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
        EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
        EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
        EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
      }));

  rtp_video_stream_receiver_->OnRtpPacket(second_packet);
}
937
// With a raw-payload codec, no payload header is stripped: the entire
// payload is expected in the reassembled bitstream.
TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) {
  const std::vector<uint8_t> payload = {0, 1, 2, 3, 4};
  const int kRawPayloadType = 123;

  VideoCodec codec;
  rtp_video_stream_receiver_->AddReceiveCodec(kRawPayloadType, codec, {},
                                              /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();

  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
  RtpPacketReceived rtp_packet(&extension_map);
  rtp_packet.SetMarker(true);
  rtp_packet.SetPayloadType(kRawPayloadType);
  rtp_packet.SetSequenceNumber(1);

  RtpGenericFrameDescriptor descriptor;
  descriptor.SetFirstPacketInSubFrame(true);
  descriptor.SetLastPacketInSubFrame(true);
  ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
      descriptor));

  memcpy(rtp_packet.SetPayloadSize(payload.size()), payload.data(),
         payload.size());
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());

  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}
969
// The generic frame descriptor carries a 16-bit frame id; verify the
// receiver unwraps it into a monotonically increasing picture id across the
// 0xffff -> 0x0000 wrap.
TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
  const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
  // NOTE(review): shadows the kPayloadType constant used by the other tests
  // in this fixture.
  const int kPayloadType = 123;

  VideoCodec codec;
  rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {},
                                              /*raw_payload=*/true);
  rtp_video_stream_receiver_->StartReceive();
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);

  uint16_t rtp_sequence_number = 1;
  // Builds and injects a single-packet frame whose descriptor carries
  // |wrapped_frame_id|; consecutive calls use increasing sequence numbers.
  auto inject_packet = [&](uint16_t wrapped_frame_id) {
    RtpPacketReceived rtp_packet(&extension_map);

    RtpGenericFrameDescriptor generic_descriptor;
    generic_descriptor.SetFirstPacketInSubFrame(true);
    generic_descriptor.SetLastPacketInSubFrame(true);
    generic_descriptor.SetFrameId(wrapped_frame_id);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
        generic_descriptor));

    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(kPayloadType);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  };

  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  inject_packet(/*wrapped_frame_id=*/0xffff);

  // 0x0002 is three frames after 0xffff once the 16-bit id is unwrapped.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 3);
      });
  inject_packet(/*wrapped_frame_id=*/0x0002);
}
1017
// Fixture for tests exercising the DependencyDescriptor RTP header
// extension: registers a raw-payload codec and the extension, and starts
// the receiver.
class RtpVideoStreamReceiver2DependencyDescriptorTest
    : public RtpVideoStreamReceiver2Test {
 public:
  RtpVideoStreamReceiver2DependencyDescriptorTest() {
    VideoCodec codec;
    rtp_video_stream_receiver_->AddReceiveCodec(payload_type_, codec, {},
                                                /*raw_payload=*/true);
    extension_map_.Register<RtpDependencyDescriptorExtension>(7);
    rtp_video_stream_receiver_->StartReceive();
  }

  // Returns some valid structure for the DependencyDescriptors.
  // First template of that structure always fit for a key frame.
  static FrameDependencyStructure CreateStreamStructure() {
    FrameDependencyStructure stream_structure;
    stream_structure.num_decode_targets = 1;
    stream_structure.templates = {
        FrameDependencyTemplate().Dtis("S"),
        FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
    };
    return stream_structure;
  }

  // Injects a single marker packet carrying |dependency_descriptor|
  // (serialized against |stream_structure|) with a fixed 5-byte payload,
  // and sets that payload as the expected bitstream.
  void InjectPacketWith(const FrameDependencyStructure& stream_structure,
                        const DependencyDescriptor& dependency_descriptor) {
    const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
    RtpPacketReceived rtp_packet(&extension_map_);
    ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
        stream_structure, dependency_descriptor));
    uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
    ASSERT_TRUE(payload);
    memcpy(payload, data.data(), data.size());
    mock_on_complete_frame_callback_.ClearExpectedBitstream();
    mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                             data.size());
    rtp_packet.SetMarker(true);
    rtp_packet.SetPayloadType(payload_type_);
    rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
    rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
  }

 private:
  const int payload_type_ = 123;
  RtpHeaderExtensionMap extension_map_;
  // Arbitrary starting sequence number; incremented for each injected packet.
  uint16_t rtp_sequence_number_ = 321;
};
1064
// The dependency-descriptor frame number is 16 bits; verify the receiver
// unwraps it correctly even when delta frames arrive reordered across the
// 0xffff -> 0x0000 wrap.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) {
  FrameDependencyStructure stream_structure = CreateStreamStructure();

  DependencyDescriptor keyframe_descriptor;
  keyframe_descriptor.attached_structure =
      std::make_unique<FrameDependencyStructure>(stream_structure);
  keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
  keyframe_descriptor.frame_number = 0xfff0;
  // DependencyDescriptor doesn't support reordering delta frame before
  // keyframe. Thus feed a key frame first, then test reordered delta frames.
  int64_t first_picture_id;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        first_picture_id = frame->id.picture_id;
      });
  InjectPacketWith(stream_structure, keyframe_descriptor);

  DependencyDescriptor deltaframe1_descriptor;
  deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe1_descriptor.frame_number = 0xfffe;

  DependencyDescriptor deltaframe2_descriptor;
  // Bug fix: this previously re-assigned |deltaframe1_descriptor|'s
  // dependencies, leaving |deltaframe2_descriptor.frame_dependencies|
  // default-constructed.
  deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
  deltaframe2_descriptor.frame_number = 0x0002;

  // Parser should unwrap frame ids correctly even if packets were reordered by
  // the network.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0x0002 - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 18);
      })
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        // 0xfffe - 0xfff0
        EXPECT_EQ(frame->id.picture_id - first_picture_id, 14);
      });
  InjectPacketWith(stream_structure, deltaframe2_descriptor);
  InjectPacketWith(stream_structure, deltaframe1_descriptor);
}
1104
// Once a newer key frame has switched the stream to a new dependency
// structure, a late delta frame referring to the old structure is dropped.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure structure1 = CreateStreamStructure();
  FrameDependencyStructure structure2 = CreateStreamStructure();
  // Keep the template ids of the two structures disjoint by offsetting the
  // second structure_id (it also serves as the template id offset).
  structure1.structure_id = 13;
  structure2.structure_id =
      structure1.structure_id + structure1.templates.size();

  DependencyDescriptor key_frame1;
  key_frame1.attached_structure =
      std::make_unique<FrameDependencyStructure>(structure1);
  key_frame1.frame_dependencies = structure1.templates[0];
  key_frame1.frame_number = 1;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(structure1, key_frame1);

  // A second key frame switches the receiver to the new structure.
  DependencyDescriptor key_frame2;
  key_frame2.attached_structure =
      std::make_unique<FrameDependencyStructure>(structure2);
  key_frame2.frame_dependencies = structure2.templates[0];
  key_frame2.frame_number = 3;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
  InjectPacketWith(structure2, key_frame2);

  // A late delta frame still using the old structure must not produce a
  // complete frame.
  DependencyDescriptor late_delta;
  late_delta.frame_dependencies = structure1.templates[0];
  late_delta.frame_number = 2;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
  InjectPacketWith(structure1, late_delta);
}
1139
// A key frame that arrives after a newer key frame is dropped, and must not
// block delta frames that depend on the newer structure.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
       DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
  FrameDependencyStructure structure1 = CreateStreamStructure();
  FrameDependencyStructure structure2 = CreateStreamStructure();
  // Keep the template ids of the two structures disjoint by offsetting the
  // second structure_id (it also serves as the template id offset).
  structure1.structure_id = 13;
  structure2.structure_id =
      structure1.structure_id + structure1.templates.size();

  DependencyDescriptor key_frame1;
  key_frame1.attached_structure =
      std::make_unique<FrameDependencyStructure>(structure1);
  key_frame1.frame_dependencies = structure1.templates[0];
  key_frame1.frame_number = 1;

  DependencyDescriptor key_frame2;
  key_frame2.attached_structure =
      std::make_unique<FrameDependencyStructure>(structure2);
  key_frame2.frame_dependencies = structure2.templates[0];
  key_frame2.frame_number = 3;

  // Deliver the newer key frame first; only it should complete.
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3);
      });
  InjectPacketWith(structure2, key_frame2);
  InjectPacketWith(structure1, key_frame1);

  // A delta frame based on the newer structure is not blocked by the late
  // key frame.
  DependencyDescriptor delta_frame;
  delta_frame.frame_dependencies = structure2.templates[0];
  delta_frame.frame_number = 4;
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
      .WillOnce([&](video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4);
      });
  InjectPacketWith(structure2, delta_frame);
}
1180
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
using RtpVideoStreamReceiver2DeathTest = RtpVideoStreamReceiver2Test;
// Adding the same secondary sink twice trips a DCHECK.
TEST_F(RtpVideoStreamReceiver2DeathTest, RepeatedSecondarySinkDisallowed) {
  MockRtpPacketSink sink;

  rtp_video_stream_receiver_->AddSecondarySink(&sink);
  EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&sink), "");

  // Tear down.
  rtp_video_stream_receiver_->RemoveSecondarySink(&sink);
}
#endif
1194
// When a frame transformer is supplied, received frames are handed to it;
// the transformed-frame sink callback is registered with the remote SSRC on
// construction and unregistered on destruction.
TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
  rtc::scoped_refptr<MockFrameTransformer> transformer =
      new rtc::RefCountedObject<testing::NiceMock<MockFrameTransformer>>();
  EXPECT_CALL(*transformer,
              RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
  auto receiver = std::make_unique<RtpVideoStreamReceiver2>(
      TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
      nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
      nullptr, process_thread_.get(), &mock_nack_sender_, nullptr,
      &mock_on_complete_frame_callback_, nullptr, transformer);

  VideoCodec codec;
  codec.codecType = kVideoCodecGeneric;
  receiver->AddReceiveCodec(kPayloadType, codec, {}, /*raw_payload=*/false);

  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpPacketReceived packet;
  packet.SetPayloadType(kPayloadType);
  packet.SetSequenceNumber(1);
  RTPVideoHeader header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(*transformer, Transform(_));
  receiver->OnReceivedPayloadData(payload, packet, header);

  // Destroying the receiver must unregister the sink callback.
  EXPECT_CALL(*transformer,
              UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
  receiver.reset();
}
1225
// Test default behavior and when playout delay is overridden by field trial.
const VideoPlayoutDelay kTransmittedPlayoutDelay = {100, 200};
const VideoPlayoutDelay kForcedPlayoutDelay = {70, 90};
// Pairs a field-trial string with the playout delay the received frame is
// expected to end up with.
struct PlayoutDelayOptions {
  std::string field_trial;
  VideoPlayoutDelay expected_delay;
};
// No field trial: the transmitted delay is used unchanged.
const PlayoutDelayOptions kDefaultBehavior = {
    /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
// Field trial forces min/max delay regardless of the transmitted values.
const PlayoutDelayOptions kOverridePlayoutDelay = {
    /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
    /*expected_delay=*/kForcedPlayoutDelay};
1238
// Parameterized fixture: constructs the base fixture with the field-trial
// string taken from the test parameter.
class RtpVideoStreamReceiver2TestPlayoutDelay
    : public RtpVideoStreamReceiver2Test,
      public ::testing::WithParamInterface<PlayoutDelayOptions> {
 protected:
  RtpVideoStreamReceiver2TestPlayoutDelay()
      : RtpVideoStreamReceiver2Test(GetParam().field_trial) {}
};

INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
                         RtpVideoStreamReceiver2TestPlayoutDelay,
                         Values(kDefaultBehavior, kOverridePlayoutDelay));
1250
// The playout delay of the encoded frame should equal the transmitted
// playout delay, unless a field trial overrides it.
TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  RtpHeaderExtensionMap extension_map;
  extension_map.Register<PlayoutDelayLimits>(1);

  // Build an outgoing packet carrying the playout-delay extension, then
  // parse it back as a received packet.
  RtpPacketToSend send_packet(&extension_map);
  send_packet.SetPayloadType(kPayloadType);
  send_packet.SetSequenceNumber(1);
  EXPECT_TRUE(
      send_packet.SetExtension<PlayoutDelayLimits>(kTransmittedPlayoutDelay));
  memcpy(send_packet.AllocatePayload(payload.size()), payload.data(),
         payload.size());

  RtpPacketReceived received_packet(&extension_map);
  received_packet.Parse(send_packet.data(), send_packet.size());

  RTPVideoHeader video_header =
      GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
  mock_on_complete_frame_callback_.AppendExpectedBitstream(payload.data(),
                                                           payload.size());
  EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
      .WillOnce([expected = GetParam().expected_delay](
                    video_coding::EncodedFrame* frame) {
        EXPECT_EQ(frame->EncodedImage().playout_delay_, expected);
      });
  rtp_video_stream_receiver_->OnReceivedPayloadData(
      received_packet.PayloadBuffer(), received_packet, video_header);
}
1282
1283 } // namespace webrtc
1284