1 use futures::channel::{mpsc, oneshot};
2 use futures::executor::{block_on, block_on_stream};
3 use futures::future::{FutureExt, poll_fn};
4 use futures::stream::{Stream, StreamExt};
5 use futures::sink::{Sink, SinkExt};
6 use futures::task::{Context, Poll};
7 use futures::pin_mut;
8 use futures_test::task::{new_count_waker, noop_context};
9 use std::sync::{Arc, Mutex};
10 use std::sync::atomic::{AtomicUsize, Ordering};
11 use std::thread;
12
// Compile-time assertion that both channel endpoints are `Send`, i.e. may be
// moved across threads (relied on by the thread-spawning tests below).
trait AssertSend: Send {}
impl AssertSend for mpsc::Sender<i32> {}
impl AssertSend for mpsc::Receiver<i32> {}
16
#[test]
fn send_recv() {
    // One value pushed through a buffered channel is received after the
    // sender is dropped, and the stream then terminates.
    let (mut tx, rx) = mpsc::channel::<i32>(16);
    block_on(tx.send(1)).unwrap();
    drop(tx);

    let received: Vec<i32> = block_on(rx.collect());
    assert_eq!(received, [1]);
}
26
#[test]
fn send_recv_no_buffer() {
    // Drive a zero-capacity (rendezvous) channel entirely by hand with the
    // low-level Sink/Stream poll methods, all inside one task context.
    // Run on a task context
    block_on(poll_fn(move |cx| {
        let (tx, rx) = mpsc::channel::<i32>(0);
        pin_mut!(tx, rx);

        // A fresh, empty channel is already flushed and ready for one item.
        assert!(tx.as_mut().poll_flush(cx).is_ready());
        assert!(tx.as_mut().poll_ready(cx).is_ready());

        // Send first message
        assert!(tx.as_mut().start_send(1).is_ok());
        assert!(tx.as_mut().poll_ready(cx).is_pending());

        // poll_ready said Pending, so no room in buffer, therefore new sends
        // should get rejected with is_full.
        assert!(tx.as_mut().start_send(0).unwrap_err().is_full());
        assert!(tx.as_mut().poll_ready(cx).is_pending());

        // Take the value; receiving frees the slot, so the sender becomes
        // ready again.
        assert_eq!(rx.as_mut().poll_next(cx), Poll::Ready(Some(1)));
        assert!(tx.as_mut().poll_ready(cx).is_ready());

        // Send second message
        assert!(tx.as_mut().poll_ready(cx).is_ready());
        assert!(tx.as_mut().start_send(2).is_ok());
        assert!(tx.as_mut().poll_ready(cx).is_pending());

        // Take the value
        assert_eq!(rx.as_mut().poll_next(cx), Poll::Ready(Some(2)));
        assert!(tx.as_mut().poll_ready(cx).is_ready());

        Poll::Ready(())
    }));
}
62
#[test]
fn send_shared_recv() {
    // Two cloned senders feed values into one receiver.
    let (mut tx1, rx) = mpsc::channel::<i32>(16);
    let mut tx2 = tx1.clone();
    let mut rx = block_on_stream(rx);

    block_on(tx1.send(1)).unwrap();
    assert_eq!(rx.next(), Some(1));

    block_on(tx2.send(2)).unwrap();
    assert_eq!(rx.next(), Some(2));
}
75
#[test]
fn send_recv_threads() {
    // A value sent from a spawned thread arrives on the main thread.
    let (mut tx, rx) = mpsc::channel::<i32>(16);

    let sender = thread::spawn(move || block_on(tx.send(1)).unwrap());

    let received: Vec<i32> = block_on(rx.take(1).collect());
    assert_eq!(received, [1]);

    sender.join().unwrap();
}
89
#[test]
fn send_recv_threads_no_capacity() {
    // With zero capacity every send must rendezvous with a receive, so the
    // producer thread blocks until the consumer catches up.
    let (mut tx, rx) = mpsc::channel::<i32>(0);

    let producer = thread::spawn(move || {
        for value in 1..=2 {
            block_on(tx.send(value)).unwrap();
        }
    });

    let received: Vec<i32> = block_on(rx.collect());
    assert_eq!(received, [1, 2]);

    producer.join().unwrap();
}
104
#[test]
fn recv_close_gets_none() {
    let (mut tx, mut rx) = mpsc::channel::<i32>(10);

    // Drive both endpoints inside a single task context.
    block_on(poll_fn(move |cx| {
        rx.close();

        // A closed receiver yields end-of-stream immediately...
        assert_eq!(rx.poll_next_unpin(cx), Poll::Ready(None));
        // ...and the sender observes the disconnect rather than readiness.
        match tx.poll_ready(cx) {
            Poll::Ready(Err(e)) => assert!(e.is_disconnected()),
            _ => panic!(),
        }

        Poll::Ready(())
    }));
}
122
#[test]
fn tx_close_gets_none() {
    // Dropping the only sender (never bound) terminates the stream at once.
    let (_, mut rx) = mpsc::channel::<i32>(10);

    // Run on a task context so we can poll the receiver directly.
    block_on(poll_fn(move |cx| {
        let next = rx.poll_next_unpin(cx);
        assert_eq!(next, Poll::Ready(None));
        Poll::Ready(())
    }));
}
133
134 // #[test]
135 // fn spawn_sends_items() {
136 // let core = local_executor::Core::new();
137 // let stream = unfold(0, |i| Some(ok::<_,u8>((i, i + 1))));
138 // let rx = mpsc::spawn(stream, &core, 1);
139 // assert_eq!(core.run(rx.take(4).collect()).unwrap(),
140 // [0, 1, 2, 3]);
141 // }
142
143 // #[test]
144 // fn spawn_kill_dead_stream() {
145 // use std::thread;
146 // use std::time::Duration;
147 // use futures::future::Either;
148 // use futures::sync::oneshot;
149 //
150 // // a stream which never returns anything (maybe a remote end isn't
151 // // responding), but dropping it leads to observable side effects
152 // // (like closing connections, releasing limited resources, ...)
153 // #[derive(Debug)]
154 // struct Dead {
155 // // when dropped you should get Err(oneshot::Canceled) on the
156 // // receiving end
157 // done: oneshot::Sender<()>,
158 // }
159 // impl Stream for Dead {
160 // type Item = ();
161 // type Error = ();
162 //
163 // fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
164 // Ok(Poll::Pending)
165 // }
166 // }
167 //
168 // // need to implement a timeout for the test, as it would hang
169 // // forever right now
170 // let (timeout_tx, timeout_rx) = oneshot::channel();
171 // thread::spawn(move || {
172 // thread::sleep(Duration::from_millis(1000));
173 // let _ = timeout_tx.send(());
174 // });
175 //
176 // let core = local_executor::Core::new();
177 // let (done_tx, done_rx) = oneshot::channel();
178 // let stream = Dead{done: done_tx};
179 // let rx = mpsc::spawn(stream, &core, 1);
180 // let res = core.run(
181 // Ok::<_, ()>(())
182 // .into_future()
183 // .then(move |_| {
184 // // now drop the spawned stream: maybe some timeout exceeded,
185 // // or some connection on this end was closed by the remote
186 // // end.
187 // drop(rx);
188 // // and wait for the spawned stream to release its resources
189 // done_rx
190 // })
191 // .select2(timeout_rx)
192 // );
193 // match res {
194 // Err(Either::A((oneshot::Canceled, _))) => (),
195 // _ => {
196 // panic!("dead stream wasn't canceled");
197 // },
198 // }
199 // }
200
#[test]
fn stress_shared_unbounded() {
    // Many producer threads hammer one unbounded channel; the consumer must
    // see exactly AMT * NTHREADS copies of `1`.
    const AMT: u32 = 10000;
    const NTHREADS: u32 = 8;
    let (tx, rx) = mpsc::unbounded::<i32>();

    // Consumer thread: collect everything, then check count and contents.
    let t = thread::spawn(move|| {
        let result: Vec<_> = block_on(rx.collect());
        assert_eq!(result.len(), (AMT * NTHREADS) as usize);
        for item in result {
            assert_eq!(item, 1);
        }
    });

    for _ in 0..NTHREADS {
        let tx = tx.clone();

        thread::spawn(move|| {
            for _ in 0..AMT {
                tx.unbounded_send(1).unwrap();
            }
        });
    }

    // Drop the original sender so the stream ends once all producers finish.
    drop(tx);

    // FIX: was `t.join().ok().unwrap()`, which discards the panic payload;
    // plain `unwrap()` preserves it and matches the other stress tests.
    t.join().unwrap();
}
229
#[test]
fn stress_shared_bounded_hard() {
    // Same shape as the unbounded stress test, but through a zero-capacity
    // channel so every single send must rendezvous with the consumer.
    const AMT: u32 = 10000;
    const NTHREADS: u32 = 8;
    let (tx, rx) = mpsc::channel::<i32>(0);

    let consumer = thread::spawn(move || {
        let items: Vec<_> = block_on(rx.collect());
        assert_eq!(items.len(), (AMT * NTHREADS) as usize);
        for item in items {
            assert_eq!(item, 1);
        }
    });

    for _ in 0..NTHREADS {
        let mut producer_tx = tx.clone();
        thread::spawn(move || {
            for _ in 0..AMT {
                block_on(producer_tx.send(1)).unwrap();
            }
        });
    }

    // Release the original handle so the stream can terminate.
    drop(tx);

    consumer.join().unwrap();
}
258
#[test]
fn stress_receiver_multi_task_bounded_hard() {
    // Several threads take turns pulling from a single shared receiver
    // (guarded by a mutex), mixing blocking reads with raw one-shot polls.
    // The total received must equal the total sent.
    const AMT: usize = 10_000;
    const NTHREADS: u32 = 2;

    let (mut tx, rx) = mpsc::channel::<usize>(0);
    // `Option` lets whichever thread observes end-of-stream retire the
    // receiver for everyone.
    let rx = Arc::new(Mutex::new(Some(rx)));
    let n = Arc::new(AtomicUsize::new(0));

    let mut th = vec![];

    for _ in 0..NTHREADS {
        let rx = rx.clone();
        let n = n.clone();

        let t = thread::spawn(move || {
            let mut i = 0;

            loop {
                i += 1;
                let mut rx_opt = rx.lock().unwrap();
                if let Some(rx) = &mut *rx_opt {
                    if i % 5 == 0 {
                        // Every fifth iteration: block until an item arrives
                        // (or the stream ends).
                        let item = block_on(rx.next());

                        if item.is_none() {
                            *rx_opt = None;
                            break;
                        }

                        n.fetch_add(1, Ordering::Relaxed);
                    } else {
                        // Otherwise: a single non-blocking poll with a no-op
                        // waker; Pending just means try again next iteration.
                        // FIX: removed a redundant `let n = n.clone();` here —
                        // the Arc cloned outside the thread is already in scope,
                        // so cloning it again every iteration was pure waste.
                        match rx.poll_next_unpin(&mut noop_context()) {
                            Poll::Ready(Some(_)) => {
                                n.fetch_add(1, Ordering::Relaxed);
                            }
                            Poll::Ready(None) => {
                                *rx_opt = None;
                                break
                            },
                            Poll::Pending => {},
                        }
                    }
                } else {
                    // Another thread already drained the channel.
                    break;
                }
            }
        });

        th.push(t);
    }

    // Producer runs on the main thread.
    for i in 0..AMT {
        block_on(tx.send(i)).unwrap();
    }
    drop(tx);

    for t in th {
        t.join().unwrap();
    }

    assert_eq!(AMT, n.load(Ordering::Relaxed));
}
325
/// Stress test that receiver properly receives all the messages
/// after sender dropped.
#[test]
fn stress_drop_sender() {
    // Build a fresh channel whose sender lives on a short-lived thread.
    fn list() -> impl Stream<Item = i32> {
        let (tx, rx) = mpsc::channel(1);
        thread::spawn(move || block_on(send_one_two_three(tx)));
        rx
    }

    for _ in 0..10000 {
        let collected: Vec<_> = block_on(list().collect());
        assert_eq!(collected, vec![1, 2, 3]);
    }
}
343
send_one_two_three(mut tx: mpsc::Sender<i32>)344 async fn send_one_two_three(mut tx: mpsc::Sender<i32>) {
345 for i in 1..=3 {
346 tx.send(i).await.unwrap();
347 }
348 }
349
350 /// Stress test that after receiver dropped,
351 /// no messages are lost.
stress_close_receiver_iter()352 fn stress_close_receiver_iter() {
353 let (tx, rx) = mpsc::unbounded();
354 let mut rx = block_on_stream(rx);
355 let (unwritten_tx, unwritten_rx) = std::sync::mpsc::channel();
356 let th = thread::spawn(move || {
357 for i in 1.. {
358 if tx.unbounded_send(i).is_err() {
359 unwritten_tx.send(i).expect("unwritten_tx");
360 return;
361 }
362 }
363 });
364
365 // Read one message to make sure thread effectively started
366 assert_eq!(Some(1), rx.next());
367
368 rx.close();
369
370 for i in 2.. {
371 match rx.next() {
372 Some(r) => assert!(i == r),
373 None => {
374 let unwritten = unwritten_rx.recv().expect("unwritten_rx");
375 assert_eq!(unwritten, i);
376 th.join().unwrap();
377 return;
378 }
379 }
380 }
381 }
382
#[test]
fn stress_close_receiver() {
    // Repeat the close/drain race many times to shake out lost messages.
    for _ in 0..10000 {
        stress_close_receiver_iter();
    }
}
389
stress_poll_ready_sender(mut sender: mpsc::Sender<u32>, count: u32)390 async fn stress_poll_ready_sender(mut sender: mpsc::Sender<u32>, count: u32) {
391 for i in (1..=count).rev() {
392 sender.send(i).await.unwrap();
393 }
394 }
395
/// Tests that after `poll_ready` indicates capacity a channel can always send without waiting.
#[test]
fn stress_poll_ready() {
    const AMT: u32 = 1000;
    const NTHREADS: u32 = 8;

    /// Run a stress test using the specified channel capacity.
    fn stress(capacity: usize) {
        let (tx, rx) = mpsc::channel(capacity);
        let threads: Vec<_> = (0..NTHREADS)
            .map(|_| {
                let sender = tx.clone();
                thread::spawn(move || block_on(stress_poll_ready_sender(sender, AMT)))
            })
            .collect();
        // Drop the original handle so `collect` below can terminate.
        drop(tx);

        let result: Vec<_> = block_on(rx.collect());
        assert_eq!(result.len() as u32, AMT * NTHREADS);

        for thread in threads {
            thread.join().unwrap();
        }
    }

    // Exercise rendezvous, tiny, and comfortable capacities.
    for &capacity in &[0, 1, 8, 16] {
        stress(capacity);
    }
}
427
#[test]
fn try_send_1() {
    // Spin on `try_send` against a rendezvous channel; the receiver must
    // still observe every value exactly once and in order.
    const N: usize = 3000;
    let (mut tx, rx) = mpsc::channel(0);

    let producer = thread::spawn(move || {
        for i in 0..N {
            // Busy-retry until the receiver makes room.
            while tx.try_send(i).is_err() {}
        }
    });

    let result: Vec<_> = block_on(rx.collect());
    for (expected, actual) in result.into_iter().enumerate() {
        assert_eq!(expected, actual);
    }

    producer.join().unwrap();
}
450
#[test]
fn try_send_2() {
    // A sender whose slot is occupied must report Pending from `poll_ready`,
    // then succeed with a blocking send once the receiver drains the slot.
    let (mut tx, rx) = mpsc::channel(0);
    let mut rx = block_on_stream(rx);

    // Occupy the sender's only slot.
    tx.try_send("hello").unwrap();

    // Handshake channel: dropping `readytx` signals (via cancellation) that
    // the spawned thread has finished its Pending check.
    let (readytx, readyrx) = oneshot::channel::<()>();

    let th = thread::spawn(move || {
        block_on(poll_fn(|cx| {
            assert!(tx.poll_ready(cx).is_pending());
            Poll::Ready(())
        }));

        drop(readytx);
        // This blocks until the main thread receives "hello".
        block_on(tx.send("goodbye")).unwrap();
    });

    // Wait for the Pending check before draining, so the order of
    // observations is deterministic.
    let _ = block_on(readyrx);
    assert_eq!(rx.next(), Some("hello"));
    assert_eq!(rx.next(), Some("goodbye"));
    assert_eq!(rx.next(), None);

    th.join().unwrap();
}
477
#[test]
fn try_send_fail() {
    let (mut tx, rx) = mpsc::channel(0);
    let mut rx = block_on_stream(rx);

    tx.try_send("hello").unwrap();

    // The only slot is occupied, so this send is rejected...
    let rejected = tx.try_send("fail");
    assert!(rejected.is_err());

    // ...until the receiver drains the slot.
    assert_eq!(rx.next(), Some("hello"));

    tx.try_send("goodbye").unwrap();
    drop(tx);

    assert_eq!(rx.next(), Some("goodbye"));
    assert_eq!(rx.next(), None);
}
496
#[test]
fn try_send_recv() {
    // `channel(1)` accepts two `try_send`s before reporting full (as the
    // asserts below demonstrate); draining empties it completely.
    let (mut tx, mut rx) = mpsc::channel(1);
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    assert!(tx.try_send("hello").is_err()); // should be full
    rx.try_next().unwrap();
    rx.try_next().unwrap();
    assert!(rx.try_next().is_err()); // should be empty
    // After draining, the cycle works again.
    tx.try_send("hello").unwrap();
    rx.try_next().unwrap();
    assert!(rx.try_next().is_err()); // should be empty
}
510
#[test]
fn same_receiver() {
    let (mut txa1, _) = mpsc::channel::<i32>(1);
    let txa2 = txa1.clone();

    let (mut txb1, _) = mpsc::channel::<i32>(1);
    let txb2 = txb1.clone();

    // Clones of one sender share a receiver; distinct channels do not.
    assert!(txa1.same_receiver(&txa2));
    assert!(txb1.same_receiver(&txb2));
    assert!(!txa1.same_receiver(&txb1));

    txa1.disconnect();
    txb1.close_channel();

    // `disconnect` detaches the sender from its receiver, while
    // `close_channel` closes the channel but leaves the association intact.
    assert!(!txa1.same_receiver(&txa2));
    assert!(txb1.same_receiver(&txb2));
}
529
#[test]
fn hash_receiver() {
    use std::hash::Hasher;
    use std::collections::hash_map::DefaultHasher;

    // Hash of the receiver a sender is attached to. The original repeated
    // this three-line hasher boilerplate eight times; a helper keeps every
    // comparison to a single readable line.
    fn receiver_hash(tx: &mpsc::Sender<i32>) -> u64 {
        let mut hasher = DefaultHasher::new();
        tx.hash_receiver(&mut hasher);
        hasher.finish()
    }

    let (mut txa1, _) = mpsc::channel::<i32>(1);
    let txa2 = txa1.clone();

    let (mut txb1, _) = mpsc::channel::<i32>(1);
    let txb2 = txb1.clone();

    // Clones of one sender hash to the same receiver; distinct channels differ.
    assert_eq!(receiver_hash(&txa1), receiver_hash(&txa2));
    assert_eq!(receiver_hash(&txb1), receiver_hash(&txb2));
    assert!(receiver_hash(&txa1) != receiver_hash(&txb1));

    txa1.disconnect();
    txb1.close_channel();

    // Disconnecting changes the sender's receiver hash; merely closing the
    // channel does not.
    assert!(receiver_hash(&txa1) != receiver_hash(&txa2));
    assert_eq!(receiver_hash(&txb1), receiver_hash(&txb2));
}
578
#[test]
fn send_backpressure() {
    let (waker, counter) = new_count_waker();
    let mut cx = Context::from_waker(&waker);

    // Fill the channel so the next send has to park its task.
    let (mut tx, mut rx) = mpsc::channel(1);
    block_on(tx.send(1)).unwrap();

    let mut send_fut = tx.send(2);
    assert_eq!(send_fut.poll_unpin(&mut cx), Poll::Pending);
    assert_eq!(counter, 0);

    // Receiving frees a slot and wakes the parked sender exactly once.
    assert_eq!(block_on(rx.next()), Some(1));
    assert_eq!(counter, 1);
    assert_eq!(send_fut.poll_unpin(&mut cx), Poll::Ready(Ok(())));

    assert_eq!(block_on(rx.next()), Some(2));
}
599
#[test]
fn send_backpressure_multi_senders() {
    let (waker, counter) = new_count_waker();
    let mut cx = Context::from_waker(&waker);

    let (mut tx1, mut rx) = mpsc::channel(1);
    let mut tx2 = tx1.clone();
    // tx1 fills the buffer, so tx2's send must park.
    block_on(tx1.send(1)).unwrap();

    let mut send_fut = tx2.send(2);
    assert_eq!(send_fut.poll_unpin(&mut cx), Poll::Pending);
    assert_eq!(counter, 0);

    // Draining one item wakes the sender parked on the *other* handle.
    assert_eq!(block_on(rx.next()), Some(1));
    assert_eq!(counter, 1);
    assert_eq!(send_fut.poll_unpin(&mut cx), Poll::Ready(Ok(())));

    assert_eq!(block_on(rx.next()), Some(2));
}
621