1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Multi-producer, single-consumer FIFO queue communication primitives.
13 //! This module provides message-based communication over channels, concretely
14 //! defined among three types:
20 //! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
21 //! senders are clone-able (multi-producer) such that many threads can send
22 //! simultaneously to one receiver (single-consumer).
24 //! These channels come in two flavors:
26 //! 1. An asynchronous, infinitely buffered channel. The `channel()` function
27 //! will return a `(Sender, Receiver)` tuple where all sends will be
28 //! **asynchronous** (they never block). The channel conceptually has an
31 //! 2. A synchronous, bounded channel. The `sync_channel()` function will return
32 //! a `(SyncSender, Receiver)` tuple where the storage for pending messages
33 //! is a pre-allocated buffer of a fixed size. All sends will be
34 //! **synchronous** by blocking until there is buffer space available. Note
35 //! that a bound of 0 is allowed, causing the channel to become a
36 //! "rendezvous" channel where each sender atomically hands off a message to
41 //! The send and receive operations on channels will all return a `Result`
42 //! indicating whether the operation succeeded or not. An unsuccessful operation
43 //! is normally indicative of the other half of a channel having "hung up" by
44 //! being dropped in its corresponding thread.
46 //! Once half of a channel has been deallocated, most operations can no longer
47 //! continue to make progress, so `Err` will be returned. Many applications will
48 //! continue to `unwrap()` the results returned from this module, instigating a
49 //! propagation of failure among threads if one unexpectedly dies.
57 //! use std::sync::mpsc::channel;
59 //! // Create a simple streaming channel
60 //! let (tx, rx) = channel();
61 //! thread::spawn(move|| {
62 //! tx.send(10).unwrap();
64 //! assert_eq!(rx.recv().unwrap(), 10);
71 //! use std::sync::mpsc::channel;
73 //! // Create a shared channel that can be sent along from many threads
74 //! // where tx is the sending half (tx for transmission), and rx is the receiving
75 //! // half (rx for receiving).
76 //! let (tx, rx) = channel();
78 //! let tx = tx.clone();
79 //! thread::spawn(move|| {
80 //! tx.send(i).unwrap();
85 //! let j = rx.recv().unwrap();
86 //! assert!(0 <= j && j < 10);
90 //! Propagating panics:
93 //! use std::sync::mpsc::channel;
95 //! // The call to recv() will return an error because the channel has already
96 //! // hung up (or been deallocated)
97 //! let (tx, rx) = channel::<i32>();
99 //! assert!(rx.recv().is_err());
102 //! Synchronous channels:
106 //! use std::sync::mpsc::sync_channel;
108 //! let (tx, rx) = sync_channel::<i32>(0);
109 //! thread::spawn(move|| {
110 //! // This will wait for the parent thread to start receiving
111 //! tx.send(53).unwrap();
113 //! rx.recv().unwrap();
116 #![stable(feature = "rust1", since = "1.0.0")]
118 // A description of how Rust's channel implementation works
120 // Channels are supposed to be the basic building block for all other
121 // concurrent primitives that are used in Rust. As a result, the channel type
122 // needs to be highly optimized, flexible, and broad enough for use everywhere.
124 // The choice of implementation of all channels is to be built on lock-free data
125 // structures. The channels themselves are then consequently also lock-free data
126 // structures. As always with lock-free code, this is a very "here be dragons"
127 // territory, especially because I'm unaware of any academic papers that have
128 // gone into great length about channels of these flavors.
130 // ## Flavors of channels
132 // From the perspective of a consumer of this library, there is only one flavor
133 // of channel. This channel can be used as a stream and cloned to allow multiple
134 // senders. Under the hood, however, there are actually three flavors of
137 // * Flavor::Oneshots - these channels are highly optimized for the one-send use case.
138 // They contain as few atomics as possible and involve one and
139 // exactly one allocation.
140 // * Streams - these channels are optimized for the non-shared use case. They
141 // use a different concurrent queue that is more tailored for this
142 // use case. The initial allocation of this flavor of channel is not
144 // * Shared - this is the most general form of channel that this module offers,
145 // a channel with multiple senders. This type is as optimized as it
146 // can be, but the previous two types mentioned are much faster for
149 // ## Concurrent queues
151 // The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
152 // recv() obviously blocks. This means that under the hood there must be some
153 // shared and concurrent queue holding all of the actual data.
155 // With two flavors of channels, two flavors of queues are also used. We have
156 // chosen to use queues from a well-known author that are abbreviated as SPSC
157 // and MPSC (single producer, single consumer and multiple producer, single
158 // consumer). SPSC queues are used for streams while MPSC queues are used for
161 // ### SPSC optimizations
163 // The SPSC queue found online is essentially a linked list of nodes where one
164 // half of the nodes are the "queue of data" and the other half of nodes are a
165 // cache of unused nodes. The unused nodes are used such that an allocation is
166 // not required on every push() and a free doesn't need to happen on every
169 // As found online, however, the cache of nodes is of an infinite size. This
170 // means that if a channel at one point in its life had 50k items in the queue,
171 // then the queue will always have the capacity for 50k items. I believed that
172 // this was an unnecessary limitation of the implementation, so I have altered
173 // the queue to optionally have a bound on the cache size.
175 // By default, streams will have an unbounded SPSC queue with a small-ish cache
176 // size. The hope is that the cache is still large enough to have very fast
177 // send() operations while not too large such that millions of channels can
180 // ### MPSC optimizations
182 // Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
183 // a linked list under the hood to earn its unboundedness, but I have not put
184 // forth much effort into having a cache of nodes similar to the SPSC queue.
186 // For now, I believe that this is "ok" because shared channels are not the most
187 // common type, but soon we may wish to revisit this queue choice and determine
188 // another candidate for backend storage of shared channels.
190 // ## Overview of the Implementation
192 // Now that there's a little background on the concurrent queues used, it's
193 // worth going into much more detail about the channels themselves. The basic
194 // pseudocode for a send/recv are:
198 // queue.push(t) return if queue.pop()
199 // if increment() == -1 deschedule {
200 // wakeup() if decrement() > 0
201 // cancel_deschedule()
205 // As mentioned before, there are no locks in this implementation, only atomic
206 // instructions are used.
208 // ### The internal atomic counter
210 // Every channel has a shared counter with each half to keep track of the size
211 // of the queue. This counter is used to abort descheduling by the receiver and
212 // to know when to wake up on the sending side.
214 // As seen in the pseudocode, senders will increment this count and receivers
215 // will decrement the count. The theory behind this is that if a sender sees a
216 // -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
217 // then it doesn't need to block.
219 // The recv() method has a beginning call to pop(), and if successful, it needs
220 // to decrement the count. It is a crucial implementation detail that this
221 // decrement does *not* happen to the shared counter. If this were the case,
222 // then it would be possible for the counter to be very negative when there were
223 // no receivers waiting, in which case the senders would have to determine when
224 // it was actually appropriate to wake up a receiver.
226 // Instead, the "steal count" is kept track of separately (not atomically
227 // because it's only used by receivers), and then the decrement() call when
228 // descheduling will lump in all of the recent steals into one large decrement.
230 // The implication of this is that if a sender sees a -1 count, then there's
231 // guaranteed to be a waiter waiting!
233 // ## Native Implementation
235 // A major goal of these channels is to work seamlessly on and off the runtime.
236 // All of the previous race conditions have been worded in terms of
237 // scheduler-isms (which is obviously not available without the runtime).
239 // For now, native usage of channels (off the runtime) will fall back onto
240 // mutexes/cond vars for descheduling/atomic decisions. The no-contention path
241 // is still entirely lock-free, the "deschedule" blocks above are surrounded by
242 // a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
243 // condition variable.
247 // Being able to support selection over channels has greatly influenced this
248 // design, and not only does selection need to work inside the runtime, but also
249 // outside the runtime.
251 // The implementation is fairly straightforward. The goal of select() is not to
252 // return some data, but only to return which channel can receive data without
253 // blocking. The implementation is essentially the entire blocking procedure
254 // followed by an increment as soon as its woken up. The cancellation procedure
255 // involves an increment and swapping out of to_wake to acquire ownership of the
256 // thread to unblock.
258 // Sadly this current implementation requires multiple allocations, so I have
259 // seen the throughput of select() be much worse than it should be. I do not
260 // believe that there is anything fundamental that needs to change about these
261 // channels, however, in order to support a more efficient select().
265 // And now that you've seen all the races that I found and attempted to fix,
266 // here's the code for you to find some more!
272 use cell
::UnsafeCell
;
275 pub use self::select
::{Select, Handle}
;
276 use self::select
::StartResult
;
277 use self::select
::StartResult
::*;
278 use self::blocking
::SignalToken
;
289 /// The receiving-half of Rust's channel type. This half can only be owned by
291 #[stable(feature = "rust1", since = "1.0.0")]
292 pub struct Receiver
<T
> {
293 inner
: UnsafeCell
<Flavor
<T
>>,
296 // The receiver port can be sent from place to place, so long as it
297 // is not used to receive non-sendable things.
298 unsafe impl<T
: Send
> Send
for Receiver
<T
> { }
300 /// An iterator over messages on a receiver, this iterator will block
301 /// whenever `next` is called, waiting for a new message, and `None` will be
302 /// returned when the corresponding channel has hung up.
303 #[stable(feature = "rust1", since = "1.0.0")]
304 pub struct Iter
<'a
, T
: 'a
> {
308 /// An owning iterator over messages on a receiver, this iterator will block
309 /// whenever `next` is called, waiting for a new message, and `None` will be
310 /// returned when the corresponding channel has hung up.
311 #[stable(feature = "receiver_into_iter", since = "1.1.0")]
312 pub struct IntoIter
<T
> {
316 /// The sending-half of Rust's asynchronous channel type. This half can only be
317 /// owned by one thread, but it can be cloned to send to other threads.
318 #[stable(feature = "rust1", since = "1.0.0")]
319 pub struct Sender
<T
> {
320 inner
: UnsafeCell
<Flavor
<T
>>,
323 // The send port can be sent from place to place, so long as it
324 // is not used to send non-sendable things.
325 unsafe impl<T
: Send
> Send
for Sender
<T
> { }
327 /// The sending-half of Rust's synchronous channel type. This half can only be
328 /// owned by one thread, but it can be cloned to send to other threads.
329 #[stable(feature = "rust1", since = "1.0.0")]
330 pub struct SyncSender
<T
> {
331 inner
: Arc
<UnsafeCell
<sync
::Packet
<T
>>>,
334 unsafe impl<T
: Send
> Send
for SyncSender
<T
> {}
336 impl<T
> !Sync
for SyncSender
<T
> {}
338 /// An error returned from the `send` function on channels.
340 /// A `send` operation can only fail if the receiving end of a channel is
341 /// disconnected, implying that the data could never be received. The error
342 /// contains the data being sent as a payload so it can be recovered.
343 #[stable(feature = "rust1", since = "1.0.0")]
344 #[derive(PartialEq, Eq, Clone, Copy)]
345 pub struct SendError
<T
>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
347 /// An error returned from the `recv` function on a `Receiver`.
349 /// The `recv` operation can only fail if the sending half of a channel is
350 /// disconnected, implying that no further messages will ever be received.
351 #[derive(PartialEq, Eq, Clone, Copy, Debug)]
352 #[stable(feature = "rust1", since = "1.0.0")]
353 pub struct RecvError
;
355 /// This enumeration is the list of the possible reasons that `try_recv` could
356 /// not return data when called.
357 #[derive(PartialEq, Eq, Clone, Copy, Debug)]
358 #[stable(feature = "rust1", since = "1.0.0")]
359 pub enum TryRecvError
{
360 /// This channel is currently empty, but the sender(s) have not yet
361 /// disconnected, so data may yet become available.
362 #[stable(feature = "rust1", since = "1.0.0")]
365 /// This channel's sending half has become disconnected, and there will
366 /// never be any more data received on this channel
367 #[stable(feature = "rust1", since = "1.0.0")]
371 /// This enumeration is the list of the possible error outcomes for the
372 /// `SyncSender::try_send` method.
373 #[stable(feature = "rust1", since = "1.0.0")]
374 #[derive(PartialEq, Eq, Clone, Copy)]
375 pub enum TrySendError
<T
> {
376 /// The data could not be sent on the channel because it would require that
377 /// the callee block to send the data.
379 /// If this is a buffered channel, then the buffer is full at this time. If
380 /// this is not a buffered channel, then there is no receiver available to
381 /// acquire the data.
382 #[stable(feature = "rust1", since = "1.0.0")]
385 /// This channel's receiving half has disconnected, so the data could not be
386 /// sent. The data is returned back to the callee in this case.
387 #[stable(feature = "rust1", since = "1.0.0")]
392 Oneshot(Arc
<UnsafeCell
<oneshot
::Packet
<T
>>>),
393 Stream(Arc
<UnsafeCell
<stream
::Packet
<T
>>>),
394 Shared(Arc
<UnsafeCell
<shared
::Packet
<T
>>>),
395 Sync(Arc
<UnsafeCell
<sync
::Packet
<T
>>>),
399 trait UnsafeFlavor
<T
> {
400 fn inner_unsafe(&self) -> &UnsafeCell
<Flavor
<T
>>;
401 unsafe fn inner_mut
<'a
>(&'a
self) -> &'a
mut Flavor
<T
> {
402 &mut *self.inner_unsafe().get()
404 unsafe fn inner
<'a
>(&'a
self) -> &'a Flavor
<T
> {
405 &*self.inner_unsafe().get()
408 impl<T
> UnsafeFlavor
<T
> for Sender
<T
> {
409 fn inner_unsafe(&self) -> &UnsafeCell
<Flavor
<T
>> {
413 impl<T
> UnsafeFlavor
<T
> for Receiver
<T
> {
414 fn inner_unsafe(&self) -> &UnsafeCell
<Flavor
<T
>> {
419 /// Creates a new asynchronous channel, returning the sender/receiver halves.
421 /// All data sent on the sender will become available on the receiver, and no
422 /// send will block the calling thread (this channel has an "infinite buffer").
427 /// use std::sync::mpsc::channel;
430 /// // tx is is the sending half (tx for transmission), and rx is the receiving
431 /// // half (rx for receiving).
432 /// let (tx, rx) = channel();
434 /// // Spawn off an expensive computation
435 /// thread::spawn(move|| {
436 /// # fn expensive_computation() {}
437 /// tx.send(expensive_computation()).unwrap();
440 /// // Do some useful work for awhile
442 /// // Let's see what that answer was
443 /// println!("{:?}", rx.recv().unwrap());
445 #[stable(feature = "rust1", since = "1.0.0")]
446 pub fn channel
<T
>() -> (Sender
<T
>, Receiver
<T
>) {
447 let a
= Arc
::new(UnsafeCell
::new(oneshot
::Packet
::new()));
448 (Sender
::new(Flavor
::Oneshot(a
.clone())), Receiver
::new(Flavor
::Oneshot(a
)))
451 /// Creates a new synchronous, bounded channel.
453 /// Like asynchronous channels, the `Receiver` will block until a message
454 /// becomes available. These channels differ greatly in the semantics of the
455 /// sender from asynchronous channels, however.
457 /// This channel has an internal buffer on which messages will be queued. When
458 /// the internal buffer becomes full, future sends will *block* waiting for the
459 /// buffer to open up. Note that a buffer size of 0 is valid, in which case this
460 /// becomes "rendezvous channel" where each send will not return until a recv
461 /// is paired with it.
463 /// As with asynchronous channels, all senders will panic in `send` if the
464 /// `Receiver` has been destroyed.
469 /// use std::sync::mpsc::sync_channel;
472 /// let (tx, rx) = sync_channel(1);
474 /// // this returns immediately
475 /// tx.send(1).unwrap();
477 /// thread::spawn(move|| {
478 /// // this will block until the previous message has been received
479 /// tx.send(2).unwrap();
482 /// assert_eq!(rx.recv().unwrap(), 1);
483 /// assert_eq!(rx.recv().unwrap(), 2);
485 #[stable(feature = "rust1", since = "1.0.0")]
486 pub fn sync_channel
<T
>(bound
: usize) -> (SyncSender
<T
>, Receiver
<T
>) {
487 let a
= Arc
::new(UnsafeCell
::new(sync
::Packet
::new(bound
)));
488 (SyncSender
::new(a
.clone()), Receiver
::new(Flavor
::Sync(a
)))
491 ////////////////////////////////////////////////////////////////////////////////
493 ////////////////////////////////////////////////////////////////////////////////
496 fn new(inner
: Flavor
<T
>) -> Sender
<T
> {
498 inner
: UnsafeCell
::new(inner
),
502 /// Attempts to send a value on this channel, returning it back if it could
505 /// A successful send occurs when it is determined that the other end of
506 /// the channel has not hung up already. An unsuccessful send would be one
507 /// where the corresponding receiver has already been deallocated. Note
508 /// that a return value of `Err` means that the data will never be
509 /// received, but a return value of `Ok` does *not* mean that the data
510 /// will be received. It is possible for the corresponding receiver to
511 /// hang up immediately after this function returns `Ok`.
513 /// This method will never block the current thread.
518 /// use std::sync::mpsc::channel;
520 /// let (tx, rx) = channel();
522 /// // This send is always successful
523 /// tx.send(1).unwrap();
525 /// // This send will fail because the receiver is gone
527 /// assert_eq!(tx.send(1).err().unwrap().0, 1);
529 #[stable(feature = "rust1", since = "1.0.0")]
530 pub fn send(&self, t
: T
) -> Result
<(), SendError
<T
>> {
531 let (new_inner
, ret
) = match *unsafe { self.inner() }
{
532 Flavor
::Oneshot(ref p
) => {
536 return (*p
).send(t
).map_err(SendError
);
539 Arc
::new(UnsafeCell
::new(stream
::Packet
::new()));
540 let rx
= Receiver
::new(Flavor
::Stream(a
.clone()));
541 match (*p
).upgrade(rx
) {
542 oneshot
::UpSuccess
=> {
543 let ret
= (*a
.get()).send(t
);
546 oneshot
::UpDisconnected
=> (a
, Err(t
)),
547 oneshot
::UpWoke(token
) => {
548 // This send cannot panic because the thread is
549 // asleep (we're looking at it), so the receiver
551 (*a
.get()).send(t
).ok().unwrap();
559 Flavor
::Stream(ref p
) => return unsafe {
560 (*p
.get()).send(t
).map_err(SendError
)
562 Flavor
::Shared(ref p
) => return unsafe {
563 (*p
.get()).send(t
).map_err(SendError
)
565 Flavor
::Sync(..) => unreachable
!(),
569 let tmp
= Sender
::new(Flavor
::Stream(new_inner
));
570 mem
::swap(self.inner_mut(), tmp
.inner_mut());
572 ret
.map_err(SendError
)
576 #[stable(feature = "rust1", since = "1.0.0")]
577 impl<T
> Clone
for Sender
<T
> {
578 fn clone(&self) -> Sender
<T
> {
579 let (packet
, sleeper
, guard
) = match *unsafe { self.inner() }
{
580 Flavor
::Oneshot(ref p
) => {
581 let a
= Arc
::new(UnsafeCell
::new(shared
::Packet
::new()));
583 let guard
= (*a
.get()).postinit_lock();
584 let rx
= Receiver
::new(Flavor
::Shared(a
.clone()));
585 match (*p
.get()).upgrade(rx
) {
587 oneshot
::UpDisconnected
=> (a
, None
, guard
),
588 oneshot
::UpWoke(task
) => (a
, Some(task
), guard
)
592 Flavor
::Stream(ref p
) => {
593 let a
= Arc
::new(UnsafeCell
::new(shared
::Packet
::new()));
595 let guard
= (*a
.get()).postinit_lock();
596 let rx
= Receiver
::new(Flavor
::Shared(a
.clone()));
597 match (*p
.get()).upgrade(rx
) {
599 stream
::UpDisconnected
=> (a
, None
, guard
),
600 stream
::UpWoke(task
) => (a
, Some(task
), guard
),
604 Flavor
::Shared(ref p
) => {
605 unsafe { (*p.get()).clone_chan(); }
606 return Sender
::new(Flavor
::Shared(p
.clone()));
608 Flavor
::Sync(..) => unreachable
!(),
612 (*packet
.get()).inherit_blocker(sleeper
, guard
);
614 let tmp
= Sender
::new(Flavor
::Shared(packet
.clone()));
615 mem
::swap(self.inner_mut(), tmp
.inner_mut());
617 Sender
::new(Flavor
::Shared(packet
))
621 #[stable(feature = "rust1", since = "1.0.0")]
622 impl<T
> Drop
for Sender
<T
> {
624 match *unsafe { self.inner_mut() }
{
625 Flavor
::Oneshot(ref mut p
) => unsafe { (*p.get()).drop_chan(); }
,
626 Flavor
::Stream(ref mut p
) => unsafe { (*p.get()).drop_chan(); }
,
627 Flavor
::Shared(ref mut p
) => unsafe { (*p.get()).drop_chan(); }
,
628 Flavor
::Sync(..) => unreachable
!(),
633 ////////////////////////////////////////////////////////////////////////////////
635 ////////////////////////////////////////////////////////////////////////////////
637 impl<T
> SyncSender
<T
> {
638 fn new(inner
: Arc
<UnsafeCell
<sync
::Packet
<T
>>>) -> SyncSender
<T
> {
639 SyncSender { inner: inner }
642 /// Sends a value on this synchronous channel.
644 /// This function will *block* until space in the internal buffer becomes
645 /// available or a receiver is available to hand off the message to.
647 /// Note that a successful send does *not* guarantee that the receiver will
648 /// ever see the data if there is a buffer on this channel. Items may be
649 /// enqueued in the internal buffer for the receiver to receive at a later
650 /// time. If the buffer size is 0, however, it can be guaranteed that the
651 /// receiver has indeed received the data if this function returns success.
653 /// This function will never panic, but it may return `Err` if the
654 /// `Receiver` has disconnected and is no longer able to receive
656 #[stable(feature = "rust1", since = "1.0.0")]
657 pub fn send(&self, t
: T
) -> Result
<(), SendError
<T
>> {
658 unsafe { (*self.inner.get()).send(t).map_err(SendError) }
661 /// Attempts to send a value on this channel without blocking.
663 /// This method differs from `send` by returning immediately if the
664 /// channel's buffer is full or no receiver is waiting to acquire some
665 /// data. Compared with `send`, this function has two failure cases
666 /// instead of one (one for disconnection, one for a full buffer).
668 /// See `SyncSender::send` for notes about guarantees of whether the
669 /// receiver has received the data or not if this function is successful.
670 #[stable(feature = "rust1", since = "1.0.0")]
671 pub fn try_send(&self, t
: T
) -> Result
<(), TrySendError
<T
>> {
672 unsafe { (*self.inner.get()).try_send(t) }
676 #[stable(feature = "rust1", since = "1.0.0")]
677 impl<T
> Clone
for SyncSender
<T
> {
678 fn clone(&self) -> SyncSender
<T
> {
679 unsafe { (*self.inner.get()).clone_chan(); }
680 SyncSender
::new(self.inner
.clone())
684 #[stable(feature = "rust1", since = "1.0.0")]
685 impl<T
> Drop
for SyncSender
<T
> {
687 unsafe { (*self.inner.get()).drop_chan(); }
691 ////////////////////////////////////////////////////////////////////////////////
693 ////////////////////////////////////////////////////////////////////////////////
695 impl<T
> Receiver
<T
> {
696 fn new(inner
: Flavor
<T
>) -> Receiver
<T
> {
697 Receiver { inner: UnsafeCell::new(inner) }
700 /// Attempts to return a pending value on this receiver without blocking
702 /// This method will never block the caller in order to wait for data to
703 /// become available. Instead, this will always return immediately with a
704 /// possible option of pending data on the channel.
706 /// This is useful for a flavor of "optimistic check" before deciding to
707 /// block on a receiver.
708 #[stable(feature = "rust1", since = "1.0.0")]
709 pub fn try_recv(&self) -> Result
<T
, TryRecvError
> {
711 let new_port
= match *unsafe { self.inner() }
{
712 Flavor
::Oneshot(ref p
) => {
713 match unsafe { (*p.get()).try_recv() }
{
714 Ok(t
) => return Ok(t
),
715 Err(oneshot
::Empty
) => return Err(TryRecvError
::Empty
),
716 Err(oneshot
::Disconnected
) => {
717 return Err(TryRecvError
::Disconnected
)
719 Err(oneshot
::Upgraded(rx
)) => rx
,
722 Flavor
::Stream(ref p
) => {
723 match unsafe { (*p.get()).try_recv() }
{
724 Ok(t
) => return Ok(t
),
725 Err(stream
::Empty
) => return Err(TryRecvError
::Empty
),
726 Err(stream
::Disconnected
) => {
727 return Err(TryRecvError
::Disconnected
)
729 Err(stream
::Upgraded(rx
)) => rx
,
732 Flavor
::Shared(ref p
) => {
733 match unsafe { (*p.get()).try_recv() }
{
734 Ok(t
) => return Ok(t
),
735 Err(shared
::Empty
) => return Err(TryRecvError
::Empty
),
736 Err(shared
::Disconnected
) => {
737 return Err(TryRecvError
::Disconnected
)
741 Flavor
::Sync(ref p
) => {
742 match unsafe { (*p.get()).try_recv() }
{
743 Ok(t
) => return Ok(t
),
744 Err(sync
::Empty
) => return Err(TryRecvError
::Empty
),
745 Err(sync
::Disconnected
) => {
746 return Err(TryRecvError
::Disconnected
)
752 mem
::swap(self.inner_mut(),
753 new_port
.inner_mut());
758 /// Attempts to wait for a value on this receiver, returning an error if the
759 /// corresponding channel has hung up.
761 /// This function will always block the current thread if there is no data
762 /// available and it's possible for more data to be sent. Once a message is
763 /// sent to the corresponding `Sender`, then this receiver will wake up and
764 /// return that message.
766 /// If the corresponding `Sender` has disconnected, or it disconnects while
767 /// this call is blocking, this call will wake up and return `Err` to
768 /// indicate that no more messages can ever be received on this channel.
769 /// However, since channels are buffered, messages sent before the disconnect
770 /// will still be properly received.
775 /// use std::sync::mpsc;
778 /// let (send, recv) = mpsc::channel();
779 /// let handle = thread::spawn(move || {
780 /// send.send(1u8).unwrap();
783 /// handle.join().unwrap();
785 /// assert_eq!(Ok(1), recv.recv());
788 /// Buffering behavior:
791 /// use std::sync::mpsc;
793 /// use std::sync::mpsc::RecvError;
795 /// let (send, recv) = mpsc::channel();
796 /// let handle = thread::spawn(move || {
797 /// send.send(1u8).unwrap();
798 /// send.send(2).unwrap();
799 /// send.send(3).unwrap();
803 /// // wait for the thread to join so we ensure the sender is dropped
804 /// handle.join().unwrap();
806 /// assert_eq!(Ok(1), recv.recv());
807 /// assert_eq!(Ok(2), recv.recv());
808 /// assert_eq!(Ok(3), recv.recv());
809 /// assert_eq!(Err(RecvError), recv.recv());
811 #[stable(feature = "rust1", since = "1.0.0")]
812 pub fn recv(&self) -> Result
<T
, RecvError
> {
814 let new_port
= match *unsafe { self.inner() }
{
815 Flavor
::Oneshot(ref p
) => {
816 match unsafe { (*p.get()).recv() }
{
817 Ok(t
) => return Ok(t
),
818 Err(oneshot
::Empty
) => return unreachable
!(),
819 Err(oneshot
::Disconnected
) => return Err(RecvError
),
820 Err(oneshot
::Upgraded(rx
)) => rx
,
823 Flavor
::Stream(ref p
) => {
824 match unsafe { (*p.get()).recv() }
{
825 Ok(t
) => return Ok(t
),
826 Err(stream
::Empty
) => return unreachable
!(),
827 Err(stream
::Disconnected
) => return Err(RecvError
),
828 Err(stream
::Upgraded(rx
)) => rx
,
831 Flavor
::Shared(ref p
) => {
832 match unsafe { (*p.get()).recv() }
{
833 Ok(t
) => return Ok(t
),
834 Err(shared
::Empty
) => return unreachable
!(),
835 Err(shared
::Disconnected
) => return Err(RecvError
),
838 Flavor
::Sync(ref p
) => return unsafe {
839 (*p
.get()).recv().map_err(|()| RecvError
)
843 mem
::swap(self.inner_mut(), new_port
.inner_mut());
848 /// Returns an iterator that will block waiting for messages, but never
849 /// `panic!`. It will return `None` when the channel has hung up.
850 #[stable(feature = "rust1", since = "1.0.0")]
851 pub fn iter(&self) -> Iter
<T
> {
856 impl<T
> select
::Packet
for Receiver
<T
> {
857 fn can_recv(&self) -> bool
{
859 let new_port
= match *unsafe { self.inner() }
{
860 Flavor
::Oneshot(ref p
) => {
861 match unsafe { (*p.get()).can_recv() }
{
862 Ok(ret
) => return ret
,
863 Err(upgrade
) => upgrade
,
866 Flavor
::Stream(ref p
) => {
867 match unsafe { (*p.get()).can_recv() }
{
868 Ok(ret
) => return ret
,
869 Err(upgrade
) => upgrade
,
872 Flavor
::Shared(ref p
) => {
873 return unsafe { (*p.get()).can_recv() }
;
875 Flavor
::Sync(ref p
) => {
876 return unsafe { (*p.get()).can_recv() }
;
880 mem
::swap(self.inner_mut(),
881 new_port
.inner_mut());
886 fn start_selection(&self, mut token
: SignalToken
) -> StartResult
{
888 let (t
, new_port
) = match *unsafe { self.inner() }
{
889 Flavor
::Oneshot(ref p
) => {
890 match unsafe { (*p.get()).start_selection(token) }
{
891 oneshot
::SelSuccess
=> return Installed
,
892 oneshot
::SelCanceled
=> return Abort
,
893 oneshot
::SelUpgraded(t
, rx
) => (t
, rx
),
896 Flavor
::Stream(ref p
) => {
897 match unsafe { (*p.get()).start_selection(token) }
{
898 stream
::SelSuccess
=> return Installed
,
899 stream
::SelCanceled
=> return Abort
,
900 stream
::SelUpgraded(t
, rx
) => (t
, rx
),
903 Flavor
::Shared(ref p
) => {
904 return unsafe { (*p.get()).start_selection(token) }
;
906 Flavor
::Sync(ref p
) => {
907 return unsafe { (*p.get()).start_selection(token) }
;
912 mem
::swap(self.inner_mut(), new_port
.inner_mut());
917 fn abort_selection(&self) -> bool
{
918 let mut was_upgrade
= false;
920 let result
= match *unsafe { self.inner() }
{
921 Flavor
::Oneshot(ref p
) => unsafe { (*p.get()).abort_selection() }
,
922 Flavor
::Stream(ref p
) => unsafe {
923 (*p
.get()).abort_selection(was_upgrade
)
925 Flavor
::Shared(ref p
) => return unsafe {
926 (*p
.get()).abort_selection(was_upgrade
)
928 Flavor
::Sync(ref p
) => return unsafe {
929 (*p
.get()).abort_selection()
932 let new_port
= match result { Ok(b) => return b, Err(p) => p }
;
935 mem
::swap(self.inner_mut(),
936 new_port
.inner_mut());
942 #[stable(feature = "rust1", since = "1.0.0")]
943 impl<'a
, T
> Iterator
for Iter
<'a
, T
> {
946 fn next(&mut self) -> Option
<T
> { self.rx.recv().ok() }
949 #[stable(feature = "receiver_into_iter", since = "1.1.0")]
950 impl<'a
, T
> IntoIterator
for &'a Receiver
<T
> {
952 type IntoIter
= Iter
<'a
, T
>;
954 fn into_iter(self) -> Iter
<'a
, T
> { self.iter() }
957 impl<T
> Iterator
for IntoIter
<T
> {
959 fn next(&mut self) -> Option
<T
> { self.rx.recv().ok() }
962 #[stable(feature = "receiver_into_iter", since = "1.1.0")]
963 impl <T
> IntoIterator
for Receiver
<T
> {
965 type IntoIter
= IntoIter
<T
>;
967 fn into_iter(self) -> IntoIter
<T
> {
968 IntoIter { rx: self }
972 #[stable(feature = "rust1", since = "1.0.0")]
973 impl<T
> Drop
for Receiver
<T
> {
975 match *unsafe { self.inner_mut() }
{
976 Flavor
::Oneshot(ref mut p
) => unsafe { (*p.get()).drop_port(); }
,
977 Flavor
::Stream(ref mut p
) => unsafe { (*p.get()).drop_port(); }
,
978 Flavor
::Shared(ref mut p
) => unsafe { (*p.get()).drop_port(); }
,
979 Flavor
::Sync(ref mut p
) => unsafe { (*p.get()).drop_port(); }
,
984 #[stable(feature = "rust1", since = "1.0.0")]
985 impl<T
> fmt
::Debug
for SendError
<T
> {
986 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
987 "SendError(..)".fmt(f
)
991 #[stable(feature = "rust1", since = "1.0.0")]
992 impl<T
> fmt
::Display
for SendError
<T
> {
993 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
994 "sending on a closed channel".fmt(f
)
998 #[stable(feature = "rust1", since = "1.0.0")]
999 impl<T
: Send
+ Reflect
> error
::Error
for SendError
<T
> {
1000 fn description(&self) -> &str {
1001 "sending on a closed channel"
1004 fn cause(&self) -> Option
<&error
::Error
> {
1009 #[stable(feature = "rust1", since = "1.0.0")]
1010 impl<T
> fmt
::Debug
for TrySendError
<T
> {
1011 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1013 TrySendError
::Full(..) => "Full(..)".fmt(f
),
1014 TrySendError
::Disconnected(..) => "Disconnected(..)".fmt(f
),
1019 #[stable(feature = "rust1", since = "1.0.0")]
1020 impl<T
> fmt
::Display
for TrySendError
<T
> {
1021 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1023 TrySendError
::Full(..) => {
1024 "sending on a full channel".fmt(f
)
1026 TrySendError
::Disconnected(..) => {
1027 "sending on a closed channel".fmt(f
)
1033 #[stable(feature = "rust1", since = "1.0.0")]
1034 impl<T
: Send
+ Reflect
> error
::Error
for TrySendError
<T
> {
1036 fn description(&self) -> &str {
1038 TrySendError
::Full(..) => {
1039 "sending on a full channel"
1041 TrySendError
::Disconnected(..) => {
1042 "sending on a closed channel"
1047 fn cause(&self) -> Option
<&error
::Error
> {
1052 #[stable(feature = "rust1", since = "1.0.0")]
1053 impl fmt
::Display
for RecvError
{
1054 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1055 "receiving on a closed channel".fmt(f
)
1059 #[stable(feature = "rust1", since = "1.0.0")]
1060 impl error
::Error
for RecvError
{
1062 fn description(&self) -> &str {
1063 "receiving on a closed channel"
1066 fn cause(&self) -> Option
<&error
::Error
> {
1071 #[stable(feature = "rust1", since = "1.0.0")]
1072 impl fmt
::Display
for TryRecvError
{
1073 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1075 TryRecvError
::Empty
=> {
1076 "receiving on an empty channel".fmt(f
)
1078 TryRecvError
::Disconnected
=> {
1079 "receiving on a closed channel".fmt(f
)
1085 #[stable(feature = "rust1", since = "1.0.0")]
1086 impl error
::Error
for TryRecvError
{
1088 fn description(&self) -> &str {
1090 TryRecvError
::Empty
=> {
1091 "receiving on an empty channel"
1093 TryRecvError
::Disconnected
=> {
1094 "receiving on a closed channel"
1099 fn cause(&self) -> Option
<&error
::Error
> {
/// Multiplier for stress-test iteration counts, read from the
/// `RUST_TEST_STRESS` environment variable; defaults to 1 when unset.
pub fn stress_factor() -> usize {
    match env::var("RUST_TEST_STRESS") {
        // A set-but-unparsable value is a configuration bug: panicking is
        // acceptable in test support code.
        Ok(val) => val.parse().unwrap(),
        Err(..) => 1,
    }
}
1121 let (tx
, rx
) = channel
::<i32>();
1122 tx
.send(1).unwrap();
1123 assert_eq
!(rx
.recv().unwrap(), 1);
1128 let (tx
, _rx
) = channel
::<Box
<isize>>();
1129 tx
.send(box 1).unwrap();
#[test]
fn drop_full_shared() {
    // Sending into a shared (cloned-sender) channel whose pending message is
    // never received must still clean up the boxed payload on drop.
    // `Box::new` replaces the unstable `box` syntax; same behavior.
    let (tx, _rx) = channel::<Box<isize>>();
    // Cloning upgrades the oneshot packet to the shared flavor.
    drop(tx.clone());
    drop(tx.clone());
    tx.send(Box::new(1)).unwrap();
}
1142 let (tx
, rx
) = channel
::<i32>();
1143 tx
.send(1).unwrap();
1144 assert_eq
!(rx
.recv().unwrap(), 1);
1145 let tx
= tx
.clone();
1146 tx
.send(1).unwrap();
1147 assert_eq
!(rx
.recv().unwrap(), 1);
1151 fn smoke_threads() {
1152 let (tx
, rx
) = channel
::<i32>();
1153 let _t
= thread
::spawn(move|| {
1154 tx
.send(1).unwrap();
1156 assert_eq
!(rx
.recv().unwrap(), 1);
#[test]
fn smoke_port_gone() {
    // Once the receiver is dropped, sends must fail with `SendError`.
    let (tx, rx) = channel::<i32>();
    drop(rx);
    assert!(tx.send(1).is_err());
}
1167 fn smoke_shared_port_gone() {
1168 let (tx
, rx
) = channel
::<i32>();
1170 assert
!(tx
.send(1).is_err())
1174 fn smoke_shared_port_gone2() {
1175 let (tx
, rx
) = channel
::<i32>();
1177 let tx2
= tx
.clone();
1179 assert
!(tx2
.send(1).is_err());
1183 fn port_gone_concurrent() {
1184 let (tx
, rx
) = channel
::<i32>();
1185 let _t
= thread
::spawn(move|| {
1188 while tx
.send(1).is_ok() {}
1192 fn port_gone_concurrent_shared() {
1193 let (tx
, rx
) = channel
::<i32>();
1194 let tx2
= tx
.clone();
1195 let _t
= thread
::spawn(move|| {
1198 while tx
.send(1).is_ok() && tx2
.send(1).is_ok() {}
#[test]
fn smoke_chan_gone() {
    // Once the (sole) sender is dropped, receives must fail with `RecvError`.
    let (tx, rx) = channel::<i32>();
    drop(tx);
    assert!(rx.recv().is_err());
}
1209 fn smoke_chan_gone_shared() {
1210 let (tx
, rx
) = channel
::<()>();
1211 let tx2
= tx
.clone();
1214 assert
!(rx
.recv().is_err());
1218 fn chan_gone_concurrent() {
1219 let (tx
, rx
) = channel
::<i32>();
1220 let _t
= thread
::spawn(move|| {
1221 tx
.send(1).unwrap();
1222 tx
.send(1).unwrap();
1224 while rx
.recv().is_ok() {}
1229 let (tx
, rx
) = channel
::<i32>();
1230 let t
= thread
::spawn(move|| {
1231 for _
in 0..10000 { tx.send(1).unwrap(); }
1234 assert_eq
!(rx
.recv().unwrap(), 1);
1236 t
.join().ok().unwrap();
1240 fn stress_shared() {
1241 const AMT
: u32 = 10000;
1242 const NTHREADS
: u32 = 8;
1243 let (tx
, rx
) = channel
::<i32>();
1245 let t
= thread
::spawn(move|| {
1246 for _
in 0..AMT
* NTHREADS
{
1247 assert_eq
!(rx
.recv().unwrap(), 1);
1249 match rx
.try_recv() {
1255 for _
in 0..NTHREADS
{
1256 let tx
= tx
.clone();
1257 thread
::spawn(move|| {
1258 for _
in 0..AMT { tx.send(1).unwrap(); }
1262 t
.join().ok().unwrap();
1266 fn send_from_outside_runtime() {
1267 let (tx1
, rx1
) = channel
::<()>();
1268 let (tx2
, rx2
) = channel
::<i32>();
1269 let t1
= thread
::spawn(move|| {
1270 tx1
.send(()).unwrap();
1272 assert_eq
!(rx2
.recv().unwrap(), 1);
1275 rx1
.recv().unwrap();
1276 let t2
= thread
::spawn(move|| {
1278 tx2
.send(1).unwrap();
1281 t1
.join().ok().unwrap();
1282 t2
.join().ok().unwrap();
1286 fn recv_from_outside_runtime() {
1287 let (tx
, rx
) = channel
::<i32>();
1288 let t
= thread
::spawn(move|| {
1290 assert_eq
!(rx
.recv().unwrap(), 1);
1294 tx
.send(1).unwrap();
1296 t
.join().ok().unwrap();
1301 let (tx1
, rx1
) = channel
::<i32>();
1302 let (tx2
, rx2
) = channel
::<i32>();
1303 let t1
= thread
::spawn(move|| {
1304 assert_eq
!(rx1
.recv().unwrap(), 1);
1305 tx2
.send(2).unwrap();
1307 let t2
= thread
::spawn(move|| {
1308 tx1
.send(1).unwrap();
1309 assert_eq
!(rx2
.recv().unwrap(), 2);
1311 t1
.join().ok().unwrap();
1312 t2
.join().ok().unwrap();
1316 fn oneshot_single_thread_close_port_first() {
1317 // Simple test of closing without sending
1318 let (_tx
, rx
) = channel
::<i32>();
1323 fn oneshot_single_thread_close_chan_first() {
1324 // Simple test of closing without sending
1325 let (tx
, _rx
) = channel
::<i32>();
1330 fn oneshot_single_thread_send_port_close() {
1331 // Testing that the sender cleans up the payload if receiver is closed
1332 let (tx
, rx
) = channel
::<Box
<i32>>();
1334 assert
!(tx
.send(box 0).is_err());
1338 fn oneshot_single_thread_recv_chan_close() {
1339 // Receiving on a closed chan will panic
1340 let res
= thread
::spawn(move|| {
1341 let (tx
, rx
) = channel
::<i32>();
1346 assert
!(res
.is_err());
1350 fn oneshot_single_thread_send_then_recv() {
1351 let (tx
, rx
) = channel
::<Box
<i32>>();
1352 tx
.send(box 10).unwrap();
1353 assert
!(rx
.recv().unwrap() == box 10);
1357 fn oneshot_single_thread_try_send_open() {
1358 let (tx
, rx
) = channel
::<i32>();
1359 assert
!(tx
.send(10).is_ok());
1360 assert
!(rx
.recv().unwrap() == 10);
1364 fn oneshot_single_thread_try_send_closed() {
1365 let (tx
, rx
) = channel
::<i32>();
1367 assert
!(tx
.send(10).is_err());
1371 fn oneshot_single_thread_try_recv_open() {
1372 let (tx
, rx
) = channel
::<i32>();
1373 tx
.send(10).unwrap();
1374 assert
!(rx
.recv() == Ok(10));
1378 fn oneshot_single_thread_try_recv_closed() {
1379 let (tx
, rx
) = channel
::<i32>();
1381 assert
!(rx
.recv().is_err());
1385 fn oneshot_single_thread_peek_data() {
1386 let (tx
, rx
) = channel
::<i32>();
1387 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Empty
));
1388 tx
.send(10).unwrap();
1389 assert_eq
!(rx
.try_recv(), Ok(10));
1393 fn oneshot_single_thread_peek_close() {
1394 let (tx
, rx
) = channel
::<i32>();
1396 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Disconnected
));
1397 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Disconnected
));
1401 fn oneshot_single_thread_peek_open() {
1402 let (_tx
, rx
) = channel
::<i32>();
1403 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Empty
));
1407 fn oneshot_multi_task_recv_then_send() {
1408 let (tx
, rx
) = channel
::<Box
<i32>>();
1409 let _t
= thread
::spawn(move|| {
1410 assert
!(rx
.recv().unwrap() == box 10);
1413 tx
.send(box 10).unwrap();
1417 fn oneshot_multi_task_recv_then_close() {
1418 let (tx
, rx
) = channel
::<Box
<i32>>();
1419 let _t
= thread
::spawn(move|| {
1422 let res
= thread
::spawn(move|| {
1423 assert
!(rx
.recv().unwrap() == box 10);
1425 assert
!(res
.is_err());
1429 fn oneshot_multi_thread_close_stress() {
1430 for _
in 0..stress_factor() {
1431 let (tx
, rx
) = channel
::<i32>();
1432 let _t
= thread
::spawn(move|| {
1440 fn oneshot_multi_thread_send_close_stress() {
1441 for _
in 0..stress_factor() {
1442 let (tx
, rx
) = channel
::<i32>();
1443 let _t
= thread
::spawn(move|| {
1446 let _
= thread
::spawn(move|| {
1447 tx
.send(1).unwrap();
1453 fn oneshot_multi_thread_recv_close_stress() {
1454 for _
in 0..stress_factor() {
1455 let (tx
, rx
) = channel
::<i32>();
1456 thread
::spawn(move|| {
1457 let res
= thread
::spawn(move|| {
1460 assert
!(res
.is_err());
1462 let _t
= thread
::spawn(move|| {
1463 thread
::spawn(move|| {
1471 fn oneshot_multi_thread_send_recv_stress() {
1472 for _
in 0..stress_factor() {
1473 let (tx
, rx
) = channel
::<Box
<isize>>();
1474 let _t
= thread
::spawn(move|| {
1475 tx
.send(box 10).unwrap();
1477 assert
!(rx
.recv().unwrap() == box 10);
1482 fn stream_send_recv_stress() {
1483 for _
in 0..stress_factor() {
1484 let (tx
, rx
) = channel();
1489 fn send(tx
: Sender
<Box
<i32>>, i
: i32) {
1490 if i
== 10 { return }
1492 thread
::spawn(move|| {
1493 tx
.send(box i
).unwrap();
1498 fn recv(rx
: Receiver
<Box
<i32>>, i
: i32) {
1499 if i
== 10 { return }
1501 thread
::spawn(move|| {
1502 assert
!(rx
.recv().unwrap() == box i
);
1511 // Regression test that we don't run out of stack in scheduler context
1512 let (tx
, rx
) = channel();
1513 for _
in 0..10000 { tx.send(()).unwrap(); }
1514 for _
in 0..10000 { rx.recv().unwrap(); }
1518 fn shared_chan_stress() {
1519 let (tx
, rx
) = channel();
1520 let total
= stress_factor() + 100;
1522 let tx
= tx
.clone();
1523 thread
::spawn(move|| {
1524 tx
.send(()).unwrap();
1534 fn test_nested_recv_iter() {
1535 let (tx
, rx
) = channel
::<i32>();
1536 let (total_tx
, total_rx
) = channel
::<i32>();
1538 let _t
= thread
::spawn(move|| {
1540 for x
in rx
.iter() {
1543 total_tx
.send(acc
).unwrap();
1546 tx
.send(3).unwrap();
1547 tx
.send(1).unwrap();
1548 tx
.send(2).unwrap();
1550 assert_eq
!(total_rx
.recv().unwrap(), 6);
1554 fn test_recv_iter_break() {
1555 let (tx
, rx
) = channel
::<i32>();
1556 let (count_tx
, count_rx
) = channel();
1558 let _t
= thread
::spawn(move|| {
1560 for x
in rx
.iter() {
1567 count_tx
.send(count
).unwrap();
1570 tx
.send(2).unwrap();
1571 tx
.send(2).unwrap();
1572 tx
.send(2).unwrap();
1575 assert_eq
!(count_rx
.recv().unwrap(), 4);
1579 fn test_recv_into_iter_owned() {
1581 let (tx
, rx
) = channel
::<i32>();
1582 tx
.send(1).unwrap();
1583 tx
.send(2).unwrap();
1587 assert_eq
!(iter
.next().unwrap(), 1);
1588 assert_eq
!(iter
.next().unwrap(), 2);
1589 assert_eq
!(iter
.next().is_none(), true);
1593 fn test_recv_into_iter_borrowed() {
1594 let (tx
, rx
) = channel
::<i32>();
1595 tx
.send(1).unwrap();
1596 tx
.send(2).unwrap();
1598 let mut iter
= (&rx
).into_iter();
1599 assert_eq
!(iter
.next().unwrap(), 1);
1600 assert_eq
!(iter
.next().unwrap(), 2);
1601 assert_eq
!(iter
.next().is_none(), true);
1605 fn try_recv_states() {
1606 let (tx1
, rx1
) = channel
::<i32>();
1607 let (tx2
, rx2
) = channel
::<()>();
1608 let (tx3
, rx3
) = channel
::<()>();
1609 let _t
= thread
::spawn(move|| {
1610 rx2
.recv().unwrap();
1611 tx1
.send(1).unwrap();
1612 tx3
.send(()).unwrap();
1613 rx2
.recv().unwrap();
1615 tx3
.send(()).unwrap();
1618 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Empty
));
1619 tx2
.send(()).unwrap();
1620 rx3
.recv().unwrap();
1621 assert_eq
!(rx1
.try_recv(), Ok(1));
1622 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Empty
));
1623 tx2
.send(()).unwrap();
1624 rx3
.recv().unwrap();
1625 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Disconnected
));
1628 // This bug used to end up in a livelock inside of the Receiver destructor
1629 // because the internal state of the Shared packet was corrupted
1631 fn destroy_upgraded_shared_port_when_sender_still_active() {
1632 let (tx
, rx
) = channel();
1633 let (tx2
, rx2
) = channel();
1634 let _t
= thread
::spawn(move|| {
1635 rx
.recv().unwrap(); // wait on a oneshot
1636 drop(rx
); // destroy a shared
1637 tx2
.send(()).unwrap();
1639 // make sure the other thread has gone to sleep
1640 for _
in 0..5000 { thread::yield_now(); }
1642 // upgrade to a shared chan and send a message
1645 t
.send(()).unwrap();
1647 // wait for the child thread to exit before we exit
1648 rx2
.recv().unwrap();
1660 pub fn stress_factor() -> usize {
1661 match env
::var("RUST_TEST_STRESS") {
1662 Ok(val
) => val
.parse().unwrap(),
1669 let (tx
, rx
) = sync_channel
::<i32>(1);
1670 tx
.send(1).unwrap();
1671 assert_eq
!(rx
.recv().unwrap(), 1);
1676 let (tx
, _rx
) = sync_channel
::<Box
<isize>>(1);
1677 tx
.send(box 1).unwrap();
1682 let (tx
, rx
) = sync_channel
::<i32>(1);
1683 tx
.send(1).unwrap();
1684 assert_eq
!(rx
.recv().unwrap(), 1);
1685 let tx
= tx
.clone();
1686 tx
.send(1).unwrap();
1687 assert_eq
!(rx
.recv().unwrap(), 1);
1691 fn smoke_threads() {
1692 let (tx
, rx
) = sync_channel
::<i32>(0);
1693 let _t
= thread
::spawn(move|| {
1694 tx
.send(1).unwrap();
1696 assert_eq
!(rx
.recv().unwrap(), 1);
1700 fn smoke_port_gone() {
1701 let (tx
, rx
) = sync_channel
::<i32>(0);
1703 assert
!(tx
.send(1).is_err());
1707 fn smoke_shared_port_gone2() {
1708 let (tx
, rx
) = sync_channel
::<i32>(0);
1710 let tx2
= tx
.clone();
1712 assert
!(tx2
.send(1).is_err());
1716 fn port_gone_concurrent() {
1717 let (tx
, rx
) = sync_channel
::<i32>(0);
1718 let _t
= thread
::spawn(move|| {
1721 while tx
.send(1).is_ok() {}
1725 fn port_gone_concurrent_shared() {
1726 let (tx
, rx
) = sync_channel
::<i32>(0);
1727 let tx2
= tx
.clone();
1728 let _t
= thread
::spawn(move|| {
1731 while tx
.send(1).is_ok() && tx2
.send(1).is_ok() {}
1735 fn smoke_chan_gone() {
1736 let (tx
, rx
) = sync_channel
::<i32>(0);
1738 assert
!(rx
.recv().is_err());
1742 fn smoke_chan_gone_shared() {
1743 let (tx
, rx
) = sync_channel
::<()>(0);
1744 let tx2
= tx
.clone();
1747 assert
!(rx
.recv().is_err());
1751 fn chan_gone_concurrent() {
1752 let (tx
, rx
) = sync_channel
::<i32>(0);
1753 thread
::spawn(move|| {
1754 tx
.send(1).unwrap();
1755 tx
.send(1).unwrap();
1757 while rx
.recv().is_ok() {}
1762 let (tx
, rx
) = sync_channel
::<i32>(0);
1763 thread
::spawn(move|| {
1764 for _
in 0..10000 { tx.send(1).unwrap(); }
1767 assert_eq
!(rx
.recv().unwrap(), 1);
1772 fn stress_shared() {
1773 const AMT
: u32 = 1000;
1774 const NTHREADS
: u32 = 8;
1775 let (tx
, rx
) = sync_channel
::<i32>(0);
1776 let (dtx
, drx
) = sync_channel
::<()>(0);
1778 thread
::spawn(move|| {
1779 for _
in 0..AMT
* NTHREADS
{
1780 assert_eq
!(rx
.recv().unwrap(), 1);
1782 match rx
.try_recv() {
1786 dtx
.send(()).unwrap();
1789 for _
in 0..NTHREADS
{
1790 let tx
= tx
.clone();
1791 thread
::spawn(move|| {
1792 for _
in 0..AMT { tx.send(1).unwrap(); }
1796 drx
.recv().unwrap();
1800 fn oneshot_single_thread_close_port_first() {
1801 // Simple test of closing without sending
1802 let (_tx
, rx
) = sync_channel
::<i32>(0);
1807 fn oneshot_single_thread_close_chan_first() {
1808 // Simple test of closing without sending
1809 let (tx
, _rx
) = sync_channel
::<i32>(0);
1814 fn oneshot_single_thread_send_port_close() {
1815 // Testing that the sender cleans up the payload if receiver is closed
1816 let (tx
, rx
) = sync_channel
::<Box
<i32>>(0);
1818 assert
!(tx
.send(box 0).is_err());
1822 fn oneshot_single_thread_recv_chan_close() {
1823 // Receiving on a closed chan will panic
1824 let res
= thread
::spawn(move|| {
1825 let (tx
, rx
) = sync_channel
::<i32>(0);
1830 assert
!(res
.is_err());
1834 fn oneshot_single_thread_send_then_recv() {
1835 let (tx
, rx
) = sync_channel
::<Box
<i32>>(1);
1836 tx
.send(box 10).unwrap();
1837 assert
!(rx
.recv().unwrap() == box 10);
1841 fn oneshot_single_thread_try_send_open() {
1842 let (tx
, rx
) = sync_channel
::<i32>(1);
1843 assert_eq
!(tx
.try_send(10), Ok(()));
1844 assert
!(rx
.recv().unwrap() == 10);
1848 fn oneshot_single_thread_try_send_closed() {
1849 let (tx
, rx
) = sync_channel
::<i32>(0);
1851 assert_eq
!(tx
.try_send(10), Err(TrySendError
::Disconnected(10)));
1855 fn oneshot_single_thread_try_send_closed2() {
1856 let (tx
, _rx
) = sync_channel
::<i32>(0);
1857 assert_eq
!(tx
.try_send(10), Err(TrySendError
::Full(10)));
1861 fn oneshot_single_thread_try_recv_open() {
1862 let (tx
, rx
) = sync_channel
::<i32>(1);
1863 tx
.send(10).unwrap();
1864 assert
!(rx
.recv() == Ok(10));
1868 fn oneshot_single_thread_try_recv_closed() {
1869 let (tx
, rx
) = sync_channel
::<i32>(0);
1871 assert
!(rx
.recv().is_err());
1875 fn oneshot_single_thread_peek_data() {
1876 let (tx
, rx
) = sync_channel
::<i32>(1);
1877 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Empty
));
1878 tx
.send(10).unwrap();
1879 assert_eq
!(rx
.try_recv(), Ok(10));
1883 fn oneshot_single_thread_peek_close() {
1884 let (tx
, rx
) = sync_channel
::<i32>(0);
1886 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Disconnected
));
1887 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Disconnected
));
1891 fn oneshot_single_thread_peek_open() {
1892 let (_tx
, rx
) = sync_channel
::<i32>(0);
1893 assert_eq
!(rx
.try_recv(), Err(TryRecvError
::Empty
));
1897 fn oneshot_multi_task_recv_then_send() {
1898 let (tx
, rx
) = sync_channel
::<Box
<i32>>(0);
1899 let _t
= thread
::spawn(move|| {
1900 assert
!(rx
.recv().unwrap() == box 10);
1903 tx
.send(box 10).unwrap();
1907 fn oneshot_multi_task_recv_then_close() {
1908 let (tx
, rx
) = sync_channel
::<Box
<i32>>(0);
1909 let _t
= thread
::spawn(move|| {
1912 let res
= thread
::spawn(move|| {
1913 assert
!(rx
.recv().unwrap() == box 10);
1915 assert
!(res
.is_err());
1919 fn oneshot_multi_thread_close_stress() {
1920 for _
in 0..stress_factor() {
1921 let (tx
, rx
) = sync_channel
::<i32>(0);
1922 let _t
= thread
::spawn(move|| {
1930 fn oneshot_multi_thread_send_close_stress() {
1931 for _
in 0..stress_factor() {
1932 let (tx
, rx
) = sync_channel
::<i32>(0);
1933 let _t
= thread
::spawn(move|| {
1936 let _
= thread
::spawn(move || {
1937 tx
.send(1).unwrap();
1943 fn oneshot_multi_thread_recv_close_stress() {
1944 for _
in 0..stress_factor() {
1945 let (tx
, rx
) = sync_channel
::<i32>(0);
1946 let _t
= thread
::spawn(move|| {
1947 let res
= thread
::spawn(move|| {
1950 assert
!(res
.is_err());
1952 let _t
= thread
::spawn(move|| {
1953 thread
::spawn(move|| {
1961 fn oneshot_multi_thread_send_recv_stress() {
1962 for _
in 0..stress_factor() {
1963 let (tx
, rx
) = sync_channel
::<Box
<i32>>(0);
1964 let _t
= thread
::spawn(move|| {
1965 tx
.send(box 10).unwrap();
1967 assert
!(rx
.recv().unwrap() == box 10);
1972 fn stream_send_recv_stress() {
1973 for _
in 0..stress_factor() {
1974 let (tx
, rx
) = sync_channel
::<Box
<i32>>(0);
1979 fn send(tx
: SyncSender
<Box
<i32>>, i
: i32) {
1980 if i
== 10 { return }
1982 thread
::spawn(move|| {
1983 tx
.send(box i
).unwrap();
1988 fn recv(rx
: Receiver
<Box
<i32>>, i
: i32) {
1989 if i
== 10 { return }
1991 thread
::spawn(move|| {
1992 assert
!(rx
.recv().unwrap() == box i
);
2001 // Regression test that we don't run out of stack in scheduler context
2002 let (tx
, rx
) = sync_channel(10000);
2003 for _
in 0..10000 { tx.send(()).unwrap(); }
2004 for _
in 0..10000 { rx.recv().unwrap(); }
2008 fn shared_chan_stress() {
2009 let (tx
, rx
) = sync_channel(0);
2010 let total
= stress_factor() + 100;
2012 let tx
= tx
.clone();
2013 thread
::spawn(move|| {
2014 tx
.send(()).unwrap();
2024 fn test_nested_recv_iter() {
2025 let (tx
, rx
) = sync_channel
::<i32>(0);
2026 let (total_tx
, total_rx
) = sync_channel
::<i32>(0);
2028 let _t
= thread
::spawn(move|| {
2030 for x
in rx
.iter() {
2033 total_tx
.send(acc
).unwrap();
2036 tx
.send(3).unwrap();
2037 tx
.send(1).unwrap();
2038 tx
.send(2).unwrap();
2040 assert_eq
!(total_rx
.recv().unwrap(), 6);
2044 fn test_recv_iter_break() {
2045 let (tx
, rx
) = sync_channel
::<i32>(0);
2046 let (count_tx
, count_rx
) = sync_channel(0);
2048 let _t
= thread
::spawn(move|| {
2050 for x
in rx
.iter() {
2057 count_tx
.send(count
).unwrap();
2060 tx
.send(2).unwrap();
2061 tx
.send(2).unwrap();
2062 tx
.send(2).unwrap();
2063 let _
= tx
.try_send(2);
2065 assert_eq
!(count_rx
.recv().unwrap(), 4);
2069 fn try_recv_states() {
2070 let (tx1
, rx1
) = sync_channel
::<i32>(1);
2071 let (tx2
, rx2
) = sync_channel
::<()>(1);
2072 let (tx3
, rx3
) = sync_channel
::<()>(1);
2073 let _t
= thread
::spawn(move|| {
2074 rx2
.recv().unwrap();
2075 tx1
.send(1).unwrap();
2076 tx3
.send(()).unwrap();
2077 rx2
.recv().unwrap();
2079 tx3
.send(()).unwrap();
2082 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Empty
));
2083 tx2
.send(()).unwrap();
2084 rx3
.recv().unwrap();
2085 assert_eq
!(rx1
.try_recv(), Ok(1));
2086 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Empty
));
2087 tx2
.send(()).unwrap();
2088 rx3
.recv().unwrap();
2089 assert_eq
!(rx1
.try_recv(), Err(TryRecvError
::Disconnected
));
2092 // This bug used to end up in a livelock inside of the Receiver destructor
2093 // because the internal state of the Shared packet was corrupted
2095 fn destroy_upgraded_shared_port_when_sender_still_active() {
2096 let (tx
, rx
) = sync_channel
::<()>(0);
2097 let (tx2
, rx2
) = sync_channel
::<()>(0);
2098 let _t
= thread
::spawn(move|| {
2099 rx
.recv().unwrap(); // wait on a oneshot
2100 drop(rx
); // destroy a shared
2101 tx2
.send(()).unwrap();
2103 // make sure the other thread has gone to sleep
2104 for _
in 0..5000 { thread::yield_now(); }
2106 // upgrade to a shared chan and send a message
2109 t
.send(()).unwrap();
2111 // wait for the child thread to exit before we exit
2112 rx2
.recv().unwrap();
2117 let (tx
, rx
) = sync_channel
::<i32>(0);
2118 let _t
= thread
::spawn(move|| { rx.recv().unwrap(); }
);
2119 assert_eq
!(tx
.send(1), Ok(()));
2124 let (tx
, rx
) = sync_channel
::<i32>(0);
2125 let _t
= thread
::spawn(move|| { drop(rx); }
);
2126 assert
!(tx
.send(1).is_err());
2131 let (tx
, rx
) = sync_channel
::<i32>(1);
2132 assert_eq
!(tx
.send(1), Ok(()));
2133 let _t
=thread
::spawn(move|| { drop(rx); }
);
2134 assert
!(tx
.send(1).is_err());
2139 let (tx
, rx
) = sync_channel
::<i32>(0);
2140 let tx2
= tx
.clone();
2141 let (done
, donerx
) = channel();
2142 let done2
= done
.clone();
2143 let _t
= thread
::spawn(move|| {
2144 assert
!(tx
.send(1).is_err());
2145 done
.send(()).unwrap();
2147 let _t
= thread
::spawn(move|| {
2148 assert
!(tx2
.send(2).is_err());
2149 done2
.send(()).unwrap();
2152 donerx
.recv().unwrap();
2153 donerx
.recv().unwrap();
2158 let (tx
, _rx
) = sync_channel
::<i32>(0);
2159 assert_eq
!(tx
.try_send(1), Err(TrySendError
::Full(1)));
2164 let (tx
, _rx
) = sync_channel
::<i32>(1);
2165 assert_eq
!(tx
.try_send(1), Ok(()));
2166 assert_eq
!(tx
.try_send(1), Err(TrySendError
::Full(1)));
2171 let (tx
, rx
) = sync_channel
::<i32>(1);
2172 assert_eq
!(tx
.try_send(1), Ok(()));
2174 assert_eq
!(tx
.try_send(1), Err(TrySendError
::Disconnected(1)));
2180 let (tx1
, rx1
) = sync_channel
::<()>(3);
2181 let (tx2
, rx2
) = sync_channel
::<()>(3);
2183 let _t
= thread
::spawn(move|| {
2184 rx1
.recv().unwrap();
2185 tx2
.try_send(()).unwrap();
2188 tx1
.try_send(()).unwrap();
2189 rx2
.recv().unwrap();