]> git.proxmox.com Git - cargo.git/blob - vendor/crossbeam-0.3.0/src/epoch/participants.rs
New upstream version 0.23.0
[cargo.git] / vendor / crossbeam-0.3.0 / src / epoch / participants.rs
// Manages the global participant list, which is an intrusive list in
// which items are lazily removed on traversal (after being
// "logically" deleted by becoming inactive).
4
5 use std::mem;
6 use std::ops::{Deref, DerefMut};
7 use std::sync::atomic::Ordering::{Relaxed, Acquire, Release};
8
9 use epoch::{Atomic, Owned, Guard};
10 use epoch::participant::Participant;
11 use CachePadded;
12
13 /// Global, threadsafe list of threads participating in epoch management.
#[derive(Debug)]
pub struct Participants {
    // Head of the intrusive singly-linked list; null when no thread has
    // enrolled yet. New nodes are pushed at the head (see `enroll`).
    head: Atomic<ParticipantNode>
}
18
/// A single entry in the participant list, cache-padded so that
/// per-thread epoch state does not false-share with its neighbors.
#[derive(Debug)]
pub struct ParticipantNode(CachePadded<Participant>);
21
22 impl ParticipantNode {
23 pub fn new() -> ParticipantNode {
24 ParticipantNode(CachePadded::new(Participant::new()))
25 }
26 }
27
28 impl Deref for ParticipantNode {
29 type Target = Participant;
30 fn deref(&self) -> &Participant {
31 &self.0
32 }
33 }
34
35 impl DerefMut for ParticipantNode {
36 fn deref_mut(&mut self) -> &mut Participant {
37 &mut self.0
38 }
39 }
40
impl Participants {
    /// Create an empty participant list (no threads enrolled).
    #[cfg(not(feature = "nightly"))]
    pub fn new() -> Participants {
        Participants { head: Atomic::null() }
    }

    /// Create an empty participant list (no threads enrolled).
    /// `const` on nightly so it can initialize a `static` directly.
    #[cfg(feature = "nightly")]
    pub const fn new() -> Participants {
        Participants { head: Atomic::null() }
    }

    /// Enroll a new thread in epoch management by adding a new `Participant`
    /// record to the global list.
    ///
    /// Returns a raw pointer to the new record; callers rely on the node
    /// staying alive until it is marked inactive and later reclaimed by a
    /// traversal (see `Iter::next`).
    pub fn enroll(&self) -> *const Participant {
        let mut participant = Owned::new(ParticipantNode::new());

        // we ultimately use epoch tracking to free Participant nodes, but we
        // can't actually enter an epoch here, so fake it; we know the node
        // can't be removed until marked inactive anyway.
        //
        // NOTE(review): this transmutes a reference to a unit value into a
        // `&'static Guard`. It presumably relies on `Guard` carrying no
        // state that `load`/`cas_and_ref` inspect — the guard is only used
        // as a lifetime token here. Verify against `epoch::Guard`'s
        // definition before touching this.
        let fake_guard = ();
        let g: &'static Guard = unsafe { mem::transmute(&fake_guard) };
        loop {
            // Standard lock-free push: read the current head, link our node
            // in front of it, then try to swing `head` to our node.
            let head = self.head.load(Relaxed, g);
            participant.next.store_shared(head, Relaxed);
            // Release ordering publishes the node's initialization (and its
            // `next` link) to threads that subsequently Acquire-load `head`.
            match self.head.cas_and_ref(head, participant, Release, g) {
                Ok(shared) => {
                    let shared: &Participant = &shared;
                    return shared;
                }
                Err(owned) => {
                    // CAS lost the race; we get ownership back and retry
                    // with the freshly observed head.
                    participant = owned;
                }
            }
        }
    }

    /// Iterate over the current participants, pinned by `g`.
    /// The first load needs `Acquire` (see `Iter::needs_acq`).
    pub fn iter<'a>(&'a self, g: &'a Guard) -> Iter<'a> {
        Iter {
            guard: g,
            next: &self.head,
            needs_acq: true,
        }
    }
}
85
/// Iterator over the active participants in the global list.
/// Yields `&Participant` references tied to the pinning guard's lifetime.
#[derive(Debug)]
pub struct Iter<'a> {
    // pin to an epoch so that we can free inactive nodes
    guard: &'a Guard,
    // Link to load next; starts at `Participants::head`, then follows
    // each node's `next` field.
    next: &'a Atomic<ParticipantNode>,

    // an Acquire read is needed only for the first read, due to release
    // sequences
    needs_acq: bool,
}
96
impl<'a> Iterator for Iter<'a> {
    type Item = &'a Participant;
    /// Advance to the next *active* participant, skipping (but not yet
    /// reclaiming) nodes that have been marked inactive.
    fn next(&mut self) -> Option<&'a Participant> {
        // Only the first load of the traversal needs Acquire: it
        // synchronizes with the Release CAS in `enroll`. Subsequent links
        // were published as part of the same release sequence, so Relaxed
        // suffices (per the comment on `needs_acq`).
        let mut cur = if self.needs_acq {
            self.needs_acq = false;
            self.next.load(Acquire, self.guard)
        } else {
            self.next.load(Relaxed, self.guard)
        };

        while let Some(n) = cur {
            // attempt to clean up inactive nodes
            if !n.active.load(Relaxed) {
                // Logically deleted: skip over it without yielding.
                cur = n.next.load(Relaxed, self.guard);
                // TODO: actually reclaim inactive participants!
            } else {
                // Remember where to resume so the next call continues
                // from this node's link.
                self.next = &n.next;
                return Some(&n)
            }
        }

        // Reached the end of the list.
        None
    }
}