crossbeam-epoch/src/guard.rs
Line | Count | Source (jump to first uncovered line) |
1 | | use core::fmt; |
2 | | use core::mem; |
3 | | |
4 | | use scopeguard::defer; |
5 | | |
6 | | use crate::atomic::Shared; |
7 | | use crate::collector::Collector; |
8 | | use crate::deferred::Deferred; |
9 | | use crate::internal::Local; |
10 | | |
11 | | /// A guard that keeps the current thread pinned. |
12 | | /// |
13 | | /// # Pinning |
14 | | /// |
15 | | /// The current thread is pinned by calling [`pin`], which returns a new guard: |
16 | | /// |
17 | | /// ``` |
18 | | /// use crossbeam_epoch as epoch; |
19 | | /// |
20 | | /// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference. |
21 | | /// // This is not really necessary, but makes passing references to the guard a bit easier. |
22 | | /// let guard = &epoch::pin(); |
23 | | /// ``` |
24 | | /// |
25 | | /// When a guard gets dropped, the current thread is automatically unpinned. |
26 | | /// |
27 | | /// # Pointers on the stack |
28 | | /// |
29 | | /// Having a guard allows us to create pointers on the stack to heap-allocated objects. |
30 | | /// For example: |
31 | | /// |
32 | | /// ``` |
33 | | /// use crossbeam_epoch::{self as epoch, Atomic}; |
34 | | /// use std::sync::atomic::Ordering::SeqCst; |
35 | | /// |
36 | | /// // Create a heap-allocated number. |
37 | | /// let a = Atomic::new(777); |
38 | | /// |
39 | | /// // Pin the current thread. |
40 | | /// let guard = &epoch::pin(); |
41 | | /// |
42 | | /// // Load the heap-allocated object and create pointer `p` on the stack. |
43 | | /// let p = a.load(SeqCst, guard); |
44 | | /// |
45 | | /// // Dereference the pointer and print the value: |
46 | | /// if let Some(num) = unsafe { p.as_ref() } { |
47 | | /// println!("The number is {}.", num); |
48 | | /// } |
49 | | /// ``` |
50 | | /// |
51 | | /// # Multiple guards |
52 | | /// |
53 | | /// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the |
54 | | /// thread will actually be pinned only when the first guard is created and unpinned when the last |
55 | | /// one is dropped: |
56 | | /// |
57 | | /// ``` |
58 | | /// use crossbeam_epoch as epoch; |
59 | | /// |
60 | | /// let guard1 = epoch::pin(); |
61 | | /// let guard2 = epoch::pin(); |
62 | | /// assert!(epoch::is_pinned()); |
63 | | /// drop(guard1); |
64 | | /// assert!(epoch::is_pinned()); |
65 | | /// drop(guard2); |
66 | | /// assert!(!epoch::is_pinned()); |
67 | | /// ``` |
68 | | /// |
69 | | /// [`pin`]: super::pin |
70 | | pub struct Guard { |
71 | | pub(crate) local: *const Local, |
72 | | } |
73 | | |
74 | | impl Guard { |
75 | | /// Stores a function so that it can be executed at some point after all currently pinned |
76 | | /// threads get unpinned. |
77 | | /// |
78 | | /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache |
79 | | /// becomes full, some functions are moved into the global cache. At the same time, some |
80 | | /// functions from both local and global caches may get executed in order to incrementally |
81 | | /// clean up the caches as they fill up. |
82 | | /// |
83 | | /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it |
84 | | /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might |
85 | | /// never run, but the epoch-based garbage collection will make an effort to execute it |
86 | | /// reasonably soon. |
87 | | /// |
88 | | /// If this method is called from an [`unprotected`] guard, the function will simply be |
89 | | /// executed immediately. |
90 | 0 | pub fn defer<F, R>(&self, f: F) |
91 | 0 | where |
92 | 0 | F: FnOnce() -> R, |
93 | 0 | F: Send + 'static, |
94 | 0 | { |
95 | 0 | unsafe { |
96 | 0 | self.defer_unchecked(f); |
97 | 0 | } |
98 | 0 | } |
99 | | |
100 | | /// Stores a function so that it can be executed at some point after all currently pinned |
101 | | /// threads get unpinned. |
102 | | /// |
103 | | /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache |
104 | | /// becomes full, some functions are moved into the global cache. At the same time, some |
105 | | /// functions from both local and global caches may get executed in order to incrementally |
106 | | /// clean up the caches as they fill up. |
107 | | /// |
108 | | /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it |
109 | | /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might |
110 | | /// never run, but the epoch-based garbage collection will make an effort to execute it |
111 | | /// reasonably soon. |
112 | | /// |
113 | | /// If this method is called from an [`unprotected`] guard, the function will simply be |
114 | | /// executed immediately. |
115 | | /// |
116 | | /// # Safety |
117 | | /// |
118 | | /// The given function must not hold reference onto the stack. It is highly recommended that |
119 | | /// the passed function is **always** marked with `move` in order to prevent accidental |
120 | | /// borrows. |
121 | | /// |
122 | | /// ``` |
123 | | /// use crossbeam_epoch as epoch; |
124 | | /// |
125 | | /// let guard = &epoch::pin(); |
126 | | /// let message = "Hello!"; |
127 | | /// unsafe { |
128 | | /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. |
129 | | /// guard.defer_unchecked(move || { |
130 | | /// println!("{}", message); |
131 | | /// }); |
132 | | /// } |
133 | | /// ``` |
134 | | /// |
135 | | /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by |
136 | | /// the closure must be `Send`. |
137 | | /// |
138 | | /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove |
139 | | /// `F: Send` for typical use cases. For example, consider the following code snippet, which |
140 | | /// exemplifies the typical use case of deferring the deallocation of a shared reference: |
141 | | /// |
142 | | /// ```ignore |
143 | | /// let shared = Owned::new(7i32).into_shared(guard); |
144 | | /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`! |
145 | | /// ``` |
146 | | /// |
147 | | /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function, |
148 | | /// because it's called only after the grace period and `shared` is no longer shared with other |
149 | | /// threads. But we don't expect type systems to prove this. |
150 | | /// |
151 | | /// # Examples |
152 | | /// |
153 | | /// When a heap-allocated object in a data structure becomes unreachable, it has to be |
154 | | /// deallocated. However, the current thread and other threads may be still holding references |
155 | | /// on the stack to that same object. Therefore it cannot be deallocated before those references |
156 | | /// get dropped. This method can defer deallocation until all those threads get unpinned and |
157 | | /// consequently drop all their references on the stack. |
158 | | /// |
159 | | /// ``` |
160 | | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; |
161 | | /// use std::sync::atomic::Ordering::SeqCst; |
162 | | /// |
163 | | /// let a = Atomic::new("foo"); |
164 | | /// |
165 | | /// // Now suppose that `a` is shared among multiple threads and concurrently |
166 | | /// // accessed and modified... |
167 | | /// |
168 | | /// // Pin the current thread. |
169 | | /// let guard = &epoch::pin(); |
170 | | /// |
171 | | /// // Steal the object currently stored in `a` and swap it with another one. |
172 | | /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); |
173 | | /// |
174 | | /// if !p.is_null() { |
175 | | /// // The object `p` is pointing to is now unreachable. |
176 | | /// // Defer its deallocation until all currently pinned threads get unpinned. |
177 | | /// unsafe { |
178 | | /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. |
179 | | /// guard.defer_unchecked(move || { |
180 | | /// println!("{} is now being deallocated.", p.deref()); |
181 | | /// // Now we have unique access to the object pointed to by `p` and can turn it |
182 | | /// // into an `Owned`. Dropping the `Owned` will deallocate the object. |
183 | | /// drop(p.into_owned()); |
184 | | /// }); |
185 | | /// } |
186 | | /// } |
187 | | /// ``` |
188 | | pub unsafe fn defer_unchecked<F, R>(&self, f: F) |
189 | | where |
190 | | F: FnOnce() -> R, |
191 | | { |
192 | 4.00M | if let Some(local3.84M ) = self.local.as_ref() { |
193 | 3.84M | local.defer(Deferred::new(move || drop(f())3.68M ), self); <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::{closure#0} Line | Count | Source | 193 | 129 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::internal::Local>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>>::{closure#0} Line | Count | Source | 193 | 68 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::decrement::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<usize>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 16 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 15 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<fifo::destructors::Elem>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 11 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<lifo::destructors::Elem>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<usize>>::resize::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::collector::tests::count_drops::Elem>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::collector::tests::count_drops::Elem>>::{closure#0} Line | Count | Source | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::count_destroy::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::list::Entry>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::list::Entry>>::{closure#0} Line | Count | Source | 193 | 8.19k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::internal::Local>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>>::{closure#0} Line | Count | Source | 193 | 50 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::collector::tests::stress::Elem>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::collector::tests::stress::Elem>>::{closure#0} Line | Count | Source | 193 | 737k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::incremental::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::destroy_array::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>>>::{closure#0} Line | Count | Source | 193 | 4 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<i32>::{closure#0}, crossbeam_epoch::atomic::Owned<i32>>::{closure#0} Line | Count | Source | 193 | 110 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>>::{closure#0}, crossbeam_epoch::atomic::Owned<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>>>::{closure#0} Line | Count | Source | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<i64>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<i64>>>::{closure#0} Line | Count | Source | 193 | 2.57M | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::{closure#0} Line | Count | Source | 193 | 64.3k | local.defer(Deferred::new(move || drop(f())), self); |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::buffering::{closure#0}, ()>::{closure#0} Line | Count | Source | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); |
|
194 | 3.84M | } else { |
195 | 161k | drop(f()); |
196 | 161k | } |
197 | 4.00M | } <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<i32, i32>>::decrement_with_pin<crossbeam_epoch::default::pin>::{closure#0}, ()> Line | Count | Source | 192 | 3 | if let Some(local) = self.local.as_ref() { | 193 | 3 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 3 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 3 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<i32, i32>>::decrement::{closure#0}, ()> Line | Count | Source | 192 | 1 | if let Some(local) = self.local.as_ref() { | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 1 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 1 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>> Line | Count | Source | 192 | 167 | if let Some(local165 ) = self.local.as_ref() { | 193 | 165 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 165 | } else { | 195 | 2 | drop(f()); | 196 | 2 | } | 197 | 167 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::internal::Local>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>> Line | Count | Source | 192 | 99 | if let Some(local97 ) = self.local.as_ref() { | 193 | 97 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 97 | } else { | 195 | 2 | drop(f()); | 196 | 2 | } | 197 | 99 | } |
Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<injector::destructors::Elem>>::resize::{closure#0}, ()> Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<usize>>::resize::{closure#0}, ()> Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<i32, ()>>::decrement_with_pin<crossbeam_epoch::default::pin>::{closure#0}, ()> Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<i32, ()>>::decrement::{closure#0}, ()> <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::decrement::{closure#0}, ()> Line | Count | Source | 192 | 1 | if let Some(local) = self.local.as_ref() { | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 1 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 1 | } |
Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_skiplist::base::Node<i32, i32>>::decrement::{closure#0}, ()> Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<i32>>::resize::{closure#0}, ()> <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<usize>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 16 | if let Some(local) = self.local.as_ref() { | 193 | 16 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 16 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 16 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 15 | if let Some(local) = self.local.as_ref() { | 193 | 15 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 15 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 15 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<fifo::destructors::Elem>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 10 | if let Some(local) = self.local.as_ref() { | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 10 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 10 | } |
Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<i32>>::resize::{closure#0}, ()> Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<i32>>::resize::{closure#0}, ()> <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<lifo::destructors::Elem>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 10 | if let Some(local) = self.local.as_ref() { | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 10 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 10 | } |
Unexecuted instantiation: <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<i32>>::resize::{closure#0}, ()> <crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<usize>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 10 | if let Some(local) = self.local.as_ref() { | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 10 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 10 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::resize::{closure#0}, ()> Line | Count | Source | 192 | 11 | if let Some(local) = self.local.as_ref() { | 193 | 11 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 11 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 11 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::collector::tests::count_drops::Elem>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::collector::tests::count_drops::Elem>> Line | Count | Source | 192 | 100k | if let Some(local) = self.local.as_ref() { | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 100k | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 100k | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::list::Entry>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::list::Entry>> Line | Count | Source | 192 | 8.16k | if let Some(local8.16k ) = self.local.as_ref() { | 193 | 8.16k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 8.16k | } else { | 195 | 3 | drop(f()); | 196 | 3 | } | 197 | 8.16k | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::count_destroy::{closure#0}, ()> Line | Count | Source | 192 | 100k | if let Some(local) = self.local.as_ref() { | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 100k | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 100k | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::collector::tests::stress::Elem>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::collector::tests::stress::Elem>> Line | Count | Source | 192 | 731k | if let Some(local) = self.local.as_ref() { | 193 | 731k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 731k | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 731k | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::internal::Local>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>> Line | Count | Source | 192 | 67 | if let Some(local52 ) = self.local.as_ref() { | 193 | 52 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 52 | } else { | 195 | 15 | drop(f()); | 196 | 15 | } | 197 | 67 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>>> Line | Count | Source | 192 | 4 | if let Some(local) = self.local.as_ref() { | 193 | 4 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 4 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 4 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<i32>::{closure#0}, crossbeam_epoch::atomic::Owned<i32>> Line | Count | Source | 192 | 110 | if let Some(local) = self.local.as_ref() { | 193 | 110 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 110 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 110 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::destroy_array::{closure#0}, ()> Line | Count | Source | 192 | 1 | if let Some(local) = self.local.as_ref() { | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 1 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 1 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::buffering::{closure#0}, ()> Line | Count | Source | 192 | 10 | if let Some(local) = self.local.as_ref() { | 193 | 10 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 10 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 10 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>>::{closure#0}, crossbeam_epoch::atomic::Owned<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>>> Line | Count | Source | 192 | 1 | if let Some(local) = self.local.as_ref() { | 193 | 1 | local.defer(Deferred::new(move || drop(f())), self); | 194 | 1 | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 1 | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<i64>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<i64>>> Line | Count | Source | 192 | 2.89M | if let Some(local2.73M ) = self.local.as_ref() { | 193 | 2.73M | local.defer(Deferred::new(move || drop(f())), self); | 194 | 2.73M | } else { | 195 | 161k | drop(f()); | 196 | 161k | } | 197 | 2.89M | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<<crossbeam_epoch::guard::Guard>::defer_destroy<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0}, crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>> Line | Count | Source | 192 | 64.9k | if let Some(local64.7k ) = self.local.as_ref() { | 193 | 64.7k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 64.7k | } else { | 195 | 221 | drop(f()); | 196 | 221 | } | 197 | 64.9k | } |
<crossbeam_epoch::guard::Guard>::defer_unchecked::<crossbeam_epoch::collector::tests::incremental::{closure#0}, ()> Line | Count | Source | 192 | 100k | if let Some(local) = self.local.as_ref() { | 193 | 100k | local.defer(Deferred::new(move || drop(f())), self); | 194 | 100k | } else { | 195 | 0 | drop(f()); | 196 | 0 | } | 197 | 100k | } |
|
198 | | |
199 | | /// Stores a destructor for an object so that it can be deallocated and dropped at some point |
200 | | /// after all currently pinned threads get unpinned. |
201 | | /// |
202 | | /// This method first stores the destructor into the thread-local (or handle-local) cache. If |
203 | | /// this cache becomes full, some destructors are moved into the global cache. At the same |
204 | | /// time, some destructors from both local and global caches may get executed in order to |
205 | | /// incrementally clean up the caches as they fill up. |
206 | | /// |
207 | | /// There is no guarantee when exactly the destructor will be executed. The only guarantee is |
208 | | /// that it won't be executed until all currently pinned threads get unpinned. In theory, the |
209 | | /// destructor might never run, but the epoch-based garbage collection will make an effort to |
210 | | /// execute it reasonably soon. |
211 | | /// |
212 | | /// If this method is called from an [`unprotected`] guard, the destructor will simply be |
213 | | /// executed immediately. |
214 | | /// |
215 | | /// # Safety |
216 | | /// |
217 | | /// The object must not be reachable by other threads anymore, otherwise it might be still in |
218 | | /// use when the destructor runs. |
219 | | /// |
220 | | /// Apart from that, keep in mind that another thread may execute the destructor, so the object |
221 | | /// must be sendable to other threads. |
222 | | /// |
223 | | /// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove |
224 | | /// `T: Send` for typical use cases. For example, consider the following code snippet, which |
225 | | /// exemplifies the typical use case of deferring the deallocation of a shared reference: |
226 | | /// |
227 | | /// ```ignore |
228 | | /// let shared = Owned::new(7i32).into_shared(guard); |
229 | | /// guard.defer_destroy(shared); // `Shared` is not `Send`! |
230 | | /// ``` |
231 | | /// |
232 | | /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because |
233 | | /// it's called only after the grace period and `shared` is no longer shared with other |
234 | | /// threads. But we don't expect type systems to prove this. |
235 | | /// |
236 | | /// # Examples |
237 | | /// |
238 | | /// When a heap-allocated object in a data structure becomes unreachable, it has to be |
239 | | /// deallocated. However, the current thread and other threads may be still holding references |
240 | | /// on the stack to that same object. Therefore it cannot be deallocated before those references |
241 | | /// get dropped. This method can defer deallocation until all those threads get unpinned and |
242 | | /// consequently drop all their references on the stack. |
243 | | /// |
244 | | /// ``` |
245 | | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; |
246 | | /// use std::sync::atomic::Ordering::SeqCst; |
247 | | /// |
248 | | /// let a = Atomic::new("foo"); |
249 | | /// |
250 | | /// // Now suppose that `a` is shared among multiple threads and concurrently |
251 | | /// // accessed and modified... |
252 | | /// |
253 | | /// // Pin the current thread. |
254 | | /// let guard = &epoch::pin(); |
255 | | /// |
256 | | /// // Steal the object currently stored in `a` and swap it with another one. |
257 | | /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); |
258 | | /// |
259 | | /// if !p.is_null() { |
260 | | /// // The object `p` is pointing to is now unreachable. |
261 | | /// // Defer its deallocation until all currently pinned threads get unpinned. |
262 | | /// unsafe { |
263 | | /// guard.defer_destroy(p); |
264 | | /// } |
265 | | /// } |
266 | | /// ``` |
267 | 3.78M | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { |
268 | 3.78M | self.defer_unchecked(move || ptr.into_owned()3.68M ); <crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0} Line | Count | Source | 268 | 131 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::internal::Local>::{closure#0} Line | Count | Source | 268 | 70 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>::{closure#0} Line | Count | Source | 268 | 64.4k | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>>::{closure#0} Line | Count | Source | 268 | 1 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<i64>>::{closure#0} Line | Count | Source | 268 | 2.77M | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<i32>::{closure#0} Line | Count | Source | 268 | 110 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>>::{closure#0} Line | Count | Source | 268 | 4 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::internal::Local>::{closure#0} Line | Count | Source | 268 | 65 | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::collector::tests::stress::Elem>::{closure#0} Line | Count | Source | 268 | 737k | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::list::Entry>::{closure#0} Line | Count | Source | 268 | 8.19k | self.defer_unchecked(move || ptr.into_owned()); |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::collector::tests::count_drops::Elem>::{closure#0} Line | Count | Source | 268 | 100k | self.defer_unchecked(move || ptr.into_owned()); |
|
269 | 3.78M | } <crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> Line | Count | Source | 267 | 167 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 167 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 167 | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::internal::Local> Line | Count | Source | 267 | 99 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 99 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 99 | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::sync::queue::test::push_try_pop_many_mpmc::LR>> Line | Count | Source | 267 | 4 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 4 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 4 | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<alloc::vec::Vec<crossbeam_epoch::collector::tests::drop_array::Elem>> Line | Count | Source | 267 | 1 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 1 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 1 | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> Line | Count | Source | 267 | 64.9k | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 64.9k | self.defer_unchecked(move || ptr.into_owned()); | 269 | 64.9k | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::list::Entry> Line | Count | Source | 267 | 8.16k | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 8.16k | self.defer_unchecked(move || ptr.into_owned()); | 269 | 8.16k | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::collector::tests::stress::Elem> Line | Count | Source | 267 | 708k | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 708k | self.defer_unchecked(move || ptr.into_owned()); | 269 | 708k | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::internal::Local> Line | Count | Source | 267 | 67 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 67 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 67 | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::sync::queue::Node<i64>> Line | Count | Source | 267 | 2.90M | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 2.90M | self.defer_unchecked(move || ptr.into_owned()); | 269 | 2.90M | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<crossbeam_epoch::collector::tests::count_drops::Elem> Line | Count | Source | 267 | 100k | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 100k | self.defer_unchecked(move || ptr.into_owned()); | 269 | 100k | } |
<crossbeam_epoch::guard::Guard>::defer_destroy::<i32> Line | Count | Source | 267 | 110 | pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { | 268 | 110 | self.defer_unchecked(move || ptr.into_owned()); | 269 | 110 | } |
|
270 | | |
271 | | /// Clears up the thread-local cache of deferred functions by executing them or moving into the |
272 | | /// global cache. |
273 | | /// |
274 | | /// Call this method after deferring execution of a function if you want to get it executed as |
275 | | /// soon as possible. Flushing will make sure it is residing in in the global cache, so that |
276 | | /// any thread has a chance of taking the function and executing it. |
277 | | /// |
278 | | /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens). |
279 | | /// |
280 | | /// # Examples |
281 | | /// |
282 | | /// ``` |
283 | | /// use crossbeam_epoch as epoch; |
284 | | /// |
285 | | /// let guard = &epoch::pin(); |
286 | | /// guard.defer(move || { |
287 | | /// println!("This better be printed as soon as possible!"); |
288 | | /// }); |
289 | | /// guard.flush(); |
290 | | /// ``` |
291 | | pub fn flush(&self) { |
292 | 280 | if let Some(local) = unsafe { self.local.as_ref() } { |
293 | 280 | local.flush(self); |
294 | 280 | }0 |
295 | 280 | } <crossbeam_epoch::guard::Guard>::flush Line | Count | Source | 292 | 76 | if let Some(local) = unsafe { self.local.as_ref() } { | 293 | 76 | local.flush(self); | 294 | 76 | }0 | 295 | 76 | } |
<crossbeam_epoch::guard::Guard>::flush Line | Count | Source | 292 | 204 | if let Some(local) = unsafe { self.local.as_ref() } { | 293 | 204 | local.flush(self); | 294 | 204 | }0 | 295 | 204 | } |
|
296 | | |
297 | | /// Unpins and then immediately re-pins the thread. |
298 | | /// |
299 | | /// This method is useful when you don't want delay the advancement of the global epoch by |
300 | | /// holding an old epoch. For safety, you should not maintain any guard-based reference across |
301 | | /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this |
302 | | /// is the only active guard for the current thread. |
303 | | /// |
304 | | /// If this method is called from an [`unprotected`] guard, then the call will be just no-op. |
305 | | /// |
306 | | /// # Examples |
307 | | /// |
308 | | /// ``` |
309 | | /// use crossbeam_epoch::{self as epoch, Atomic}; |
310 | | /// use std::sync::atomic::Ordering::SeqCst; |
311 | | /// |
312 | | /// let a = Atomic::new(777); |
313 | | /// let mut guard = epoch::pin(); |
314 | | /// { |
315 | | /// let p = a.load(SeqCst, &guard); |
316 | | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); |
317 | | /// } |
318 | | /// guard.repin(); |
319 | | /// { |
320 | | /// let p = a.load(SeqCst, &guard); |
321 | | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); |
322 | | /// } |
323 | | /// ``` |
324 | | pub fn repin(&mut self) { |
325 | 3 | if let Some(local) = unsafe { self.local.as_ref() } { |
326 | 3 | local.repin(); |
327 | 3 | }0 |
328 | 3 | } <crossbeam_epoch::guard::Guard>::repin Line | Count | Source | 325 | 1 | if let Some(local) = unsafe { self.local.as_ref() } { | 326 | 1 | local.repin(); | 327 | 1 | }0 | 328 | 1 | } |
<crossbeam_epoch::guard::Guard>::repin Line | Count | Source | 325 | 2 | if let Some(local) = unsafe { self.local.as_ref() } { | 326 | 2 | local.repin(); | 327 | 2 | }0 | 328 | 2 | } |
|
329 | | |
    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
    /// current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// ```
    pub fn repin_after<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            local.unpin();
        }

        // Ensure the Guard is re-pinned even if the function panics.
        // The deferred closure runs when this function's scope unwinds (normally or via panic).
        defer! {
            if let Some(local) = unsafe { self.local.as_ref() } {
                // `local.pin()` returns a fresh `Guard`; forget it because `self` (this
                // `Guard`) already represents the pin — dropping it would unpin again.
                mem::forget(local.pin());
                local.release_handle();
            }
        }

        // Run the user's function while the thread is unpinned.
        f()
    }
381 | | |
382 | | /// Returns the `Collector` associated with this guard. |
383 | | /// |
384 | | /// This method is useful when you need to ensure that all guards used with |
385 | | /// a data structure come from the same collector. |
386 | | /// |
387 | | /// If this method is called from an [`unprotected`] guard, then `None` is returned. |
388 | | /// |
389 | | /// # Examples |
390 | | /// |
391 | | /// ``` |
392 | | /// use crossbeam_epoch as epoch; |
393 | | /// |
394 | | /// let guard1 = epoch::pin(); |
395 | | /// let guard2 = epoch::pin(); |
396 | | /// assert!(guard1.collector() == guard2.collector()); |
397 | | /// ``` |
398 | 693 | pub fn collector(&self) -> Option<&Collector> { |
399 | 693 | unsafe { self.local.as_ref().map(|local| local.collector()) } |
400 | 693 | } |
401 | | } |
402 | | |
403 | | impl Drop for Guard { |
404 | | #[inline] |
405 | | fn drop(&mut self) { |
406 | 13.2M | if let Some(local) = unsafe { self.local.as_ref() } { |
407 | 13.2M | local.unpin(); |
408 | 13.2M | }0 |
409 | 13.2M | } <crossbeam_epoch::guard::Guard as core::ops::drop::Drop>::drop Line | Count | Source | 406 | 1.01M | if let Some(local) = unsafe { self.local.as_ref() } { | 407 | 1.01M | local.unpin(); | 408 | 1.01M | }0 | 409 | 1.01M | } |
<crossbeam_epoch::guard::Guard as core::ops::drop::Drop>::drop Line | Count | Source | 406 | 12.2M | if let Some(local) = unsafe { self.local.as_ref() } { | 407 | 12.2M | local.unpin(); | 408 | 12.2M | }0 | 409 | 12.2M | } |
|
410 | | } |
411 | | |
impl fmt::Debug for Guard {
    // Opaque representation: the raw `local` pointer is an internal detail and is not shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Guard { .. }")
    }
}
417 | | |
/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used in special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards by calling `clone()`.
///     let dummy = &epoch::unprotected().clone();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a noop.
/// }
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
///     head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
///     data: ManuallyDrop<T>,
///     next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop its data and deallocate it.
///                 let mut o = node.into_owned();
///                 ManuallyDrop::drop(&mut o.data);
///                 drop(o);
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so can't be directly stored in
    // a `static`.
    struct GuardWrapper(Guard);
    // SAFETY: the wrapped guard's `local` is null, so sharing it across threads never
    // touches any thread-local `Local` state — every operation on it is a no-op.
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    });
    &UNPROTECTED.0
}
|