// crossbeam-utils/src/atomic/consume.rs
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use crate::primitive::sync::atomic::compiler_fence;
use core::sync::atomic::Ordering;

/// Trait which allows reading from primitive atomic types with "consume" ordering.
pub trait AtomicConsume {
    /// Type returned by `load_consume`.
    type Val;

    /// Loads a value from the atomic using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model, since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice, since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// This is currently only implemented on ARM and AArch64, where a fence
    /// can be avoided. On other architectures this will fall back to a simple
    /// `load(Ordering::Acquire)`.
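    ///
    /// # Examples
    ///
    /// A minimal usage sketch (added here, not in the original source); it
    /// assumes the trait is reachable at its documented re-export path,
    /// `crossbeam_utils::atomic::AtomicConsume`:
    ///
    /// ```
    /// use core::sync::atomic::AtomicUsize;
    /// use crossbeam_utils::atomic::AtomicConsume;
    ///
    /// let a = AtomicUsize::new(7);
    /// // On ARM/AArch64 this lowers to a relaxed load plus a compiler fence;
    /// // on other architectures it is a plain acquire load.
    /// assert_eq!(a.load_consume(), 7);
    /// ```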
    fn load_consume(&self) -> Self::Val;
}

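// Explanatory note (added, not in the original source): ARM and AArch64
// preserve dependency ordering for loads in hardware, so a relaxed load
// followed by a compiler-only fence is enough for consume semantics; the
// fence merely stops the compiler from reordering dependent operations
// above the load, and no barrier instruction is emitted.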
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            let result = self.load(Ordering::Relaxed);
            compiler_fence(Ordering::Acquire);
            result
        }
    };
}

#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            self.load(Ordering::Acquire)
        }
    };
}

macro_rules! impl_atomic {
    ($atomic:ident, $val:ty) => {
        impl AtomicConsume for ::core::sync::atomic::$atomic {
            type Val = $val;
            impl_consume!();
        }
        #[cfg(loom_crossbeam)]
        impl AtomicConsume for ::loom::sync::atomic::$atomic {
            type Val = $val;
            impl_consume!();
        }
    };
}

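// For illustration (an added sketch of the expansion, not original text):
// `impl_atomic!(AtomicBool, bool)` expands to roughly
//
//     impl AtomicConsume for ::core::sync::atomic::AtomicBool {
//         type Val = bool;
//         #[inline]
//         fn load_consume(&self) -> Self::Val { /* impl_consume! body */ }
//     }
//
// plus the same impl for `::loom::sync::atomic::AtomicBool` when the
// `loom_crossbeam` cfg is set.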
impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
#[cfg(not(loom_crossbeam))]
impl_atomic!(AtomicIsize, isize);
#[cfg(has_atomic_u8)]
impl_atomic!(AtomicU8, u8);
#[cfg(has_atomic_u8)]
impl_atomic!(AtomicI8, i8);
#[cfg(has_atomic_u16)]
impl_atomic!(AtomicU16, u16);
#[cfg(has_atomic_u16)]
impl_atomic!(AtomicI16, i16);
#[cfg(has_atomic_u32)]
impl_atomic!(AtomicU32, u32);
#[cfg(has_atomic_u32)]
impl_atomic!(AtomicI32, i32);
#[cfg(has_atomic_u64)]
impl_atomic!(AtomicU64, u64);
#[cfg(has_atomic_u64)]
impl_atomic!(AtomicI64, i64);

impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
    type Val = *mut T;
    impl_consume!();
}

#[cfg(loom_crossbeam)]
impl<T> AtomicConsume for ::loom::sync::atomic::AtomicPtr<T> {
    type Val = *mut T;
    impl_consume!();
}
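
// A hedged usage sketch (added; not part of the original source). It shows
// the publish/consume pattern `load_consume` is meant for: a writer fully
// initializes a value and then publishes a pointer to it with `Release`; a
// reader loads the pointer with `load_consume` and dereferences it, an
// operation that "depends on" the loaded value. The module and test names
// are hypothetical, the run is single-threaded purely for illustration, and
// `std` is assumed to be available in the test build.
#[cfg(test)]
mod consume_sketch {
    use super::AtomicConsume;
    use core::sync::atomic::{AtomicPtr, Ordering};
    use std::boxed::Box;

    #[test]
    fn publish_then_consume() {
        // Writer side: the pointee is fully initialized before the store.
        let published = Box::into_raw(Box::new(42u32));
        let slot = AtomicPtr::new(core::ptr::null_mut());
        slot.store(published, Ordering::Release);

        // Reader side: the dereference depends on the loaded pointer, so
        // consume ordering is enough to observe the pointee's contents.
        let p = slot.load_consume();
        assert!(!p.is_null());
        assert_eq!(unsafe { *p }, 42);

        // Reclaim the allocation.
        unsafe { drop(Box::from_raw(p)) };
    }
}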