crossbeam-deque/src/deque.rs
// TODO(@jeehoonkang): we mutate `batch_size` inside `for i in 0..batch_size {}`. It is difficult
// to read because we're mutating the range bound.
#![allow(clippy::mut_range_bound)]

use std::cell::{Cell, UnsafeCell};
use std::cmp;
use std::fmt;
use std::iter::FromIterator;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::sync::atomic::{self, AtomicIsize, AtomicPtr, AtomicUsize, Ordering};
use std::sync::Arc;

use crate::epoch::{self, Atomic, Owned};
use crate::utils::{Backoff, CachePadded};

// Minimum buffer capacity.
const MIN_CAP: usize = 64;
// Maximum number of tasks that can be stolen in `steal_batch()` and `steal_batch_and_pop()`.
const MAX_BATCH: usize = 32;
// If a buffer of at least this size is retired, thread-local garbage is flushed so that it gets
// deallocated as soon as possible.
const FLUSH_THRESHOLD_BYTES: usize = 1 << 10;
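
// For scale, an illustrative note: a fresh buffer holds `MIN_CAP` = 64 tasks, and on a
// 64-bit target a retired buffer of 128 `usize` slots (128 * 8 = 1024 bytes) already
// reaches `FLUSH_THRESHOLD_BYTES`, so `resize` below flushes its garbage eagerly.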

/// A buffer that holds tasks in a worker queue.
///
/// This is just a pointer to the buffer and its capacity - dropping an instance of this struct
/// will *not* deallocate the buffer.
struct Buffer<T> {
    /// Pointer to the allocated memory.
    ptr: *mut T,

    /// Capacity of the buffer. Always a power of two.
    cap: usize,
}

unsafe impl<T> Send for Buffer<T> {}

impl<T> Buffer<T> {
    /// Allocates a new buffer with the specified capacity.
    fn alloc(cap: usize) -> Buffer<T> {
        debug_assert_eq!(cap, cap.next_power_of_two());

        let mut v = Vec::with_capacity(cap);
        let ptr = v.as_mut_ptr();
        mem::forget(v);

        Buffer { ptr, cap }
    }

    /// Deallocates the buffer.
    unsafe fn dealloc(self) {
        drop(Vec::from_raw_parts(self.ptr, 0, self.cap));
    }

    /// Returns a pointer to the task at the specified `index`.
    unsafe fn at(&self, index: isize) -> *mut T {
        // `self.cap` is always a power of two.
        self.ptr.offset(index & (self.cap - 1) as isize)
    }
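
    // For illustration (not relied on by the implementation): since `cap` is a power
    // of two, `index & (cap - 1)` equals `index.rem_euclid(cap)` even for negative
    // indices, e.g. with `cap = 8`:
    //
    //     assert_eq!(9 & 7, 9_isize.rem_euclid(8));     // both are 1
    //     assert_eq!(-1 & 7, (-1_isize).rem_euclid(8)); // both are 7
    //
    // so the ever-growing front/back indices wrap into the buffer without a division.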

    /// Writes `task` into the specified `index`.
    ///
    /// This method might be concurrently called with another `read` at the same index, which is
    /// technically speaking a data race and therefore UB. We should use an atomic store here, but
    /// that would be more expensive and difficult to implement generically for all types `T`.
    /// Hence, as a hack, we use a volatile write instead.
    unsafe fn write(&self, index: isize, task: T) {
        ptr::write_volatile(self.at(index), task)
    }

    /// Reads a task from the specified `index`.
    ///
    /// This method might be concurrently called with another `write` at the same index, which is
    /// technically speaking a data race and therefore UB. We should use an atomic load here, but
    /// that would be more expensive and difficult to implement generically for all types `T`.
    /// Hence, as a hack, we use a volatile read instead.
    unsafe fn read(&self, index: isize) -> T {
        ptr::read_volatile(self.at(index))
    }
}

impl<T> Clone for Buffer<T> {
    fn clone(&self) -> Buffer<T> {
        Buffer {
            ptr: self.ptr,
            cap: self.cap,
        }
    }
}

impl<T> Copy for Buffer<T> {}

/// Internal queue data shared between the worker and stealers.
///
/// The implementation is based on the following work:
///
/// 1. [Chase and Lev. Dynamic circular work-stealing deque. SPAA 2005.][chase-lev]
/// 2. [Le, Pop, Cohen, and Nardelli. Correct and efficient work-stealing for weak memory models.
///    PPoPP 2013.][weak-mem]
/// 3. [Norris and Demsky. CDSchecker: checking concurrent data structures written with C/C++
///    atomics. OOPSLA 2013.][checker]
///
/// [chase-lev]: https://dl.acm.org/citation.cfm?id=1073974
/// [weak-mem]: https://dl.acm.org/citation.cfm?id=2442524
/// [checker]: https://dl.acm.org/citation.cfm?id=2509514
struct Inner<T> {
    /// The front index.
    front: AtomicIsize,

    /// The back index.
    back: AtomicIsize,

    /// The underlying buffer.
    buffer: CachePadded<Atomic<Buffer<T>>>,
}
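
// For intuition: tasks occupy the half-open range `front..back`; both indices only
// move forward, and the queue length is recovered as a wrapping difference, as
// `reserve` below does:
//
//     let len = back.wrapping_sub(front); // e.g. back = 5, front = 2 => len = 3
//
// `Buffer::at` then maps each logical index onto a physical slot.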

impl<T> Drop for Inner<T> {
    fn drop(&mut self) {
        // Load the back index, front index, and buffer.
        let b = self.back.load(Ordering::Relaxed);
        let f = self.front.load(Ordering::Relaxed);

        unsafe {
            let buffer = self.buffer.load(Ordering::Relaxed, epoch::unprotected());

            // Go through the buffer from front to back and drop all tasks in the queue.
            let mut i = f;
            while i != b {
                buffer.deref().at(i).drop_in_place();
                i = i.wrapping_add(1);
            }

            // Free the memory allocated by the buffer.
            buffer.into_owned().into_box().dealloc();
        }
    }
}

/// Worker queue flavor: FIFO or LIFO.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Flavor {
    /// The first-in first-out flavor.
    Fifo,

    /// The last-in first-out flavor.
    Lifo,
}

/// A worker queue.
///
/// This is a FIFO or LIFO queue that is owned by a single thread, but other threads may steal
/// tasks from it. Task schedulers typically create a single worker queue per thread.
///
/// # Examples
///
/// A FIFO worker:
///
/// ```
/// use crossbeam_deque::{Steal, Worker};
///
/// let w = Worker::new_fifo();
/// let s = w.stealer();
///
/// w.push(1);
/// w.push(2);
/// w.push(3);
///
/// assert_eq!(s.steal(), Steal::Success(1));
/// assert_eq!(w.pop(), Some(2));
/// assert_eq!(w.pop(), Some(3));
/// ```
///
/// A LIFO worker:
///
/// ```
/// use crossbeam_deque::{Steal, Worker};
///
/// let w = Worker::new_lifo();
/// let s = w.stealer();
///
/// w.push(1);
/// w.push(2);
/// w.push(3);
///
/// assert_eq!(s.steal(), Steal::Success(1));
/// assert_eq!(w.pop(), Some(3));
/// assert_eq!(w.pop(), Some(2));
/// ```
pub struct Worker<T> {
    /// A reference to the inner representation of the queue.
    inner: Arc<CachePadded<Inner<T>>>,

    /// A copy of `inner.buffer` for quick access.
    buffer: Cell<Buffer<T>>,

    /// The flavor of the queue.
    flavor: Flavor,

    /// Indicates that the worker cannot be shared among threads.
    _marker: PhantomData<*mut ()>, // !Send + !Sync
}

unsafe impl<T: Send> Send for Worker<T> {}

impl<T> Worker<T> {
    /// Creates a FIFO worker queue.
    ///
    /// Tasks are pushed and popped from opposite ends.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_deque::Worker;
    ///
    /// let w = Worker::<i32>::new_fifo();
    /// ```
    pub fn new_fifo() -> Worker<T> {
        let buffer = Buffer::alloc(MIN_CAP);

        let inner = Arc::new(CachePadded::new(Inner {
            front: AtomicIsize::new(0),
            back: AtomicIsize::new(0),
            buffer: CachePadded::new(Atomic::new(buffer)),
        }));

        Worker {
            inner,
            buffer: Cell::new(buffer),
            flavor: Flavor::Fifo,
            _marker: PhantomData,
        }
    }

    /// Creates a LIFO worker queue.
    ///
    /// Tasks are pushed and popped from the same end.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_deque::Worker;
    ///
    /// let w = Worker::<i32>::new_lifo();
    /// ```
    pub fn new_lifo() -> Worker<T> {
        let buffer = Buffer::alloc(MIN_CAP);

        let inner = Arc::new(CachePadded::new(Inner {
            front: AtomicIsize::new(0),
            back: AtomicIsize::new(0),
            buffer: CachePadded::new(Atomic::new(buffer)),
        }));

        Worker {
            inner,
            buffer: Cell::new(buffer),
            flavor: Flavor::Lifo,
            _marker: PhantomData,
        }
    }

    /// Creates a stealer for this queue.
    ///
    /// The returned stealer can be shared among threads and cloned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_deque::Worker;
    ///
    /// let w = Worker::<i32>::new_lifo();
    /// let s = w.stealer();
    /// ```
    pub fn stealer(&self) -> Stealer<T> {
        Stealer {
            inner: self.inner.clone(),
            flavor: self.flavor,
        }
    }

    /// Resizes the internal buffer to the new capacity of `new_cap`.
    #[cold]
    unsafe fn resize(&self, new_cap: usize) {
        // Load the back index, front index, and buffer.
        let b = self.inner.back.load(Ordering::Relaxed);
        let f = self.inner.front.load(Ordering::Relaxed);
        let buffer = self.buffer.get();

        // Allocate a new buffer and copy data from the old buffer to the new one.
        let new = Buffer::alloc(new_cap);
        let mut i = f;
        while i != b {
            ptr::copy_nonoverlapping(buffer.at(i), new.at(i), 1);
            i = i.wrapping_add(1);
        }

        let guard = &epoch::pin();

        // Replace the old buffer with the new one.
        self.buffer.replace(new);
        let old =
            self.inner
                .buffer
                .swap(Owned::new(new).into_shared(guard), Ordering::Release, guard);

        // Destroy the old buffer later.
        guard.defer_unchecked(move || old.into_owned().into_box().dealloc());

        // If the buffer is very large, then flush the thread-local garbage in order to deallocate
        // it as soon as possible.
        if mem::size_of::<T>() * new_cap >= FLUSH_THRESHOLD_BYTES {
            guard.flush();
        }
    }
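
    // Note: `new_cap` stays a power of two here: `MIN_CAP` is one, and callers such as
    // `reserve` below only grow it by repeated doubling, so the mask in `Buffer::at`
    // keeps working. Each task also keeps its logical index across the copy; only its
    // physical slot, computed from the new capacity, may change.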
317 | | |
318 | | /// Reserves enough capacity so that `reserve_cap` tasks can be pushed without growing the |
319 | | /// buffer. |
320 | 256k | fn reserve(&self, reserve_cap: usize) { |
321 | 256k | if reserve_cap > 0 { |
322 | | // Compute the current length. |
323 | 177k | let b = self.inner.back.load(Ordering::Relaxed); |
324 | 177k | let f = self.inner.front.load(Ordering::SeqCst); |
325 | 177k | let len = b.wrapping_sub(f) as usize; |
326 | 177k | |
327 | 177k | // The current capacity. |
328 | 177k | let cap = self.buffer.get().cap; |
329 | 177k | |
330 | 177k | // Is there enough capacity to push `reserve_cap` tasks? |
331 | 177k | if cap - len < reserve_cap { |
332 | | // Keep doubling the capacity as much as is needed. |
333 | 18.4E | let mut new_cap = cap * 2; |
334 | 18.4E | while new_cap - len < reserve_cap { |
335 | 0 | new_cap *= 2; |
336 | 0 | } |
337 | | |
338 | | // Resize the buffer. |
339 | 0 | unsafe { |
340 | 0 | self.resize(new_cap); |
341 | 0 | } |
342 | 178k | } |
343 | 78.5k | } |
344 | 257k | }
Instantiations of <crossbeam_deque::deque::Worker<T>>::reserve:
  <crossbeam_deque::deque::Worker<usize>>::reserve: 58.6k calls
  <crossbeam_deque::deque::Worker<injector::destructors::Elem>>::reserve: 261 calls
  <crossbeam_deque::deque::Worker<usize>>::reserve: 85.9k calls
  <crossbeam_deque::deque::Worker<fifo::destructors::Elem>>::reserve: 290 calls
  <crossbeam_deque::deque::Worker<i32>>::reserve: 12 calls
  <crossbeam_deque::deque::Worker<usize>>::reserve: 110k calls
  <crossbeam_deque::deque::Worker<lifo::destructors::Elem>>::reserve: 850 calls
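The doubling loop in `reserve` only runs when the spare capacity `cap - len` is too small for the request. A standalone sketch of just that arithmetic, with a hypothetical `next_capacity` helper that is not part of the crate:

```
// Keep doubling the capacity until at least `reserve_cap` spare slots exist,
// mirroring the loop in `reserve`. `cap` is assumed to be a power of two.
fn next_capacity(cap: usize, len: usize, reserve_cap: usize) -> usize {
    if cap - len >= reserve_cap {
        return cap; // already enough room, no resize needed
    }
    let mut new_cap = cap * 2;
    while new_cap - len < reserve_cap {
        new_cap *= 2;
    }
    new_cap
}

fn main() {
    assert_eq!(next_capacity(64, 60, 4), 64);    // exactly enough spare room
    assert_eq!(next_capacity(64, 60, 5), 128);   // one doubling
    assert_eq!(next_capacity(64, 60, 200), 512); // several doublings
}
```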
|
345 | | |
346 | | /// Returns `true` if the queue is empty. |
347 | | /// |
348 | | /// ``` |
349 | | /// use crossbeam_deque::Worker; |
350 | | /// |
351 | | /// let w = Worker::new_lifo(); |
352 | | /// |
353 | | /// assert!(w.is_empty()); |
354 | | /// w.push(1); |
355 | | /// assert!(!w.is_empty()); |
356 | | /// ``` |
357 | 10 | pub fn is_empty(&self) -> bool { |
358 | 10 | let b = self.inner.back.load(Ordering::Relaxed); |
359 | 10 | let f = self.inner.front.load(Ordering::SeqCst); |
360 | 10 | b.wrapping_sub(f) <= 0 |
361 | 10 | }
Instantiations of <crossbeam_deque::deque::Worker<T>>::is_empty:
  <crossbeam_deque::deque::Worker<i32>>::is_empty: 5 calls
  <crossbeam_deque::deque::Worker<i32>>::is_empty: 5 calls
|
362 | | |
363 | | /// Returns the number of tasks in the deque. |
364 | | /// |
365 | | /// ``` |
366 | | /// use crossbeam_deque::Worker; |
367 | | /// |
368 | | /// let w = Worker::new_lifo(); |
369 | | /// |
370 | | /// assert_eq!(w.len(), 0); |
371 | | /// w.push(1); |
372 | | /// assert_eq!(w.len(), 1); |
373 | | /// w.push(1); |
374 | | /// assert_eq!(w.len(), 2); |
375 | | /// ``` |
376 | | pub fn len(&self) -> usize { |
377 | | let b = self.inner.back.load(Ordering::Relaxed); |
378 | | let f = self.inner.front.load(Ordering::SeqCst); |
379 | | b.wrapping_sub(f).max(0) as usize |
380 | | } |
381 | | |
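Both `is_empty` and `len` take the difference of two ever-growing indices with `wrapping_sub` and clamp it at zero, because a concurrent steal can momentarily push `front` past `back`. A small self-contained check of that arithmetic; the `queue_len` helper is illustrative only:

```
// Why `len` is `back.wrapping_sub(front)` clamped at zero: the indices grow
// without bound (wrapping on overflow), and the raw difference can be
// negative when a racing steal has advanced `front` past `back`.
fn queue_len(back: isize, front: isize) -> usize {
    back.wrapping_sub(front).max(0) as usize
}

fn main() {
    assert_eq!(queue_len(10, 7), 3);                  // ordinary case
    assert_eq!(queue_len(5, 6), 0);                   // front raced past back
    assert_eq!(queue_len(isize::MIN, isize::MAX), 1); // indices wrapped around
}
```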
382 | | /// Pushes a task into the queue. |
383 | | /// |
384 | | /// # Examples |
385 | | /// |
386 | | /// ``` |
387 | | /// use crossbeam_deque::Worker; |
388 | | /// |
389 | | /// let w = Worker::new_lifo(); |
390 | | /// w.push(1); |
391 | | /// w.push(2); |
392 | | /// ``` |
393 | 463k | pub fn push(&self, task: T) { |
394 | | // Load the back index, front index, and buffer. |
395 | 463k | let b = self.inner.back.load(Ordering::Relaxed); |
396 | 463k | let f = self.inner.front.load(Ordering::Acquire); |
397 | 463k | let mut buffer = self.buffer.get(); |
398 | 463k | |
399 | 463k | // Calculate the length of the queue. |
400 | 463k | let len = b.wrapping_sub(f); |
401 | 463k | |
402 | 463k | // Is the queue full? |
403 | 463k | if len >= buffer.cap as isize { |
404 | 66 | // Yes. Grow the underlying buffer. |
405 | 66 | unsafe { |
406 | 66 | self.resize(2 * buffer.cap); |
407 | 66 | } |
408 | 66 | buffer = self.buffer.get(); |
409 | 463k | } |
410 | | |
411 | | // Write `task` into the slot. |
412 | 463k | unsafe { |
413 | 463k | buffer.write(b, task); |
414 | 463k | } |
415 | 463k | |
416 | 463k | atomic::fence(Ordering::Release); |
417 | 463k | |
418 | 463k | // Increment the back index. |
419 | 463k | // |
420 | 463k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data |
421 | 463k | // races because it doesn't understand fences. |
422 | 463k | self.inner.back.store(b.wrapping_add(1), Ordering::Release); |
423 | 463k | }
Instantiations of <crossbeam_deque::deque::Worker<T>>::push:
  <crossbeam_deque::deque::Worker<i32>>::push: 1 call
  <crossbeam_deque::deque::Worker<usize>>::push: 143k calls
  <crossbeam_deque::deque::Worker<i32>>::push: 13 calls
  <crossbeam_deque::deque::Worker<fifo::destructors::Elem>>::push: 50.0k calls
  <crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::push: 50.0k calls
  <crossbeam_deque::deque::Worker<i32>>::push: 46 calls
  <crossbeam_deque::deque::Worker<usize>>::push: 119k calls
  <crossbeam_deque::deque::Worker<i32>>::push: 13 calls
  <crossbeam_deque::deque::Worker<lifo::destructors::Elem>>::push: 50.0k calls
  <crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::push: 50.0k calls
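Since `push` is only ever called by the queue's owner, its counterparts in practice are `pop` on the same `Worker` and `steal` through a `Stealer` handle from the same crate's public API. A short usage sketch; it is single-threaded, so `steal` cannot spuriously return `Steal::Retry` here:

```
// Owner pushes; a Stealer handle takes tasks from the front of the queue.
use crossbeam_deque::{Steal, Worker};

fn main() {
    let w = Worker::new_fifo();
    let s = w.stealer();
    for i in 0..4 {
        w.push(i);
    }
    // A FIFO worker pops from the front...
    assert_eq!(w.pop(), Some(0));
    // ...and stealers take from the front as well, one task at a time.
    assert_eq!(s.steal(), Steal::Success(1));
    assert_eq!(w.pop(), Some(2));
}
```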
|
424 | | |
425 | | /// Pops a task from the queue. |
426 | | /// |
427 | | /// # Examples |
428 | | /// |
429 | | /// ``` |
430 | | /// use crossbeam_deque::Worker; |
431 | | /// |
432 | | /// let w = Worker::new_fifo(); |
433 | | /// w.push(1); |
434 | | /// w.push(2); |
435 | | /// |
436 | | /// assert_eq!(w.pop(), Some(1)); |
437 | | /// assert_eq!(w.pop(), Some(2)); |
438 | | /// assert_eq!(w.pop(), None); |
439 | | /// ``` |
440 | 1.00M | pub fn pop(&self) -> Option<T> { |
441 | 1.00M | // Load the back and front index. |
442 | 1.00M | let b = self.inner.back.load(Ordering::Relaxed); |
443 | 1.00M | let f = self.inner.front.load(Ordering::Relaxed); |
444 | 1.00M | |
445 | 1.00M | // Calculate the length of the queue. |
446 | 1.00M | let len = b.wrapping_sub(f); |
447 | 1.00M | |
448 | 1.00M | // Is the queue empty? |
449 | 1.00M | if len <= 0 { |
450 | 786k | return None; |
451 | 220k | } |
452 | 220k | |
453 | 220k | match self.flavor { |
454 | 220k | // Pop from the front of the queue. |
455 | 220k | Flavor::Fifo => { |
456 | | // Try incrementing the front index to pop the task. |
457 | 131k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); |
458 | 131k | let new_f = f.wrapping_add(1); |
459 | 131k | |
460 | 131k | if b.wrapping_sub(new_f) < 0 { |
461 | 87 | self.inner.front.store(f, Ordering::Relaxed); |
462 | 87 | return None; |
463 | 131k | } |
464 | 131k | |
465 | 131k | unsafe { |
466 | 131k | // Read the popped task. |
467 | 131k | let buffer = self.buffer.get(); |
468 | 131k | let task = buffer.read(f); |
469 | | |
470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. |
471 | 131k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 {
472 | 5 | self.resize(buffer.cap / 2); |
473 | 168k | } |
474 | | |
475 | 168k | Some(task) |
476 | | } |
477 | | } |
478 | | |
479 | | // Pop from the back of the queue. |
480 | | Flavor::Lifo => { |
481 | | // Decrement the back index. |
482 | 89.1k | let b = b.wrapping_sub(1); |
483 | 89.1k | self.inner.back.store(b, Ordering::Relaxed); |
484 | 89.1k | |
485 | 89.1k | atomic::fence(Ordering::SeqCst); |
486 | 89.1k | |
487 | 89.1k | // Load the front index. |
488 | 89.1k | let f = self.inner.front.load(Ordering::Relaxed); |
489 | 89.1k | |
490 | 89.1k | // Compute the length after the back index was decremented. |
491 | 89.1k | let len = b.wrapping_sub(f); |
492 | 89.1k | |
493 | 89.1k | if len < 0 { |
494 | | // The queue is empty. Restore the back index to the original task. |
495 | 1.38k | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); |
496 | 1.38k | None |
497 | | } else { |
498 | | // Read the task to be popped. |
499 | 87.7k | let buffer = self.buffer.get(); |
500 | 87.7k | let mut task = unsafe { Some(buffer.read(b)) }; |
501 | 87.7k | |
502 | 87.7k | // Are we popping the last task from the queue? |
503 | 87.7k | if len == 0 { |
504 | | // Try incrementing the front index. |
505 | 22.2k | if self |
506 | 22.2k | .inner |
507 | 22.2k | .front |
508 | 22.2k | .compare_exchange( |
509 | 22.2k | f, |
510 | 22.2k | f.wrapping_add(1), |
511 | 22.2k | Ordering::SeqCst, |
512 | 22.2k | Ordering::Relaxed, |
513 | 22.2k | ) |
514 | 22.2k | .is_err() |
515 | 1.63k | { |
516 | 1.63k | // Failed. We didn't pop anything. |
517 | 1.63k | mem::forget(task.take()); |
518 | 20.6k | } |
519 | | |
520 | | // Restore the back index to its original value.
521 | 22.2k | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); |
522 | | } else { |
523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. |
524 | 65.4k | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 {
525 | 1 | unsafe { |
526 | 1 | self.resize(buffer.cap / 2); |
527 | 1 | } |
528 | 65.6k | } |
529 | | } |
530 | | |
531 | 87.9k | task |
532 | | } |
533 | | } |
534 | | } |
535 | 1.04M | } <crossbeam_deque::deque::Worker<injector::destructors::Elem>>::pop Line | Count | Source | 440 | 8.02k | pub fn pop(&self) -> Option<T> { | 441 | 8.02k | // Load the back and front index. | 442 | 8.02k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 8.02k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 8.02k | | 445 | 8.02k | // Calculate the length of the queue. | 446 | 8.02k | let len = b.wrapping_sub(f); | 447 | 8.02k | | 448 | 8.02k | // Is the queue empty? | 449 | 8.02k | if len <= 0 { | 450 | 132 | return None; | 451 | 7.89k | } | 452 | 7.89k | | 453 | 7.89k | match self.flavor { | 454 | 7.89k | // Pop from the front of the queue. | 455 | 7.89k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 7.89k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 7.89k | let new_f = f.wrapping_add(1); | 459 | 7.89k | | 460 | 7.89k | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 7.89k | } | 464 | 7.89k | | 465 | 7.89k | unsafe { | 466 | 7.89k | // Read the popped task. | 467 | 7.89k | let buffer = self.buffer.get(); | 468 | 7.89k | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 7.89k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 7.90k | } | 474 | | | 475 | 7.90k | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 8.03k | } |
<crossbeam_deque::deque::Worker<usize>>::pop Line | Count | Source | 440 | 479k | pub fn pop(&self) -> Option<T> { | 441 | 479k | // Load the back and front index. | 442 | 479k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 479k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 479k | | 445 | 479k | // Calculate the length of the queue. | 446 | 479k | let len = b.wrapping_sub(f); | 447 | 479k | | 448 | 479k | // Is the queue empty? | 449 | 479k | if len <= 0 { | 450 | 460k | return None; | 451 | 18.8k | } | 452 | 18.8k | | 453 | 18.8k | match self.flavor { | 454 | 18.8k | // Pop from the front of the queue. | 455 | 18.8k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 18.8k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 18.8k | let new_f = f.wrapping_add(1); | 459 | 18.8k | | 460 | 18.8k | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 18.8k | } | 464 | 18.8k | | 465 | 18.8k | unsafe { | 466 | 18.8k | // Read the popped task. | 467 | 18.8k | let buffer = self.buffer.get(); | 468 | 18.8k | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 18.8k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 33.7k | } | 474 | | | 475 | 33.7k | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 494k | } |
<crossbeam_deque::deque::Worker<i32>>::pop Line | Count | Source | 440 | 1 | pub fn pop(&self) -> Option<T> { | 441 | 1 | // Load the back and front index. | 442 | 1 | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 1 | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 1 | | 445 | 1 | // Calculate the length of the queue. | 446 | 1 | let len = b.wrapping_sub(f); | 447 | 1 | | 448 | 1 | // Is the queue empty? | 449 | 1 | if len <= 0 { | 450 | 0 | return None; | 451 | 1 | } | 452 | 1 | | 453 | 1 | match self.flavor { | 454 | 1 | // Pop from the front of the queue. | 455 | 1 | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 1 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 1 | let new_f = f.wrapping_add(1); | 459 | 1 | | 460 | 1 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 1 | } | 464 | 1 | | 465 | 1 | unsafe { | 466 | 1 | // Read the popped task. | 467 | 1 | let buffer = self.buffer.get(); | 468 | 1 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 1 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 1 | } | 474 | | | 475 | 1 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 1 | } |
<crossbeam_deque::deque::Worker<fifo::destructors::Elem>>::pop Line | Count | Source | 440 | 9.08k | pub fn pop(&self) -> Option<T> { | 441 | 9.08k | // Load the back and front index. | 442 | 9.08k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 9.08k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 9.08k | | 445 | 9.08k | // Calculate the length of the queue. | 446 | 9.08k | let len = b.wrapping_sub(f); | 447 | 9.08k | | 448 | 9.08k | // Is the queue empty? | 449 | 9.08k | if len <= 0 { | 450 | 145 | return None; | 451 | 8.94k | } | 452 | 8.94k | | 453 | 8.94k | match self.flavor { | 454 | 8.94k | // Pop from the front of the queue. | 455 | 8.94k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 8.94k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 8.94k | let new_f = f.wrapping_add(1); | 459 | 8.94k | | 460 | 8.94k | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 8.94k | } | 464 | 8.94k | | 465 | 8.94k | unsafe { | 466 | 8.94k | // Read the popped task. | 467 | 8.94k | let buffer = self.buffer.get(); | 468 | 8.94k | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 8.94k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 41.00k { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 8.94k | } | 474 | | | 475 | 8.94k | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 9.08k | } |
<crossbeam_deque::deque::Worker<i32>>::pop Line | Count | Source | 440 | 10 | pub fn pop(&self) -> Option<T> { | 441 | 10 | // Load the back and front index. | 442 | 10 | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 10 | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 10 | | 445 | 10 | // Calculate the length of the queue. | 446 | 10 | let len = b.wrapping_sub(f); | 447 | 10 | | 448 | 10 | // Is the queue empty? | 449 | 10 | if len <= 0 { | 450 | 4 | return None; | 451 | 6 | } | 452 | 6 | | 453 | 6 | match self.flavor { | 454 | 6 | // Pop from the front of the queue. | 455 | 6 | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 6 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 6 | let new_f = f.wrapping_add(1); | 459 | 6 | | 460 | 6 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 6 | } | 464 | 6 | | 465 | 6 | unsafe { | 466 | 6 | // Read the popped task. | 467 | 6 | let buffer = self.buffer.get(); | 468 | 6 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 6 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 6 | } | 474 | | | 475 | 6 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 10 | } |
<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::pop Line | Count | Source | 440 | 43.1k | pub fn pop(&self) -> Option<T> { | 441 | 43.1k | // Load the back and front index. | 442 | 43.1k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 43.1k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 43.1k | | 445 | 43.1k | // Calculate the length of the queue. | 446 | 43.1k | let len = b.wrapping_sub(f); | 447 | 43.1k | | 448 | 43.1k | // Is the queue empty? | 449 | 43.1k | if len <= 0 { | 450 | 0 | return None; | 451 | 43.1k | } | 452 | 43.1k | | 453 | 43.1k | match self.flavor { | 454 | 43.1k | // Pop from the front of the queue. | 455 | 43.1k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 43.1k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 43.1k | let new_f = f.wrapping_add(1); | 459 | 43.1k | | 460 | 43.1k | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 43.1k | } | 464 | 43.1k | | 465 | 43.1k | unsafe { | 466 | 43.1k | // Read the popped task. | 467 | 43.1k | let buffer = self.buffer.get(); | 468 | 43.1k | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 43.1k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { | 472 | 5 | self.resize(buffer.cap / 2); | 473 | 43.1k | } | 474 | | | 475 | 43.1k | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 43.1k | } |
<crossbeam_deque::deque::Worker<usize>>::pop Line | Count | Source | 440 | 205k | pub fn pop(&self) -> Option<T> { | 441 | 205k | // Load the back and front index. | 442 | 205k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 205k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 205k | | 445 | 205k | // Calculate the length of the queue. | 446 | 205k | let len = b.wrapping_sub(f); | 447 | 205k | | 448 | 205k | // Is the queue empty? | 449 | 205k | if len <= 0 { | 450 | 150k | return None; | 451 | 54.9k | } | 452 | 54.9k | | 453 | 54.9k | match self.flavor { | 454 | 54.9k | // Pop from the front of the queue. | 455 | 54.9k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 54.9k | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 54.9k | let new_f = f.wrapping_add(1); | 459 | 54.9k | | 460 | 54.9k | if b.wrapping_sub(new_f) < 0 { | 461 | 87 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 87 | return None; | 463 | 54.9k | } | 464 | 54.9k | | 465 | 54.9k | unsafe { | 466 | 54.9k | // Read the popped task. | 467 | 54.9k | let buffer = self.buffer.get(); | 468 | 54.9k | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 54.9k | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 75.1k | } | 474 | | | 475 | 75.1k | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 0 | let b = b.wrapping_sub(1); | 483 | 0 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 0 |
| 485 | 0 | atomic::fence(Ordering::SeqCst); | 486 | 0 |
| 487 | 0 | // Load the front index. | 488 | 0 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 0 |
| 490 | 0 | // Compute the length after the back index was decremented. | 491 | 0 | let len = b.wrapping_sub(f); | 492 | 0 |
| 493 | 0 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 0 | let buffer = self.buffer.get(); | 500 | 0 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 0 |
| 502 | 0 | // Are we popping the last task from the queue? | 503 | 0 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 0 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 0 | } | 529 | | } | 530 | | | 531 | 0 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 225k | } |
<crossbeam_deque::deque::Worker<i32>>::pop Line | Count | Source | 440 | 23 | pub fn pop(&self) -> Option<T> { | 441 | 23 | // Load the back and front index. | 442 | 23 | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 23 | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 23 | | 445 | 23 | // Calculate the length of the queue. | 446 | 23 | let len = b.wrapping_sub(f); | 447 | 23 | | 448 | 23 | // Is the queue empty? | 449 | 23 | if len <= 0 { | 450 | 0 | return None; | 451 | 23 | } | 452 | 23 | | 453 | 23 | match self.flavor { | 454 | 23 | // Pop from the front of the queue. | 455 | 23 | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 11 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 11 | let new_f = f.wrapping_add(1); | 459 | 11 | | 460 | 11 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 11 | } | 464 | 11 | | 465 | 11 | unsafe { | 466 | 11 | // Read the popped task. | 467 | 11 | let buffer = self.buffer.get(); | 468 | 11 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 11 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 12 | } | 474 | | | 475 | 12 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 12 | let b = b.wrapping_sub(1); | 483 | 12 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 12 | | 485 | 12 | atomic::fence(Ordering::SeqCst); | 486 | 12 | | 487 | 12 | // Load the front index. | 488 | 12 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 12 | | 490 | 12 | // Compute the length after the back index was decremented. | 491 | 12 | let len = b.wrapping_sub(f); | 492 | 12 | | 493 | 12 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 12 | let buffer = self.buffer.get(); | 500 | 12 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 12 | | 502 | 12 | // Are we popping the last task from the queue? | 503 | 12 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 6 | if self | 506 | 6 | .inner | 507 | 6 | .front | 508 | 6 | .compare_exchange( | 509 | 6 | f, | 510 | 6 | f.wrapping_add(1), | 511 | 6 | Ordering::SeqCst, | 512 | 6 | Ordering::Relaxed, | 513 | 6 | ) | 514 | 6 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 6 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 6 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 6 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 40 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 6 | } | 529 | | } | 530 | | | 531 | 12 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 24 | } |
<crossbeam_deque::deque::Worker<lifo::destructors::Elem>>::pop Line | Count | Source | 440 | 9.25k | pub fn pop(&self) -> Option<T> { | 441 | 9.25k | // Load the back and front index. | 442 | 9.25k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 9.25k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 9.25k | | 445 | 9.25k | // Calculate the length of the queue. | 446 | 9.25k | let len = b.wrapping_sub(f); | 447 | 9.25k | | 448 | 9.25k | // Is the queue empty? | 449 | 9.25k | if len <= 0 { | 450 | 426 | return None; | 451 | 8.82k | } | 452 | 8.82k | | 453 | 8.82k | match self.flavor { | 454 | 8.82k | // Pop from the front of the queue. | 455 | 8.82k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 82 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 82 | let new_f = f.wrapping_add(1); | 459 | 82 | | 460 | 82 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 82 | } | 464 | 82 | | 465 | 82 | unsafe { | 466 | 82 | // Read the popped task. | 467 | 82 | let buffer = self.buffer.get(); | 468 | 82 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 82 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 40 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 0 | } | 474 | | | 475 | 0 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 8.74k | let b = b.wrapping_sub(1); | 483 | 8.74k | self.inner.back.store(b, Ordering::Relaxed); | 484 | 8.74k | | 485 | 8.74k | atomic::fence(Ordering::SeqCst); | 486 | 8.74k | | 487 | 8.74k | // Load the front index. | 488 | 8.74k | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 8.74k | | 490 | 8.74k | // Compute the length after the back index was decremented. | 491 | 8.74k | let len = b.wrapping_sub(f); | 492 | 8.74k | | 493 | 8.74k | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 8.74k | let buffer = self.buffer.get(); | 500 | 8.74k | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 8.74k | | 502 | 8.74k | // Are we popping the last task from the queue? | 503 | 8.74k | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 155 | if self | 506 | 155 | .inner | 507 | 155 | .front | 508 | 155 | .compare_exchange( | 509 | 155 | f, | 510 | 155 | f.wrapping_add(1), | 511 | 155 | Ordering::SeqCst, | 512 | 155 | Ordering::Relaxed, | 513 | 155 | ) | 514 | 155 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 155 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 155 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 8.59k | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 41.00k { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 8.61k | } | 529 | | } | 530 | | | 531 | 8.76k | task | 532 | | } | 533 | | } | 534 | | } | 535 | 9.19k | } |
<crossbeam_deque::deque::Worker<i32>>::pop Line | Count | Source | 440 | 10 | pub fn pop(&self) -> Option<T> { | 441 | 10 | // Load the back and front index. | 442 | 10 | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 10 | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 10 | | 445 | 10 | // Calculate the length of the queue. | 446 | 10 | let len = b.wrapping_sub(f); | 447 | 10 | | 448 | 10 | // Is the queue empty? | 449 | 10 | if len <= 0 { | 450 | 4 | return None; | 451 | 6 | } | 452 | 6 | | 453 | 6 | match self.flavor { | 454 | 6 | // Pop from the front of the queue. | 455 | 6 | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 0 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 0 | let new_f = f.wrapping_add(1); | 459 | 0 |
| 460 | 0 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 0 | } | 464 | 0 |
| 465 | 0 | unsafe { | 466 | 0 | // Read the popped task. | 467 | 0 | let buffer = self.buffer.get(); | 468 | 0 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 0 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 0 | } | 474 | | | 475 | 0 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 6 | let b = b.wrapping_sub(1); | 483 | 6 | self.inner.back.store(b, Ordering::Relaxed); | 484 | 6 | | 485 | 6 | atomic::fence(Ordering::SeqCst); | 486 | 6 | | 487 | 6 | // Load the front index. | 488 | 6 | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 6 | | 490 | 6 | // Compute the length after the back index was decremented. | 491 | 6 | let len = b.wrapping_sub(f); | 492 | 6 | | 493 | 6 | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 6 | let buffer = self.buffer.get(); | 500 | 6 | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 6 | | 502 | 6 | // Are we popping the last task from the queue? | 503 | 6 | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 3 | if self | 506 | 3 | .inner | 507 | 3 | .front | 508 | 3 | .compare_exchange( | 509 | 3 | f, | 510 | 3 | f.wrapping_add(1), | 511 | 3 | Ordering::SeqCst, | 512 | 3 | Ordering::Relaxed, | 513 | 3 | ) | 514 | 3 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 3 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 3 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 3 | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 3 | } | 529 | | } | 530 | | | 531 | 6 | task | 532 | | } | 533 | | } | 534 | | } | 535 | 10 | }
<crossbeam_deque::deque::Worker<alloc::boxed::Box<usize>>>::pop Line | Count | Source | 440 | 26.0k | pub fn pop(&self) -> Option<T> { | 441 | 26.0k | // Load the back and front index. | 442 | 26.0k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 26.0k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 26.0k | | 445 | 26.0k | // Calculate the length of the queue. | 446 | 26.0k | let len = b.wrapping_sub(f); | 447 | 26.0k | | 448 | 26.0k | // Is the queue empty? | 449 | 26.0k | if len <= 0 { | 450 | 0 | return None; | 451 | 26.0k | } | 452 | 26.0k | | 453 | 26.0k | match self.flavor { | 454 | 26.0k | // Pop from the front of the queue. | 455 | 26.0k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 0 | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 0 | let new_f = f.wrapping_add(1); | 459 | 0 |
| 460 | 0 | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 0 | } | 464 | 0 |
| 465 | 0 | unsafe { | 466 | 0 | // Read the popped task. | 467 | 0 | let buffer = self.buffer.get(); | 468 | 0 | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 0 | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 0 | } | 474 | | | 475 | 0 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 26.0k | let b = b.wrapping_sub(1); | 483 | 26.0k | self.inner.back.store(b, Ordering::Relaxed); | 484 | 26.0k | | 485 | 26.0k | atomic::fence(Ordering::SeqCst); | 486 | 26.0k | | 487 | 26.0k | // Load the front index. | 488 | 26.0k | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 26.0k | | 490 | 26.0k | // Compute the length after the back index was decremented. | 491 | 26.0k | let len = b.wrapping_sub(f); | 492 | 26.0k | | 493 | 26.0k | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 0 | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 26.0k | let buffer = self.buffer.get(); | 500 | 26.0k | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 26.0k | | 502 | 26.0k | // Are we popping the last task from the queue? | 503 | 26.0k | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 0 | if self | 506 | 0 | .inner | 507 | 0 | .front | 508 | 0 | .compare_exchange( | 509 | 0 | f, | 510 | 0 | f.wrapping_add(1), | 511 | 0 | Ordering::SeqCst, | 512 | 0 | Ordering::Relaxed, | 513 | 0 | ) | 514 | 0 | .is_err() | 515 | 0 | { | 516 | 0 | // Failed. We didn't pop anything. | 517 | 0 | mem::forget(task.take()); | 518 | 0 | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 0 | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 26.0k | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 1 | unsafe { | 526 | 1 | self.resize(buffer.cap / 2); | 527 | 1 | } | 528 | 26.0k | } | 529 | | } | 530 | | | 531 | 26.0k | task | 532 | | } | 533 | | } | 534 | | } | 535 | 26.0k | } |
<crossbeam_deque::deque::Worker<usize>>::pop Line | Count | Source | 440 | 226k | pub fn pop(&self) -> Option<T> { | 441 | 226k | // Load the back and front index. | 442 | 226k | let b = self.inner.back.load(Ordering::Relaxed); | 443 | 226k | let f = self.inner.front.load(Ordering::Relaxed); | 444 | 226k | | 445 | 226k | // Calculate the length of the queue. | 446 | 226k | let len = b.wrapping_sub(f); | 447 | 226k | | 448 | 226k | // Is the queue empty? | 449 | 226k | if len <= 0 { | 450 | 174k | return None; | 451 | 51.8k | } | 452 | 51.8k | | 453 | 51.8k | match self.flavor { | 454 | 51.8k | // Pop from the front of the queue. | 455 | 51.8k | Flavor::Fifo => { | 456 | | // Try incrementing the front index to pop the task. | 457 | 18.4E | let f = self.inner.front.fetch_add(1, Ordering::SeqCst); | 458 | 18.4E | let new_f = f.wrapping_add(1); | 459 | 18.4E | | 460 | 18.4E | if b.wrapping_sub(new_f) < 0 { | 461 | 0 | self.inner.front.store(f, Ordering::Relaxed); | 462 | 0 | return None; | 463 | 18.4E | } | 464 | 18.4E | | 465 | 18.4E | unsafe { | 466 | 18.4E | // Read the popped task. | 467 | 18.4E | let buffer = self.buffer.get(); | 468 | 18.4E | let task = buffer.read(f); | 469 | | | 470 | | // Shrink the buffer if `len - 1` is less than one fourth of the capacity. | 471 | 18.4E | if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { | 472 | 0 | self.resize(buffer.cap / 2); | 473 | 0 | } | 474 | | | 475 | 0 | Some(task) | 476 | | } | 477 | | } | 478 | | | 479 | | // Pop from the back of the queue. | 480 | | Flavor::Lifo => { | 481 | | // Decrement the back index. | 482 | 54.2k | let b = b.wrapping_sub(1); | 483 | 54.2k | self.inner.back.store(b, Ordering::Relaxed); | 484 | 54.2k | | 485 | 54.2k | atomic::fence(Ordering::SeqCst); | 486 | 54.2k | | 487 | 54.2k | // Load the front index. | 488 | 54.2k | let f = self.inner.front.load(Ordering::Relaxed); | 489 | 54.2k | | 490 | 54.2k | // Compute the length after the back index was decremented. | 491 | 54.2k | let len = b.wrapping_sub(f); | 492 | 54.2k | | 493 | 54.2k | if len < 0 { | 494 | | // The queue is empty. Restore the back index to the original task. | 495 | 1.38k | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 496 | 1.38k | None | 497 | | } else { | 498 | | // Read the task to be popped. | 499 | 52.8k | let buffer = self.buffer.get(); | 500 | 52.8k | let mut task = unsafe { Some(buffer.read(b)) }; | 501 | 52.8k | | 502 | 52.8k | // Are we popping the last task from the queue? | 503 | 52.8k | if len == 0 { | 504 | | // Try incrementing the front index. | 505 | 22.0k | if self | 506 | 22.0k | .inner | 507 | 22.0k | .front | 508 | 22.0k | .compare_exchange( | 509 | 22.0k | f, | 510 | 22.0k | f.wrapping_add(1), | 511 | 22.0k | Ordering::SeqCst, | 512 | 22.0k | Ordering::Relaxed, | 513 | 22.0k | ) | 514 | 22.0k | .is_err() | 515 | 1.63k | { | 516 | 1.63k | // Failed. We didn't pop anything. | 517 | 1.63k | mem::forget(task.take()); | 518 | 20.4k | } | 519 | | | 520 | | // Restore the back index to the original task. | 521 | 22.1k | self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); | 522 | | } else { | 523 | | // Shrink the buffer if `len` is less than one fourth of the capacity. | 524 | 30.8k | if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { | 525 | 0 | unsafe { | 526 | 0 | self.resize(buffer.cap / 2); | 527 | 0 | } | 528 | 30.9k | } | 529 | | } | 530 | | | 531 | 53.1k | task | 532 | | } | 533 | | } | 534 | | } | 535 | 229k | }
|
536 | | } |
537 | | |
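The two `pop` flavors instrumented above differ only in which end of the queue they take from. A minimal sketch of the observable difference, using the crate's public API (illustrative, not part of this report):

    use crossbeam_deque::Worker;

    // FIFO workers pop from the front, in push order.
    let fifo = Worker::new_fifo();
    fifo.push(1);
    fifo.push(2);
    assert_eq!(fifo.pop(), Some(1));

    // LIFO workers pop from the back, most recently pushed first.
    let lifo = Worker::new_lifo();
    lifo.push(1);
    lifo.push(2);
    assert_eq!(lifo.pop(), Some(2));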
538 | | impl<T> fmt::Debug for Worker<T> { |
539 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
540 | | f.pad("Worker { .. }") |
541 | | } |
542 | | } |
543 | | |
544 | | /// A stealer handle of a worker queue. |
545 | | /// |
546 | | /// Stealers can be shared among threads. |
547 | | /// |
548 | | /// Task schedulers typically have a single worker queue per worker thread. |
549 | | /// |
550 | | /// # Examples |
551 | | /// |
552 | | /// ``` |
553 | | /// use crossbeam_deque::{Steal, Worker}; |
554 | | /// |
555 | | /// let w = Worker::new_lifo(); |
556 | | /// w.push(1); |
557 | | /// w.push(2); |
558 | | /// |
559 | | /// let s = w.stealer(); |
560 | | /// assert_eq!(s.steal(), Steal::Success(1)); |
561 | | /// assert_eq!(s.steal(), Steal::Success(2)); |
562 | | /// assert_eq!(s.steal(), Steal::Empty); |
563 | | /// ``` |
564 | | pub struct Stealer<T> { |
565 | | /// A reference to the inner representation of the queue. |
566 | | inner: Arc<CachePadded<Inner<T>>>, |
567 | | |
568 | | /// The flavor of the queue. |
569 | | flavor: Flavor, |
570 | | } |
571 | | |
572 | | unsafe impl<T: Send> Send for Stealer<T> {} |
573 | | unsafe impl<T: Send> Sync for Stealer<T> {} |
574 | | |
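Since `Stealer` is `Send + Sync` (and cloneable via the shared `Arc`), a single worker's queue can be raided from several threads at once. A hedged usage sketch, not taken from this file:

    use crossbeam_deque::{Steal, Worker};
    use std::thread;

    let w = Worker::new_fifo();
    for i in 0..4 {
        w.push(i);
    }
    let s = w.stealer();

    // Each thief thread gets its own clone of the stealer handle.
    let thieves: Vec<_> = (0..2)
        .map(|_| {
            let s = s.clone();
            thread::spawn(move || match s.steal() {
                Steal::Success(n) => Some(n),
                _ => None,
            })
        })
        .collect();
    for t in thieves {
        let _ = t.join();
    }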
575 | | impl<T> Stealer<T> { |
576 | | /// Returns `true` if the queue is empty. |
577 | | /// |
578 | | /// ``` |
579 | | /// use crossbeam_deque::Worker; |
580 | | /// |
581 | | /// let w = Worker::new_lifo(); |
582 | | /// let s = w.stealer(); |
583 | | /// |
584 | | /// assert!(s.is_empty()); |
585 | | /// w.push(1); |
586 | | /// assert!(!s.is_empty()); |
587 | | /// ``` |
588 | 10 | pub fn is_empty(&self) -> bool { |
589 | 10 | let f = self.inner.front.load(Ordering::Acquire); |
590 | 10 | atomic::fence(Ordering::SeqCst); |
591 | 10 | let b = self.inner.back.load(Ordering::Acquire); |
592 | 10 | b.wrapping_sub(f) <= 0 |
593 | 10 | } <crossbeam_deque::deque::Stealer<i32>>::is_empty Line | Count | Source | 588 | 5 | pub fn is_empty(&self) -> bool { | 589 | 5 | let f = self.inner.front.load(Ordering::Acquire); | 590 | 5 | atomic::fence(Ordering::SeqCst); | 591 | 5 | let b = self.inner.back.load(Ordering::Acquire); | 592 | 5 | b.wrapping_sub(f) <= 0 | 593 | 5 | } |
<crossbeam_deque::deque::Stealer<i32>>::is_empty Line | Count | Source | 588 | 5 | pub fn is_empty(&self) -> bool { | 589 | 5 | let f = self.inner.front.load(Ordering::Acquire); | 590 | 5 | atomic::fence(Ordering::SeqCst); | 591 | 5 | let b = self.inner.back.load(Ordering::Acquire); | 592 | 5 | b.wrapping_sub(f) <= 0 | 593 | 5 | } |
|
594 | | |
595 | | /// Steals a task from the queue. |
596 | | /// |
597 | | /// # Examples |
598 | | /// |
599 | | /// ``` |
600 | | /// use crossbeam_deque::{Steal, Worker}; |
601 | | /// |
602 | | /// let w = Worker::new_lifo(); |
603 | | /// w.push(1); |
604 | | /// w.push(2); |
605 | | /// |
606 | | /// let s = w.stealer(); |
607 | | /// assert_eq!(s.steal(), Steal::Success(1)); |
608 | | /// assert_eq!(s.steal(), Steal::Success(2)); |
609 | | /// ``` |
610 | 538k | pub fn steal(&self) -> Steal<T> { |
611 | 538k | // Load the front index. |
612 | 538k | let f = self.inner.front.load(Ordering::Acquire); |
613 | 538k | |
614 | 538k | // A SeqCst fence is needed here. |
615 | 538k | // |
616 | 538k | // If the current thread is already pinned (reentrantly), we must manually issue the |
617 | 538k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't |
618 | 538k | // have to. |
619 | 538k | if epoch::is_pinned() { |
620 | 0 | atomic::fence(Ordering::SeqCst); |
621 | 538k | } |
622 | | |
623 | 538k | let guard = &epoch::pin(); |
624 | 538k | |
625 | 538k | // Load the back index. |
626 | 538k | let b = self.inner.back.load(Ordering::Acquire); |
627 | 538k | |
628 | 538k | // Is the queue empty? |
629 | 538k | if b.wrapping_sub(f) <= 0 { |
630 | 214k | return Steal::Empty; |
631 | 323k | } |
632 | 323k | |
633 | 323k | // Load the buffer and read the task at the front. |
634 | 323k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); |
635 | 323k | let task = unsafe { buffer.deref().read(f) }; |
636 | 323k | |
637 | 323k | // Try incrementing the front index to steal the task. |
638 | 323k | if self |
639 | 323k | .inner |
640 | 323k | .front |
641 | 323k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) |
642 | 323k | .is_err() |
643 | | { |
644 | | // We didn't steal this task, forget it. |
645 | 185k | mem::forget(task); |
646 | 185k | return Steal::Retry; |
647 | 155k | } |
648 | 155k | |
649 | 155k | // Return the stolen task. |
650 | 155k | Steal::Success(task) |
651 | 556k | } <crossbeam_deque::deque::Stealer<alloc::boxed::Box<usize>>>::steal Line | Count | Source | 610 | 45.2k | pub fn steal(&self) -> Steal<T> { | 611 | 45.2k | // Load the front index. | 612 | 45.2k | let f = self.inner.front.load(Ordering::Acquire); | 613 | 45.2k | | 614 | 45.2k | // A SeqCst fence is needed here. | 615 | 45.2k | // | 616 | 45.2k | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 45.2k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 45.2k | // have to. | 619 | 45.2k | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 45.2k | } | 622 | | | 623 | 45.2k | let guard = &epoch::pin(); | 624 | 45.2k | | 625 | 45.2k | // Load the back index. | 626 | 45.2k | let b = self.inner.back.load(Ordering::Acquire); | 627 | 45.2k | | 628 | 45.2k | // Is the queue empty? | 629 | 45.2k | if b.wrapping_sub(f) <= 0 { | 630 | 1 | return Steal::Empty; | 631 | 45.2k | } | 632 | 45.2k | | 633 | 45.2k | // Load the buffer and read the task at the front. | 634 | 45.2k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 45.2k | let task = unsafe { buffer.deref().read(f) }; | 636 | 45.2k | | 637 | 45.2k | // Try incrementing the front index to steal the task. | 638 | 45.2k | if self | 639 | 45.2k | .inner | 640 | 45.2k | .front | 641 | 45.2k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 45.2k | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 40.3k | mem::forget(task); | 646 | 40.3k | return Steal::Retry; | 647 | 6.84k | } | 648 | 6.84k | | 649 | 6.84k | // Return the stolen task. | 650 | 6.84k | Steal::Success(task) | 651 | 47.1k | } |
<crossbeam_deque::deque::Stealer<fifo::destructors::Elem>>::steal Line | Count | Source | 610 | 145 | pub fn steal(&self) -> Steal<T> { | 611 | 145 | // Load the front index. | 612 | 145 | let f = self.inner.front.load(Ordering::Acquire); | 613 | 145 | | 614 | 145 | // A SeqCst fence is needed here. | 615 | 145 | // | 616 | 145 | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 145 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 145 | // have to. | 619 | 145 | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 145 | } | 622 | | | 623 | 145 | let guard = &epoch::pin(); | 624 | 145 | | 625 | 145 | // Load the back index. | 626 | 145 | let b = self.inner.back.load(Ordering::Acquire); | 627 | 145 | | 628 | 145 | // Is the queue empty? | 629 | 145 | if b.wrapping_sub(f) <= 0 { | 630 | 0 | return Steal::Empty; | 631 | 145 | } | 632 | 145 | | 633 | 145 | // Load the buffer and read the task at the front. | 634 | 145 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 145 | let task = unsafe { buffer.deref().read(f) }; | 636 | 145 | | 637 | 145 | // Try incrementing the front index to steal the task. | 638 | 145 | if self | 639 | 145 | .inner | 640 | 145 | .front | 641 | 145 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 145 | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 8 | mem::forget(task); | 646 | 8 | return Steal::Retry; | 647 | 137 | } | 648 | 137 | | 649 | 137 | // Return the stolen task. | 650 | 137 | Steal::Success(task) | 651 | 145 | } |
<crossbeam_deque::deque::Stealer<i32>>::steal Line | Count | Source | 610 | 11 | pub fn steal(&self) -> Steal<T> { | 611 | 11 | // Load the front index. | 612 | 11 | let f = self.inner.front.load(Ordering::Acquire); | 613 | 11 | | 614 | 11 | // A SeqCst fence is needed here. | 615 | 11 | // | 616 | 11 | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 11 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 11 | // have to. | 619 | 11 | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 11 | } | 622 | | | 623 | 11 | let guard = &epoch::pin(); | 624 | 11 | | 625 | 11 | // Load the back index. | 626 | 11 | let b = self.inner.back.load(Ordering::Acquire); | 627 | 11 | | 628 | 11 | // Is the queue empty? | 629 | 11 | if b.wrapping_sub(f) <= 0 { | 630 | 4 | return Steal::Empty; | 631 | 7 | } | 632 | 7 | | 633 | 7 | // Load the buffer and read the task at the front. | 634 | 7 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 7 | let task = unsafe { buffer.deref().read(f) }; | 636 | 7 | | 637 | 7 | // Try incrementing the front index to steal the task. | 638 | 7 | if self | 639 | 7 | .inner | 640 | 7 | .front | 641 | 7 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 7 | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 0 | mem::forget(task); | 646 | 0 | return Steal::Retry; | 647 | 7 | } | 648 | 7 | | 649 | 7 | // Return the stolen task. | 650 | 7 | Steal::Success(task) | 651 | 11 | } |
<crossbeam_deque::deque::Stealer<usize>>::steal Line | Count | Source | 610 | 174k | pub fn steal(&self) -> Steal<T> { | 611 | 174k | // Load the front index. | 612 | 174k | let f = self.inner.front.load(Ordering::Acquire); | 613 | 174k | | 614 | 174k | // A SeqCst fence is needed here. | 615 | 174k | // | 616 | 174k | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 174k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 174k | // have to. | 619 | 174k | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 174k | } | 622 | | | 623 | 174k | let guard = &epoch::pin(); | 624 | 174k | | 625 | 174k | // Load the back index. | 626 | 174k | let b = self.inner.back.load(Ordering::Acquire); | 627 | 174k | | 628 | 174k | // Is the queue empty? | 629 | 174k | if b.wrapping_sub(f) <= 0 { | 630 | 84.5k | return Steal::Empty; | 631 | 90.1k | } | 632 | 90.1k | | 633 | 90.1k | // Load the buffer and read the task at the front. | 634 | 90.1k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 90.1k | let task = unsafe { buffer.deref().read(f) }; | 636 | 90.1k | | 637 | 90.1k | // Try incrementing the front index to steal the task. | 638 | 90.1k | if self | 639 | 90.1k | .inner | 640 | 90.1k | .front | 641 | 90.1k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 90.1k | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 31.8k | mem::forget(task); | 646 | 31.8k | return Steal::Retry; | 647 | 63.3k | } | 648 | 63.3k | | 649 | 63.3k | // Return the stolen task. | 650 | 63.3k | Steal::Success(task) | 651 | 179k | } |
<crossbeam_deque::deque::Stealer<i32>>::steal Line | Count | Source | 610 | 6 | pub fn steal(&self) -> Steal<T> { | 611 | 6 | // Load the front index. | 612 | 6 | let f = self.inner.front.load(Ordering::Acquire); | 613 | 6 | | 614 | 6 | // A SeqCst fence is needed here. | 615 | 6 | // | 616 | 6 | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 6 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 6 | // have to. | 619 | 6 | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 6 | } | 622 | | | 623 | 6 | let guard = &epoch::pin(); | 624 | 6 | | 625 | 6 | // Load the back index. | 626 | 6 | let b = self.inner.back.load(Ordering::Acquire); | 627 | 6 | | 628 | 6 | // Is the queue empty? | 629 | 6 | if b.wrapping_sub(f) <= 0 { | 630 | 0 | return Steal::Empty; | 631 | 6 | } | 632 | 6 | | 633 | 6 | // Load the buffer and read the task at the front. | 634 | 6 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 6 | let task = unsafe { buffer.deref().read(f) }; | 636 | 6 | | 637 | 6 | // Try incrementing the front index to steal the task. | 638 | 6 | if self | 639 | 6 | .inner | 640 | 6 | .front | 641 | 6 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 6 | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 0 | mem::forget(task); | 646 | 0 | return Steal::Retry; | 647 | 6 | } | 648 | 6 | | 649 | 6 | // Return the stolen task. | 650 | 6 | Steal::Success(task) | 651 | 6 | } |
<crossbeam_deque::deque::Stealer<alloc::boxed::Box<usize>>>::steal Line | Count | Source | 610 | 114k | pub fn steal(&self) -> Steal<T> { | 611 | 114k | // Load the front index. | 612 | 114k | let f = self.inner.front.load(Ordering::Acquire); | 613 | 114k | | 614 | 114k | // A SeqCst fence is needed here. | 615 | 114k | // | 616 | 114k | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 114k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 114k | // have to. | 619 | 114k | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 114k | } | 622 | | | 623 | 114k | let guard = &epoch::pin(); | 624 | 114k | | 625 | 114k | // Load the back index. | 626 | 114k | let b = self.inner.back.load(Ordering::Acquire); | 627 | 114k | | 628 | 114k | // Is the queue empty? | 629 | 114k | if b.wrapping_sub(f) <= 0 { | 630 | 30.5k | return Steal::Empty; | 631 | 83.9k | } | 632 | 83.9k | | 633 | 83.9k | // Load the buffer and read the task at the front. | 634 | 83.9k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 83.9k | let task = unsafe { buffer.deref().read(f) }; | 636 | 83.9k | | 637 | 83.9k | // Try incrementing the front index to steal the task. | 638 | 83.9k | if self | 639 | 83.9k | .inner | 640 | 83.9k | .front | 641 | 83.9k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 83.9k | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 65.7k | mem::forget(task); | 646 | 65.7k | return Steal::Retry; | 647 | 23.9k | } | 648 | 23.9k | | 649 | 23.9k | // Return the stolen task. | 650 | 23.9k | Steal::Success(task) | 651 | 120k | } |
<crossbeam_deque::deque::Stealer<lifo::destructors::Elem>>::steal Line | Count | Source | 610 | 426 | pub fn steal(&self) -> Steal<T> { | 611 | 426 | // Load the front index. | 612 | 426 | let f = self.inner.front.load(Ordering::Acquire); | 613 | 426 | | 614 | 426 | // A SeqCst fence is needed here. | 615 | 426 | // | 616 | 426 | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 426 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 426 | // have to. | 619 | 426 | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 426 | } | 622 | | | 623 | 426 | let guard = &epoch::pin(); | 624 | 426 | | 625 | 426 | // Load the back index. | 626 | 426 | let b = self.inner.back.load(Ordering::Acquire); | 627 | 426 | | 628 | 426 | // Is the queue empty? | 629 | 426 | if b.wrapping_sub(f) <= 0 { | 630 | 0 | return Steal::Empty; | 631 | 426 | } | 632 | 426 | | 633 | 426 | // Load the buffer and read the task at the front. | 634 | 426 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 426 | let task = unsafe { buffer.deref().read(f) }; | 636 | 426 | | 637 | 426 | // Try incrementing the front index to steal the task. | 638 | 426 | if self | 639 | 426 | .inner | 640 | 426 | .front | 641 | 426 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 426 | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 309 | mem::forget(task); | 646 | 309 | return Steal::Retry; | 647 | 118 | } | 648 | 118 | | 649 | 118 | // Return the stolen task. | 650 | 118 | Steal::Success(task) | 651 | 427 | } |
<crossbeam_deque::deque::Stealer<i32>>::steal Line | Count | Source | 610 | 11 | pub fn steal(&self) -> Steal<T> { | 611 | 11 | // Load the front index. | 612 | 11 | let f = self.inner.front.load(Ordering::Acquire); | 613 | 11 | | 614 | 11 | // A SeqCst fence is needed here. | 615 | 11 | // | 616 | 11 | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 11 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 11 | // have to. | 619 | 11 | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 11 | } | 622 | | | 623 | 11 | let guard = &epoch::pin(); | 624 | 11 | | 625 | 11 | // Load the back index. | 626 | 11 | let b = self.inner.back.load(Ordering::Acquire); | 627 | 11 | | 628 | 11 | // Is the queue empty? | 629 | 11 | if b.wrapping_sub(f) <= 0 { | 630 | 4 | return Steal::Empty; | 631 | 7 | } | 632 | 7 | | 633 | 7 | // Load the buffer and read the task at the front. | 634 | 7 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 7 | let task = unsafe { buffer.deref().read(f) }; | 636 | 7 | | 637 | 7 | // Try incrementing the front index to steal the task. | 638 | 7 | if self | 639 | 7 | .inner | 640 | 7 | .front | 641 | 7 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 7 | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 0 | mem::forget(task); | 646 | 0 | return Steal::Retry; | 647 | 7 | } | 648 | 7 | | 649 | 7 | // Return the stolen task. | 650 | 7 | Steal::Success(task) | 651 | 11 | } |
<crossbeam_deque::deque::Stealer<usize>>::steal Line | Count | Source | 610 | 203k | pub fn steal(&self) -> Steal<T> { | 611 | 203k | // Load the front index. | 612 | 203k | let f = self.inner.front.load(Ordering::Acquire); | 613 | 203k | | 614 | 203k | // A SeqCst fence is needed here. | 615 | 203k | // | 616 | 203k | // If the current thread is already pinned (reentrantly), we must manually issue the | 617 | 203k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 618 | 203k | // have to. | 619 | 203k | if epoch::is_pinned() { | 620 | 0 | atomic::fence(Ordering::SeqCst); | 621 | 203k | } | 622 | | | 623 | 203k | let guard = &epoch::pin(); | 624 | 203k | | 625 | 203k | // Load the back index. | 626 | 203k | let b = self.inner.back.load(Ordering::Acquire); | 627 | 203k | | 628 | 203k | // Is the queue empty? | 629 | 203k | if b.wrapping_sub(f) <= 0 { | 630 | 99.5k | return Steal::Empty; | 631 | 104k | } | 632 | 104k | | 633 | 104k | // Load the buffer and read the task at the front. | 634 | 104k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 635 | 104k | let task = unsafe { buffer.deref().read(f) }; | 636 | 104k | | 637 | 104k | // Try incrementing the front index to steal the task. | 638 | 104k | if self | 639 | 104k | .inner | 640 | 104k | .front | 641 | 104k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 642 | 104k | .is_err() | 643 | | { | 644 | | // We didn't steal this task, forget it. | 645 | 47.3k | mem::forget(task); | 646 | 47.3k | return Steal::Retry; | 647 | 61.4k | } | 648 | 61.4k | | 649 | 61.4k | // Return the stolen task. | 650 | 61.4k | Steal::Success(task) | 651 | 208k | } |
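As the retry counts above show, `steal` returns `Steal::Retry` whenever its compare-exchange on the front index loses a race, so callers are expected to loop. A minimal retry loop over the same API (illustrative, not from this file):

    use crossbeam_deque::{Steal, Worker};

    let w = Worker::new_lifo();
    w.push(42);
    let s = w.stealer();

    // Spin until the steal either succeeds or observes an empty queue.
    let task = loop {
        match s.steal() {
            Steal::Success(t) => break Some(t),
            Steal::Empty => break None,
            Steal::Retry => continue, // lost a race with the worker or another thief
        }
    };
    assert_eq!(task, Some(42));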
|
652 | | |
653 | | /// Steals a batch of tasks and pushes them into another worker. |
654 | | /// |
655 | | /// Exactly how many tasks will be stolen is not specified. That said, this method will try to
656 | | /// steal around half of the tasks in the queue, but never more than a constant limit.
657 | | /// |
658 | | /// # Examples |
659 | | /// |
660 | | /// ``` |
661 | | /// use crossbeam_deque::Worker; |
662 | | /// |
663 | | /// let w1 = Worker::new_fifo(); |
664 | | /// w1.push(1); |
665 | | /// w1.push(2); |
666 | | /// w1.push(3); |
667 | | /// w1.push(4); |
668 | | /// |
669 | | /// let s = w1.stealer(); |
670 | | /// let w2 = Worker::new_fifo(); |
671 | | /// |
672 | | /// let _ = s.steal_batch(&w2); |
673 | | /// assert_eq!(w2.pop(), Some(1)); |
674 | | /// assert_eq!(w2.pop(), Some(2)); |
675 | | /// ``` |
676 | 280k | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { |
677 | 280k | if Arc::ptr_eq(&self.inner, &dest.inner) { |
678 | 0 | if dest.is_empty() { |
679 | 0 | return Steal::Empty; |
680 | | } else { |
681 | 0 | return Steal::Success(()); |
682 | | } |
683 | 280k | } |
684 | 280k | |
685 | 280k | // Load the front index. |
686 | 280k | let mut f = self.inner.front.load(Ordering::Acquire); |
687 | 280k | |
688 | 280k | // A SeqCst fence is needed here. |
689 | 280k | // |
690 | 280k | // If the current thread is already pinned (reentrantly), we must manually issue the |
691 | 280k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't |
692 | 280k | // have to. |
693 | 280k | if epoch::is_pinned() { |
694 | 0 | atomic::fence(Ordering::SeqCst); |
695 | 280k | } |
696 | | |
697 | 280k | let guard = &epoch::pin(); |
698 | 280k | |
699 | 280k | // Load the back index. |
700 | 280k | let b = self.inner.back.load(Ordering::Acquire); |
701 | 280k | |
702 | 280k | // Is the queue empty? |
703 | 280k | let len = b.wrapping_sub(f); |
704 | 280k | if len <= 0 { |
705 | 183k | return Steal::Empty; |
706 | 96.9k | } |
707 | 96.9k | |
708 | 96.9k | // Reserve capacity for the stolen batch. |
709 | 96.9k | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); |
710 | 96.9k | dest.reserve(batch_size); |
711 | 96.9k | let mut batch_size = batch_size as isize; |
712 | 96.9k | |
713 | 96.9k | // Get the destination buffer and back index. |
714 | 96.9k | let dest_buffer = dest.buffer.get(); |
715 | 96.9k | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); |
716 | 96.9k | |
717 | 96.9k | // Load the buffer. |
718 | 96.9k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); |
719 | 96.9k | |
720 | 96.9k | match self.flavor { |
721 | 96.9k | // Steal a batch of tasks from the front at once. |
722 | 96.9k | Flavor::Fifo => { |
723 | | // Copy the batch from the source to the destination buffer. |
724 | 40.1k | match dest.flavor { |
725 | 40.1k | Flavor::Fifo => { |
726 | 267k | for i in 0..batch_size {
727 | 267k | unsafe { |
728 | 267k | let task = buffer.deref().read(f.wrapping_add(i)); |
729 | 267k | dest_buffer.write(dest_b.wrapping_add(i), task); |
730 | 267k | } |
731 | | } |
732 | | } |
733 | | Flavor::Lifo => { |
734 | 2 | for i in 0..batch_size {
735 | 2 | unsafe { |
736 | 2 | let task = buffer.deref().read(f.wrapping_add(i)); |
737 | 2 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); |
738 | 2 | } |
739 | | } |
740 | | } |
741 | | } |
742 | | |
743 | | // Try incrementing the front index to steal the batch. |
744 | 44.2k | if self |
745 | 44.2k | .inner |
746 | 44.2k | .front |
747 | 44.2k | .compare_exchange( |
748 | 44.2k | f, |
749 | 44.2k | f.wrapping_add(batch_size), |
750 | 44.2k | Ordering::SeqCst, |
751 | 44.2k | Ordering::Relaxed, |
752 | 44.2k | ) |
753 | 44.2k | .is_err() |
754 | | { |
755 | 41.4k | return Steal::Retry; |
756 | 3.85k | } |
757 | 3.85k | |
758 | 3.85k | dest_b = dest_b.wrapping_add(batch_size); |
759 | | } |
760 | | |
761 | | // Steal a batch of tasks from the front one by one. |
762 | | Flavor::Lifo => { |
763 | 71.1k | for i in 0..batch_size {
764 | | // If this is not the first steal, check whether the queue is empty. |
765 | 71.1k | if i > 0 { |
766 | | // We've already got the current front index. Now execute the fence to |
767 | | // synchronize with other threads. |
768 | 14.1k | atomic::fence(Ordering::SeqCst); |
769 | 14.1k | |
770 | 14.1k | // Load the back index. |
771 | 14.1k | let b = self.inner.back.load(Ordering::Acquire); |
772 | 14.1k | |
773 | 14.1k | // Is the queue empty? |
774 | 14.1k | if b.wrapping_sub(f) <= 0 { |
775 | 508 | batch_size = i; |
776 | 508 | break; |
777 | 13.6k | } |
778 | 56.9k | } |
779 | | |
780 | | // Read the task at the front. |
781 | 70.6k | let task = unsafe { buffer.deref().read(f) }; |
782 | 70.6k | |
783 | 70.6k | // Try incrementing the front index to steal the task. |
784 | 70.6k | if self |
785 | 70.6k | .inner |
786 | 70.6k | .front |
787 | 70.6k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) |
788 | 70.6k | .is_err() |
789 | | { |
790 | | // We didn't steal this task, forget it and break from the loop. |
791 | 51.1k | mem::forget(task); |
792 | 51.1k | batch_size = i; |
793 | 51.1k | break; |
794 | 19.4k | } |
795 | 19.4k | |
796 | 19.4k | // Write the stolen task into the destination buffer. |
797 | 19.4k | unsafe { |
798 | 19.4k | dest_buffer.write(dest_b, task); |
799 | 19.4k | } |
800 | 19.4k | |
801 | 19.4k | // Move the source front index and the destination back index one step forward. |
802 | 19.4k | f = f.wrapping_add(1); |
803 | 19.4k | dest_b = dest_b.wrapping_add(1); |
804 | | } |
805 | | |
806 | | // If we didn't steal anything, the operation needs to be retried. |
807 | 58.3k | if batch_size == 0 { |
808 | 50.7k | return Steal::Retry; |
809 | 7.64k | } |
810 | 7.64k | |
811 | 7.64k | // If stealing into a FIFO queue, stolen tasks need to be reversed. |
812 | 7.64k | if dest.flavor == Flavor::Fifo { |
813 | 363 | for i in 0..batch_size / 2 {
814 | 1 | unsafe { |
815 | 1 | let i1 = dest_b.wrapping_sub(batch_size - i); |
816 | 1 | let i2 = dest_b.wrapping_sub(i + 1); |
817 | 1 | let t1 = dest_buffer.read(i1); |
818 | 1 | let t2 = dest_buffer.read(i2); |
819 | 1 | dest_buffer.write(i1, t2); |
820 | 1 | dest_buffer.write(i2, t1); |
821 | 1 | } |
822 | | } |
823 | 7.28k | } |
824 | | } |
825 | | } |
826 | | |
827 | 11.1k | atomic::fence(Ordering::Release); |
828 | 11.1k | |
829 | 11.1k | // Update the back index in the destination queue. |
830 | 11.1k | // |
831 | 11.1k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data |
832 | 11.1k | // races because it doesn't understand fences. |
833 | 11.1k | dest.inner.back.store(dest_b, Ordering::Release); |
834 | 11.1k | |
835 | 11.1k | // Return with success. |
836 | 11.1k | Steal::Success(()) |
837 | 287k | } <crossbeam_deque::deque::Stealer<fifo::destructors::Elem>>::steal_batch Line | Count | Source | 676 | 145 | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 677 | 145 | if Arc::ptr_eq(&self.inner, &dest.inner) { | 678 | 0 | if dest.is_empty() { | 679 | 0 | return Steal::Empty; | 680 | | } else { | 681 | 0 | return Steal::Success(()); | 682 | | } | 683 | 145 | } | 684 | 145 | | 685 | 145 | // Load the front index. | 686 | 145 | let mut f = self.inner.front.load(Ordering::Acquire); | 687 | 145 | | 688 | 145 | // A SeqCst fence is needed here. | 689 | 145 | // | 690 | 145 | // If the current thread is already pinned (reentrantly), we must manually issue the | 691 | 145 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 692 | 145 | // have to. | 693 | 145 | if epoch::is_pinned() { | 694 | 0 | atomic::fence(Ordering::SeqCst); | 695 | 145 | } | 696 | | | 697 | 145 | let guard = &epoch::pin(); | 698 | 145 | | 699 | 145 | // Load the back index. | 700 | 145 | let b = self.inner.back.load(Ordering::Acquire); | 701 | 145 | | 702 | 145 | // Is the queue empty? | 703 | 145 | let len = b.wrapping_sub(f); | 704 | 145 | if len <= 0 { | 705 | 0 | return Steal::Empty; | 706 | 145 | } | 707 | 145 | | 708 | 145 | // Reserve capacity for the stolen batch. | 709 | 145 | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); | 710 | 145 | dest.reserve(batch_size); | 711 | 145 | let mut batch_size = batch_size as isize; | 712 | 145 | | 713 | 145 | // Get the destination buffer and back index. | 714 | 145 | let dest_buffer = dest.buffer.get(); | 715 | 145 | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); | 716 | 145 | | 717 | 145 | // Load the buffer. | 718 | 145 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 719 | 145 | | 720 | 145 | match self.flavor { | 721 | 145 | // Steal a batch of tasks from the front at once. | 722 | 145 | Flavor::Fifo => { | 723 | | // Copy the batch from the source to the destination buffer. | 724 | 145 | match dest.flavor { | 725 | 145 | Flavor::Fifo => { | 726 | 4.62k | for i in 0..batch_size { | 727 | 4.62k | unsafe { | 728 | 4.62k | let task = buffer.deref().read(f.wrapping_add(i)); | 729 | 4.62k | dest_buffer.write(dest_b.wrapping_add(i), task); | 730 | 4.62k | } | 731 | | } | 732 | | } | 733 | | Flavor::Lifo => { | 734 | 0 | for i in 0..batch_size { | 735 | 0 | unsafe { | 736 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 737 | 0 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); | 738 | 0 | } | 739 | | } | 740 | | } | 741 | | } | 742 | | | 743 | | // Try incrementing the front index to steal the batch. | 744 | 145 | if self | 745 | 145 | .inner | 746 | 145 | .front | 747 | 145 | .compare_exchange( | 748 | 145 | f, | 749 | 145 | f.wrapping_add(batch_size), | 750 | 145 | Ordering::SeqCst, | 751 | 145 | Ordering::Relaxed, | 752 | 145 | ) | 753 | 145 | .is_err() | 754 | | { | 755 | 15 | return Steal::Retry; | 756 | 130 | } | 757 | 130 | | 758 | 130 | dest_b = dest_b.wrapping_add(batch_size); | 759 | | } | 760 | | | 761 | | // Steal a batch of tasks from the front one by one. | 762 | | Flavor::Lifo => { | 763 | 0 | for i in 0..batch_size { | 764 | | // If this is not the first steal, check whether the queue is empty. | 765 | 0 | if i > 0 { | 766 | | // We've already got the current front index. Now execute the fence to | 767 | | // synchronize with other threads. | 768 | 0 | atomic::fence(Ordering::SeqCst); | 769 | 0 |
| 770 | 0 | // Load the back index. | 771 | 0 | let b = self.inner.back.load(Ordering::Acquire); | 772 | 0 |
| 773 | 0 | // Is the queue empty? | 774 | 0 | if b.wrapping_sub(f) <= 0 { | 775 | 0 | batch_size = i; | 776 | 0 | break; | 777 | 0 | } | 778 | 0 | } | 779 | | | 780 | | // Read the task at the front. | 781 | 0 | let task = unsafe { buffer.deref().read(f) }; | 782 | 0 |
| 783 | 0 | // Try incrementing the front index to steal the task. | 784 | 0 | if self | 785 | 0 | .inner | 786 | 0 | .front | 787 | 0 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 788 | 0 | .is_err() | 789 | | { | 790 | | // We didn't steal this task, forget it and break from the loop. | 791 | 0 | mem::forget(task); | 792 | 0 | batch_size = i; | 793 | 0 | break; | 794 | 0 | } | 795 | 0 |
| 796 | 0 | // Write the stolen task into the destination buffer. | 797 | 0 | unsafe { | 798 | 0 | dest_buffer.write(dest_b, task); | 799 | 0 | } | 800 | 0 |
| 801 | 0 | // Move the source front index and the destination back index one step forward. | 802 | 0 | f = f.wrapping_add(1); | 803 | 0 | dest_b = dest_b.wrapping_add(1); | 804 | | } | 805 | | | 806 | | // If we didn't steal anything, the operation needs to be retried. | 807 | 0 | if batch_size == 0 { | 808 | 0 | return Steal::Retry; | 809 | 0 | } | 810 | 0 |
| 811 | 0 | // If stealing into a FIFO queue, stolen tasks need to be reversed. | 812 | 0 | if dest.flavor == Flavor::Fifo { | 813 | 0 | for i in 0..batch_size / 2 { | 814 | 0 | unsafe { | 815 | 0 | let i1 = dest_b.wrapping_sub(batch_size - i); | 816 | 0 | let i2 = dest_b.wrapping_sub(i + 1); | 817 | 0 | let t1 = dest_buffer.read(i1); | 818 | 0 | let t2 = dest_buffer.read(i2); | 819 | 0 | dest_buffer.write(i1, t2); | 820 | 0 | dest_buffer.write(i2, t1); | 821 | 0 | } | 822 | | } | 823 | 0 | } | 824 | | } | 825 | | } | 826 | | | 827 | 130 | atomic::fence(Ordering::Release); | 828 | 130 | | 829 | 130 | // Update the back index in the destination queue. | 830 | 130 | // | 831 | 130 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data | 832 | 130 | // races because it doesn't understand fences. | 833 | 130 | dest.inner.back.store(dest_b, Ordering::Release); | 834 | 130 | | 835 | 130 | // Return with success. | 836 | 130 | Steal::Success(()) | 837 | 145 | } |
<crossbeam_deque::deque::Stealer<usize>>::steal_batch Line | Count | Source | 676 | 125k | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 677 | 125k | if Arc::ptr_eq(&self.inner, &dest.inner) { | 678 | 0 | if dest.is_empty() { | 679 | 0 | return Steal::Empty; | 680 | | } else { | 681 | 0 | return Steal::Success(()); | 682 | | } | 683 | 125k | } | 684 | 125k | | 685 | 125k | // Load the front index. | 686 | 125k | let mut f = self.inner.front.load(Ordering::Acquire); | 687 | 125k | | 688 | 125k | // A SeqCst fence is needed here. | 689 | 125k | // | 690 | 125k | // If the current thread is already pinned (reentrantly), we must manually issue the | 691 | 125k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 692 | 125k | // have to. | 693 | 125k | if epoch::is_pinned() { | 694 | 0 | atomic::fence(Ordering::SeqCst); | 695 | 125k | } | 696 | | | 697 | 125k | let guard = &epoch::pin(); | 698 | 125k | | 699 | 125k | // Load the back index. | 700 | 125k | let b = self.inner.back.load(Ordering::Acquire); | 701 | 125k | | 702 | 125k | // Is the queue empty? | 703 | 125k | let len = b.wrapping_sub(f); | 704 | 125k | if len <= 0 { | 705 | 84.0k | return Steal::Empty; | 706 | 41.5k | } | 707 | 41.5k | | 708 | 41.5k | // Reserve capacity for the stolen batch. | 709 | 41.5k | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); | 710 | 41.5k | dest.reserve(batch_size); | 711 | 41.5k | let mut batch_size = batch_size as isize; | 712 | 41.5k | | 713 | 41.5k | // Get the destination buffer and back index. | 714 | 41.5k | let dest_buffer = dest.buffer.get(); | 715 | 41.5k | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); | 716 | 41.5k | | 717 | 41.5k | // Load the buffer. | 718 | 41.5k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 719 | 41.5k | | 720 | 41.5k | match self.flavor { | 721 | 41.5k | // Steal a batch of tasks from the front at once. | 722 | 41.5k | Flavor::Fifo => { | 723 | | // Copy the batch from the source to the destination buffer. | 724 | 41.5k | match dest.flavor { | 725 | 41.5k | Flavor::Fifo => { | 726 | 262k | for i in 0..batch_size { | 727 | 262k | unsafe { | 728 | 262k | let task = buffer.deref().read(f.wrapping_add(i)); | 729 | 262k | dest_buffer.write(dest_b.wrapping_add(i), task); | 730 | 262k | } | 731 | | } | 732 | | } | 733 | | Flavor::Lifo => { | 734 | 0 | for i in 0..batch_size { | 735 | 0 | unsafe { | 736 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 737 | 0 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); | 738 | 0 | } | 739 | | } | 740 | | } | 741 | | } | 742 | | | 743 | | // Try incrementing the front index to steal the batch. | 744 | 44.1k | if self | 745 | 44.1k | .inner | 746 | 44.1k | .front | 747 | 44.1k | .compare_exchange( | 748 | 44.1k | f, | 749 | 44.1k | f.wrapping_add(batch_size), | 750 | 44.1k | Ordering::SeqCst, | 751 | 44.1k | Ordering::Relaxed, | 752 | 44.1k | ) | 753 | 44.1k | .is_err() | 754 | | { | 755 | 41.4k | return Steal::Retry; | 756 | 3.72k | } | 757 | 3.72k | | 758 | 3.72k | dest_b = dest_b.wrapping_add(batch_size); | 759 | | } | 760 | | | 761 | | // Steal a batch of tasks from the front one by one. | 762 | | Flavor::Lifo => { | 763 | 0 | for i in 0..batch_size { | 764 | | // If this is not the first steal, check whether the queue is empty. | 765 | 0 | if i > 0 { | 766 | | // We've already got the current front index. Now execute the fence to | 767 | | // synchronize with other threads.
| 768 | 0 | atomic::fence(Ordering::SeqCst); | 769 | 0 |
| 770 | 0 | // Load the back index. | 771 | 0 | let b = self.inner.back.load(Ordering::Acquire); | 772 | 0 |
| 773 | 0 | // Is the queue empty? | 774 | 0 | if b.wrapping_sub(f) <= 0 { | 775 | 0 | batch_size = i; | 776 | 0 | break; | 777 | 0 | } | 778 | 0 | } | 779 | | | 780 | | // Read the task at the front. | 781 | 0 | let task = unsafe { buffer.deref().read(f) }; | 782 | 0 |
| 783 | 0 | // Try incrementing the front index to steal the task. | 784 | 0 | if self | 785 | 0 | .inner | 786 | 0 | .front | 787 | 0 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 788 | 0 | .is_err() | 789 | | { | 790 | | // We didn't steal this task, forget it and break from the loop. | 791 | 0 | mem::forget(task); | 792 | 0 | batch_size = i; | 793 | 0 | break; | 794 | 0 | } | 795 | 0 |
| 796 | 0 | // Write the stolen task into the destination buffer. | 797 | 0 | unsafe { | 798 | 0 | dest_buffer.write(dest_b, task); | 799 | 0 | } | 800 | 0 |
| 801 | 0 | // Move the source front index and the destination back index one step forward. | 802 | 0 | f = f.wrapping_add(1); | 803 | 0 | dest_b = dest_b.wrapping_add(1); | 804 | | } | 805 | | | 806 | | // If we didn't steal anything, the operation needs to be retried. | 807 | 0 | if batch_size == 0 { | 808 | 0 | return Steal::Retry; | 809 | 0 | } | 810 | 0 |
| 811 | 0 | // If stealing into a FIFO queue, stolen tasks need to be reversed. | 812 | 0 | if dest.flavor == Flavor::Fifo { | 813 | 0 | for i in 0..batch_size / 2 { | 814 | 0 | unsafe { | 815 | 0 | let i1 = dest_b.wrapping_sub(batch_size - i); | 816 | 0 | let i2 = dest_b.wrapping_sub(i + 1); | 817 | 0 | let t1 = dest_buffer.read(i1); | 818 | 0 | let t2 = dest_buffer.read(i2); | 819 | 0 | dest_buffer.write(i1, t2); | 820 | 0 | dest_buffer.write(i2, t1); | 821 | 0 | } | 822 | | } | 823 | 0 | } | 824 | | } | 825 | | } | 826 | | | 827 | 3.72k | atomic::fence(Ordering::Release); | 828 | 3.72k | | 829 | 3.72k | // Update the back index in the destination queue. | 830 | 3.72k | // | 831 | 3.72k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data | 832 | 3.72k | // races because it doesn't understand fences. | 833 | 3.72k | dest.inner.back.store(dest_b, Ordering::Release); | 834 | 3.72k | | 835 | 3.72k | // Return with success. | 836 | 3.72k | Steal::Success(()) | 837 | 129k | } |
<crossbeam_deque::deque::Stealer<i32>>::steal_batch Line | Count | Source | 676 | 4 | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 677 | 4 | if Arc::ptr_eq(&self.inner, &dest.inner) { | 678 | 0 | if dest.is_empty() { | 679 | 0 | return Steal::Empty; | 680 | | } else { | 681 | 0 | return Steal::Success(()); | 682 | | } | 683 | 4 | } | 684 | 4 | | 685 | 4 | // Load the front index. | 686 | 4 | let mut f = self.inner.front.load(Ordering::Acquire); | 687 | 4 | | 688 | 4 | // A SeqCst fence is needed here. | 689 | 4 | // | 690 | 4 | // If the current thread is already pinned (reentrantly), we must manually issue the | 691 | 4 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 692 | 4 | // have to. | 693 | 4 | if epoch::is_pinned() { | 694 | 0 | atomic::fence(Ordering::SeqCst); | 695 | 4 | } | 696 | | | 697 | 4 | let guard = &epoch::pin(); | 698 | 4 | | 699 | 4 | // Load the back index. | 700 | 4 | let b = self.inner.back.load(Ordering::Acquire); | 701 | 4 | | 702 | 4 | // Is the queue empty? | 703 | 4 | let len = b.wrapping_sub(f); | 704 | 4 | if len <= 0 { | 705 | 0 | return Steal::Empty; | 706 | 4 | } | 707 | 4 | | 708 | 4 | // Reserve capacity for the stolen batch. | 709 | 4 | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); | 710 | 4 | dest.reserve(batch_size); | 711 | 4 | let mut batch_size = batch_size as isize; | 712 | 4 | | 713 | 4 | // Get the destination buffer and back index. | 714 | 4 | let dest_buffer = dest.buffer.get(); | 715 | 4 | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); | 716 | 4 | | 717 | 4 | // Load the buffer. | 718 | 4 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 719 | 4 | | 720 | 4 | match self.flavor { | 721 | 4 | // Steal a batch of tasks from the front at once. | 722 | 4 | Flavor::Fifo => { | 723 | | // Copy the batch from the source to the destination buffer. | 724 | 2 | match dest.flavor { | 725 | 2 | Flavor::Fifo => { | 726 | 2 | for i in 0..batch_size { | 727 | 2 | unsafe { | 728 | 2 | let task = buffer.deref().read(f.wrapping_add(i)); | 729 | 2 | dest_buffer.write(dest_b.wrapping_add(i), task); | 730 | 2 | } | 731 | | } | 732 | | } | 733 | | Flavor::Lifo => { | 734 | 2 | for i in 0..batch_size { | 735 | 2 | unsafe { | 736 | 2 | let task = buffer.deref().read(f.wrapping_add(i)); | 737 | 2 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); | 738 | 2 | } | 739 | | } | 740 | | } | 741 | | } | 742 | | | 743 | | // Try incrementing the front index to steal the batch. | 744 | 2 | if self | 745 | 2 | .inner | 746 | 2 | .front | 747 | 2 | .compare_exchange( | 748 | 2 | f, | 749 | 2 | f.wrapping_add(batch_size), | 750 | 2 | Ordering::SeqCst, | 751 | 2 | Ordering::Relaxed, | 752 | 2 | ) | 753 | 2 | .is_err() | 754 | | { | 755 | 0 | return Steal::Retry; | 756 | 2 | } | 757 | 2 | | 758 | 2 | dest_b = dest_b.wrapping_add(batch_size); | 759 | | } | 760 | | | 761 | | // Steal a batch of tasks from the front one by one. | 762 | | Flavor::Lifo => { | 763 | 4 | for i in 0..batch_size { | 764 | | // If this is not the first steal, check whether the queue is empty. | 765 | 4 | if i > 0 { | 766 | | // We've already got the current front index. Now execute the fence to | 767 | | // synchronize with other threads. | 768 | 2 | atomic::fence(Ordering::SeqCst); | 769 | 2 | | 770 | 2 | // Load the back index. | 771 | 2 | let b = self.inner.back.load(Ordering::Acquire); | 772 | 2 | | 773 | 2 | // Is the queue empty?
| 774 | 2 | if b.wrapping_sub(f) <= 0 { | 775 | 0 | batch_size = i; | 776 | 0 | break; | 777 | 2 | } | 778 | 2 | } | 779 | | | 780 | | // Read the task at the front. | 781 | 4 | let task = unsafe { buffer.deref().read(f) }; | 782 | 4 | | 783 | 4 | // Try incrementing the front index to steal the task. | 784 | 4 | if self | 785 | 4 | .inner | 786 | 4 | .front | 787 | 4 | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 788 | 4 | .is_err() | 789 | | { | 790 | | // We didn't steal this task, forget it and break from the loop. | 791 | 0 | mem::forget(task); | 792 | 0 | batch_size = i; | 793 | 0 | break; | 794 | 4 | } | 795 | 4 | | 796 | 4 | // Write the stolen task into the destination buffer. | 797 | 4 | unsafe { | 798 | 4 | dest_buffer.write(dest_b, task); | 799 | 4 | } | 800 | 4 | | 801 | 4 | // Move the source front index and the destination back index one step forward. | 802 | 4 | f = f.wrapping_add(1); | 803 | 4 | dest_b = dest_b.wrapping_add(1); | 804 | | } | 805 | | | 806 | | // If we didn't steal anything, the operation needs to be retried. | 807 | 2 | if batch_size == 0 { | 808 | 0 | return Steal::Retry; | 809 | 2 | } | 810 | 2 | | 811 | 2 | // If stealing into a FIFO queue, stolen tasks need to be reversed. | 812 | 2 | if dest.flavor == Flavor::Fifo { | 813 | 1 | for i in 0..batch_size / 2 { | 814 | 1 | unsafe { | 815 | 1 | let i1 = dest_b.wrapping_sub(batch_size - i); | 816 | 1 | let i2 = dest_b.wrapping_sub(i + 1); | 817 | 1 | let t1 = dest_buffer.read(i1); | 818 | 1 | let t2 = dest_buffer.read(i2); | 819 | 1 | dest_buffer.write(i1, t2); | 820 | 1 | dest_buffer.write(i2, t1); | 821 | 1 | } | 822 | | } | 823 | 1 | } | 824 | | } | 825 | | } | 826 | | | 827 | 4 | atomic::fence(Ordering::Release); | 828 | 4 | | 829 | 4 | // Update the back index in the destination queue. | 830 | 4 | // | 831 | 4 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data | 832 | 4 | // races because it doesn't understand fences. | 833 | 4 | dest.inner.back.store(dest_b, Ordering::Release); | 834 | 4 | | 835 | 4 | // Return with success. | 836 | 4 | Steal::Success(()) | 837 | 4 | } |
<crossbeam_deque::deque::Stealer<lifo::destructors::Elem>>::steal_batch Line | Count | Source | 676 | 426 | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 677 | 426 | if Arc::ptr_eq(&self.inner, &dest.inner) { | 678 | 0 | if dest.is_empty() { | 679 | 0 | return Steal::Empty; | 680 | | } else { | 681 | 0 | return Steal::Success(()); | 682 | | } | 683 | 426 | } | 684 | 426 | | 685 | 426 | // Load the front index. | 686 | 426 | let mut f = self.inner.front.load(Ordering::Acquire); | 687 | 426 | | 688 | 426 | // A SeqCst fence is needed here. | 689 | 426 | // | 690 | 426 | // If the current thread is already pinned (reentrantly), we must manually issue the | 691 | 426 | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 692 | 426 | // have to. | 693 | 426 | if epoch::is_pinned() { | 694 | 0 | atomic::fence(Ordering::SeqCst); | 695 | 426 | } | 696 | | | 697 | 426 | let guard = &epoch::pin(); | 698 | 426 | | 699 | 426 | // Load the back index. | 700 | 426 | let b = self.inner.back.load(Ordering::Acquire); | 701 | 426 | | 702 | 426 | // Is the queue empty? | 703 | 426 | let len = b.wrapping_sub(f); | 704 | 426 | if len <= 0 { | 705 | 0 | return Steal::Empty; | 706 | 426 | } | 707 | 426 | | 708 | 426 | // Reserve capacity for the stolen batch. | 709 | 426 | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); | 710 | 426 | dest.reserve(batch_size); | 711 | 426 | let mut batch_size = batch_size as isize; | 712 | 426 | | 713 | 426 | // Get the destination buffer and back index. | 714 | 426 | let dest_buffer = dest.buffer.get(); | 715 | 426 | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); | 716 | 426 | | 717 | 426 | // Load the buffer. | 718 | 426 | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 719 | 426 | | 720 | 426 | match self.flavor { | 721 | 426 | // Steal a batch of tasks from the front at once. | 722 | 426 | Flavor::Fifo => { | 723 | | // Copy the batch from the source to the destination buffer. | 724 | 18.4E | match dest.flavor { | 725 | 18.4E | Flavor::Fifo => { | 726 | 18.4E | for i in 0..batch_size { | 727 | 0 | unsafe { | 728 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 729 | 0 | dest_buffer.write(dest_b.wrapping_add(i), task); | 730 | 0 | } | 731 | | } | 732 | | } | 733 | | Flavor::Lifo => { | 734 | 0 | for i in 0..batch_size { | 735 | 0 | unsafe { | 736 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 737 | 0 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); | 738 | 0 | } | 739 | | } | 740 | | } | 741 | | } | 742 | | | 743 | | // Try incrementing the front index to steal the batch. | 744 | 0 | if self | 745 | 0 | .inner | 746 | 0 | .front | 747 | 0 | .compare_exchange( | 748 | 0 | f, | 749 | 0 | f.wrapping_add(batch_size), | 750 | 0 | Ordering::SeqCst, | 751 | 0 | Ordering::Relaxed, | 752 | 0 | ) | 753 | 0 | .is_err() | 754 | | { | 755 | 0 | return Steal::Retry; | 756 | 0 | } | 757 | 0 |
| 758 | 0 | dest_b = dest_b.wrapping_add(batch_size); | 759 | | } | 760 | | | 761 | | // Steal a batch of tasks from the front one by one. | 762 | | Flavor::Lifo => { | 763 | 3.99k | for i in 0..batch_size427 { | 764 | | // If this is not the first steal, check whether the queue is empty. | 765 | 3.99k | if i > 0 { | 766 | | // We've already got the current front index. Now execute the fence to | 767 | | // synchronize with other threads. | 768 | 3.58k | atomic::fence(Ordering::SeqCst); | 769 | 3.58k | | 770 | 3.58k | // Load the back index. | 771 | 3.58k | let b = self.inner.back.load(Ordering::Acquire); | 772 | 3.58k | | 773 | 3.58k | // Is the queue empty? | 774 | 3.58k | if b.wrapping_sub(f) <= 0 { | 775 | 0 | batch_size = i; | 776 | 0 | break; | 777 | 3.58k | } | 778 | 417 | } | 779 | | | 780 | | // Read the task at the front. | 781 | 3.99k | let task = unsafe { buffer.deref().read(f) }; | 782 | 3.99k | | 783 | 3.99k | // Try incrementing the front index to steal the task. | 784 | 3.99k | if self | 785 | 3.99k | .inner | 786 | 3.99k | .front | 787 | 3.99k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 788 | 3.99k | .is_err() | 789 | | { | 790 | | // We didn't steal this task, forget it and break from the loop. | 791 | 314 | mem::forget(task); | 792 | 314 | batch_size = i; | 793 | 314 | break; | 794 | 3.68k | } | 795 | 3.68k | | 796 | 3.68k | // Write the stolen task into the destination buffer. | 797 | 3.68k | unsafe { | 798 | 3.68k | dest_buffer.write(dest_b, task); | 799 | 3.68k | } | 800 | 3.68k | | 801 | 3.68k | // Move the source front index and the destination back index one step forward. | 802 | 3.68k | f = f.wrapping_add(1); | 803 | 3.68k | dest_b = dest_b.wrapping_add(1); | 804 | | } | 805 | | | 806 | | // If we didn't steal anything, the operation needs to be retried. | 807 | 427 | if batch_size == 0 { | 808 | 311 | return Steal::Retry; | 809 | 116 | } | 810 | 116 | | 811 | 116 | // If stealing into a FIFO queue, stolen tasks need to be reversed. | 812 | 116 | if dest.flavor == Flavor::Fifo { | 813 | 0 | for i in 0..batch_size / 2 { | 814 | 0 | unsafe { | 815 | 0 | let i1 = dest_b.wrapping_sub(batch_size - i); | 816 | 0 | let i2 = dest_b.wrapping_sub(i + 1); | 817 | 0 | let t1 = dest_buffer.read(i1); | 818 | 0 | let t2 = dest_buffer.read(i2); | 819 | 0 | dest_buffer.write(i1, t2); | 820 | 0 | dest_buffer.write(i2, t1); | 821 | 0 | } | 822 | | } | 823 | 116 | } | 824 | | } | 825 | | } | 826 | | | 827 | 116 | atomic::fence(Ordering::Release); | 828 | 116 | | 829 | 116 | // Update the back index in the destination queue. | 830 | 116 | // | 831 | 116 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data | 832 | 116 | // races because it doesn't understand fences. | 833 | 116 | dest.inner.back.store(dest_b, Ordering::Release); | 834 | 116 | | 835 | 116 | // Return with success. | 836 | 116 | Steal::Success(()) | 837 | 427 | } |
<crossbeam_deque::deque::Stealer<usize>>::steal_batch Line | Count | Source | 676 | 154k | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 677 | 154k | if Arc::ptr_eq(&self.inner, &dest.inner) { | 678 | 0 | if dest.is_empty() { | 679 | 0 | return Steal::Empty; | 680 | | } else { | 681 | 0 | return Steal::Success(()); | 682 | | } | 683 | 154k | } | 684 | 154k | | 685 | 154k | // Load the front index. | 686 | 154k | let mut f = self.inner.front.load(Ordering::Acquire); | 687 | 154k | | 688 | 154k | // A SeqCst fence is needed here. | 689 | 154k | // | 690 | 154k | // If the current thread is already pinned (reentrantly), we must manually issue the | 691 | 154k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't | 692 | 154k | // have to. | 693 | 154k | if epoch::is_pinned() { | 694 | 0 | atomic::fence(Ordering::SeqCst); | 695 | 154k | } | 696 | | | 697 | 154k | let guard = &epoch::pin(); | 698 | 154k | | 699 | 154k | // Load the back index. | 700 | 154k | let b = self.inner.back.load(Ordering::Acquire); | 701 | 154k | | 702 | 154k | // Is the queue empty? | 703 | 154k | let len = b.wrapping_sub(f); | 704 | 154k | if len <= 0 { | 705 | 99.6k | return Steal::Empty; | 706 | 54.8k | } | 707 | 54.8k | | 708 | 54.8k | // Reserve capacity for the stolen batch. | 709 | 54.8k | let batch_size = cmp::min((len as usize + 1) / 2, MAX_BATCH); | 710 | 54.8k | dest.reserve(batch_size); | 711 | 54.8k | let mut batch_size = batch_size as isize; | 712 | 54.8k | | 713 | 54.8k | // Get the destination buffer and back index. | 714 | 54.8k | let dest_buffer = dest.buffer.get(); | 715 | 54.8k | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); | 716 | 54.8k | | 717 | 54.8k | // Load the buffer. | 718 | 54.8k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); | 719 | 54.8k | | 720 | 54.8k | match self.flavor { | 721 | 54.8k | // Steal a batch of tasks from the front at once. | 722 | 54.8k | Flavor::Fifo => { | 723 | | // Copy the batch from the source to the destination buffer. | 724 | 18.4E | match dest.flavor { | 725 | 18.4E | Flavor::Fifo => { | 726 | 18.4E | for i0 in 0..batch_size { | 727 | 0 | unsafe { | 728 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 729 | 0 | dest_buffer.write(dest_b.wrapping_add(i), task); | 730 | 0 | } | 731 | | } | 732 | | } | 733 | | Flavor::Lifo => { | 734 | 0 | for i in 0..batch_size { | 735 | 0 | unsafe { | 736 | 0 | let task = buffer.deref().read(f.wrapping_add(i)); | 737 | 0 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); | 738 | 0 | } | 739 | | } | 740 | | } | 741 | | } | 742 | | | 743 | | // Try incrementing the front index to steal the batch. | 744 | 0 | if self | 745 | 0 | .inner | 746 | 0 | .front | 747 | 0 | .compare_exchange( | 748 | 0 | f, | 749 | 0 | f.wrapping_add(batch_size), | 750 | 0 | Ordering::SeqCst, | 751 | 0 | Ordering::Relaxed, | 752 | 0 | ) | 753 | 0 | .is_err() | 754 | | { | 755 | 0 | return Steal::Retry; | 756 | 0 | } | 757 | 0 |
| 758 | 0 | dest_b = dest_b.wrapping_add(batch_size); | 759 | | } | 760 | | | 761 | | // Steal a batch of tasks from the front one by one. | 762 | | Flavor::Lifo => { | 763 | 67.1k | for i in 0..batch_size56.4k { | 764 | | // If this is not the first steal, check whether the queue is empty. | 765 | 67.1k | if i > 0 { | 766 | | // We've already got the current front index. Now execute the fence to | 767 | | // synchronize with other threads. | 768 | 10.5k | atomic::fence(Ordering::SeqCst); | 769 | 10.5k | | 770 | 10.5k | // Load the back index. | 771 | 10.5k | let b = self.inner.back.load(Ordering::Acquire); | 772 | 10.5k | | 773 | 10.5k | // Is the queue empty? | 774 | 10.5k | if b.wrapping_sub(f) <= 0 { | 775 | 508 | batch_size = i; | 776 | 508 | break; | 777 | 10.0k | } | 778 | 56.5k | } | 779 | | | 780 | | // Read the task at the front. | 781 | 66.6k | let task = unsafe { buffer.deref().read(f) }; | 782 | 66.6k | | 783 | 66.6k | // Try incrementing the front index to steal the task. | 784 | 66.6k | if self | 785 | 66.6k | .inner | 786 | 66.6k | .front | 787 | 66.6k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) | 788 | 66.6k | .is_err() | 789 | | { | 790 | | // We didn't steal this task, forget it and break from the loop. | 791 | 50.8k | mem::forget(task); | 792 | 50.8k | batch_size = i; | 793 | 50.8k | break; | 794 | 15.7k | } | 795 | 15.7k | | 796 | 15.7k | // Write the stolen task into the destination buffer. | 797 | 15.7k | unsafe { | 798 | 15.7k | dest_buffer.write(dest_b, task); | 799 | 15.7k | } | 800 | 15.7k | | 801 | 15.7k | // Move the source front index and the destination back index one step forward. | 802 | 15.7k | f = f.wrapping_add(1); | 803 | 15.7k | dest_b = dest_b.wrapping_add(1); | 804 | | } | 805 | | | 806 | | // If we didn't steal anything, the operation needs to be retried. | 807 | 57.9k | if batch_size == 0 { | 808 | 50.4k | return Steal::Retry; | 809 | 7.52k | } | 810 | 7.52k | | 811 | 7.52k | // If stealing into a FIFO queue, stolen tasks need to be reversed. | 812 | 7.52k | if dest.flavor == Flavor::Fifo { | 813 | 362 | for i0 in 0..batch_size / 2 { | 814 | 0 | unsafe { | 815 | 0 | let i1 = dest_b.wrapping_sub(batch_size - i); | 816 | 0 | let i2 = dest_b.wrapping_sub(i + 1); | 817 | 0 | let t1 = dest_buffer.read(i1); | 818 | 0 | let t2 = dest_buffer.read(i2); | 819 | 0 | dest_buffer.write(i1, t2); | 820 | 0 | dest_buffer.write(i2, t1); | 821 | 0 | } | 822 | | } | 823 | 7.16k | } | 824 | | } | 825 | | } | 826 | | | 827 | 7.16k | atomic::fence(Ordering::Release); | 828 | 7.16k | | 829 | 7.16k | // Update the back index in the destination queue. | 830 | 7.16k | // | 831 | 7.16k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data | 832 | 7.16k | // races because it doesn't understand fences. | 833 | 7.16k | dest.inner.back.store(dest_b, Ordering::Release); | 834 | 7.16k | | 835 | 7.16k | // Return with success. | 836 | 7.16k | Steal::Success(()) | 837 | 157k | } |
|
838 | | |
839 | | /// Steals a batch of tasks, pushes them into another worker, and pops a task from that worker. |
840 | | /// |
841 | | /// Exactly how many tasks will be stolen is not specified, but this method tries to steal |
842 | | /// around half of the tasks in the queue, capped at a constant limit. |
843 | | /// |
844 | | /// # Examples |
845 | | /// |
846 | | /// ``` |
847 | | /// use crossbeam_deque::{Steal, Worker}; |
848 | | /// |
849 | | /// let w1 = Worker::new_fifo(); |
850 | | /// w1.push(1); |
851 | | /// w1.push(2); |
852 | | /// w1.push(3); |
853 | | /// w1.push(4); |
854 | | /// |
855 | | /// let s = w1.stealer(); |
856 | | /// let w2 = Worker::new_fifo(); |
857 | | /// |
858 | | /// assert_eq!(s.steal_batch_and_pop(&w2), Steal::Success(1)); |
859 | | /// assert_eq!(w2.pop(), Some(2)); |
860 | | /// ``` |
861 | 281k | pub fn steal_batch_and_pop(&self, dest: &Worker<T>) -> Steal<T> { |
862 | 281k | if Arc::ptr_eq(&self.inner, &dest.inner) { |
863 | 0 | match dest.pop() { |
864 | 0 | None => return Steal::Empty, |
865 | 0 | Some(task) => return Steal::Success(task), |
866 | | } |
867 | 281k | } |
868 | 281k | |
869 | 281k | // Load the front index. |
870 | 281k | let mut f = self.inner.front.load(Ordering::Acquire); |
871 | 281k | |
872 | 281k | // A SeqCst fence is needed here. |
873 | 281k | // |
874 | 281k | // If the current thread is already pinned (reentrantly), we must manually issue the |
875 | 281k | // fence. Otherwise, the following pinning will issue the fence anyway, so we don't |
876 | 281k | // have to. |
877 | 281k | if epoch::is_pinned() { |
878 | 0 | atomic::fence(Ordering::SeqCst); |
879 | 281k | } |
880 | | |
881 | 281k | let guard = &epoch::pin(); |
882 | 281k | |
883 | 281k | // Load the back index. |
884 | 281k | let b = self.inner.back.load(Ordering::Acquire); |
885 | 281k | |
886 | 281k | // Is the queue empty? |
887 | 281k | let len = b.wrapping_sub(f); |
888 | 281k | if len <= 0 { |
889 | 175k | return Steal::Empty; |
890 | 106k | } |
891 | 106k | |
892 | 106k | // Reserve capacity for the stolen batch. |
893 | 106k | let batch_size = cmp::min((len as usize - 1) / 2, MAX_BATCH - 1); |
894 | 106k | dest.reserve(batch_size); |
895 | 106k | let mut batch_size = batch_size as isize; |
896 | 106k | |
897 | 106k | // Get the destination buffer and back index. |
898 | 106k | let dest_buffer = dest.buffer.get(); |
899 | 106k | let mut dest_b = dest.inner.back.load(Ordering::Relaxed); |
900 | 106k | |
901 | 106k | // Load the buffer. |
902 | 106k | let buffer = self.inner.buffer.load(Ordering::Acquire, guard); |
903 | 106k | |
904 | 106k | // Read the task at the front. |
905 | 106k | let mut task = unsafe { buffer.deref().read(f) }; |
906 | 106k | |
907 | 106k | match self.flavor { |
908 | 106k | // Steal a batch of tasks from the front at once. |
909 | 106k | Flavor::Fifo => { |
910 | | // Copy the batch from the source to the destination buffer. |
911 | 49.5k | match dest.flavor { |
912 | 49.5k | Flavor::Fifo => { |
913 | 257k | for i in 0..batch_size { |
914 | 257k | unsafe { |
915 | 257k | let task = buffer.deref().read(f.wrapping_add(i + 1)); |
916 | 257k | dest_buffer.write(dest_b.wrapping_add(i), task); |
917 | 257k | } |
918 | | } |
919 | | } |
920 | | Flavor::Lifo => { |
921 | 2 | for i in 0..batch_size { |
922 | 2 | unsafe { |
923 | 2 | let task = buffer.deref().read(f.wrapping_add(i + 1)); |
924 | 2 | dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); |
925 | 2 | } |
926 | | } |
927 | | } |
928 | | } |
929 | | |
930 | | // Try incrementing the front index to steal the batch. |
931 | 43.7k | if self |
932 | 43.7k | .inner |
933 | 43.7k | .front |
934 | 43.7k | .compare_exchange( |
935 | 43.7k | f, |
936 | 43.7k | f.wrapping_add(batch_size + 1), |
937 | 43.7k | Ordering::SeqCst, |
938 | 43.7k | Ordering::Relaxed, |
939 | 43.7k | ) |
940 | 43.7k | .is_err() |
941 | | { |
942 | | // We didn't steal this task, forget it. |
943 | 40.8k | mem::forget(task); |
944 | 40.8k | return Steal::Retry; |
945 | 4.09k | } |
946 | 4.09k | |
947 | 4.09k | dest_b = dest_b.wrapping_add(batch_size); |
948 | | } |
949 | | |
950 | | // Steal a batch of tasks from the front one by one. |
951 | | Flavor::Lifo => { |
952 | | // Try incrementing the front index to steal the task. |
953 | 56.4k | if self |
954 | 56.4k | .inner |
955 | 56.4k | .front |
956 | 56.4k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) |
957 | 56.4k | .is_err() |
958 | | { |
959 | | // We didn't steal this task, forget it. |
960 | 49.4k | mem::forget(task); |
961 | 49.4k | return Steal::Retry; |
962 | 7.03k | } |
963 | 7.03k | |
964 | 7.03k | // Move the front index one step forward. |
965 | 7.03k | f = f.wrapping_add(1); |
966 | | |
967 | | // Repeat the same procedure for the batch steals. |
968 | 12.8k | for i in 0..batch_size { |
969 | | // We've already got the current front index. Now execute the fence to |
970 | | // synchronize with other threads. |
971 | 12.8k | atomic::fence(Ordering::SeqCst); |
972 | 12.8k | |
973 | 12.8k | // Load the back index. |
974 | 12.8k | let b = self.inner.back.load(Ordering::Acquire); |
975 | 12.8k | |
976 | 12.8k | // Is the queue empty? |
977 | 12.8k | if b.wrapping_sub(f) <= 0 { |
978 | 339 | batch_size = i; |
979 | 339 | break; |
980 | 12.5k | } |
981 | 12.5k | |
982 | 12.5k | // Read the task at the front. |
983 | 12.5k | let tmp = unsafe { buffer.deref().read(f) }; |
984 | 12.5k | |
985 | 12.5k | // Try incrementing the front index to steal the task. |
986 | 12.5k | if self |
987 | 12.5k | .inner |
988 | 12.5k | .front |
989 | 12.5k | .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) |
990 | 12.5k | .is_err() |
991 | | { |
992 | | // We didn't steal this task, forget it and break from the loop. |
993 | 48 | mem::forget(tmp); |
994 | 48 | batch_size = i; |
995 | 48 | break; |
996 | 12.4k | } |
997 | 12.4k | |
998 | 12.4k | // Write the previously stolen task into the destination buffer. |
999 | 12.4k | unsafe { |
1000 | 12.4k | dest_buffer.write(dest_b, mem::replace(&mut task, tmp)); |
1001 | 12.4k | } |
1002 | 12.4k | |
1003 | 12.4k | // Move the source front index and the destination back index one step forward. |
1004 | 12.4k | f = f.wrapping_add(1); |
1005 | 12.4k | dest_b = dest_b.wrapping_add(1); |
1006 | | } |
1007 | | |
1008 | | // If stealing into a FIFO queue, stolen tasks need to be reversed. |
1009 | 9.00k | if dest.flavor == Flavor::Fifo { |
1010 | 1 | for i in 0..batch_size / 2 { |
1011 | 1 | unsafe { |
1012 | 1 | let i1 = dest_b.wrapping_sub(batch_size - i); |
1013 | 1 | let i2 = dest_b.wrapping_sub(i + 1); |
1014 | 1 | let t1 = dest_buffer.read(i1); |
1015 | 1 | let t2 = dest_buffer.read(i2); |
1016 | 1 | dest_buffer.write(i1, t2); |
1017 | 1 | dest_buffer.write(i2, t1); |
1018 | 1 | } |
1019 | | } |
1020 | 9.00k | } |
1021 | | } |
1022 | | } |
1023 | | |
1024 | 13.1k | atomic::fence(Ordering::Release); |
1025 | 13.1k | |
1026 | 13.1k | // Update the back index in the destination queue. |
1027 | 13.1k | // |
1028 | 13.1k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data |
1029 | 13.1k | // races because it doesn't understand fences. |
1030 | 13.1k | dest.inner.back.store(dest_b, Ordering::Release); |
1031 | 13.1k | |
1032 | 13.1k | // Return with success. |
1033 | 13.1k | Steal::Success(task) |
1034 | 279k | } |
1035 | | } |
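A hedged usage sketch (not part of this file): a scheduler thread typically drains its local `Worker` first and then retries `steal_batch_and_pop` while it reports `Steal::Retry`. The helper `find_task` below is hypothetical; only the public `crossbeam_deque` API shown in this listing is used.

```rust
use crossbeam_deque::{Steal, Stealer, Worker};

// Hypothetical helper: pop locally, else steal a batch (refilling `local`)
// and return the popped task. Loops only while the stealer loses races.
fn find_task<T>(local: &Worker<T>, stealer: &Stealer<T>) -> Option<T> {
    local.pop().or_else(|| loop {
        match stealer.steal_batch_and_pop(local) {
            Steal::Success(task) => return Some(task), // `local` was also refilled
            Steal::Empty => return None,               // nothing left to steal
            Steal::Retry => continue,                  // lost a race; try again
        }
    })
}

fn main() {
    let w1 = Worker::new_fifo();
    (0..8).for_each(|i| w1.push(i));
    let w2 = Worker::new_fifo();
    assert_eq!(find_task(&w2, &w1.stealer()), Some(0));
}
```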
1036 | | |
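The `Flavor::Lifo` arm above reverses a freshly stolen batch whenever the destination is a FIFO worker, swapping symmetric pairs with wrapping index arithmetic. A minimal sketch of that pair-swap on a plain slice, assuming a power-of-two capacity as the deque's buffers have; `reverse_batch` is illustrative, not crossbeam's API:

```rust
// Reverse the batch occupying the logical range `dest_b - batch_size .. dest_b`,
// mapping each logical index into the buffer with a power-of-two mask.
fn reverse_batch<T>(buf: &mut [T], dest_b: isize, batch_size: isize) {
    let mask = buf.len() as isize - 1; // valid because len is a power of two
    for i in 0..batch_size / 2 {
        let i1 = (dest_b.wrapping_sub(batch_size - i) & mask) as usize;
        let i2 = (dest_b.wrapping_sub(i + 1) & mask) as usize;
        buf.swap(i1, i2);
    }
}

fn main() {
    let mut buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    reverse_batch(&mut buf, 4, 4); // the batch lives in slots 0..4
    assert_eq!(buf, [3, 2, 1, 0, 4, 5, 6, 7]);
}
```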
1037 | | impl<T> Clone for Stealer<T> { |
1038 | | fn clone(&self) -> Stealer<T> { |
1039 | | Stealer { |
1040 | | inner: self.inner.clone(), |
1041 | | flavor: self.flavor, |
1042 | | } |
1043 | | } |
1044 | | } |
1045 | | |
1046 | | impl<T> fmt::Debug for Stealer<T> { |
1047 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1048 | | f.pad("Stealer { .. }") |
1049 | | } |
1050 | | } |
1051 | | |
1052 | | // Bits indicating the state of a slot: |
1053 | | // * If a task has been written into the slot, `WRITE` is set. |
1054 | | // * If a task has been read from the slot, `READ` is set. |
1055 | | // * If the block is being destroyed, `DESTROY` is set. |
1056 | | const WRITE: usize = 1; |
1057 | | const READ: usize = 2; |
1058 | | const DESTROY: usize = 4; |
1059 | | |
1060 | | // Each block covers one "lap" of indices. |
1061 | | const LAP: usize = 64; |
1062 | | // The maximum number of values a block can hold. |
1063 | | const BLOCK_CAP: usize = LAP - 1; |
1064 | | // How many lower bits are reserved for metadata. |
1065 | | const SHIFT: usize = 1; |
1066 | | // Indicates that the block is not the last one. |
1067 | | const HAS_NEXT: usize = 1; |
1068 | | |
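A hedged sketch of how these constants compose an index, inferred from the comments above (the `decode` helper is hypothetical, not this file's code): the low `SHIFT` bit carries metadata such as `HAS_NEXT`, and the remaining bits count slots, one `LAP` of them per block.

```rust
// Constants restated from the listing above.
const LAP: usize = 64;
const BLOCK_CAP: usize = LAP - 1;
const SHIFT: usize = 1;
const HAS_NEXT: usize = 1;

// Hypothetical decoder: split a packed index into a slot offset and the
// "block has a successor" metadata bit.
fn decode(index: usize) -> (usize, bool) {
    let offset = (index >> SHIFT) % LAP; // slot offset within the current block
    let has_next = index & HAS_NEXT != 0; // metadata stored in the low bit
    (offset, has_next)
}

fn main() {
    let index = (5 << SHIFT) | HAS_NEXT; // slot 5, with a next block linked
    assert_eq!(decode(index), (5, true));
    // Only BLOCK_CAP of the LAP offsets hold values; the last one is reserved.
    assert!(BLOCK_CAP < LAP);
}
```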
1069 | | /// A slot in a block. |
1070 | | struct Slot<T> { |
1071 | | /// The task. |
1072 | | task: UnsafeCell<MaybeUninit<T>>, |
1073 | | |
1074 | | /// The state of the slot. |
1075 | | state: AtomicUsize, |
1076 | | } |
1077 | | |
1078 | | impl<T> Slot<T> { |
1079 | | /// Waits until a task is written into the slot. |
1080 | 344k | fn wait_write(&self) { |
1081 | 344k | let backoff = Backoff::new(); |
1082 | 344k | while self.state.load(Ordering::Acquire) & WRITE == 0 { |
1083 | 34 | backoff.snooze(); |
1084 | 34 | } |
1085 | 344k | } |
1086 | | } |
1087 | | |
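`wait_write` above is a plain spin-wait: poll the slot's state with `Acquire` loads until the `WRITE` bit appears, backing off between attempts. A self-contained sketch of the same pattern, assuming the `crossbeam-utils` crate (which provides the `Backoff` used here) is available:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

use crossbeam_utils::Backoff;

const WRITE: usize = 1;

// Spin until another thread sets the WRITE bit, yielding progressively.
fn wait_for_write(state: &AtomicUsize) {
    let backoff = Backoff::new();
    while state.load(Ordering::Acquire) & WRITE == 0 {
        backoff.snooze(); // spins first, then yields to the OS scheduler
    }
}

fn main() {
    let state = Arc::new(AtomicUsize::new(0));
    let s = Arc::clone(&state);
    let writer = thread::spawn(move || s.fetch_or(WRITE, Ordering::Release));
    wait_for_write(&state); // returns once the writer has published WRITE
    writer.join().unwrap();
}
```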
1088 | | /// A block in a linked list. |
1089 | | /// |
1090 | | /// Each block in the list can hold up to `BLOCK_CAP` values. |
1091 | | struct Block<T> { |
1092 | | /// The next block in the linked list. |
1093 | | next: AtomicPtr<Block<T>>, |
1094 | | |
1095 | | /// Slots for values. |
1096 | | slots: [Slot<T>; BLOCK_CAP], |
1097 | | } |
1098 | | |
1099 | | impl<T> Block<T> { |
1100 | | /// Creates an empty block. |
1101 | 6.26k | fn new() -> Block<T> { |
1102 | 6.26k | // SAFETY: This is safe because: |
1103 | 6.26k | // [1] `Block::next` (AtomicPtr) may be safely zero initialized. |
1104 | 6.26k | // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. |
1105 | 6.26k | // [3] `Slot::task` (UnsafeCell) may be safely zero initialized because it |
1106 | 6.26k | // holds a MaybeUninit. |
1107 | 6.26k | // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. |
1108 | 6.26k | unsafe { MaybeUninit::zeroed().assume_init() } |
1109 | 6.26k | } |
1110 | | |
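The SAFETY argument in `Block::new` above rests on every field being valid when zeroed. A standalone sketch of the same trick with illustrative types (not crossbeam's):

```rust
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

struct DemoSlot {
    _task: UnsafeCell<MaybeUninit<u64>>, // any bytes are fine: contents stay uninit
    _state: AtomicUsize,                 // zero is a valid atomic value
}

struct DemoBlock {
    next: AtomicPtr<DemoBlock>, // the null pointer is all zero bytes
    _slots: [DemoSlot; 4],
}

// Zero-initialize the whole struct in one shot instead of writing each field.
fn demo_block() -> DemoBlock {
    unsafe { MaybeUninit::zeroed().assume_init() }
}

fn main() {
    let block = demo_block();
    assert!(block.next.load(Ordering::Relaxed).is_null());
}
```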
1111 | | /// Waits until the next pointer is set. |
1112 | 5.59k | fn wait_next(&self) -> *mut Block<T> { |
1113 | 5.59k | let backoff = Backoff::new(); |
1114 | | loop { |
1115 | 5.77k | let next = self.next.load(Ordering::Acquire); |
1116 | 5.77k | if !next.is_null() { |
1117 | 5.59k | return next; |
1118 | 176 | } |
1119 | 176 | backoff.snooze(); |
1120 | | } |
1121 | 5.59k | } <crossbeam_deque::deque::Block<usize>>::wait_next Line | Count | Source | 1112 | 4.65k | fn wait_next(&self) -> *mut Block<T> { | 1113 | 4.65k | let backoff = Backoff::new(); | 1114 | | loop { | 1115 | 4.83k | let next = self.next.load(Ordering::Acquire); | 1116 | 4.83k | if !next.is_null() { | 1117 | 4.65k | return next; | 1118 | 176 | } | 1119 | 176 | backoff.snooze(); | 1120 | | } | 1121 | 4.65k | } |
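The load/snooze loop in `wait_next` is the crate's standard backoff wait: spin until the writer publishes the next block pointer with a `Release` store. A minimal standalone sketch of the same pattern using only `std` (the helper name and spin threshold here are illustrative, not the crate's internals):

```rust
use std::hint;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread;

// Illustrative stand-in for `Backoff::snooze`: spin a few times, then yield.
fn wait_non_null<T>(next: &AtomicPtr<T>) -> *mut T {
    let mut spins = 0u32;
    loop {
        // Acquire pairs with the writer's Release store of the new block.
        let p = next.load(Ordering::Acquire);
        if !p.is_null() {
            return p;
        }
        if spins < 6 {
            hint::spin_loop();
            spins += 1;
        } else {
            thread::yield_now();
        }
    }
}
```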
1122 | | |
1123 | | /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. |
1124 | 5.61k | unsafe fn destroy(this: *mut Block<T>, count: usize) { |
1125 | | // It is not necessary to set the `DESTROY` bit in the last slot because that slot has |
1126 | | // begun destruction of the block. |
1127 | 341k | for i in (0..count).rev() {
1128 | 341k | let slot = (*this).slots.get_unchecked(i); |
1129 | | |
1130 | | // Mark the `DESTROY` bit if a thread is still using the slot. |
1131 | 341k | if slot.state.load(Ordering::Acquire) & READ == 0 |
1132 | 20 | && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 |
1133 | | { |
1134 | | // If a thread is still using the slot, it will continue destruction of the block. |
1135 | 19 | return; |
1136 | 342k | } |
1137 | | } |
1138 | | |
1139 | | // No thread is using the block, now it is safe to destroy it. |
1140 | 5.60k | drop(Box::from_raw(this)); |
1141 | 5.61k | }
Instantiations of Block::<T>::destroy: alloc::boxed::Box<usize> 793, injector::destructors::Elem 150, usize 4.67k, i32 unexecuted (x2)
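`destroy` relies on a two-bit handshake per slot: a stealer sets `READ` when it has finished copying a task out, and the destroyer sets `DESTROY` when it wants to free the block; whichever side sets its bit second takes responsibility for reclamation. A sketch of that handshake in isolation (the bit values are assumptions consistent with the expressions above, not quoted from the crate):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Assumed slot-state bits, consistent with the listing above.
const READ: usize = 2;
const DESTROY: usize = 4;

/// Stealer side: after copying the task out, set READ. If DESTROY was
/// already set, the destroyer gave up and this thread must reclaim.
fn must_continue_destruction(state: &AtomicUsize) -> bool {
    state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0
}

/// Destroyer side: set DESTROY. If READ was already set, the slot is
/// finished and reclamation can proceed past it; otherwise the reader
/// will continue the destruction when it is done.
fn may_proceed_past(state: &AtomicUsize) -> bool {
    state.fetch_or(DESTROY, Ordering::AcqRel) & READ != 0
}
```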
1142 | | } |
1143 | | |
1144 | | /// A position in a queue. |
1145 | | struct Position<T> { |
1146 | | /// The index in the queue. |
1147 | | index: AtomicUsize, |
1148 | | |
1149 | | /// The block in the linked list. |
1150 | | block: AtomicPtr<Block<T>>, |
1151 | | } |
1152 | | |
1153 | | /// An injector queue. |
1154 | | /// |
1155 | | /// This is a FIFO queue that can be shared among multiple threads. Task schedulers typically have |
1156 | | /// a single injector queue, which is the entry point for new tasks. |
1157 | | /// |
1158 | | /// # Examples |
1159 | | /// |
1160 | | /// ``` |
1161 | | /// use crossbeam_deque::{Injector, Steal}; |
1162 | | /// |
1163 | | /// let q = Injector::new(); |
1164 | | /// q.push(1); |
1165 | | /// q.push(2); |
1166 | | /// |
1167 | | /// assert_eq!(q.steal(), Steal::Success(1)); |
1168 | | /// assert_eq!(q.steal(), Steal::Success(2)); |
1169 | | /// assert_eq!(q.steal(), Steal::Empty); |
1170 | | /// ``` |
1171 | | pub struct Injector<T> { |
1172 | | /// The head of the queue. |
1173 | | head: CachePadded<Position<T>>, |
1174 | | |
1175 | | /// The tail of the queue. |
1176 | | tail: CachePadded<Position<T>>, |
1177 | | |
1178 | | /// Indicates that dropping an `Injector<T>` may drop values of type `T`.
1179 | | _marker: PhantomData<T>, |
1180 | | } |
1181 | | |
1182 | | unsafe impl<T: Send> Send for Injector<T> {} |
1183 | | unsafe impl<T: Send> Sync for Injector<T> {} |
1184 | | |
1185 | | impl<T> Default for Injector<T> { |
1186 | 13 | fn default() -> Self { |
1187 | 13 | let block = Box::into_raw(Box::new(Block::<T>::new())); |
1188 | 13 | Self { |
1189 | 13 | head: CachePadded::new(Position { |
1190 | 13 | block: AtomicPtr::new(block), |
1191 | 13 | index: AtomicUsize::new(0), |
1192 | 13 | }), |
1193 | 13 | tail: CachePadded::new(Position { |
1194 | 13 | block: AtomicPtr::new(block), |
1195 | 13 | index: AtomicUsize::new(0), |
1196 | 13 | }), |
1197 | 13 | _marker: PhantomData, |
1198 | 13 | } |
1199 | 13 | }
Instantiations of <Injector<T> as Default>::default: alloc::boxed::Box<usize> 1, usize 4, injector::destructors::Elem 1, i32 2, i32 5
1200 | | } |
1201 | | |
1202 | | impl<T> Injector<T> { |
1203 | | /// Creates a new injector queue. |
1204 | | /// |
1205 | | /// # Examples |
1206 | | /// |
1207 | | /// ``` |
1208 | | /// use crossbeam_deque::Injector; |
1209 | | /// |
1210 | | /// let q = Injector::<i32>::new(); |
1211 | | /// ``` |
1212 | 13 | pub fn new() -> Injector<T> { |
1213 | 13 | Self::default() |
1214 | 13 | }
Instantiations of Injector::<T>::new: i32 2, injector::destructors::Elem 1, alloc::boxed::Box<usize> 1, usize 4, i32 5
1215 | | |
1216 | | /// Pushes a task into the queue. |
1217 | | /// |
1218 | | /// # Examples |
1219 | | /// |
1220 | | /// ``` |
1221 | | /// use crossbeam_deque::Injector; |
1222 | | /// |
1223 | | /// let q = Injector::new();
1224 | | /// q.push(1);
1225 | | /// q.push(2);
1226 | | /// ``` |
1227 | 373k | pub fn push(&self, task: T) { |
1228 | 373k | let backoff = Backoff::new(); |
1229 | 373k | let mut tail = self.tail.index.load(Ordering::Acquire); |
1230 | 373k | let mut block = self.tail.block.load(Ordering::Acquire); |
1231 | 373k | let mut next_block = None; |
1232 | | |
1233 | 374k | loop { |
1234 | 374k | // Calculate the offset of the index into the block. |
1235 | 374k | let offset = (tail >> SHIFT) % LAP; |
1236 | 374k | |
1237 | 374k | // If we reached the end of the block, wait until the next one is installed. |
1238 | 374k | if offset == BLOCK_CAP { |
1239 | 16 | backoff.snooze(); |
1240 | 16 | tail = self.tail.index.load(Ordering::Acquire); |
1241 | 16 | block = self.tail.block.load(Ordering::Acquire); |
1242 | | continue; |
1243 | 374k | } |
1244 | | |
1245 | | // If we're going to have to install the next block, allocate it in advance in order to |
1246 | | // make the wait for other threads as short as possible. |
1247 | 374k | if offset + 1 == BLOCK_CAP && next_block.is_none() {
1248 | 6.25k | next_block = Some(Box::new(Block::<T>::new())); |
1249 | 338k | } |
1250 | | |
1251 | 344k | let new_tail = tail + (1 << SHIFT); |
1252 | 344k | |
1253 | 344k | // Try advancing the tail forward. |
1254 | 344k | match self.tail.index.compare_exchange_weak( |
1255 | 344k | tail, |
1256 | 344k | new_tail, |
1257 | 344k | Ordering::SeqCst, |
1258 | 344k | Ordering::Acquire, |
1259 | 344k | ) { |
1260 | 344k | Ok(_) => unsafe { |
1261 | | // If we've reached the end of the block, install the next one. |
1262 | 343k | if offset + 1 == BLOCK_CAP { |
1263 | 6.24k | let next_block = Box::into_raw(next_block.unwrap()); |
1264 | 6.24k | let next_index = new_tail.wrapping_add(1 << SHIFT); |
1265 | 6.24k | |
1266 | 6.24k | self.tail.block.store(next_block, Ordering::Release); |
1267 | 6.24k | self.tail.index.store(next_index, Ordering::Release); |
1268 | 6.24k | (*block).next.store(next_block, Ordering::Release); |
1269 | 336k | } |
1270 | | |
1271 | | // Write the task into the slot. |
1272 | 343k | let slot = (*block).slots.get_unchecked(offset); |
1273 | 343k | slot.task.get().write(MaybeUninit::new(task)); |
1274 | 343k | slot.state.fetch_or(WRITE, Ordering::Release); |
1275 | 343k | |
1276 | 343k | return; |
1277 | | }, |
1278 | 1.20k | Err(t) => { |
1279 | 1.20k | tail = t; |
1280 | 1.20k | block = self.tail.block.load(Ordering::Acquire); |
1281 | 1.20k | backoff.spin(); |
1282 | 1.20k | } |
1283 | | } |
1284 | | } |
1285 | 343k | }
Instantiations of Injector::<T>::push: alloc::boxed::Box<usize> 50.0k, usize 273k, i32 6, injector::destructors::Elem 50.0k, i32 21
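`push` never stores a plain element count: the low `SHIFT` bits of every index carry metadata (`HAS_NEXT` in the head index), and one position per lap is sacrificed so that `offset == BLOCK_CAP` can signal the end of a block. A worked sketch of that arithmetic (the constant values are assumptions consistent with the expressions in the listing):

```rust
// Assumed values consistent with the listing: one metadata bit,
// 64 index positions per lap, of which 63 hold tasks.
const SHIFT: usize = 1;
const HAS_NEXT: usize = 1; // low bit of the head index
const LAP: usize = 64;
const BLOCK_CAP: usize = LAP - 1;

// Position within the current block, ignoring the metadata bit.
fn offset_of(index: usize) -> usize {
    (index >> SHIFT) % LAP
}

fn main() {
    let mut tail = 0usize;
    for _ in 0..BLOCK_CAP {
        tail += 1 << SHIFT; // one push advances the index by a full step
    }
    // After 63 pushes the offset reaches BLOCK_CAP: install the next block.
    assert_eq!(offset_of(tail), BLOCK_CAP);
    // Setting HAS_NEXT in a head index does not disturb the offset.
    assert_eq!(offset_of(tail | HAS_NEXT), BLOCK_CAP);
}
```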
1286 | | |
1287 | | /// Steals a task from the queue. |
1288 | | /// |
1289 | | /// # Examples |
1290 | | /// |
1291 | | /// ``` |
1292 | | /// use crossbeam_deque::{Injector, Steal}; |
1293 | | /// |
1294 | | /// let q = Injector::new(); |
1295 | | /// q.push(1); |
1296 | | /// q.push(2); |
1297 | | /// |
1298 | | /// assert_eq!(q.steal(), Steal::Success(1)); |
1299 | | /// assert_eq!(q.steal(), Steal::Success(2)); |
1300 | | /// assert_eq!(q.steal(), Steal::Empty); |
1301 | | /// ``` |
1302 | 823k | pub fn steal(&self) -> Steal<T> { |
1303 | 823k | let mut head; |
1304 | 823k | let mut block; |
1305 | 823k | let mut offset; |
1306 | 823k | |
1307 | 823k | let backoff = Backoff::new(); |
1308 | 827k | loop { |
1309 | 827k | head = self.head.index.load(Ordering::Acquire); |
1310 | 827k | block = self.head.block.load(Ordering::Acquire); |
1311 | 827k | |
1312 | 827k | // Calculate the offset of the index into the block. |
1313 | 827k | offset = (head >> SHIFT) % LAP; |
1314 | 827k | |
1315 | 827k | // If we reached the end of the block, wait until the next one is installed. |
1316 | 827k | if offset == BLOCK_CAP { |
1317 | 3.83k | backoff.snooze(); |
1318 | 3.83k | } else { |
1319 | 823k | break; |
1320 | 823k | } |
1321 | 823k | } |
1322 | 823k | |
1323 | 823k | let mut new_head = head + (1 << SHIFT); |
1324 | 823k | |
1325 | 823k | if new_head & HAS_NEXT == 0 { |
1326 | 441k | atomic::fence(Ordering::SeqCst); |
1327 | 441k | let tail = self.tail.index.load(Ordering::Relaxed); |
1328 | 441k | |
1329 | 441k | // If the tail equals the head, that means the queue is empty. |
1330 | 441k | if head >> SHIFT == tail >> SHIFT { |
1331 | 379k | return Steal::Empty; |
1332 | 61.5k | } |
1333 | 61.5k | |
1334 | 61.5k | // If head and tail are not in the same block, set `HAS_NEXT` in head. |
1335 | 61.5k | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { |
1336 | 1.96k | new_head |= HAS_NEXT; |
1337 | 116k | } |
1338 | 382k | } |
1339 | | |
1340 | | // Try moving the head index forward. |
1341 | 500k | if self |
1342 | 500k | .head |
1343 | 500k | .index |
1344 | 500k | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) |
1345 | 500k | .is_err() |
1346 | | { |
1347 | 189k | return Steal::Retry; |
1348 | 311k | } |
1349 | 311k | |
1350 | 311k | unsafe { |
1351 | 311k | // If we've reached the end of the block, move to the next one. |
1352 | 311k | if offset + 1 == BLOCK_CAP { |
1353 | 4.49k | let next = (*block).wait_next(); |
1354 | 4.49k | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); |
1355 | 4.49k | if !(*next).next.load(Ordering::Relaxed).is_null() { |
1356 | 3.66k | next_index |= HAS_NEXT; |
1357 | 3.66k | }
1358 | | |
1359 | 4.49k | self.head.block.store(next, Ordering::Release); |
1360 | 4.49k | self.head.index.store(next_index, Ordering::Release); |
1361 | 306k | } |
1362 | | |
1363 | | // Read the task. |
1364 | 311k | let slot = (*block).slots.get_unchecked(offset); |
1365 | 311k | slot.wait_write(); |
1366 | 311k | let task = slot.task.get().read().assume_init(); |
1367 | | |
1368 | | // Destroy the block if we've reached the end, or if another thread wanted to destroy |
1369 | | // but couldn't because we were busy reading from the slot. |
1370 | 311k | if (offset + 1 == BLOCK_CAP) || (slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0) {
1371 | 4.49k | Block::destroy(block, offset); |
1372 | 275k | } |
1373 | | |
1374 | 280k | Steal::Success(task) |
1375 | | } |
1376 | 849k | }
Instantiations of Injector::<T>::steal: usize 767k, alloc::boxed::Box<usize> 54.5k, injector::destructors::Elem 1.13k, i32 9, i32 3
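`Steal::Retry` reports a lost CAS race, not an empty queue, so callers are expected to loop. A typical caller-side sketch using the crate's public API:

```rust
use crossbeam_deque::{Injector, Steal};

/// Keep retrying on contention; stop on success or a definitive Empty.
fn steal_one<T>(q: &Injector<T>) -> Option<T> {
    loop {
        match q.steal() {
            Steal::Success(task) => return Some(task),
            Steal::Empty => return None,
            Steal::Retry => continue,
        }
    }
}
```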
1377 | | |
1378 | | /// Steals a batch of tasks and pushes them into a worker. |
1379 | | /// |
1380 | | /// Exactly how many tasks will be stolen is not specified. That said, this method will try to
1381 | | /// steal around half of the tasks in the queue, but no more than a constant limit.
1382 | | /// |
1383 | | /// # Examples |
1384 | | /// |
1385 | | /// ``` |
1386 | | /// use crossbeam_deque::{Injector, Worker}; |
1387 | | /// |
1388 | | /// let q = Injector::new(); |
1389 | | /// q.push(1); |
1390 | | /// q.push(2); |
1391 | | /// q.push(3); |
1392 | | /// q.push(4); |
1393 | | /// |
1394 | | /// let w = Worker::new_fifo(); |
1395 | | /// let _ = q.steal_batch(&w); |
1396 | | /// assert_eq!(w.pop(), Some(1)); |
1397 | | /// assert_eq!(w.pop(), Some(2)); |
1398 | | /// ``` |
1399 | 470k | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { |
1400 | 470k | let mut head; |
1401 | 470k | let mut block; |
1402 | 470k | let mut offset; |
1403 | 470k | |
1404 | 470k | let backoff = Backoff::new(); |
1405 | 471k | loop { |
1406 | 471k | head = self.head.index.load(Ordering::Acquire); |
1407 | 471k | block = self.head.block.load(Ordering::Acquire); |
1408 | 471k | |
1409 | 471k | // Calculate the offset of the index into the block. |
1410 | 471k | offset = (head >> SHIFT) % LAP; |
1411 | 471k | |
1412 | 471k | // If we reached the end of the block, wait until the next one is installed. |
1413 | 471k | if offset == BLOCK_CAP { |
1414 | 1.07k | backoff.snooze(); |
1415 | 1.07k | } else { |
1416 | 470k | break; |
1417 | 470k | } |
1418 | 470k | } |
1419 | 470k | |
1420 | 470k | let mut new_head = head; |
1421 | 470k | let advance; |
1422 | 470k | |
1423 | 470k | if new_head & HAS_NEXT == 0 { |
1424 | 412k | atomic::fence(Ordering::SeqCst); |
1425 | 412k | let tail = self.tail.index.load(Ordering::Relaxed); |
1426 | 412k | |
1427 | 412k | // If the tail equals the head, that means the queue is empty. |
1428 | 412k | if head >> SHIFT == tail >> SHIFT { |
1429 | 351k | return Steal::Empty; |
1430 | 61.1k | } |
1431 | 61.1k | |
1432 | 61.1k | // If head and tail are not in the same block, set `HAS_NEXT` in head. Also, calculate |
1433 | 61.1k | // the right batch size to steal. |
1434 | 61.1k | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { |
1435 | 593 | new_head |= HAS_NEXT; |
1436 | 593 | // We can steal all tasks till the end of the block. |
1437 | 593 | advance = (BLOCK_CAP - offset).min(MAX_BATCH); |
1438 | 78.1k | } else { |
1439 | 78.1k | let len = (tail - head) >> SHIFT; |
1440 | 78.1k | // Steal half of the available tasks. |
1441 | 78.1k | advance = ((len + 1) / 2).min(MAX_BATCH); |
1442 | 78.1k | } |
1443 | 58.2k | } else { |
1444 | 58.2k | // We can steal all tasks till the end of the block. |
1445 | 58.2k | advance = (BLOCK_CAP - offset).min(MAX_BATCH); |
1446 | 58.2k | } |
1447 | | |
1448 | 136k | new_head += advance << SHIFT; |
1449 | 136k | let new_offset = offset + advance; |
1450 | 136k | |
1451 | 136k | // Try moving the head index forward. |
1452 | 136k | if self |
1453 | 136k | .head |
1454 | 136k | .index |
1455 | 136k | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) |
1456 | 136k | .is_err() |
1457 | | { |
1458 | 70.3k | return Steal::Retry; |
1459 | 66.6k | } |
1460 | 66.6k | |
1461 | 66.6k | // Reserve capacity for the stolen batch. |
1462 | 66.6k | let batch_size = new_offset - offset; |
1463 | 66.6k | dest.reserve(batch_size); |
1464 | 66.6k | |
1465 | 66.6k | // Get the destination buffer and back index. |
1466 | 66.6k | let dest_buffer = dest.buffer.get(); |
1467 | 66.6k | let dest_b = dest.inner.back.load(Ordering::Relaxed); |
1468 | 66.6k | |
1469 | 66.6k | unsafe { |
1470 | 66.6k | // If we've reached the end of the block, move to the next one. |
1471 | 66.6k | if new_offset == BLOCK_CAP { |
1472 | 575 | let next = (*block).wait_next(); |
1473 | 575 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); |
1474 | 575 | if !(*next).next.load(Ordering::Relaxed).is_null() { |
1475 | 76 | next_index |= HAS_NEXT; |
1476 | 499 | } |
1477 | | |
1478 | 575 | self.head.block.store(next, Ordering::Release); |
1479 | 575 | self.head.index.store(next_index, Ordering::Release); |
1480 | 66.0k | } |
1481 | | |
1482 | | // Copy values from the injector into the destination queue. |
1483 | 66.6k | match dest.flavor { |
1484 | 66.6k | Flavor::Fifo => { |
1485 | 66.6k | for i in 0..batch_size {
1486 | 36.1k | // Read the task. |
1487 | 36.1k | let slot = (*block).slots.get_unchecked(offset + i); |
1488 | 36.1k | slot.wait_write(); |
1489 | 36.1k | let task = slot.task.get().read().assume_init(); |
1490 | 36.1k | |
1491 | 36.1k | // Write it into the destination queue. |
1492 | 36.1k | dest_buffer.write(dest_b.wrapping_add(i as isize), task); |
1493 | 36.1k | } |
1494 | | } |
1495 | | |
1496 | | Flavor::Lifo => { |
1497 | 2 | for i in 0..batch_size {
1498 | 2 | // Read the task. |
1499 | 2 | let slot = (*block).slots.get_unchecked(offset + i); |
1500 | 2 | slot.wait_write(); |
1501 | 2 | let task = slot.task.get().read().assume_init(); |
1502 | 2 | |
1503 | 2 | // Write it into the destination queue. |
1504 | 2 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); |
1505 | 2 | } |
1506 | | } |
1507 | | } |
1508 | | |
1509 | 30.0k | atomic::fence(Ordering::Release); |
1510 | 30.0k | |
1511 | 30.0k | // Update the back index in the destination queue. |
1512 | 30.0k | // |
1513 | 30.0k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report |
1514 | 30.0k | // data races because it doesn't understand fences. |
1515 | 30.0k | dest.inner |
1516 | 30.0k | .back |
1517 | 30.0k | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); |
1518 | 30.0k | |
1519 | 30.0k | // Destroy the block if we've reached the end, or if another thread wanted to destroy |
1520 | 30.0k | // but couldn't because we were busy reading from the slot. |
1521 | 30.0k | if new_offset == BLOCK_CAP { |
1522 | 575 | Block::destroy(block, offset); |
1523 | 575 | } else { |
1524 | 33.3k | for i in offset..new_offset {
1525 | 33.3k | let slot = (*block).slots.get_unchecked(i); |
1526 | 33.3k | |
1527 | 33.3k | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { |
1528 | 7 | Block::destroy(block, offset); |
1529 | 7 | break; |
1530 | 33.3k | } |
1531 | | } |
1532 | | } |
1533 | | |
1534 | 30.1k | Steal::Success(()) |
1535 | | } |
1536 | 451k | }
Instantiations of Injector::<T>::steal_batch: injector::destructors::Elem 132, usize 470k
| 1503 | 0 | // Write it into the destination queue. | 1504 | 0 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); | 1505 | 0 | } | 1506 | | } | 1507 | | } | 1508 | | | 1509 | 29.9k | atomic::fence(Ordering::Release); | 1510 | 29.9k | | 1511 | 29.9k | // Update the back index in the destination queue. | 1512 | 29.9k | // | 1513 | 29.9k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report | 1514 | 29.9k | // data races because it doesn't understand fences. | 1515 | 29.9k | dest.inner | 1516 | 29.9k | .back | 1517 | 29.9k | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); | 1518 | 29.9k | | 1519 | 29.9k | // Destroy the block if we've reached the end, or if another thread wanted to destroy | 1520 | 29.9k | // but couldn't because we were busy reading from the slot. | 1521 | 29.9k | if new_offset == BLOCK_CAP { | 1522 | 499 | Block::destroy(block, offset); | 1523 | 499 | } else { | 1524 | 31.5k | for i in offset..new_offset29.4k { | 1525 | 31.5k | let slot = (*block).slots.get_unchecked(i); | 1526 | 31.5k | | 1527 | 31.5k | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { | 1528 | 5 | Block::destroy(block, offset); | 1529 | 5 | break; | 1530 | 31.5k | } | 1531 | | } | 1532 | | } | 1533 | | | 1534 | 29.9k | Steal::Success(()) | 1535 | | } | 1536 | 451k | } |
<crossbeam_deque::deque::Injector<i32>>::steal_batch Line | Count | Source | 1399 | 2 | pub fn steal_batch(&self, dest: &Worker<T>) -> Steal<()> { | 1400 | 2 | let mut head; | 1401 | 2 | let mut block; | 1402 | 2 | let mut offset; | 1403 | 2 | | 1404 | 2 | let backoff = Backoff::new(); | 1405 | 2 | loop { | 1406 | 2 | head = self.head.index.load(Ordering::Acquire); | 1407 | 2 | block = self.head.block.load(Ordering::Acquire); | 1408 | 2 | | 1409 | 2 | // Calculate the offset of the index into the block. | 1410 | 2 | offset = (head >> SHIFT) % LAP; | 1411 | 2 | | 1412 | 2 | // If we reached the end of the block, wait until the next one is installed. | 1413 | 2 | if offset == BLOCK_CAP { | 1414 | 0 | backoff.snooze(); | 1415 | 0 | } else { | 1416 | 2 | break; | 1417 | 2 | } | 1418 | 2 | } | 1419 | 2 | | 1420 | 2 | let mut new_head = head; | 1421 | 2 | let advance; | 1422 | 2 | | 1423 | 2 | if new_head & HAS_NEXT == 0 { | 1424 | 2 | atomic::fence(Ordering::SeqCst); | 1425 | 2 | let tail = self.tail.index.load(Ordering::Relaxed); | 1426 | 2 | | 1427 | 2 | // If the tail equals the head, that means the queue is empty. | 1428 | 2 | if head >> SHIFT == tail >> SHIFT { | 1429 | 0 | return Steal::Empty; | 1430 | 2 | } | 1431 | 2 | | 1432 | 2 | // If head and tail are not in the same block, set `HAS_NEXT` in head. Also, calculate | 1433 | 2 | // the right batch size to steal. | 1434 | 2 | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { | 1435 | 0 | new_head |= HAS_NEXT; | 1436 | 0 | // We can steal all tasks till the end of the block. | 1437 | 0 | advance = (BLOCK_CAP - offset).min(MAX_BATCH); | 1438 | 2 | } else { | 1439 | 2 | let len = (tail - head) >> SHIFT; | 1440 | 2 | // Steal half of the available tasks. | 1441 | 2 | advance = ((len + 1) / 2).min(MAX_BATCH); | 1442 | 2 | } | 1443 | 0 | } else { | 1444 | 0 | // We can steal all tasks till the end of the block. | 1445 | 0 | advance = (BLOCK_CAP - offset).min(MAX_BATCH); | 1446 | 0 | } | 1447 | | | 1448 | 2 | new_head += advance << SHIFT; | 1449 | 2 | let new_offset = offset + advance; | 1450 | 2 | | 1451 | 2 | // Try moving the head index forward. | 1452 | 2 | if self | 1453 | 2 | .head | 1454 | 2 | .index | 1455 | 2 | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) | 1456 | 2 | .is_err() | 1457 | | { | 1458 | 0 | return Steal::Retry; | 1459 | 2 | } | 1460 | 2 | | 1461 | 2 | // Reserve capacity for the stolen batch. | 1462 | 2 | let batch_size = new_offset - offset; | 1463 | 2 | dest.reserve(batch_size); | 1464 | 2 | | 1465 | 2 | // Get the destination buffer and back index. | 1466 | 2 | let dest_buffer = dest.buffer.get(); | 1467 | 2 | let dest_b = dest.inner.back.load(Ordering::Relaxed); | 1468 | 2 | | 1469 | 2 | unsafe { | 1470 | 2 | // If we've reached the end of the block, move to the next one. | 1471 | 2 | if new_offset == BLOCK_CAP { | 1472 | 0 | let next = (*block).wait_next(); | 1473 | 0 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); | 1474 | 0 | if !(*next).next.load(Ordering::Relaxed).is_null() { | 1475 | 0 | next_index |= HAS_NEXT; | 1476 | 0 | } | 1477 | | | 1478 | 0 | self.head.block.store(next, Ordering::Release); | 1479 | 0 | self.head.index.store(next_index, Ordering::Release); | 1480 | 2 | } | 1481 | | | 1482 | | // Copy values from the injector into the destination queue. | 1483 | 2 | match dest.flavor { | 1484 | 2 | Flavor::Fifo => { | 1485 | 2 | for i in 0..batch_size1 { | 1486 | 2 | // Read the task. 
| 1487 | 2 | let slot = (*block).slots.get_unchecked(offset + i); | 1488 | 2 | slot.wait_write(); | 1489 | 2 | let task = slot.task.get().read().assume_init(); | 1490 | 2 | | 1491 | 2 | // Write it into the destination queue. | 1492 | 2 | dest_buffer.write(dest_b.wrapping_add(i as isize), task); | 1493 | 2 | } | 1494 | | } | 1495 | | | 1496 | | Flavor::Lifo => { | 1497 | 2 | for i in 0..batch_size1 { | 1498 | 2 | // Read the task. | 1499 | 2 | let slot = (*block).slots.get_unchecked(offset + i); | 1500 | 2 | slot.wait_write(); | 1501 | 2 | let task = slot.task.get().read().assume_init(); | 1502 | 2 | | 1503 | 2 | // Write it into the destination queue. | 1504 | 2 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); | 1505 | 2 | } | 1506 | | } | 1507 | | } | 1508 | | | 1509 | 2 | atomic::fence(Ordering::Release); | 1510 | 2 | | 1511 | 2 | // Update the back index in the destination queue. | 1512 | 2 | // | 1513 | 2 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report | 1514 | 2 | // data races because it doesn't understand fences. | 1515 | 2 | dest.inner | 1516 | 2 | .back | 1517 | 2 | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); | 1518 | 2 | | 1519 | 2 | // Destroy the block if we've reached the end, or if another thread wanted to destroy | 1520 | 2 | // but couldn't because we were busy reading from the slot. | 1521 | 2 | if new_offset == BLOCK_CAP { | 1522 | 0 | Block::destroy(block, offset); | 1523 | 0 | } else { | 1524 | 4 | for i in offset..new_offset2 { | 1525 | 4 | let slot = (*block).slots.get_unchecked(i); | 1526 | 4 | | 1527 | 4 | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { | 1528 | 0 | Block::destroy(block, offset); | 1529 | 0 | break; | 1530 | 4 | } | 1531 | | } | 1532 | | } | 1533 | | | 1534 | 2 | Steal::Success(()) | 1535 | | } | 1536 | 2 | } |
|
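To make the control flow above concrete, here is a minimal sketch of how a caller might drive `steal_batch` through the crate's public API. The retry loop and the concrete task values are illustrative only; in a single-threaded example the `Retry` arm never actually fires.

```
use crossbeam_deque::{Injector, Steal, Worker};

fn main() {
    let q = Injector::new();
    for i in 0..8 {
        q.push(i);
    }

    let w = Worker::new_fifo();

    // `steal_batch` may report `Retry` under contention, so callers
    // typically loop until they see a definitive `Success` or `Empty`.
    loop {
        match q.steal_batch(&w) {
            Steal::Success(()) | Steal::Empty => break,
            Steal::Retry => continue,
        }
    }

    // About half of the eight queued tasks were moved into the worker,
    // and a FIFO worker hands them back in their original order.
    assert_eq!(w.pop(), Some(0));
}
```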
1537 | | |
1538 | | /// Steals a batch of tasks, pushes them into a worker, and pops a task from that worker. |
1539 | | /// |
1540 | | /// Exactly how many tasks will be stolen is not specified. This method tries to steal
1541 | | /// around half of the tasks in the queue, but never more than a constant limit.
1542 | | /// |
1543 | | /// # Examples |
1544 | | /// |
1545 | | /// ``` |
1546 | | /// use crossbeam_deque::{Injector, Steal, Worker}; |
1547 | | /// |
1548 | | /// let q = Injector::new(); |
1549 | | /// q.push(1); |
1550 | | /// q.push(2); |
1551 | | /// q.push(3); |
1552 | | /// q.push(4); |
1553 | | /// |
1554 | | /// let w = Worker::new_fifo(); |
1555 | | /// assert_eq!(q.steal_batch_and_pop(&w), Steal::Success(1)); |
1556 | | /// assert_eq!(w.pop(), Some(2)); |
1557 | | /// ``` |
1558 | 406k | pub fn steal_batch_and_pop(&self, dest: &Worker<T>) -> Steal<T> { |
1559 | 406k | let mut head; |
1560 | 406k | let mut block; |
1561 | 406k | let mut offset; |
1562 | 406k | |
1563 | 406k | let backoff = Backoff::new(); |
1564 | 407k | loop { |
1565 | 407k | head = self.head.index.load(Ordering::Acquire); |
1566 | 407k | block = self.head.block.load(Ordering::Acquire); |
1567 | 407k | |
1568 | 407k | // Calculate the offset of the index into the block. |
1569 | 407k | offset = (head >> SHIFT) % LAP; |
1570 | 407k | |
1571 | 407k | // If we reached the end of the block, wait until the next one is installed. |
1572 | 407k | if offset == BLOCK_CAP { |
1573 | 1.02k | backoff.snooze(); |
1574 | 1.02k | } else { |
1575 | 406k | break; |
1576 | 406k | } |
1577 | 406k | } |
1578 | 406k | |
1579 | 406k | let mut new_head = head; |
1580 | 406k | let advance; |
1581 | 406k | |
1582 | 406k | if new_head & HAS_NEXT == 0 { |
1583 | 425k | atomic::fence(Ordering::SeqCst); |
1584 | 425k | let tail = self.tail.index.load(Ordering::Relaxed); |
1585 | 425k | |
1586 | 425k | // If the tail equals the head, that means the queue is empty. |
1587 | 425k | if head >> SHIFT == tail >> SHIFT { |
1588 | 352k | return Steal::Empty; |
1589 | 72.6k | } |
1590 | 72.6k | |
1591 | 72.6k | // If head and tail are not in the same block, set `HAS_NEXT` in head. |
1592 | 72.6k | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { |
1593 | 704 | new_head |= HAS_NEXT; |
1594 | 704 | // We can steal all tasks till the end of the block. |
1595 | 704 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); |
1596 | 83.6k | } else { |
1597 | 83.6k | let len = (tail - head) >> SHIFT; |
1598 | 83.6k | // Steal half of the available tasks. |
1599 | 83.6k | advance = ((len + 1) / 2).min(MAX_BATCH + 1); |
1600 | 83.6k | } |
1601 | 18.4E | } else { |
1602 | 18.4E | // We can steal all tasks till the end of the block. |
1603 | 18.4E | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); |
1604 | 18.4E | } |
1605 | | |
1606 | 65.8k | new_head += advance << SHIFT; |
1607 | 65.8k | let new_offset = offset + advance; |
1608 | 65.8k | |
1609 | 65.8k | // Try moving the head index forward. |
1610 | 65.8k | if self |
1611 | 65.8k | .head |
1612 | 65.8k | .index |
1613 | 65.8k | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) |
1614 | 65.8k | .is_err() |
1615 | | { |
1616 | 68.5k | return Steal::Retry; |
1617 | 18.4E | } |
1618 | 18.4E | |
1619 | 18.4E | // Reserve capacity for the stolen batch. |
1620 | 18.4E | let batch_size = new_offset - offset - 1; |
1621 | 18.4E | dest.reserve(batch_size); |
1622 | 18.4E | |
1623 | 18.4E | // Get the destination buffer and back index. |
1624 | 18.4E | let dest_buffer = dest.buffer.get(); |
1625 | 18.4E | let dest_b = dest.inner.back.load(Ordering::Relaxed); |
1626 | 18.4E | |
1627 | 18.4E | unsafe { |
1628 | 18.4E | // If we've reached the end of the block, move to the next one. |
1629 | 18.4E | if new_offset == BLOCK_CAP { |
1630 | 534 | let next = (*block).wait_next(); |
1631 | 534 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); |
1632 | 534 | if !(*next).next.load(Ordering::Relaxed).is_null() { |
1633 | 55 | next_index |= HAS_NEXT; |
1634 | 479 | } |
1635 | | |
1636 | 534 | self.head.block.store(next, Ordering::Release); |
1637 | 534 | self.head.index.store(next_index, Ordering::Release); |
1638 | 18.4E | } |
1639 | | |
1640 | | // Read the task. |
1641 | 18.4E | let slot = (*block).slots.get_unchecked(offset); |
1642 | 18.4E | slot.wait_write(); |
1643 | 18.4E | let task = slot.task.get().read().assume_init(); |
1644 | 18.4E | |
1645 | 18.4E | match dest.flavor { |
1646 | 18.4E | Flavor::Fifo => { |
1647 | | // Copy values from the injector into the destination queue. |
1648 | 5.70k | for i in 0..batch_size {
1649 | 5.70k | // Read the task. |
1650 | 5.70k | let slot = (*block).slots.get_unchecked(offset + i + 1); |
1651 | 5.70k | slot.wait_write(); |
1652 | 5.70k | let task = slot.task.get().read().assume_init(); |
1653 | 5.70k | |
1654 | 5.70k | // Write it into the destination queue. |
1655 | 5.70k | dest_buffer.write(dest_b.wrapping_add(i as isize), task); |
1656 | 5.70k | } |
1657 | | } |
1658 | | |
1659 | | Flavor::Lifo => { |
1660 | | // Copy values from the injector into the destination queue. |
1661 | 2 | for i in 0..batch_size {
1662 | 2 | // Read the task. |
1663 | 2 | let slot = (*block).slots.get_unchecked(offset + i + 1); |
1664 | 2 | slot.wait_write(); |
1665 | 2 | let task = slot.task.get().read().assume_init(); |
1666 | 2 | |
1667 | 2 | // Write it into the destination queue. |
1668 | 2 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); |
1669 | 2 | } |
1670 | | } |
1671 | | } |
1672 | | |
1673 | 27.8k | atomic::fence(Ordering::Release); |
1674 | 27.8k | |
1675 | 27.8k | // Update the back index in the destination queue. |
1676 | 27.8k | // |
1677 | 27.8k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report |
1678 | 27.8k | // data races because it doesn't understand fences. |
1679 | 27.8k | dest.inner |
1680 | 27.8k | .back |
1681 | 27.8k | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); |
1682 | 27.8k | |
1683 | 27.8k | // Destroy the block if we've reached the end, or if another thread wanted to destroy |
1684 | 27.8k | // but couldn't because we were busy reading from the slot. |
1685 | 27.8k | if new_offset == BLOCK_CAP { |
1686 | 534 | Block::destroy(block, offset); |
1687 | 534 | } else { |
1688 | 32.5k | for i in offset..new_offset {
1689 | 32.5k | let slot = (*block).slots.get_unchecked(i); |
1690 | 32.5k | |
1691 | 32.5k | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { |
1692 | 8 | Block::destroy(block, offset); |
1693 | 8 | break; |
1694 | 32.5k | } |
1695 | | } |
1696 | | } |
1697 | | |
1698 | 29.0k | Steal::Success(task) |
1699 | | } |
1700 | 450k | } <crossbeam_deque::deque::Injector<injector::destructors::Elem>>::steal_batch_and_pop Line | Count | Source | 1558 | 132 | pub fn steal_batch_and_pop(&self, dest: &Worker<T>) -> Steal<T> { | 1559 | 132 | let mut head; | 1560 | 132 | let mut block; | 1561 | 132 | let mut offset; | 1562 | 132 | | 1563 | 132 | let backoff = Backoff::new(); | 1564 | 134 | loop { | 1565 | 134 | head = self.head.index.load(Ordering::Acquire); | 1566 | 134 | block = self.head.block.load(Ordering::Acquire); | 1567 | 134 | | 1568 | 134 | // Calculate the offset of the index into the block. | 1569 | 134 | offset = (head >> SHIFT) % LAP; | 1570 | 134 | | 1571 | 134 | // If we reached the end of the block, wait until the next one is installed. | 1572 | 134 | if offset == BLOCK_CAP { | 1573 | 2 | backoff.snooze(); | 1574 | 2 | } else { | 1575 | 132 | break; | 1576 | 132 | } | 1577 | 132 | } | 1578 | 132 | | 1579 | 132 | let mut new_head = head; | 1580 | 132 | let advance; | 1581 | 132 | | 1582 | 132 | if new_head & HAS_NEXT == 0 { | 1583 | 0 | atomic::fence(Ordering::SeqCst); | 1584 | 0 | let tail = self.tail.index.load(Ordering::Relaxed); | 1585 | 0 |
| 1586 | 0 | // If the tail equals the head, that means the queue is empty. | 1587 | 0 | if head >> SHIFT == tail >> SHIFT { | 1588 | 0 | return Steal::Empty; | 1589 | 0 | } | 1590 | 0 |
| 1591 | 0 | // If head and tail are not in the same block, set `HAS_NEXT` in head. | 1592 | 0 | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { | 1593 | 0 | new_head |= HAS_NEXT; | 1594 | 0 | // We can steal all tasks till the end of the block. | 1595 | 0 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1596 | 0 | } else { | 1597 | 0 | let len = (tail - head) >> SHIFT; | 1598 | 0 | // Steal half of the available tasks. | 1599 | 0 | advance = ((len + 1) / 2).min(MAX_BATCH + 1); | 1600 | 0 | } | 1601 | 132 | } else { | 1602 | 132 | // We can steal all tasks till the end of the block. | 1603 | 132 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1604 | 132 | } | 1605 | | | 1606 | 132 | new_head += advance << SHIFT; | 1607 | 132 | let new_offset = offset + advance; | 1608 | 132 | | 1609 | 132 | // Try moving the head index forward. | 1610 | 132 | if self | 1611 | 132 | .head | 1612 | 132 | .index | 1613 | 132 | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) | 1614 | 132 | .is_err() | 1615 | | { | 1616 | 1 | return Steal::Retry; | 1617 | 131 | } | 1618 | 131 | | 1619 | 131 | // Reserve capacity for the stolen batch. | 1620 | 131 | let batch_size = new_offset - offset - 1; | 1621 | 131 | dest.reserve(batch_size); | 1622 | 131 | | 1623 | 131 | // Get the destination buffer and back index. | 1624 | 131 | let dest_buffer = dest.buffer.get(); | 1625 | 131 | let dest_b = dest.inner.back.load(Ordering::Relaxed); | 1626 | 131 | | 1627 | 131 | unsafe { | 1628 | 131 | // If we've reached the end of the block, move to the next one. | 1629 | 131 | if new_offset == BLOCK_CAP { | 1630 | 55 | let next = (*block).wait_next(); | 1631 | 55 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); | 1632 | 55 | if !(*next).next.load(Ordering::Relaxed).is_null() { | 1633 | 55 | next_index |= HAS_NEXT; | 1634 | 55 | }0 | 1635 | | | 1636 | 55 | self.head.block.store(next, Ordering::Release); | 1637 | 55 | self.head.index.store(next_index, Ordering::Release); | 1638 | 76 | } | 1639 | | | 1640 | | // Read the task. | 1641 | 131 | let slot = (*block).slots.get_unchecked(offset); | 1642 | 131 | slot.wait_write(); | 1643 | 131 | let task = slot.task.get().read().assume_init(); | 1644 | 131 | | 1645 | 131 | match dest.flavor { | 1646 | 131 | Flavor::Fifo => { | 1647 | | // Copy values from the injector into the destination queue. | 1648 | 4.02k | for i in 0..batch_size131 { | 1649 | 4.02k | // Read the task. | 1650 | 4.02k | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1651 | 4.02k | slot.wait_write(); | 1652 | 4.02k | let task = slot.task.get().read().assume_init(); | 1653 | 4.02k | | 1654 | 4.02k | // Write it into the destination queue. | 1655 | 4.02k | dest_buffer.write(dest_b.wrapping_add(i as isize), task); | 1656 | 4.02k | } | 1657 | | } | 1658 | | | 1659 | | Flavor::Lifo => { | 1660 | | // Copy values from the injector into the destination queue. | 1661 | 0 | for i in 0..batch_size { | 1662 | 0 | // Read the task. | 1663 | 0 | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1664 | 0 | slot.wait_write(); | 1665 | 0 | let task = slot.task.get().read().assume_init(); | 1666 | 0 |
| 1667 | 0 | // Write it into the destination queue. | 1668 | 0 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); | 1669 | 0 | } | 1670 | | } | 1671 | | } | 1672 | | | 1673 | 131 | atomic::fence(Ordering::Release); | 1674 | 131 | | 1675 | 131 | // Update the back index in the destination queue. | 1676 | 131 | // | 1677 | 131 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report | 1678 | 131 | // data races because it doesn't understand fences. | 1679 | 131 | dest.inner | 1680 | 131 | .back | 1681 | 131 | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); | 1682 | 131 | | 1683 | 131 | // Destroy the block if we've reached the end, or if another thread wanted to destroy | 1684 | 131 | // but couldn't because we were busy reading from the slot. | 1685 | 131 | if new_offset == BLOCK_CAP { | 1686 | 55 | Block::destroy(block, offset); | 1687 | 55 | } else { | 1688 | 2.50k | for i in offset..new_offset76 { | 1689 | 2.50k | let slot = (*block).slots.get_unchecked(i); | 1690 | 2.50k | | 1691 | 2.50k | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { | 1692 | 2 | Block::destroy(block, offset); | 1693 | 2 | break; | 1694 | 2.50k | } | 1695 | | } | 1696 | | } | 1697 | | | 1698 | 131 | Steal::Success(task) | 1699 | | } | 1700 | 132 | } |
<crossbeam_deque::deque::Injector<usize>>::steal_batch_and_pop Line | Count | Source | 1558 | 406k | pub fn steal_batch_and_pop(&self, dest: &Worker<T>) -> Steal<T> { | 1559 | 406k | let mut head; | 1560 | 406k | let mut block; | 1561 | 406k | let mut offset; | 1562 | 406k | | 1563 | 406k | let backoff = Backoff::new(); | 1564 | 407k | loop { | 1565 | 407k | head = self.head.index.load(Ordering::Acquire); | 1566 | 407k | block = self.head.block.load(Ordering::Acquire); | 1567 | 407k | | 1568 | 407k | // Calculate the offset of the index into the block. | 1569 | 407k | offset = (head >> SHIFT) % LAP; | 1570 | 407k | | 1571 | 407k | // If we reached the end of the block, wait until the next one is installed. | 1572 | 407k | if offset == BLOCK_CAP { | 1573 | 1.02k | backoff.snooze(); | 1574 | 1.02k | } else { | 1575 | 406k | break; | 1576 | 406k | } | 1577 | 406k | } | 1578 | 406k | | 1579 | 406k | let mut new_head = head; | 1580 | 406k | let advance; | 1581 | 406k | | 1582 | 406k | if new_head & HAS_NEXT == 0 { | 1583 | 425k | atomic::fence(Ordering::SeqCst); | 1584 | 425k | let tail = self.tail.index.load(Ordering::Relaxed); | 1585 | 425k | | 1586 | 425k | // If the tail equals the head, that means the queue is empty. | 1587 | 425k | if head >> SHIFT == tail >> SHIFT { | 1588 | 352k | return Steal::Empty; | 1589 | 72.6k | } | 1590 | 72.6k | | 1591 | 72.6k | // If head and tail are not in the same block, set `HAS_NEXT` in head. | 1592 | 72.6k | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { | 1593 | 704 | new_head |= HAS_NEXT; | 1594 | 704 | // We can steal all tasks till the end of the block. | 1595 | 704 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1596 | 83.6k | } else { | 1597 | 83.6k | let len = (tail - head) >> SHIFT; | 1598 | 83.6k | // Steal half of the available tasks. | 1599 | 83.6k | advance = ((len + 1) / 2).min(MAX_BATCH + 1); | 1600 | 83.6k | } | 1601 | 18.4E | } else { | 1602 | 18.4E | // We can steal all tasks till the end of the block. | 1603 | 18.4E | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1604 | 18.4E | } | 1605 | | | 1606 | 65.6k | new_head += advance << SHIFT; | 1607 | 65.6k | let new_offset = offset + advance; | 1608 | 65.6k | | 1609 | 65.6k | // Try moving the head index forward. | 1610 | 65.6k | if self | 1611 | 65.6k | .head | 1612 | 65.6k | .index | 1613 | 65.6k | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) | 1614 | 65.6k | .is_err() | 1615 | | { | 1616 | 68.5k | return Steal::Retry; | 1617 | 18.4E | } | 1618 | 18.4E | | 1619 | 18.4E | // Reserve capacity for the stolen batch. | 1620 | 18.4E | let batch_size = new_offset - offset - 1; | 1621 | 18.4E | dest.reserve(batch_size); | 1622 | 18.4E | | 1623 | 18.4E | // Get the destination buffer and back index. | 1624 | 18.4E | let dest_buffer = dest.buffer.get(); | 1625 | 18.4E | let dest_b = dest.inner.back.load(Ordering::Relaxed); | 1626 | 18.4E | | 1627 | 18.4E | unsafe { | 1628 | 18.4E | // If we've reached the end of the block, move to the next one. | 1629 | 18.4E | if new_offset == BLOCK_CAP { | 1630 | 479 | let next = (*block).wait_next(); | 1631 | 479 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); | 1632 | 479 | if !(*next).next.load(Ordering::Relaxed).is_null() { | 1633 | 0 | next_index |= HAS_NEXT; | 1634 | 479 | } | 1635 | | | 1636 | 479 | self.head.block.store(next, Ordering::Release); | 1637 | 479 | self.head.index.store(next_index, Ordering::Release); | 1638 | 18.4E | } | 1639 | | | 1640 | | // Read the task. 
| 1641 | 18.4E | let slot = (*block).slots.get_unchecked(offset); | 1642 | 18.4E | slot.wait_write(); | 1643 | 18.4E | let task = slot.task.get().read().assume_init(); | 1644 | 18.4E | | 1645 | 18.4E | match dest.flavor { | 1646 | 18.4E | Flavor::Fifo => { | 1647 | | // Copy values from the injector into the destination queue. | 1648 | 18.4E | for i1.67k in 0..batch_size { | 1649 | 1.67k | // Read the task. | 1650 | 1.67k | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1651 | 1.67k | slot.wait_write(); | 1652 | 1.67k | let task = slot.task.get().read().assume_init(); | 1653 | 1.67k | | 1654 | 1.67k | // Write it into the destination queue. | 1655 | 1.67k | dest_buffer.write(dest_b.wrapping_add(i as isize), task); | 1656 | 1.67k | } | 1657 | | } | 1658 | | | 1659 | | Flavor::Lifo => { | 1660 | | // Copy values from the injector into the destination queue. | 1661 | 0 | for i in 0..batch_size { | 1662 | 0 | // Read the task. | 1663 | 0 | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1664 | 0 | slot.wait_write(); | 1665 | 0 | let task = slot.task.get().read().assume_init(); | 1666 | 0 |
| 1667 | 0 | // Write it into the destination queue. | 1668 | 0 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); | 1669 | 0 | } | 1670 | | } | 1671 | | } | 1672 | | | 1673 | 27.7k | atomic::fence(Ordering::Release); | 1674 | 27.7k | | 1675 | 27.7k | // Update the back index in the destination queue. | 1676 | 27.7k | // | 1677 | 27.7k | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report | 1678 | 27.7k | // data races because it doesn't understand fences. | 1679 | 27.7k | dest.inner | 1680 | 27.7k | .back | 1681 | 27.7k | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); | 1682 | 27.7k | | 1683 | 27.7k | // Destroy the block if we've reached the end, or if another thread wanted to destroy | 1684 | 27.7k | // but couldn't because we were busy reading from the slot. | 1685 | 27.7k | if new_offset == BLOCK_CAP { | 1686 | 479 | Block::destroy(block, offset); | 1687 | 479 | } else { | 1688 | 30.0k | for i in offset..new_offset27.2k { | 1689 | 30.0k | let slot = (*block).slots.get_unchecked(i); | 1690 | 30.0k | | 1691 | 30.0k | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { | 1692 | 6 | Block::destroy(block, offset); | 1693 | 6 | break; | 1694 | 30.0k | } | 1695 | | } | 1696 | | } | 1697 | | | 1698 | 28.9k | Steal::Success(task) | 1699 | | } | 1700 | 449k | } |
<crossbeam_deque::deque::Injector<i32>>::steal_batch_and_pop Line | Count | Source | 1558 | 2 | pub fn steal_batch_and_pop(&self, dest: &Worker<T>) -> Steal<T> { | 1559 | 2 | let mut head; | 1560 | 2 | let mut block; | 1561 | 2 | let mut offset; | 1562 | 2 | | 1563 | 2 | let backoff = Backoff::new(); | 1564 | 2 | loop { | 1565 | 2 | head = self.head.index.load(Ordering::Acquire); | 1566 | 2 | block = self.head.block.load(Ordering::Acquire); | 1567 | 2 | | 1568 | 2 | // Calculate the offset of the index into the block. | 1569 | 2 | offset = (head >> SHIFT) % LAP; | 1570 | 2 | | 1571 | 2 | // If we reached the end of the block, wait until the next one is installed. | 1572 | 2 | if offset == BLOCK_CAP { | 1573 | 0 | backoff.snooze(); | 1574 | 0 | } else { | 1575 | 2 | break; | 1576 | 2 | } | 1577 | 2 | } | 1578 | 2 | | 1579 | 2 | let mut new_head = head; | 1580 | 2 | let advance; | 1581 | 2 | | 1582 | 2 | if new_head & HAS_NEXT == 0 { | 1583 | 2 | atomic::fence(Ordering::SeqCst); | 1584 | 2 | let tail = self.tail.index.load(Ordering::Relaxed); | 1585 | 2 | | 1586 | 2 | // If the tail equals the head, that means the queue is empty. | 1587 | 2 | if head >> SHIFT == tail >> SHIFT { | 1588 | 0 | return Steal::Empty; | 1589 | 2 | } | 1590 | 2 | | 1591 | 2 | // If head and tail are not in the same block, set `HAS_NEXT` in head. | 1592 | 2 | if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { | 1593 | 0 | new_head |= HAS_NEXT; | 1594 | 0 | // We can steal all tasks till the end of the block. | 1595 | 0 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1596 | 2 | } else { | 1597 | 2 | let len = (tail - head) >> SHIFT; | 1598 | 2 | // Steal half of the available tasks. | 1599 | 2 | advance = ((len + 1) / 2).min(MAX_BATCH + 1); | 1600 | 2 | } | 1601 | 0 | } else { | 1602 | 0 | // We can steal all tasks till the end of the block. | 1603 | 0 | advance = (BLOCK_CAP - offset).min(MAX_BATCH + 1); | 1604 | 0 | } | 1605 | | | 1606 | 2 | new_head += advance << SHIFT; | 1607 | 2 | let new_offset = offset + advance; | 1608 | 2 | | 1609 | 2 | // Try moving the head index forward. | 1610 | 2 | if self | 1611 | 2 | .head | 1612 | 2 | .index | 1613 | 2 | .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) | 1614 | 2 | .is_err() | 1615 | | { | 1616 | 0 | return Steal::Retry; | 1617 | 2 | } | 1618 | 2 | | 1619 | 2 | // Reserve capacity for the stolen batch. | 1620 | 2 | let batch_size = new_offset - offset - 1; | 1621 | 2 | dest.reserve(batch_size); | 1622 | 2 | | 1623 | 2 | // Get the destination buffer and back index. | 1624 | 2 | let dest_buffer = dest.buffer.get(); | 1625 | 2 | let dest_b = dest.inner.back.load(Ordering::Relaxed); | 1626 | 2 | | 1627 | 2 | unsafe { | 1628 | 2 | // If we've reached the end of the block, move to the next one. | 1629 | 2 | if new_offset == BLOCK_CAP { | 1630 | 0 | let next = (*block).wait_next(); | 1631 | 0 | let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); | 1632 | 0 | if !(*next).next.load(Ordering::Relaxed).is_null() { | 1633 | 0 | next_index |= HAS_NEXT; | 1634 | 0 | } | 1635 | | | 1636 | 0 | self.head.block.store(next, Ordering::Release); | 1637 | 0 | self.head.index.store(next_index, Ordering::Release); | 1638 | 2 | } | 1639 | | | 1640 | | // Read the task. 
| 1641 | 2 | let slot = (*block).slots.get_unchecked(offset); | 1642 | 2 | slot.wait_write(); | 1643 | 2 | let task = slot.task.get().read().assume_init(); | 1644 | 2 | | 1645 | 2 | match dest.flavor { | 1646 | 2 | Flavor::Fifo => { | 1647 | | // Copy values from the injector into the destination queue. | 1648 | 2 | for i in 0..batch_size1 { | 1649 | 2 | // Read the task. | 1650 | 2 | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1651 | 2 | slot.wait_write(); | 1652 | 2 | let task = slot.task.get().read().assume_init(); | 1653 | 2 | | 1654 | 2 | // Write it into the destination queue. | 1655 | 2 | dest_buffer.write(dest_b.wrapping_add(i as isize), task); | 1656 | 2 | } | 1657 | | } | 1658 | | | 1659 | | Flavor::Lifo => { | 1660 | | // Copy values from the injector into the destination queue. | 1661 | 2 | for i in 0..batch_size1 { | 1662 | 2 | // Read the task. | 1663 | 2 | let slot = (*block).slots.get_unchecked(offset + i + 1); | 1664 | 2 | slot.wait_write(); | 1665 | 2 | let task = slot.task.get().read().assume_init(); | 1666 | 2 | | 1667 | 2 | // Write it into the destination queue. | 1668 | 2 | dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); | 1669 | 2 | } | 1670 | | } | 1671 | | } | 1672 | | | 1673 | 2 | atomic::fence(Ordering::Release); | 1674 | 2 | | 1675 | 2 | // Update the back index in the destination queue. | 1676 | 2 | // | 1677 | 2 | // This ordering could be `Relaxed`, but then thread sanitizer would falsely report | 1678 | 2 | // data races because it doesn't understand fences. | 1679 | 2 | dest.inner | 1680 | 2 | .back | 1681 | 2 | .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); | 1682 | 2 | | 1683 | 2 | // Destroy the block if we've reached the end, or if another thread wanted to destroy | 1684 | 2 | // but couldn't because we were busy reading from the slot. | 1685 | 2 | if new_offset == BLOCK_CAP { | 1686 | 0 | Block::destroy(block, offset); | 1687 | 0 | } else { | 1688 | 5 | for i in offset..new_offset2 { | 1689 | 5 | let slot = (*block).slots.get_unchecked(i); | 1690 | 5 | | 1691 | 5 | if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { | 1692 | 0 | Block::destroy(block, offset); | 1693 | 0 | break; | 1694 | 5 | } | 1695 | | } | 1696 | | } | 1697 | | | 1698 | 2 | Steal::Success(task) | 1699 | | } | 1700 | 2 | } |
|
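The usual consumer of `steal_batch_and_pop` is a scheduler loop that falls back from the local worker to the global injector and then to peer stealers. The sketch below follows the pattern from the crate-level documentation; `find_task` is an illustrative name, not part of the API.

```
use crossbeam_deque::{Injector, Steal, Stealer, Worker};
use std::iter;

// Try the local queue first, then refill from the injector, then steal
// from peers; keep retrying as long as any source reports `Retry`.
fn find_task<T>(local: &Worker<T>, global: &Injector<T>, stealers: &[Stealer<T>]) -> Option<T> {
    local.pop().or_else(|| {
        iter::repeat_with(|| {
            global
                .steal_batch_and_pop(local)
                .or_else(|| stealers.iter().map(|s| s.steal()).collect())
        })
        .find(|s| !s.is_retry())
        .and_then(|s| s.success())
    })
}

fn main() {
    let global = Injector::new();
    global.push(42);
    let local = Worker::new_fifo();
    assert_eq!(find_task(&local, &global, &[]), Some(42));
}
```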
1701 | | |
1702 | | /// Returns `true` if the queue is empty. |
1703 | | /// |
1704 | | /// # Examples |
1705 | | /// |
1706 | | /// ``` |
1707 | | /// use crossbeam_deque::Injector; |
1708 | | /// |
1709 | | /// let q = Injector::new(); |
1710 | | /// |
1711 | | /// assert!(q.is_empty()); |
1712 | | /// q.push(1); |
1713 | | /// assert!(!q.is_empty()); |
1714 | | /// ``` |
1715 | 7 | pub fn is_empty(&self) -> bool { |
1716 | 7 | let head = self.head.index.load(Ordering::SeqCst); |
1717 | 7 | let tail = self.tail.index.load(Ordering::SeqCst); |
1718 | 7 | head >> SHIFT == tail >> SHIFT |
1719 | 7 | } |
1720 | | |
1721 | | /// Returns the number of tasks in the queue. |
1722 | | /// |
1723 | | /// # Examples |
1724 | | /// |
1725 | | /// ``` |
1726 | | /// use crossbeam_deque::Injector; |
1727 | | /// |
1728 | | /// let q = Injector::new(); |
1729 | | /// |
1730 | | /// assert_eq!(q.len(), 0); |
1731 | | /// q.push(1); |
1732 | | /// assert_eq!(q.len(), 1); |
1733 | | /// q.push(1); |
1734 | | /// assert_eq!(q.len(), 2); |
1735 | | /// ``` |
1736 | | pub fn len(&self) -> usize { |
1737 | | loop { |
1738 | | // Load the tail index, then load the head index. |
1739 | | let mut tail = self.tail.index.load(Ordering::SeqCst); |
1740 | | let mut head = self.head.index.load(Ordering::SeqCst); |
1741 | | |
1742 | | // If the tail index didn't change, we've got consistent indices to work with. |
1743 | | if self.tail.index.load(Ordering::SeqCst) == tail { |
1744 | | // Erase the lower bits. |
1745 | | tail &= !((1 << SHIFT) - 1); |
1746 | | head &= !((1 << SHIFT) - 1); |
1747 | | |
1748 | | // Fix up indices if they fall onto block ends. |
1749 | | if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { |
1750 | | tail = tail.wrapping_add(1 << SHIFT); |
1751 | | } |
1752 | | if (head >> SHIFT) & (LAP - 1) == LAP - 1 { |
1753 | | head = head.wrapping_add(1 << SHIFT); |
1754 | | } |
1755 | | |
1756 | | // Rotate indices so that head falls into the first block. |
1757 | | let lap = (head >> SHIFT) / LAP; |
1758 | | tail = tail.wrapping_sub((lap * LAP) << SHIFT); |
1759 | | head = head.wrapping_sub((lap * LAP) << SHIFT); |
1760 | | |
1761 | | // Remove the lower bits. |
1762 | | tail >>= SHIFT; |
1763 | | head >>= SHIFT; |
1764 | | |
1765 | | // Return the difference minus the number of blocks between tail and head. |
1766 | | return tail - head - tail / LAP; |
1767 | | } |
1768 | | } |
1769 | | } |
1770 | | } |
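`len` relies on a double-read snapshot: load the tail, load the head, then re-read the tail and only trust the pair if the tail has not moved in between. A simplified model of just that technique, with the bit masking and block accounting stripped out, might look like this (`snapshot_len` is a hypothetical helper):

```
use std::sync::atomic::{AtomicUsize, Ordering};

// Read `tail`, then `head`, then confirm `tail` is unchanged; if another
// thread advanced it in between, the pair is inconsistent and we retry.
fn snapshot_len(head: &AtomicUsize, tail: &AtomicUsize) -> usize {
    loop {
        let t = tail.load(Ordering::SeqCst);
        let h = head.load(Ordering::SeqCst);
        if tail.load(Ordering::SeqCst) == t {
            return t - h;
        }
    }
}

fn main() {
    let head = AtomicUsize::new(3);
    let tail = AtomicUsize::new(10);
    assert_eq!(snapshot_len(&head, &tail), 7);
}
```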
1771 | | |
1772 | | impl<T> Drop for Injector<T> { |
1773 | 13 | fn drop(&mut self) { |
1774 | 13 | let mut head = self.head.index.load(Ordering::Relaxed); |
1775 | 13 | let mut tail = self.tail.index.load(Ordering::Relaxed); |
1776 | 13 | let mut block = self.head.block.load(Ordering::Relaxed); |
1777 | 13 | |
1778 | 13 | // Erase the lower bits. |
1779 | 13 | head &= !((1 << SHIFT) - 1); |
1780 | 13 | tail &= !((1 << SHIFT) - 1); |
1781 | | |
1782 | | unsafe { |
1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. |
1784 | 41.4k | while head != tail { |
1785 | 41.4k | let offset = (head >> SHIFT) % LAP; |
1786 | 41.4k | |
1787 | 41.4k | if offset < BLOCK_CAP { |
1788 | 40.8k | // Drop the task in the slot. |
1789 | 40.8k | let slot = (*block).slots.get_unchecked(offset); |
1790 | 40.8k | let p = &mut *slot.task.get(); |
1791 | 40.8k | p.as_mut_ptr().drop_in_place(); |
1792 | 40.8k | } else { |
1793 | 647 | // Deallocate the block and move to the next one. |
1794 | 647 | let next = (*block).next.load(Ordering::Relaxed); |
1795 | 647 | drop(Box::from_raw(block)); |
1796 | 647 | block = next; |
1797 | 647 | } |
1798 | | |
1799 | 41.4k | head = head.wrapping_add(1 << SHIFT); |
1800 | | } |
1801 | | |
1802 | | // Deallocate the last remaining block. |
1803 | 13 | drop(Box::from_raw(block)); |
1804 | 13 | } |
1805 | 13 | } <crossbeam_deque::deque::Injector<i32> as core::ops::drop::Drop>::drop Line | Count | Source | 1773 | 2 | fn drop(&mut self) { | 1774 | 2 | let mut head = self.head.index.load(Ordering::Relaxed); | 1775 | 2 | let mut tail = self.tail.index.load(Ordering::Relaxed); | 1776 | 2 | let mut block = self.head.block.load(Ordering::Relaxed); | 1777 | 2 | | 1778 | 2 | // Erase the lower bits. | 1779 | 2 | head &= !((1 << SHIFT) - 1); | 1780 | 2 | tail &= !((1 << SHIFT) - 1); | 1781 | | | 1782 | | unsafe { | 1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. | 1784 | 2 | while head != tail { | 1785 | 0 | let offset = (head >> SHIFT) % LAP; | 1786 | 0 |
| 1787 | 0 | if offset < BLOCK_CAP { | 1788 | 0 | // Drop the task in the slot. | 1789 | 0 | let slot = (*block).slots.get_unchecked(offset); | 1790 | 0 | let p = &mut *slot.task.get(); | 1791 | 0 | p.as_mut_ptr().drop_in_place(); | 1792 | 0 | } else { | 1793 | 0 | // Deallocate the block and move to the next one. | 1794 | 0 | let next = (*block).next.load(Ordering::Relaxed); | 1795 | 0 | drop(Box::from_raw(block)); | 1796 | 0 | block = next; | 1797 | 0 | } | 1798 | | | 1799 | 0 | head = head.wrapping_add(1 << SHIFT); | 1800 | | } | 1801 | | | 1802 | | // Deallocate the last remaining block. | 1803 | 2 | drop(Box::from_raw(block)); | 1804 | 2 | } | 1805 | 2 | } |
<crossbeam_deque::deque::Injector<injector::destructors::Elem> as core::ops::drop::Drop>::drop Line | Count | Source | 1773 | 1 | fn drop(&mut self) { | 1774 | 1 | let mut head = self.head.index.load(Ordering::Relaxed); | 1775 | 1 | let mut tail = self.tail.index.load(Ordering::Relaxed); | 1776 | 1 | let mut block = self.head.block.load(Ordering::Relaxed); | 1777 | 1 | | 1778 | 1 | // Erase the lower bits. | 1779 | 1 | head &= !((1 << SHIFT) - 1); | 1780 | 1 | tail &= !((1 << SHIFT) - 1); | 1781 | | | 1782 | | unsafe { | 1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. | 1784 | 41.4k | while head != tail { | 1785 | 41.4k | let offset = (head >> SHIFT) % LAP; | 1786 | 41.4k | | 1787 | 41.4k | if offset < BLOCK_CAP { | 1788 | 40.8k | // Drop the task in the slot. | 1789 | 40.8k | let slot = (*block).slots.get_unchecked(offset); | 1790 | 40.8k | let p = &mut *slot.task.get(); | 1791 | 40.8k | p.as_mut_ptr().drop_in_place(); | 1792 | 40.8k | } else { | 1793 | 647 | // Deallocate the block and move to the next one. | 1794 | 647 | let next = (*block).next.load(Ordering::Relaxed); | 1795 | 647 | drop(Box::from_raw(block)); | 1796 | 647 | block = next; | 1797 | 647 | } | 1798 | | | 1799 | 41.4k | head = head.wrapping_add(1 << SHIFT); | 1800 | | } | 1801 | | | 1802 | | // Deallocate the last remaining block. | 1803 | 1 | drop(Box::from_raw(block)); | 1804 | 1 | } | 1805 | 1 | } |
<crossbeam_deque::deque::Injector<alloc::boxed::Box<usize>> as core::ops::drop::Drop>::drop Line | Count | Source | 1773 | 1 | fn drop(&mut self) { | 1774 | 1 | let mut head = self.head.index.load(Ordering::Relaxed); | 1775 | 1 | let mut tail = self.tail.index.load(Ordering::Relaxed); | 1776 | 1 | let mut block = self.head.block.load(Ordering::Relaxed); | 1777 | 1 | | 1778 | 1 | // Erase the lower bits. | 1779 | 1 | head &= !((1 << SHIFT) - 1); | 1780 | 1 | tail &= !((1 << SHIFT) - 1); | 1781 | | | 1782 | | unsafe { | 1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. | 1784 | 1 | while head != tail { | 1785 | 0 | let offset = (head >> SHIFT) % LAP; | 1786 | 0 |
| 1787 | 0 | if offset < BLOCK_CAP { | 1788 | 0 | // Drop the task in the slot. | 1789 | 0 | let slot = (*block).slots.get_unchecked(offset); | 1790 | 0 | let p = &mut *slot.task.get(); | 1791 | 0 | p.as_mut_ptr().drop_in_place(); | 1792 | 0 | } else { | 1793 | 0 | // Deallocate the block and move to the next one. | 1794 | 0 | let next = (*block).next.load(Ordering::Relaxed); | 1795 | 0 | drop(Box::from_raw(block)); | 1796 | 0 | block = next; | 1797 | 0 | } | 1798 | | | 1799 | 0 | head = head.wrapping_add(1 << SHIFT); | 1800 | | } | 1801 | | | 1802 | | // Deallocate the last remaining block. | 1803 | 1 | drop(Box::from_raw(block)); | 1804 | 1 | } | 1805 | 1 | } |
<crossbeam_deque::deque::Injector<usize> as core::ops::drop::Drop>::drop Line | Count | Source | 1773 | 4 | fn drop(&mut self) { | 1774 | 4 | let mut head = self.head.index.load(Ordering::Relaxed); | 1775 | 4 | let mut tail = self.tail.index.load(Ordering::Relaxed); | 1776 | 4 | let mut block = self.head.block.load(Ordering::Relaxed); | 1777 | 4 | | 1778 | 4 | // Erase the lower bits. | 1779 | 4 | head &= !((1 << SHIFT) - 1); | 1780 | 4 | tail &= !((1 << SHIFT) - 1); | 1781 | | | 1782 | | unsafe { | 1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. | 1784 | 4 | while head != tail { | 1785 | 0 | let offset = (head >> SHIFT) % LAP; | 1786 | 0 |
| 1787 | 0 | if offset < BLOCK_CAP { | 1788 | 0 | // Drop the task in the slot. | 1789 | 0 | let slot = (*block).slots.get_unchecked(offset); | 1790 | 0 | let p = &mut *slot.task.get(); | 1791 | 0 | p.as_mut_ptr().drop_in_place(); | 1792 | 0 | } else { | 1793 | 0 | // Deallocate the block and move to the next one. | 1794 | 0 | let next = (*block).next.load(Ordering::Relaxed); | 1795 | 0 | drop(Box::from_raw(block)); | 1796 | 0 | block = next; | 1797 | 0 | } | 1798 | | | 1799 | 0 | head = head.wrapping_add(1 << SHIFT); | 1800 | | } | 1801 | | | 1802 | | // Deallocate the last remaining block. | 1803 | 4 | drop(Box::from_raw(block)); | 1804 | 4 | } | 1805 | 4 | } |
<crossbeam_deque::deque::Injector<i32> as core::ops::drop::Drop>::drop Line | Count | Source | 1773 | 5 | fn drop(&mut self) { | 1774 | 5 | let mut head = self.head.index.load(Ordering::Relaxed); | 1775 | 5 | let mut tail = self.tail.index.load(Ordering::Relaxed); | 1776 | 5 | let mut block = self.head.block.load(Ordering::Relaxed); | 1777 | 5 | | 1778 | 5 | // Erase the lower bits. | 1779 | 5 | head &= !((1 << SHIFT) - 1); | 1780 | 5 | tail &= !((1 << SHIFT) - 1); | 1781 | | | 1782 | | unsafe { | 1783 | | // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. | 1784 | 15 | while head != tail { | 1785 | 10 | let offset = (head >> SHIFT) % LAP; | 1786 | 10 | | 1787 | 10 | if offset < BLOCK_CAP { | 1788 | 10 | // Drop the task in the slot. | 1789 | 10 | let slot = (*block).slots.get_unchecked(offset); | 1790 | 10 | let p = &mut *slot.task.get(); | 1791 | 10 | p.as_mut_ptr().drop_in_place(); | 1792 | 10 | } else { | 1793 | 0 | // Deallocate the block and move to the next one. | 1794 | 0 | let next = (*block).next.load(Ordering::Relaxed); | 1795 | 0 | drop(Box::from_raw(block)); | 1796 | 0 | block = next; | 1797 | 0 | } | 1798 | | | 1799 | 10 | head = head.wrapping_add(1 << SHIFT); | 1800 | | } | 1801 | | | 1802 | | // Deallocate the last remaining block. | 1803 | 5 | drop(Box::from_raw(block)); | 1804 | 5 | } | 1805 | 5 | } |
|
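The `Drop` impl above walks every slot between `head` and `tail` and runs each task's destructor before freeing the blocks. A small sketch in the spirit of the `injector::destructors` test named in the instantiations, using a hypothetical `Tracked` type to count drops:

```
use crossbeam_deque::Injector;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

// A task that counts its own destruction.
struct Tracked(Arc<AtomicUsize>);

impl Drop for Tracked {
    fn drop(&mut self) {
        self.0.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let drops = Arc::new(AtomicUsize::new(0));
    let q = Injector::new();
    for _ in 0..100 {
        q.push(Tracked(drops.clone()));
    }

    // Dropping the injector must drop all 100 still-queued tasks.
    drop(q);
    assert_eq!(drops.load(Ordering::SeqCst), 100);
}
```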
1806 | | } |
1807 | | |
1808 | | impl<T> fmt::Debug for Injector<T> { |
1809 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1810 | | f.pad("Injector { .. }")
1811 | | } |
1812 | | } |
1813 | | |
1814 | | /// Possible outcomes of a steal operation. |
1815 | | /// |
1816 | | /// # Examples |
1817 | | /// |
1818 | | /// There are lots of ways to chain results of steal operations together: |
1819 | | /// |
1820 | | /// ``` |
1821 | | /// use crossbeam_deque::Steal::{self, Empty, Retry, Success}; |
1822 | | /// |
1823 | | /// let collect = |v: Vec<Steal<i32>>| v.into_iter().collect::<Steal<i32>>(); |
1824 | | /// |
1825 | | /// assert_eq!(collect(vec![Empty, Empty, Empty]), Empty); |
1826 | | /// assert_eq!(collect(vec![Empty, Retry, Empty]), Retry); |
1827 | | /// assert_eq!(collect(vec![Retry, Success(1), Empty]), Success(1)); |
1828 | | /// |
1829 | | /// assert_eq!(collect(vec![Empty, Empty]).or_else(|| Retry), Retry); |
1830 | | /// assert_eq!(collect(vec![Retry, Empty]).or_else(|| Success(1)), Success(1)); |
1831 | | /// ``` |
1832 | | #[must_use] |
1833 | 48 | #[derive(PartialEq, Eq, Copy, Clone)] <crossbeam_deque::deque::Steal<usize> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 1 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<i32> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 6 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<usize> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 1 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<i32> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 9 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<()> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 6 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<i32> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 15 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<usize> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 1 | #[derive(PartialEq, Eq, Copy, Clone)] |
<crossbeam_deque::deque::Steal<i32> as core::cmp::PartialEq>::eq Line | Count | Source | 1833 | 9 | #[derive(PartialEq, Eq, Copy, Clone)] |
|
1834 | | pub enum Steal<T> { |
1835 | | /// The queue was empty at the time of stealing. |
1836 | | Empty, |
1837 | | |
1838 | | /// At least one task was successfully stolen. |
1839 | | Success(T), |
1840 | | |
1841 | | /// The steal operation needs to be retried. |
1842 | | Retry, |
1843 | | } |
1844 | | |
1845 | | impl<T> Steal<T> { |
1846 | | /// Returns `true` if the queue was empty at the time of stealing. |
1847 | | /// |
1848 | | /// # Examples |
1849 | | /// |
1850 | | /// ``` |
1851 | | /// use crossbeam_deque::Steal::{Empty, Retry, Success}; |
1852 | | /// |
1853 | | /// assert!(!Success(7).is_empty()); |
1854 | | /// assert!(!Retry::<i32>.is_empty()); |
1855 | | /// |
1856 | | /// assert!(Empty::<i32>.is_empty()); |
1857 | | /// ``` |
1858 | | pub fn is_empty(&self) -> bool { |
1859 | | match self { |
1860 | | Steal::Empty => true, |
1861 | | _ => false, |
1862 | | } |
1863 | | } |
1864 | | |
1865 | | /// Returns `true` if at least one task was stolen. |
1866 | | /// |
1867 | | /// # Examples |
1868 | | /// |
1869 | | /// ``` |
1870 | | /// use crossbeam_deque::Steal::{Empty, Retry, Success}; |
1871 | | /// |
1872 | | /// assert!(!Empty::<i32>.is_success()); |
1873 | | /// assert!(!Retry::<i32>.is_success()); |
1874 | | /// |
1875 | | /// assert!(Success(7).is_success()); |
1876 | | /// ``` |
1877 | | pub fn is_success(&self) -> bool { |
1878 | | match self { |
1879 | | Steal::Success(_) => true, |
1880 | | _ => false, |
1881 | | } |
1882 | | } |
1883 | | |
1884 | | /// Returns `true` if the steal operation needs to be retried. |
1885 | | /// |
1886 | | /// # Examples |
1887 | | /// |
1888 | | /// ``` |
1889 | | /// use crossbeam_deque::Steal::{Empty, Retry, Success}; |
1890 | | /// |
1891 | | /// assert!(!Empty::<i32>.is_retry()); |
1892 | | /// assert!(!Success(7).is_retry()); |
1893 | | /// |
1894 | | /// assert!(Retry::<i32>.is_retry()); |
1895 | | /// ``` |
1896 | | pub fn is_retry(&self) -> bool { |
1897 | | match self { |
1898 | | Steal::Retry => true, |
1899 | | _ => false, |
1900 | | } |
1901 | | } |
1902 | | |
1903 | | /// Returns the result of the operation, if successful. |
1904 | | /// |
1905 | | /// # Examples |
1906 | | /// |
1907 | | /// ``` |
1908 | | /// use crossbeam_deque::Steal::{Empty, Retry, Success}; |
1909 | | /// |
1910 | | /// assert_eq!(Empty::<i32>.success(), None); |
1911 | | /// assert_eq!(Retry::<i32>.success(), None); |
1912 | | /// |
1913 | | /// assert_eq!(Success(7).success(), Some(7)); |
1914 | | /// ``` |
1915 | | pub fn success(self) -> Option<T> { |
1916 | | match self { |
1917 | | Steal::Success(res) => Some(res), |
1918 | | _ => None, |
1919 | | } |
1920 | | } |
1921 | | |
1922 | | /// If no task was stolen, attempts another steal operation. |
1923 | | /// |
1924 | | /// Returns this steal result if it is `Success`. Otherwise, closure `f` is invoked and then: |
1925 | | /// |
1926 | | /// * If the second steal resulted in `Success`, it is returned. |
1927 | | /// * If both steals were unsuccessful but any resulted in `Retry`, then `Retry` is returned. |
1928 | | /// * If both resulted in `Empty`, then `Empty` is returned.
1929 | | /// |
1930 | | /// # Examples |
1931 | | /// |
1932 | | /// ``` |
1933 | | /// use crossbeam_deque::Steal::{Empty, Retry, Success}; |
1934 | | /// |
1935 | | /// assert_eq!(Success(1).or_else(|| Success(2)), Success(1)); |
1936 | | /// assert_eq!(Retry.or_else(|| Success(2)), Success(2)); |
1937 | | /// |
1938 | | /// assert_eq!(Retry.or_else(|| Empty), Retry::<i32>); |
1939 | | /// assert_eq!(Empty.or_else(|| Retry), Retry::<i32>); |
1940 | | /// |
1941 | | /// assert_eq!(Empty.or_else(|| Empty), Empty::<i32>); |
1942 | | /// ``` |
1943 | | pub fn or_else<F>(self, f: F) -> Steal<T> |
1944 | | where |
1945 | | F: FnOnce() -> Steal<T>, |
1946 | | { |
1947 | | match self { |
1948 | | Steal::Empty => f(), |
1949 | | Steal::Success(_) => self, |
1950 | | Steal::Retry => { |
1951 | | if let Steal::Success(res) = f() { |
1952 | | Steal::Success(res) |
1953 | | } else { |
1954 | | Steal::Retry |
1955 | | } |
1956 | | } |
1957 | | } |
1958 | | } |
1959 | | } |
1960 | | |
1961 | | impl<T> fmt::Debug for Steal<T> { |
1962 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1963 | 0 | match self { |
1964 | 0 | Steal::Empty => f.pad("Empty"), |
1965 | 0 | Steal::Success(_) => f.pad("Success(..)"), |
1966 | 0 | Steal::Retry => f.pad("Retry"), |
1967 | | } |
1968 | 0 | } Unexecuted instantiation: <crossbeam_deque::deque::Steal<i32> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<usize> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<i32> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<usize> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<()> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<i32> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<i32> as core::fmt::Debug>::fmt Unexecuted instantiation: <crossbeam_deque::deque::Steal<usize> as core::fmt::Debug>::fmt |
1969 | | } |
1970 | | |
1971 | | impl<T> FromIterator<Steal<T>> for Steal<T> { |
1972 | | /// Consumes items until a `Success` is found and returns it. |
1973 | | /// |
1974 | | /// If no `Success` was found, but there was at least one `Retry`, then returns `Retry`. |
1975 | | /// Otherwise, `Empty` is returned. |
1976 | | fn from_iter<I>(iter: I) -> Steal<T> |
1977 | | where |
1978 | | I: IntoIterator<Item = Steal<T>>, |
1979 | | { |
1980 | | let mut retry = false; |
1981 | | for s in iter { |
1982 | | match &s { |
1983 | | Steal::Empty => {} |
1984 | | Steal::Success(_) => return s, |
1985 | | Steal::Retry => retry = true, |
1986 | | } |
1987 | | } |
1988 | | |
1989 | | if retry { |
1990 | | Steal::Retry |
1991 | | } else { |
1992 | | Steal::Empty |
1993 | | } |
1994 | | } |
1995 | | } |
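In practice this `FromIterator` impl is what lets a scheduler poll several stealers in one expression and fold the outcomes into a single `Steal`. A minimal sketch; the two workers and the value `7` are illustrative:

```
use crossbeam_deque::{Steal, Worker};

fn main() {
    let a = Worker::new_fifo();
    let b = Worker::new_fifo();
    b.push(7);

    let stealers = vec![a.stealer(), b.stealer()];

    // `collect` stops at the first `Success`; otherwise it reports `Retry`
    // if any steal asked for a retry, and `Empty` only if all were empty.
    let result: Steal<i32> = stealers.iter().map(|s| s.steal()).collect();
    assert_eq!(result, Steal::Success(7));
}
```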