crossbeam-skiplist/src/base.rs
Line | Count | Source (jump to first uncovered line) |
1 | | //! TODO: docs |
2 | | |
3 | | use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout}; |
4 | | use core::borrow::Borrow; |
5 | | use core::cmp; |
6 | | use core::fmt; |
7 | | use core::marker::PhantomData; |
8 | | use core::mem; |
9 | | use core::ops::{Bound, Deref, Index, RangeBounds}; |
10 | | use core::ptr; |
11 | | use core::sync::atomic::{fence, AtomicUsize, Ordering}; |
12 | | |
13 | | use crate::epoch::{self, Atomic, Collector, Guard, Shared}; |
14 | | use crate::utils::CachePadded; |
15 | | |
16 | | /// Number of bits needed to store height. |
17 | | const HEIGHT_BITS: usize = 5; |
18 | | |
19 | | /// Maximum height of a skip list tower. |
20 | | const MAX_HEIGHT: usize = 1 << HEIGHT_BITS; |
21 | | |
22 | | /// The bits of `refs_and_height` that keep the height. |
23 | | const HEIGHT_MASK: usize = (1 << HEIGHT_BITS) - 1; |
24 | | |
25 | | /// The tower of atomic pointers. |
26 | | /// |
27 | | /// The actual size of the tower will vary depending on the height that a node |
28 | | /// was allocated with. |
29 | | #[repr(C)] |
30 | | struct Tower<K, V> { |
31 | | pointers: [Atomic<Node<K, V>>; 0], |
32 | | } |
33 | | |
34 | | impl<K, V> Index<usize> for Tower<K, V> { |
35 | | type Output = Atomic<Node<K, V>>; |
36 | 3.37k | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { |
37 | 3.37k | // This implementation is actually unsafe since we don't check if the |
38 | 3.37k | // index is in-bounds. But this is fine since this is only used internally. |
39 | 3.37k | unsafe { self.pointers.get_unchecked(index) } |
40 | 3.37k | } <crossbeam_skiplist::base::Tower<i32, i32> as core::ops::index::Index<usize>>::index Line | Count | Source | 36 | 850 | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { | 37 | 850 | // This implementation is actually unsafe since we don't check if the | 38 | 850 | // index is in-bounds. But this is fine since this is only used internally. | 39 | 850 | unsafe { self.pointers.get_unchecked(index) } | 40 | 850 | } |
<crossbeam_skiplist::base::Tower<i32, ()> as core::ops::index::Index<usize>>::index Line | Count | Source | 36 | 21 | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { | 37 | 21 | // This implementation is actually unsafe since we don't check if the | 38 | 21 | // index is in-bounds. But this is fine since this is only used internally. | 39 | 21 | unsafe { self.pointers.get_unchecked(index) } | 40 | 21 | } |
<crossbeam_skiplist::base::Tower<base::drops::Key, base::drops::Value> as core::ops::index::Index<usize>>::index Line | Count | Source | 36 | 69 | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { | 37 | 69 | // This implementation is actually unsafe since we don't check if the | 38 | 69 | // index is in-bounds. But this is fine since this is only used internally. | 39 | 69 | unsafe { self.pointers.get_unchecked(index) } | 40 | 69 | } |
<crossbeam_skiplist::base::Tower<i32, i32> as core::ops::index::Index<usize>>::index Line | Count | Source | 36 | 2.43k | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { | 37 | 2.43k | // This implementation is actually unsafe since we don't check if the | 38 | 2.43k | // index is in-bounds. But this is fine since this is only used internally. | 39 | 2.43k | unsafe { self.pointers.get_unchecked(index) } | 40 | 2.43k | } |
<crossbeam_skiplist::base::Tower<alloc::string::String, alloc::boxed::Box<i32>> as core::ops::index::Index<usize>>::index Line | Count | Source | 36 | 1 | fn index(&self, index: usize) -> &Atomic<Node<K, V>> { | 37 | 1 | // This implementation is actually unsafe since we don't check if the | 38 | 1 | // index is in-bounds. But this is fine since this is only used internally. | 39 | 1 | unsafe { self.pointers.get_unchecked(index) } | 40 | 1 | } |
|
41 | | } |
42 | | |
43 | | /// Tower at the head of a skip list. |
44 | | /// |
45 | | /// This is located in the `SkipList` struct itself and holds a full height |
46 | | /// tower. |
47 | | #[repr(C)] |
48 | | struct Head<K, V> { |
49 | | pointers: [Atomic<Node<K, V>>; MAX_HEIGHT], |
50 | | } |
51 | | |
52 | | impl<K, V> Head<K, V> { |
53 | | /// Initializes a `Head`. |
54 | | #[inline] |
55 | 24 | fn new() -> Head<K, V> { |
56 | 24 | // Initializing arrays in rust is a pain... |
57 | 24 | Head { |
58 | 24 | pointers: Default::default(), |
59 | 24 | } |
60 | 24 | } <crossbeam_skiplist::base::Head<i32, i32>>::new Line | Count | Source | 55 | 3 | fn new() -> Head<K, V> { | 56 | 3 | // Initializing arrays in rust is a pain... | 57 | 3 | Head { | 58 | 3 | pointers: Default::default(), | 59 | 3 | } | 60 | 3 | } |
<crossbeam_skiplist::base::Head<i32, ()>>::new Line | Count | Source | 55 | 1 | fn new() -> Head<K, V> { | 56 | 1 | // Initializing arrays in rust is a pain... | 57 | 1 | Head { | 58 | 1 | pointers: Default::default(), | 59 | 1 | } | 60 | 1 | } |
<crossbeam_skiplist::base::Head<base::drops::Key, base::drops::Value>>::new Line | Count | Source | 55 | 1 | fn new() -> Head<K, V> { | 56 | 1 | // Initializing arrays in rust is a pain... | 57 | 1 | Head { | 58 | 1 | pointers: Default::default(), | 59 | 1 | } | 60 | 1 | } |
<crossbeam_skiplist::base::Head<i32, i32>>::new Line | Count | Source | 55 | 18 | fn new() -> Head<K, V> { | 56 | 18 | // Initializing arrays in rust is a pain... | 57 | 18 | Head { | 58 | 18 | pointers: Default::default(), | 59 | 18 | } | 60 | 18 | } |
<crossbeam_skiplist::base::Head<alloc::string::String, alloc::boxed::Box<i32>>>::new Line | Count | Source | 55 | 1 | fn new() -> Head<K, V> { | 56 | 1 | // Initializing arrays in rust is a pain... | 57 | 1 | Head { | 58 | 1 | pointers: Default::default(), | 59 | 1 | } | 60 | 1 | } |
|
61 | | } |
62 | | |
63 | | impl<K, V> Deref for Head<K, V> { |
64 | | type Target = Tower<K, V>; |
65 | 1.01k | fn deref(&self) -> &Tower<K, V> { |
66 | 1.01k | unsafe { &*(self as *const _ as *const Tower<K, V>) } |
67 | 1.01k | } <crossbeam_skiplist::base::Head<i32, i32> as core::ops::deref::Deref>::deref Line | Count | Source | 65 | 170 | fn deref(&self) -> &Tower<K, V> { | 66 | 170 | unsafe { &*(self as *const _ as *const Tower<K, V>) } | 67 | 170 | } |
<crossbeam_skiplist::base::Head<i32, ()> as core::ops::deref::Deref>::deref Line | Count | Source | 65 | 10 | fn deref(&self) -> &Tower<K, V> { | 66 | 10 | unsafe { &*(self as *const _ as *const Tower<K, V>) } | 67 | 10 | } |
<crossbeam_skiplist::base::Head<alloc::string::String, alloc::boxed::Box<i32>> as core::ops::deref::Deref>::deref Line | Count | Source | 65 | 1 | fn deref(&self) -> &Tower<K, V> { | 66 | 1 | unsafe { &*(self as *const _ as *const Tower<K, V>) } | 67 | 1 | } |
<crossbeam_skiplist::base::Head<base::drops::Key, base::drops::Value> as core::ops::deref::Deref>::deref Line | Count | Source | 65 | 25 | fn deref(&self) -> &Tower<K, V> { | 66 | 25 | unsafe { &*(self as *const _ as *const Tower<K, V>) } | 67 | 25 | } |
<crossbeam_skiplist::base::Head<i32, i32> as core::ops::deref::Deref>::deref Line | Count | Source | 65 | 812 | fn deref(&self) -> &Tower<K, V> { | 66 | 812 | unsafe { &*(self as *const _ as *const Tower<K, V>) } | 67 | 812 | } |
|
68 | | } |
69 | | |
70 | | /// A skip list node. |
71 | | /// |
72 | | /// This struct is marked with `repr(C)` so that the specific order of fields is enforced. |
73 | | /// It is important that the tower is the last field since it is dynamically sized. The key, |
74 | | /// reference count, and height are kept close to the tower to improve cache locality during |
75 | | /// skip list traversal. |
76 | | #[repr(C)] |
77 | | struct Node<K, V> { |
78 | | /// The value. |
79 | | value: V, |
80 | | |
81 | | /// The key. |
82 | | key: K, |
83 | | |
84 | | /// Keeps the reference count and the height of its tower. |
85 | | /// |
86 | | /// The reference count is equal to the number of `Entry`s pointing to this node, plus the |
87 | | /// number of levels in which this node is installed. |
88 | | refs_and_height: AtomicUsize, |
89 | | |
90 | | /// The tower of atomic pointers. |
91 | | tower: Tower<K, V>, |
92 | | } |
93 | | |
94 | | impl<K, V> Node<K, V> { |
95 | | /// Allocates a node. |
96 | | /// |
97 | | /// The returned node will start with reference count of `ref_count` and the tower will be initialized |
98 | | /// with null pointers. However, the key and the value will be left uninitialized, and that is |
99 | | /// why this function is unsafe. |
100 | 152 | unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { |
101 | 152 | let layout = Self::get_layout(height); |
102 | 152 | let ptr = alloc(layout) as *mut Self; |
103 | 152 | if ptr.is_null() { |
104 | 0 | handle_alloc_error(layout); |
105 | 152 | } |
106 | 152 | |
107 | 152 | ptr::write( |
108 | 152 | &mut (*ptr).refs_and_height, |
109 | 152 | AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), |
110 | 152 | ); |
111 | 152 | ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); |
112 | 152 | ptr |
113 | 152 | } <crossbeam_skiplist::base::Node<i32, i32>>::alloc Line | Count | Source | 100 | 20 | unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { | 101 | 20 | let layout = Self::get_layout(height); | 102 | 20 | let ptr = alloc(layout) as *mut Self; | 103 | 20 | if ptr.is_null() { | 104 | 0 | handle_alloc_error(layout); | 105 | 20 | } | 106 | 20 | | 107 | 20 | ptr::write( | 108 | 20 | &mut (*ptr).refs_and_height, | 109 | 20 | AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), | 110 | 20 | ); | 111 | 20 | ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); | 112 | 20 | ptr | 113 | 20 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::alloc Line | Count | Source | 100 | 3 | unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { | 101 | 3 | let layout = Self::get_layout(height); | 102 | 3 | let ptr = alloc(layout) as *mut Self; | 103 | 3 | if ptr.is_null() { | 104 | 0 | handle_alloc_error(layout); | 105 | 3 | } | 106 | 3 | | 107 | 3 | ptr::write( | 108 | 3 | &mut (*ptr).refs_and_height, | 109 | 3 | AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), | 110 | 3 | ); | 111 | 3 | ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); | 112 | 3 | ptr | 113 | 3 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::alloc Line | Count | Source | 100 | 122 | unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { | 101 | 122 | let layout = Self::get_layout(height); | 102 | 122 | let ptr = alloc(layout) as *mut Self; | 103 | 122 | if ptr.is_null() { | 104 | 0 | handle_alloc_error(layout); | 105 | 122 | } | 106 | 122 | | 107 | 122 | ptr::write( | 108 | 122 | &mut (*ptr).refs_and_height, | 109 | 122 | AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), | 110 | 122 | ); | 111 | 122 | ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); | 112 | 122 | ptr | 113 | 122 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::alloc Line | Count | Source | 100 | 7 | unsafe fn alloc(height: usize, ref_count: usize) -> *mut Self { | 101 | 7 | let layout = Self::get_layout(height); | 102 | 7 | let ptr = alloc(layout) as *mut Self; | 103 | 7 | if ptr.is_null() { | 104 | 0 | handle_alloc_error(layout); | 105 | 7 | } | 106 | 7 | | 107 | 7 | ptr::write( | 108 | 7 | &mut (*ptr).refs_and_height, | 109 | 7 | AtomicUsize::new((height - 1) | ref_count << HEIGHT_BITS), | 110 | 7 | ); | 111 | 7 | ptr::write_bytes((*ptr).tower.pointers.as_mut_ptr(), 0, height); | 112 | 7 | ptr | 113 | 7 | } |
|
114 | | |
115 | | /// Deallocates a node. |
116 | | /// |
117 | | /// This function will not run any destructors. |
118 | 112 | unsafe fn dealloc(ptr: *mut Self) { |
119 | 112 | let height = (*ptr).height(); |
120 | 112 | let layout = Self::get_layout(height); |
121 | 112 | dealloc(ptr as *mut u8, layout); |
122 | 112 | } <crossbeam_skiplist::base::Node<i32, i32>>::dealloc Line | Count | Source | 118 | 16 | unsafe fn dealloc(ptr: *mut Self) { | 119 | 16 | let height = (*ptr).height(); | 120 | 16 | let layout = Self::get_layout(height); | 121 | 16 | dealloc(ptr as *mut u8, layout); | 122 | 16 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::dealloc Line | Count | Source | 118 | 3 | unsafe fn dealloc(ptr: *mut Self) { | 119 | 3 | let height = (*ptr).height(); | 120 | 3 | let layout = Self::get_layout(height); | 121 | 3 | dealloc(ptr as *mut u8, layout); | 122 | 3 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::dealloc Line | Count | Source | 118 | 86 | unsafe fn dealloc(ptr: *mut Self) { | 119 | 86 | let height = (*ptr).height(); | 120 | 86 | let layout = Self::get_layout(height); | 121 | 86 | dealloc(ptr as *mut u8, layout); | 122 | 86 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::dealloc Line | Count | Source | 118 | 7 | unsafe fn dealloc(ptr: *mut Self) { | 119 | 7 | let height = (*ptr).height(); | 120 | 7 | let layout = Self::get_layout(height); | 121 | 7 | dealloc(ptr as *mut u8, layout); | 122 | 7 | } |
|
123 | | |
124 | | /// Returns the layout of a node with the given `height`. |
125 | 264 | unsafe fn get_layout(height: usize) -> Layout { |
126 | 264 | assert!((1..=MAX_HEIGHT).contains(&height)); |
127 | | |
128 | 264 | let size_self = mem::size_of::<Self>(); |
129 | 264 | let align_self = mem::align_of::<Self>(); |
130 | 264 | let size_pointer = mem::size_of::<Atomic<Self>>(); |
131 | 264 | |
132 | 264 | Layout::from_size_align_unchecked(size_self + size_pointer * height, align_self) |
133 | 264 | } <crossbeam_skiplist::base::Node<i32, i32>>::get_layout Line | Count | Source | 125 | 36 | unsafe fn get_layout(height: usize) -> Layout { | 126 | 36 | assert!((1..=MAX_HEIGHT).contains(&height)); | 127 | | | 128 | 36 | let size_self = mem::size_of::<Self>(); | 129 | 36 | let align_self = mem::align_of::<Self>(); | 130 | 36 | let size_pointer = mem::size_of::<Atomic<Self>>(); | 131 | 36 | | 132 | 36 | Layout::from_size_align_unchecked(size_self + size_pointer * height, align_self) | 133 | 36 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::get_layout Line | Count | Source | 125 | 6 | unsafe fn get_layout(height: usize) -> Layout { | 126 | 6 | assert!((1..=MAX_HEIGHT).contains(&height)); | 127 | | | 128 | 6 | let size_self = mem::size_of::<Self>(); | 129 | 6 | let align_self = mem::align_of::<Self>(); | 130 | 6 | let size_pointer = mem::size_of::<Atomic<Self>>(); | 131 | 6 | | 132 | 6 | Layout::from_size_align_unchecked(size_self + size_pointer * height, align_self) | 133 | 6 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::Node<alloc::string::String, alloc::boxed::Box<i32>>>::get_layout <crossbeam_skiplist::base::Node<i32, i32>>::get_layout Line | Count | Source | 125 | 208 | unsafe fn get_layout(height: usize) -> Layout { | 126 | 208 | assert!((1..=MAX_HEIGHT).contains(&height)); | 127 | | | 128 | 208 | let size_self = mem::size_of::<Self>(); | 129 | 208 | let align_self = mem::align_of::<Self>(); | 130 | 208 | let size_pointer = mem::size_of::<Atomic<Self>>(); | 131 | 208 | | 132 | 208 | Layout::from_size_align_unchecked(size_self + size_pointer * height, align_self) | 133 | 208 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::get_layout Line | Count | Source | 125 | 14 | unsafe fn get_layout(height: usize) -> Layout { | 126 | 14 | assert!((1..=MAX_HEIGHT).contains(&height)); | 127 | | | 128 | 14 | let size_self = mem::size_of::<Self>(); | 129 | 14 | let align_self = mem::align_of::<Self>(); | 130 | 14 | let size_pointer = mem::size_of::<Atomic<Self>>(); | 131 | 14 | | 132 | 14 | Layout::from_size_align_unchecked(size_self + size_pointer * height, align_self) | 133 | 14 | } |
|
134 | | |
135 | | /// Returns the height of this node's tower. |
136 | | #[inline] |
137 | 190 | fn height(&self) -> usize { |
138 | 190 | (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 |
139 | 190 | } <crossbeam_skiplist::base::Node<i32, i32>>::height Line | Count | Source | 137 | 24 | fn height(&self) -> usize { | 138 | 24 | (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 | 139 | 24 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::height Line | Count | Source | 137 | 3 | fn height(&self) -> usize { | 138 | 3 | (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 | 139 | 3 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::height Line | Count | Source | 137 | 154 | fn height(&self) -> usize { | 138 | 154 | (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 | 139 | 154 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::height Line | Count | Source | 137 | 9 | fn height(&self) -> usize { | 138 | 9 | (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1 | 139 | 9 | } |
|
140 | | |
141 | | /// Marks all pointers in the tower and returns `true` if the level 0 was not marked. |
142 | 49 | fn mark_tower(&self) -> bool { |
143 | 49 | let height = self.height(); |
144 | | |
145 | 62 | for level in (0..height)49 .rev() { |
146 | 62 | let tag = unsafe { |
147 | 62 | // We're loading the pointer only for the tag, so it's okay to use |
148 | 62 | // `epoch::unprotected()` in this situation. |
149 | 62 | // TODO(Amanieu): can we use release ordering here? |
150 | 62 | self.tower[level] |
151 | 62 | .fetch_or(1, Ordering::SeqCst, epoch::unprotected()) |
152 | 62 | .tag() |
153 | | }; |
154 | | |
155 | | // If the level 0 pointer was already marked, somebody else removed the node. |
156 | 62 | if level == 0 && tag == 148 { |
157 | 0 | return false; |
158 | 61 | } |
159 | | } |
160 | | |
161 | | // We marked the level 0 pointer, therefore we removed the node. |
162 | 48 | true |
163 | 48 | } <crossbeam_skiplist::base::Node<i32, i32>>::mark_tower Line | Count | Source | 142 | 4 | fn mark_tower(&self) -> bool { | 143 | 4 | let height = self.height(); | 144 | | | 145 | 5 | for level in (0..height)4 .rev() { | 146 | 5 | let tag = unsafe { | 147 | 5 | // We're loading the pointer only for the tag, so it's okay to use | 148 | 5 | // `epoch::unprotected()` in this situation. | 149 | 5 | // TODO(Amanieu): can we use release ordering here? | 150 | 5 | self.tower[level] | 151 | 5 | .fetch_or(1, Ordering::SeqCst, epoch::unprotected()) | 152 | 5 | .tag() | 153 | | }; | 154 | | | 155 | | // If the level 0 pointer was already marked, somebody else removed the node. | 156 | 5 | if level == 0 && tag == 14 { | 157 | 0 | return false; | 158 | 5 | } | 159 | | } | 160 | | | 161 | | // We marked the level 0 pointer, therefore we removed the node. | 162 | 4 | true | 163 | 4 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::Node<i32, ()>>::mark_tower <crossbeam_skiplist::base::Node<i32, i32>>::mark_tower Line | Count | Source | 142 | 44 | fn mark_tower(&self) -> bool { | 143 | 44 | let height = self.height(); | 144 | | | 145 | 56 | for level in (0..height)44 .rev() { | 146 | 56 | let tag = unsafe { | 147 | 56 | // We're loading the pointer only for the tag, so it's okay to use | 148 | 56 | // `epoch::unprotected()` in this situation. | 149 | 56 | // TODO(Amanieu): can we use release ordering here? | 150 | 56 | self.tower[level] | 151 | 56 | .fetch_or(1, Ordering::SeqCst, epoch::unprotected()) | 152 | 56 | .tag() | 153 | | }; | 154 | | | 155 | | // If the level 0 pointer was already marked, somebody else removed the node. | 156 | 56 | if level == 0 && tag == 143 { | 157 | 0 | return false; | 158 | 55 | } | 159 | | } | 160 | | | 161 | | // We marked the level 0 pointer, therefore we removed the node. | 162 | 43 | true | 163 | 43 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::mark_tower Line | Count | Source | 142 | 1 | fn mark_tower(&self) -> bool { | 143 | 1 | let height = self.height(); | 144 | | | 145 | 1 | for level in (0..height).rev() { | 146 | 1 | let tag = unsafe { | 147 | 1 | // We're loading the pointer only for the tag, so it's okay to use | 148 | 1 | // `epoch::unprotected()` in this situation. | 149 | 1 | // TODO(Amanieu): can we use release ordering here? | 150 | 1 | self.tower[level] | 151 | 1 | .fetch_or(1, Ordering::SeqCst, epoch::unprotected()) | 152 | 1 | .tag() | 153 | | }; | 154 | | | 155 | | // If the level 0 pointer was already marked, somebody else removed the node. | 156 | 1 | if level == 0 && tag == 1 { | 157 | 0 | return false; | 158 | 1 | } | 159 | | } | 160 | | | 161 | | // We marked the level 0 pointer, therefore we removed the node. | 162 | 1 | true | 163 | 1 | } |
|
164 | | |
165 | | /// Returns `true` if the node is removed. |
166 | | #[inline] |
167 | 4 | fn is_removed(&self) -> bool { |
168 | 4 | let tag = unsafe { |
169 | 4 | // We're loading the pointer only for the tag, so it's okay to use |
170 | 4 | // `epoch::unprotected()` in this situation. |
171 | 4 | self.tower[0] |
172 | 4 | .load(Ordering::Relaxed, epoch::unprotected()) |
173 | 4 | .tag() |
174 | 4 | }; |
175 | 4 | tag == 1 |
176 | 4 | } |
177 | | |
178 | | /// Attempts to increment the reference count of a node and returns `true` on success. |
179 | | /// |
180 | | /// The reference count can be incremented only if it is non-zero. |
181 | | /// |
182 | | /// # Panics |
183 | | /// |
184 | | /// Panics if the reference count overflows. |
185 | | #[inline] |
186 | 334 | unsafe fn try_increment(&self) -> bool { |
187 | 334 | let mut refs_and_height = self.refs_and_height.load(Ordering::Relaxed); |
188 | | |
189 | 334 | loop { |
190 | 334 | // If the reference count is zero, then the node has already been |
191 | 334 | // queued for deletion. Incrementing it again could lead to a |
192 | 334 | // double-free. |
193 | 334 | if refs_and_height & !HEIGHT_MASK == 0 { |
194 | 0 | return false; |
195 | 334 | } |
196 | 334 | |
197 | 334 | // If all bits in the reference count are ones, we're about to overflow it. |
198 | 334 | let new_refs_and_height = refs_and_height |
199 | 334 | .checked_add(1 << HEIGHT_BITS) |
200 | 334 | .expect("SkipList reference count overflow"); |
201 | 334 | |
202 | 334 | // Try incrementing the count. |
203 | 334 | match self.refs_and_height.compare_exchange_weak( |
204 | 334 | refs_and_height, |
205 | 334 | new_refs_and_height, |
206 | 334 | Ordering::Relaxed, |
207 | 334 | Ordering::Relaxed, |
208 | 334 | ) { |
209 | 334 | Ok(_) => return true, |
210 | 0 | Err(current) => refs_and_height = current, |
211 | | } |
212 | | } |
213 | 334 | } <crossbeam_skiplist::base::Node<i32, i32>>::try_increment Line | Count | Source | 186 | 307 | unsafe fn try_increment(&self) -> bool { | 187 | 307 | let mut refs_and_height = self.refs_and_height.load(Ordering::Relaxed); | 188 | | | 189 | 307 | loop { | 190 | 307 | // If the reference count is zero, then the node has already been | 191 | 307 | // queued for deletion. Incrementing it again could lead to a | 192 | 307 | // double-free. | 193 | 307 | if refs_and_height & !HEIGHT_MASK == 0 { | 194 | 0 | return false; | 195 | 307 | } | 196 | 307 | | 197 | 307 | // If all bits in the reference count are ones, we're about to overflow it. | 198 | 307 | let new_refs_and_height = refs_and_height | 199 | 307 | .checked_add(1 << HEIGHT_BITS) | 200 | 307 | .expect("SkipList reference count overflow"); | 201 | 307 | | 202 | 307 | // Try incrementing the count. | 203 | 307 | match self.refs_and_height.compare_exchange_weak( | 204 | 307 | refs_and_height, | 205 | 307 | new_refs_and_height, | 206 | 307 | Ordering::Relaxed, | 207 | 307 | Ordering::Relaxed, | 208 | 307 | ) { | 209 | 307 | Ok(_) => return true, | 210 | 0 | Err(current) => refs_and_height = current, | 211 | | } | 212 | | } | 213 | 307 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::Node<i32, ()>>::try_increment <crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::try_increment Line | Count | Source | 186 | 1 | unsafe fn try_increment(&self) -> bool { | 187 | 1 | let mut refs_and_height = self.refs_and_height.load(Ordering::Relaxed); | 188 | | | 189 | 1 | loop { | 190 | 1 | // If the reference count is zero, then the node has already been | 191 | 1 | // queued for deletion. Incrementing it again could lead to a | 192 | 1 | // double-free. | 193 | 1 | if refs_and_height & !HEIGHT_MASK == 0 { | 194 | 0 | return false; | 195 | 1 | } | 196 | 1 | | 197 | 1 | // If all bits in the reference count are ones, we're about to overflow it. | 198 | 1 | let new_refs_and_height = refs_and_height | 199 | 1 | .checked_add(1 << HEIGHT_BITS) | 200 | 1 | .expect("SkipList reference count overflow"); | 201 | 1 | | 202 | 1 | // Try incrementing the count. | 203 | 1 | match self.refs_and_height.compare_exchange_weak( | 204 | 1 | refs_and_height, | 205 | 1 | new_refs_and_height, | 206 | 1 | Ordering::Relaxed, | 207 | 1 | Ordering::Relaxed, | 208 | 1 | ) { | 209 | 1 | Ok(_) => return true, | 210 | 0 | Err(current) => refs_and_height = current, | 211 | | } | 212 | | } | 213 | 1 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::try_increment Line | Count | Source | 186 | 26 | unsafe fn try_increment(&self) -> bool { | 187 | 26 | let mut refs_and_height = self.refs_and_height.load(Ordering::Relaxed); | 188 | | | 189 | 26 | loop { | 190 | 26 | // If the reference count is zero, then the node has already been | 191 | 26 | // queued for deletion. Incrementing it again could lead to a | 192 | 26 | // double-free. | 193 | 26 | if refs_and_height & !HEIGHT_MASK == 0 { | 194 | 0 | return false; | 195 | 26 | } | 196 | 26 | | 197 | 26 | // If all bits in the reference count are ones, we're about to overflow it. | 198 | 26 | let new_refs_and_height = refs_and_height | 199 | 26 | .checked_add(1 << HEIGHT_BITS) | 200 | 26 | .expect("SkipList reference count overflow"); | 201 | 26 | | 202 | 26 | // Try incrementing the count. | 203 | 26 | match self.refs_and_height.compare_exchange_weak( | 204 | 26 | refs_and_height, | 205 | 26 | new_refs_and_height, | 206 | 26 | Ordering::Relaxed, | 207 | 26 | Ordering::Relaxed, | 208 | 26 | ) { | 209 | 26 | Ok(_) => return true, | 210 | 0 | Err(current) => refs_and_height = current, | 211 | | } | 212 | | } | 213 | 26 | } |
|
214 | | |
215 | | /// Decrements the reference count of a node, destroying it if the count becomes zero. |
216 | | #[inline] |
217 | 93 | unsafe fn decrement(&self, guard: &Guard) { |
218 | 93 | if self |
219 | 93 | .refs_and_height |
220 | 93 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) |
221 | 93 | >> HEIGHT_BITS |
222 | 93 | == 1 |
223 | 2 | { |
224 | 2 | fence(Ordering::Acquire); |
225 | 2 | guard.defer_unchecked(move || Self::finalize(self)1 ); |
226 | 91 | } |
227 | 93 | } <crossbeam_skiplist::base::Node<i32, i32>>::decrement Line | Count | Source | 217 | 36 | unsafe fn decrement(&self, guard: &Guard) { | 218 | 36 | if self | 219 | 36 | .refs_and_height | 220 | 36 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) | 221 | 36 | >> HEIGHT_BITS | 222 | 36 | == 1 | 223 | 1 | { | 224 | 1 | fence(Ordering::Acquire); | 225 | 1 | guard.defer_unchecked(move || Self::finalize(self)); | 226 | 35 | } | 227 | 36 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::Node<i32, ()>>::decrement <crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::decrement Line | Count | Source | 217 | 9 | unsafe fn decrement(&self, guard: &Guard) { | 218 | 9 | if self | 219 | 9 | .refs_and_height | 220 | 9 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) | 221 | 9 | >> HEIGHT_BITS | 222 | 9 | == 1 | 223 | 1 | { | 224 | 1 | fence(Ordering::Acquire); | 225 | 1 | guard.defer_unchecked(move || Self::finalize(self)); | 226 | 8 | } | 227 | 9 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::decrement Line | Count | Source | 217 | 48 | unsafe fn decrement(&self, guard: &Guard) { | 218 | 48 | if self | 219 | 48 | .refs_and_height | 220 | 48 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) | 221 | 48 | >> HEIGHT_BITS | 222 | 48 | == 1 | 223 | 0 | { | 224 | 0 | fence(Ordering::Acquire); | 225 | 0 | guard.defer_unchecked(move || Self::finalize(self)); | 226 | 48 | } | 227 | 48 | } |
|
228 | | |
229 | | /// Decrements the reference count of a node, pinning the thread and destroying the node |
230 | | /// if the count become zero. |
231 | | #[inline] |
232 | 169 | unsafe fn decrement_with_pin<F>(&self, parent: &SkipList<K, V>, pin: F) |
233 | 169 | where |
234 | 169 | F: FnOnce() -> Guard, |
235 | 169 | { |
236 | 169 | if self |
237 | 169 | .refs_and_height |
238 | 169 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) |
239 | 169 | >> HEIGHT_BITS |
240 | 169 | == 1 |
241 | 3 | { |
242 | 3 | fence(Ordering::Acquire); |
243 | 3 | let guard = &pin(); |
244 | 3 | parent.check_guard(guard); |
245 | 3 | guard.defer_unchecked(move || Self::finalize(self)); |
246 | 166 | } |
247 | 169 | } <crossbeam_skiplist::base::Node<i32, i32>>::decrement_with_pin::<crossbeam_epoch::default::pin> Line | Count | Source | 232 | 166 | unsafe fn decrement_with_pin<F>(&self, parent: &SkipList<K, V>, pin: F) | 233 | 166 | where | 234 | 166 | F: FnOnce() -> Guard, | 235 | 166 | { | 236 | 166 | if self | 237 | 166 | .refs_and_height | 238 | 166 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) | 239 | 166 | >> HEIGHT_BITS | 240 | 166 | == 1 | 241 | 3 | { | 242 | 3 | fence(Ordering::Acquire); | 243 | 3 | let guard = &pin(); | 244 | 3 | parent.check_guard(guard); | 245 | 3 | guard.defer_unchecked(move || Self::finalize(self)); | 246 | 163 | } | 247 | 166 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::decrement_with_pin::<crossbeam_epoch::default::pin> Line | Count | Source | 232 | 3 | unsafe fn decrement_with_pin<F>(&self, parent: &SkipList<K, V>, pin: F) | 233 | 3 | where | 234 | 3 | F: FnOnce() -> Guard, | 235 | 3 | { | 236 | 3 | if self | 237 | 3 | .refs_and_height | 238 | 3 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Release) | 239 | 3 | >> HEIGHT_BITS | 240 | 3 | == 1 | 241 | 0 | { | 242 | 0 | fence(Ordering::Acquire); | 243 | 0 | let guard = &pin(); | 244 | 0 | parent.check_guard(guard); | 245 | 0 | guard.defer_unchecked(move || Self::finalize(self)); | 246 | 3 | } | 247 | 3 | } |
|
248 | | |
249 | | /// Drops the key and value of a node, then deallocates it. |
250 | | #[cold] |
251 | 106 | unsafe fn finalize(ptr: *const Self) { |
252 | 106 | let ptr = ptr as *mut Self; |
253 | 106 | |
254 | 106 | // Call destructors: drop the key and the value. |
255 | 106 | ptr::drop_in_place(&mut (*ptr).key); |
256 | 106 | ptr::drop_in_place(&mut (*ptr).value); |
257 | 106 | |
258 | 106 | // Finally, deallocate the memory occupied by the node. |
259 | 106 | Node::dealloc(ptr); |
260 | 106 | } <crossbeam_skiplist::base::Node<i32, i32>>::finalize Line | Count | Source | 251 | 16 | unsafe fn finalize(ptr: *const Self) { | 252 | 16 | let ptr = ptr as *mut Self; | 253 | 16 | | 254 | 16 | // Call destructors: drop the key and the value. | 255 | 16 | ptr::drop_in_place(&mut (*ptr).key); | 256 | 16 | ptr::drop_in_place(&mut (*ptr).value); | 257 | 16 | | 258 | 16 | // Finally, deallocate the memory occupied by the node. | 259 | 16 | Node::dealloc(ptr); | 260 | 16 | } |
<crossbeam_skiplist::base::Node<i32, ()>>::finalize Line | Count | Source | 251 | 3 | unsafe fn finalize(ptr: *const Self) { | 252 | 3 | let ptr = ptr as *mut Self; | 253 | 3 | | 254 | 3 | // Call destructors: drop the key and the value. | 255 | 3 | ptr::drop_in_place(&mut (*ptr).key); | 256 | 3 | ptr::drop_in_place(&mut (*ptr).value); | 257 | 3 | | 258 | 3 | // Finally, deallocate the memory occupied by the node. | 259 | 3 | Node::dealloc(ptr); | 260 | 3 | } |
<crossbeam_skiplist::base::Node<i32, i32>>::finalize Line | Count | Source | 251 | 80 | unsafe fn finalize(ptr: *const Self) { | 252 | 80 | let ptr = ptr as *mut Self; | 253 | 80 | | 254 | 80 | // Call destructors: drop the key and the value. | 255 | 80 | ptr::drop_in_place(&mut (*ptr).key); | 256 | 80 | ptr::drop_in_place(&mut (*ptr).value); | 257 | 80 | | 258 | 80 | // Finally, deallocate the memory occupied by the node. | 259 | 80 | Node::dealloc(ptr); | 260 | 80 | } |
<crossbeam_skiplist::base::Node<base::drops::Key, base::drops::Value>>::finalize Line | Count | Source | 251 | 7 | unsafe fn finalize(ptr: *const Self) { | 252 | 7 | let ptr = ptr as *mut Self; | 253 | 7 | | 254 | 7 | // Call destructors: drop the key and the value. | 255 | 7 | ptr::drop_in_place(&mut (*ptr).key); | 256 | 7 | ptr::drop_in_place(&mut (*ptr).value); | 257 | 7 | | 258 | 7 | // Finally, deallocate the memory occupied by the node. | 259 | 7 | Node::dealloc(ptr); | 260 | 7 | } |
|
261 | | } |
262 | | |
263 | | impl<K, V> fmt::Debug for Node<K, V> |
264 | | where |
265 | | K: fmt::Debug, |
266 | | V: fmt::Debug, |
267 | | { |
268 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
269 | | f.debug_tuple("Node") |
270 | | .field(&self.key) |
271 | | .field(&self.value) |
272 | | .finish() |
273 | | } |
274 | | } |
275 | | |
276 | | /// A search result. |
277 | | /// |
278 | | /// The result indicates whether the key was found, as well as what were the adjacent nodes to the |
279 | | /// key on each level of the skip list. |
280 | | struct Position<'a, K, V> { |
281 | | /// Reference to a node with the given key, if found. |
282 | | /// |
283 | | /// If this is `Some` then it will point to the same node as `right[0]`. |
284 | | found: Option<&'a Node<K, V>>, |
285 | | |
286 | | /// Adjacent nodes with smaller keys (predecessors). |
287 | | left: [&'a Tower<K, V>; MAX_HEIGHT], |
288 | | |
289 | | /// Adjacent nodes with equal or greater keys (successors). |
290 | | right: [Shared<'a, Node<K, V>>; MAX_HEIGHT], |
291 | | } |
292 | | |
293 | | /// Frequently modified data associated with a skip list. |
294 | | struct HotData { |
295 | | /// The seed for random height generation. |
296 | | seed: AtomicUsize, |
297 | | |
298 | | /// The number of entries in the skip list. |
299 | | len: AtomicUsize, |
300 | | |
301 | | /// Highest tower currently in use. This value is used as a hint for where |
302 | | /// to start lookups and never decreases. |
303 | | max_height: AtomicUsize, |
304 | | } |
305 | | |
306 | | /// A lock-free skip list. |
307 | | // TODO(stjepang): Embed a custom `epoch::Collector` inside `SkipList<K, V>`. Instead of adding |
308 | | // garbage to the default global collector, we should add it to a local collector tied to the |
309 | | // particular skip list instance. |
310 | | // |
311 | | // Since global collector might destroy garbage arbitrarily late in the future, some skip list |
312 | | // methods have `K: 'static` and `V: 'static` bounds. But a local collector embedded in the skip |
313 | | // list would destroy all remaining garbage when the skip list is dropped, so in that case we'd be |
314 | | // able to remove those bounds on types `K` and `V`. |
315 | | // |
316 | | // As a further future optimization, if `!mem::needs_drop::<K>() && !mem::needs_drop::<V>()` |
317 | | // (neither key nor the value have destructors), there's no point in creating a new local |
318 | | // collector, so we should simply use the global one. |
319 | | pub struct SkipList<K, V> { |
320 | | /// The head of the skip list (just a dummy node, not a real entry). |
321 | | head: Head<K, V>, |
322 | | |
323 | | /// The `Collector` associated with this skip list. |
324 | | collector: Collector, |
325 | | |
326 | | /// Hot data associated with the skip list, stored in a dedicated cache line. |
327 | | hot_data: CachePadded<HotData>, |
328 | | } |
329 | | |
330 | | unsafe impl<K: Send + Sync, V: Send + Sync> Send for SkipList<K, V> {} |
331 | | unsafe impl<K: Send + Sync, V: Send + Sync> Sync for SkipList<K, V> {} |
332 | | |
333 | | impl<K, V> SkipList<K, V> { |
334 | | /// Returns a new, empty skip list. |
335 | 23 | pub fn new(collector: Collector) -> SkipList<K, V> { |
336 | 23 | SkipList { |
337 | 23 | head: Head::new(), |
338 | 23 | collector, |
339 | 23 | hot_data: CachePadded::new(HotData { |
340 | 23 | seed: AtomicUsize::new(1), |
341 | 23 | len: AtomicUsize::new(0), |
342 | 23 | max_height: AtomicUsize::new(1), |
343 | 23 | }), |
344 | 23 | } |
345 | 23 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::new Line | Count | Source | 335 | 3 | pub fn new(collector: Collector) -> SkipList<K, V> { | 336 | 3 | SkipList { | 337 | 3 | head: Head::new(), | 338 | 3 | collector, | 339 | 3 | hot_data: CachePadded::new(HotData { | 340 | 3 | seed: AtomicUsize::new(1), | 341 | 3 | len: AtomicUsize::new(0), | 342 | 3 | max_height: AtomicUsize::new(1), | 343 | 3 | }), | 344 | 3 | } | 345 | 3 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::new Line | Count | Source | 335 | 1 | pub fn new(collector: Collector) -> SkipList<K, V> { | 336 | 1 | SkipList { | 337 | 1 | head: Head::new(), | 338 | 1 | collector, | 339 | 1 | hot_data: CachePadded::new(HotData { | 340 | 1 | seed: AtomicUsize::new(1), | 341 | 1 | len: AtomicUsize::new(0), | 342 | 1 | max_height: AtomicUsize::new(1), | 343 | 1 | }), | 344 | 1 | } | 345 | 1 | } |
<crossbeam_skiplist::base::SkipList<alloc::string::String, alloc::boxed::Box<i32>>>::new Line | Count | Source | 335 | 1 | pub fn new(collector: Collector) -> SkipList<K, V> { | 336 | 1 | SkipList { | 337 | 1 | head: Head::new(), | 338 | 1 | collector, | 339 | 1 | hot_data: CachePadded::new(HotData { | 340 | 1 | seed: AtomicUsize::new(1), | 341 | 1 | len: AtomicUsize::new(0), | 342 | 1 | max_height: AtomicUsize::new(1), | 343 | 1 | }), | 344 | 1 | } | 345 | 1 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::new Line | Count | Source | 335 | 17 | pub fn new(collector: Collector) -> SkipList<K, V> { | 336 | 17 | SkipList { | 337 | 17 | head: Head::new(), | 338 | 17 | collector, | 339 | 17 | hot_data: CachePadded::new(HotData { | 340 | 17 | seed: AtomicUsize::new(1), | 341 | 17 | len: AtomicUsize::new(0), | 342 | 17 | max_height: AtomicUsize::new(1), | 343 | 17 | }), | 344 | 17 | } | 345 | 17 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::new Line | Count | Source | 335 | 1 | pub fn new(collector: Collector) -> SkipList<K, V> { | 336 | 1 | SkipList { | 337 | 1 | head: Head::new(), | 338 | 1 | collector, | 339 | 1 | hot_data: CachePadded::new(HotData { | 340 | 1 | seed: AtomicUsize::new(1), | 341 | 1 | len: AtomicUsize::new(0), | 342 | 1 | max_height: AtomicUsize::new(1), | 343 | 1 | }), | 344 | 1 | } | 345 | 1 | } |
|
346 | | |
347 | | /// Returns `true` if the skip list is empty. |
348 | 16 | pub fn is_empty(&self) -> bool { |
349 | 16 | self.len() == 0 |
350 | 16 | } |
351 | | |
352 | | /// Returns the number of entries in the skip list. |
353 | | /// |
354 | | /// If the skip list is being concurrently modified, consider the returned number just an |
355 | | /// approximation without any guarantees. |
356 | 38 | pub fn len(&self) -> usize { |
357 | 38 | let len = self.hot_data.len.load(Ordering::Relaxed); |
358 | 38 | |
359 | 38 | // Due to the relaxed memory ordering, the length counter may sometimes |
360 | 38 | // underflow and produce a very large value. We treat such values as 0. |
361 | 38 | if len > isize::max_value() as usize { |
362 | 0 | 0 |
363 | | } else { |
364 | 38 | len |
365 | | } |
366 | 38 | } |
367 | | |
368 | | /// Ensures that all `Guard`s used with the skip list come from the same |
369 | | /// `Collector`. |
370 | 692 | fn check_guard(&self, guard: &Guard) { |
371 | 692 | if let Some(c) = guard.collector() { |
372 | 692 | assert!(c == &self.collector); |
373 | 0 | } |
374 | 694 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::check_guard Line | Count | Source | 370 | 385 | fn check_guard(&self, guard: &Guard) { | 371 | 385 | if let Some(c) = guard.collector() { | 372 | 385 | assert!(c == &self.collector); | 373 | 0 | } | 374 | 385 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::check_guard Line | Count | Source | 370 | 3 | fn check_guard(&self, guard: &Guard) { | 371 | 3 | if let Some(c) = guard.collector() { | 372 | 3 | assert!(c == &self.collector); | 373 | 0 | } | 374 | 3 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::check_guard Line | Count | Source | 370 | 16 | fn check_guard(&self, guard: &Guard) { | 371 | 16 | if let Some(c) = guard.collector() { | 372 | 16 | assert!(c == &self.collector); | 373 | 0 | } | 374 | 16 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::check_guard Line | Count | Source | 370 | 288 | fn check_guard(&self, guard: &Guard) { | 371 | 288 | if let Some(c) = guard.collector() { | 372 | 288 | assert!(c == &self.collector); | 373 | 0 | } | 374 | 290 | } |
|
375 | | } |
376 | | |
377 | | impl<K, V> SkipList<K, V> |
378 | | where |
379 | | K: Ord, |
380 | | { |
381 | | /// Returns the entry with the smallest key. |
382 | 8 | pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { |
383 | 8 | self.check_guard(guard); |
384 | 8 | let n6 = self.next_node(&self.head, Bound::Unbounded, guard)?2 ; |
385 | 6 | Some(Entry { |
386 | 6 | parent: self, |
387 | 6 | node: n, |
388 | 6 | guard, |
389 | 6 | }) |
390 | 8 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::front Line | Count | Source | 382 | 3 | pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { | 383 | 3 | self.check_guard(guard); | 384 | 3 | let n = self.next_node(&self.head, Bound::Unbounded, guard)?0 ; | 385 | 3 | Some(Entry { | 386 | 3 | parent: self, | 387 | 3 | node: n, | 388 | 3 | guard, | 389 | 3 | }) | 390 | 3 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::front Line | Count | Source | 382 | 5 | pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { | 383 | 5 | self.check_guard(guard); | 384 | 5 | let n3 = self.next_node(&self.head, Bound::Unbounded, guard)?2 ; | 385 | 3 | Some(Entry { | 386 | 3 | parent: self, | 387 | 3 | node: n, | 388 | 3 | guard, | 389 | 3 | }) | 390 | 5 | } |
|
391 | | |
392 | | /// Returns the entry with the largest key. |
393 | 5 | pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { |
394 | 5 | self.check_guard(guard); |
395 | 5 | let n3 = self.search_bound(Bound::Unbounded, true, guard)?2 ; |
396 | 3 | Some(Entry { |
397 | 3 | parent: self, |
398 | 3 | node: n, |
399 | 3 | guard, |
400 | 3 | }) |
401 | 5 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::back Line | Count | Source | 393 | 1 | pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { | 394 | 1 | self.check_guard(guard); | 395 | 1 | let n = self.search_bound(Bound::Unbounded, true, guard)?0 ; | 396 | 1 | Some(Entry { | 397 | 1 | parent: self, | 398 | 1 | node: n, | 399 | 1 | guard, | 400 | 1 | }) | 401 | 1 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::back Line | Count | Source | 393 | 4 | pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> { | 394 | 4 | self.check_guard(guard); | 395 | 4 | let n2 = self.search_bound(Bound::Unbounded, true, guard)?2 ; | 396 | 2 | Some(Entry { | 397 | 2 | parent: self, | 398 | 2 | node: n, | 399 | 2 | guard, | 400 | 2 | }) | 401 | 4 | } |
|
402 | | |
403 | | /// Returns `true` if the map contains a value for the specified key. |
404 | | pub fn contains_key<Q>(&self, key: &Q, guard: &Guard) -> bool |
405 | | where |
406 | | K: Borrow<Q>, |
407 | | Q: Ord + ?Sized, |
408 | | { |
409 | | self.get(key, guard).is_some() |
410 | | } |
411 | | |
412 | | /// Returns an entry with the specified `key`. |
413 | 28 | pub fn get<'a: 'g, 'g, Q>(&'a self, key: &Q, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> |
414 | 28 | where |
415 | 28 | K: Borrow<Q>, |
416 | 28 | Q: Ord + ?Sized, |
417 | 28 | { |
418 | 28 | self.check_guard(guard); |
419 | 28 | let n27 = self.search_bound(Bound::Included(key), false, guard)?1 ; |
420 | 27 | if n.key.borrow() != key { |
421 | 8 | return None; |
422 | 19 | } |
423 | 19 | Some(Entry { |
424 | 19 | parent: self, |
425 | 19 | node: n, |
426 | 19 | guard, |
427 | 19 | }) |
428 | 28 | } |
429 | | |
430 | | /// Returns an `Entry` pointing to the lowest element whose key is above |
431 | | /// the given bound. If no such element is found then `None` is |
432 | | /// returned. |
433 | 53 | pub fn lower_bound<'a: 'g, 'g, Q>( |
434 | 53 | &'a self, |
435 | 53 | bound: Bound<&Q>, |
436 | 53 | guard: &'g Guard, |
437 | 53 | ) -> Option<Entry<'a, 'g, K, V>> |
438 | 53 | where |
439 | 53 | K: Borrow<Q>, |
440 | 53 | Q: Ord + ?Sized, |
441 | 53 | { |
442 | 53 | self.check_guard(guard); |
443 | 53 | let n44 = self.search_bound(bound, false, guard)?9 ; |
444 | 44 | Some(Entry { |
445 | 44 | parent: self, |
446 | 44 | node: n, |
447 | 44 | guard, |
448 | 44 | }) |
449 | 53 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::lower_bound::<i32> Line | Count | Source | 433 | 33 | pub fn lower_bound<'a: 'g, 'g, Q>( | 434 | 33 | &'a self, | 435 | 33 | bound: Bound<&Q>, | 436 | 33 | guard: &'g Guard, | 437 | 33 | ) -> Option<Entry<'a, 'g, K, V>> | 438 | 33 | where | 439 | 33 | K: Borrow<Q>, | 440 | 33 | Q: Ord + ?Sized, | 441 | 33 | { | 442 | 33 | self.check_guard(guard); | 443 | 33 | let n27 = self.search_bound(bound, false, guard)?6 ; | 444 | 27 | Some(Entry { | 445 | 27 | parent: self, | 446 | 27 | node: n, | 447 | 27 | guard, | 448 | 27 | }) | 449 | 33 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::lower_bound::<i32> Line | Count | Source | 433 | 20 | pub fn lower_bound<'a: 'g, 'g, Q>( | 434 | 20 | &'a self, | 435 | 20 | bound: Bound<&Q>, | 436 | 20 | guard: &'g Guard, | 437 | 20 | ) -> Option<Entry<'a, 'g, K, V>> | 438 | 20 | where | 439 | 20 | K: Borrow<Q>, | 440 | 20 | Q: Ord + ?Sized, | 441 | 20 | { | 442 | 20 | self.check_guard(guard); | 443 | 20 | let n17 = self.search_bound(bound, false, guard)?3 ; | 444 | 17 | Some(Entry { | 445 | 17 | parent: self, | 446 | 17 | node: n, | 447 | 17 | guard, | 448 | 17 | }) | 449 | 20 | } |
|
450 | | |
451 | | /// Returns an `Entry` pointing to the highest element whose key is below |
452 | | /// the given bound. If no such element is found then `None` is |
453 | | /// returned. |
454 | 19 | pub fn upper_bound<'a: 'g, 'g, Q>( |
455 | 19 | &'a self, |
456 | 19 | bound: Bound<&Q>, |
457 | 19 | guard: &'g Guard, |
458 | 19 | ) -> Option<Entry<'a, 'g, K, V>> |
459 | 19 | where |
460 | 19 | K: Borrow<Q>, |
461 | 19 | Q: Ord + ?Sized, |
462 | 19 | { |
463 | 19 | self.check_guard(guard); |
464 | 19 | let n16 = self.search_bound(bound, true, guard)?3 ; |
465 | 16 | Some(Entry { |
466 | 16 | parent: self, |
467 | 16 | node: n, |
468 | 16 | guard, |
469 | 16 | }) |
470 | 19 | } |
471 | | |
472 | | /// Finds an entry with the specified key, or inserts a new `key`-`value` pair if none exist. |
473 | 2 | pub fn get_or_insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { |
474 | 2 | self.insert_internal(key, value, false, guard) |
475 | 2 | } |
476 | | |
477 | | /// Returns an iterator over all entries in the skip list. |
478 | 21 | pub fn iter<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Iter<'a, 'g, K, V> { |
479 | 21 | self.check_guard(guard); |
480 | 21 | Iter { |
481 | 21 | parent: self, |
482 | 21 | head: None, |
483 | 21 | tail: None, |
484 | 21 | guard, |
485 | 21 | } |
486 | 21 | } |
487 | | |
488 | | /// Returns an iterator over all entries in the skip list. |
489 | 4 | pub fn ref_iter(&self) -> RefIter<'_, K, V> { |
490 | 4 | RefIter { |
491 | 4 | parent: self, |
492 | 4 | head: None, |
493 | 4 | tail: None, |
494 | 4 | } |
495 | 4 | } |
496 | | |
497 | | /// Returns an iterator over a subset of entries in the skip list. |
498 | 33 | pub fn range<'a: 'g, 'g, Q, R>( |
499 | 33 | &'a self, |
500 | 33 | range: R, |
501 | 33 | guard: &'g Guard, |
502 | 33 | ) -> Range<'a, 'g, Q, R, K, V> |
503 | 33 | where |
504 | 33 | K: Borrow<Q>, |
505 | 33 | R: RangeBounds<Q>, |
506 | 33 | Q: Ord + ?Sized, |
507 | 33 | { |
508 | 33 | self.check_guard(guard); |
509 | 33 | Range { |
510 | 33 | parent: self, |
511 | 33 | head: None, |
512 | 33 | tail: None, |
513 | 33 | range, |
514 | 33 | guard, |
515 | 33 | _marker: PhantomData, |
516 | 33 | } |
517 | 33 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::range::<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>)> Line | Count | Source | 498 | 32 | pub fn range<'a: 'g, 'g, Q, R>( | 499 | 32 | &'a self, | 500 | 32 | range: R, | 501 | 32 | guard: &'g Guard, | 502 | 32 | ) -> Range<'a, 'g, Q, R, K, V> | 503 | 32 | where | 504 | 32 | K: Borrow<Q>, | 505 | 32 | R: RangeBounds<Q>, | 506 | 32 | Q: Ord + ?Sized, | 507 | 32 | { | 508 | 32 | self.check_guard(guard); | 509 | 32 | Range { | 510 | 32 | parent: self, | 511 | 32 | head: None, | 512 | 32 | tail: None, | 513 | 32 | range, | 514 | 32 | guard, | 515 | 32 | _marker: PhantomData, | 516 | 32 | } | 517 | 32 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::range::<i32, core::ops::range::RangeFull> Line | Count | Source | 498 | 1 | pub fn range<'a: 'g, 'g, Q, R>( | 499 | 1 | &'a self, | 500 | 1 | range: R, | 501 | 1 | guard: &'g Guard, | 502 | 1 | ) -> Range<'a, 'g, Q, R, K, V> | 503 | 1 | where | 504 | 1 | K: Borrow<Q>, | 505 | 1 | R: RangeBounds<Q>, | 506 | 1 | Q: Ord + ?Sized, | 507 | 1 | { | 508 | 1 | self.check_guard(guard); | 509 | 1 | Range { | 510 | 1 | parent: self, | 511 | 1 | head: None, | 512 | 1 | tail: None, | 513 | 1 | range, | 514 | 1 | guard, | 515 | 1 | _marker: PhantomData, | 516 | 1 | } | 517 | 1 | } |
|
518 | | |
519 | | /// Returns an iterator over a subset of entries in the skip list. |
520 | | #[allow(clippy::needless_lifetimes)] |
521 | 33 | pub fn ref_range<'a, Q, R>(&'a self, range: R) -> RefRange<'a, Q, R, K, V> |
522 | 33 | where |
523 | 33 | K: Borrow<Q>, |
524 | 33 | R: RangeBounds<Q>, |
525 | 33 | Q: Ord + ?Sized, |
526 | 33 | { |
527 | 33 | RefRange { |
528 | 33 | parent: self, |
529 | 33 | range, |
530 | 33 | head: None, |
531 | 33 | tail: None, |
532 | 33 | _marker: PhantomData, |
533 | 33 | } |
534 | 33 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::ref_range::<i32, core::ops::range::RangeFull> Line | Count | Source | 521 | 1 | pub fn ref_range<'a, Q, R>(&'a self, range: R) -> RefRange<'a, Q, R, K, V> | 522 | 1 | where | 523 | 1 | K: Borrow<Q>, | 524 | 1 | R: RangeBounds<Q>, | 525 | 1 | Q: Ord + ?Sized, | 526 | 1 | { | 527 | 1 | RefRange { | 528 | 1 | parent: self, | 529 | 1 | range, | 530 | 1 | head: None, | 531 | 1 | tail: None, | 532 | 1 | _marker: PhantomData, | 533 | 1 | } | 534 | 1 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::ref_range::<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>)> Line | Count | Source | 521 | 32 | pub fn ref_range<'a, Q, R>(&'a self, range: R) -> RefRange<'a, Q, R, K, V> | 522 | 32 | where | 523 | 32 | K: Borrow<Q>, | 524 | 32 | R: RangeBounds<Q>, | 525 | 32 | Q: Ord + ?Sized, | 526 | 32 | { | 527 | 32 | RefRange { | 528 | 32 | parent: self, | 529 | 32 | range, | 530 | 32 | head: None, | 531 | 32 | tail: None, | 532 | 32 | _marker: PhantomData, | 533 | 32 | } | 534 | 32 | } |
|
535 | | |
536 | | /// Generates a random height and returns it. |
537 | 152 | fn random_height(&self) -> usize { |
538 | 152 | // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. |
539 | 152 | // |
540 | 152 | // This particular set of operations generates 32-bit integers. See: |
541 | 152 | // https://en.wikipedia.org/wiki/Xorshift#Example_implementation |
542 | 152 | let mut num = self.hot_data.seed.load(Ordering::Relaxed); |
543 | 152 | num ^= num << 13; |
544 | 152 | num ^= num >> 17; |
545 | 152 | num ^= num << 5; |
546 | 152 | self.hot_data.seed.store(num, Ordering::Relaxed); |
547 | 152 | |
548 | 152 | let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); |
549 | | unsafe { |
550 | | // Keep decreasing the height while it's much larger than all towers currently in the |
551 | | // skip list. |
552 | | // |
553 | | // Note that we're loading the pointer only to check whether it is null, so it's okay |
554 | | // to use `epoch::unprotected()` in this situation. |
555 | 152 | while height >= 4 |
556 | 0 | && self.head[height - 2] |
557 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) |
558 | 0 | .is_null() |
559 | 0 | { |
560 | 0 | height -= 1; |
561 | 0 | } |
562 | | } |
563 | | |
564 | | // Track the max height to speed up lookups |
565 | 153 | let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); |
566 | 153 | while height > max_height { |
567 | 24 | match self.hot_data.max_height.compare_exchange_weak( |
568 | 24 | max_height, |
569 | 24 | height, |
570 | 24 | Ordering::Relaxed, |
571 | 24 | Ordering::Relaxed, |
572 | 24 | ) { |
573 | 24 | Ok(_) => break23 , |
574 | 0 | Err(h) => max_height = h, |
575 | | } |
576 | | } |
577 | 152 | height |
578 | 152 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::random_height Line | Count | Source | 537 | 20 | fn random_height(&self) -> usize { | 538 | 20 | // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. | 539 | 20 | // | 540 | 20 | // This particular set of operations generates 32-bit integers. See: | 541 | 20 | // https://en.wikipedia.org/wiki/Xorshift#Example_implementation | 542 | 20 | let mut num = self.hot_data.seed.load(Ordering::Relaxed); | 543 | 20 | num ^= num << 13; | 544 | 20 | num ^= num >> 17; | 545 | 20 | num ^= num << 5; | 546 | 20 | self.hot_data.seed.store(num, Ordering::Relaxed); | 547 | 20 | | 548 | 20 | let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); | 549 | | unsafe { | 550 | | // Keep decreasing the height while it's much larger than all towers currently in the | 551 | | // skip list. | 552 | | // | 553 | | // Note that we're loading the pointer only to check whether it is null, so it's okay | 554 | | // to use `epoch::unprotected()` in this situation. | 555 | 20 | while height >= 4 | 556 | 0 | && self.head[height - 2] | 557 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) | 558 | 0 | .is_null() | 559 | 0 | { | 560 | 0 | height -= 1; | 561 | 0 | } | 562 | | } | 563 | | | 564 | | // Track the max height to speed up lookups | 565 | 20 | let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); | 566 | 20 | while height > max_height { | 567 | 3 | match self.hot_data.max_height.compare_exchange_weak( | 568 | 3 | max_height, | 569 | 3 | height, | 570 | 3 | Ordering::Relaxed, | 571 | 3 | Ordering::Relaxed, | 572 | 3 | ) { | 573 | 3 | Ok(_) => break, | 574 | 0 | Err(h) => max_height = h, | 575 | | } | 576 | | } | 577 | 20 | height | 578 | 20 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::random_height Line | Count | Source | 537 | 3 | fn random_height(&self) -> usize { | 538 | 3 | // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. | 539 | 3 | // | 540 | 3 | // This particular set of operations generates 32-bit integers. See: | 541 | 3 | // https://en.wikipedia.org/wiki/Xorshift#Example_implementation | 542 | 3 | let mut num = self.hot_data.seed.load(Ordering::Relaxed); | 543 | 3 | num ^= num << 13; | 544 | 3 | num ^= num >> 17; | 545 | 3 | num ^= num << 5; | 546 | 3 | self.hot_data.seed.store(num, Ordering::Relaxed); | 547 | 3 | | 548 | 3 | let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); | 549 | | unsafe { | 550 | | // Keep decreasing the height while it's much larger than all towers currently in the | 551 | | // skip list. | 552 | | // | 553 | | // Note that we're loading the pointer only to check whether it is null, so it's okay | 554 | | // to use `epoch::unprotected()` in this situation. | 555 | 3 | while height >= 4 | 556 | 0 | && self.head[height - 2] | 557 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) | 558 | 0 | .is_null() | 559 | 0 | { | 560 | 0 | height -= 1; | 561 | 0 | } | 562 | | } | 563 | | | 564 | | // Track the max height to speed up lookups | 565 | 3 | let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); | 566 | 3 | while height > max_height { | 567 | 0 | match self.hot_data.max_height.compare_exchange_weak( | 568 | 0 | max_height, | 569 | 0 | height, | 570 | 0 | Ordering::Relaxed, | 571 | 0 | Ordering::Relaxed, | 572 | 0 | ) { | 573 | 0 | Ok(_) => break, | 574 | 0 | Err(h) => max_height = h, | 575 | | } | 576 | | } | 577 | 3 | height | 578 | 3 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::random_height Line | Count | Source | 537 | 122 | fn random_height(&self) -> usize { | 538 | 122 | // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. | 539 | 122 | // | 540 | 122 | // This particular set of operations generates 32-bit integers. See: | 541 | 122 | // https://en.wikipedia.org/wiki/Xorshift#Example_implementation | 542 | 122 | let mut num = self.hot_data.seed.load(Ordering::Relaxed); | 543 | 122 | num ^= num << 13; | 544 | 122 | num ^= num >> 17; | 545 | 122 | num ^= num << 5; | 546 | 122 | self.hot_data.seed.store(num, Ordering::Relaxed); | 547 | 122 | | 548 | 122 | let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); | 549 | | unsafe { | 550 | | // Keep decreasing the height while it's much larger than all towers currently in the | 551 | | // skip list. | 552 | | // | 553 | | // Note that we're loading the pointer only to check whether it is null, so it's okay | 554 | | // to use `epoch::unprotected()` in this situation. | 555 | 122 | while height >= 4 | 556 | 0 | && self.head[height - 2] | 557 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) | 558 | 0 | .is_null() | 559 | 0 | { | 560 | 0 | height -= 1; | 561 | 0 | } | 562 | | } | 563 | | | 564 | | // Track the max height to speed up lookups | 565 | 123 | let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); | 566 | 123 | while height > max_height { | 567 | 20 | match self.hot_data.max_height.compare_exchange_weak( | 568 | 20 | max_height, | 569 | 20 | height, | 570 | 20 | Ordering::Relaxed, | 571 | 20 | Ordering::Relaxed, | 572 | 20 | ) { | 573 | 20 | Ok(_) => break19 , | 574 | 0 | Err(h) => max_height = h, | 575 | | } | 576 | | } | 577 | 122 | height | 578 | 122 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::random_height Line | Count | Source | 537 | 7 | fn random_height(&self) -> usize { | 538 | 7 | // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. | 539 | 7 | // | 540 | 7 | // This particular set of operations generates 32-bit integers. See: | 541 | 7 | // https://en.wikipedia.org/wiki/Xorshift#Example_implementation | 542 | 7 | let mut num = self.hot_data.seed.load(Ordering::Relaxed); | 543 | 7 | num ^= num << 13; | 544 | 7 | num ^= num >> 17; | 545 | 7 | num ^= num << 5; | 546 | 7 | self.hot_data.seed.store(num, Ordering::Relaxed); | 547 | 7 | | 548 | 7 | let mut height = cmp::min(MAX_HEIGHT, num.trailing_zeros() as usize + 1); | 549 | | unsafe { | 550 | | // Keep decreasing the height while it's much larger than all towers currently in the | 551 | | // skip list. | 552 | | // | 553 | | // Note that we're loading the pointer only to check whether it is null, so it's okay | 554 | | // to use `epoch::unprotected()` in this situation. | 555 | 7 | while height >= 4 | 556 | 0 | && self.head[height - 2] | 557 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) | 558 | 0 | .is_null() | 559 | 0 | { | 560 | 0 | height -= 1; | 561 | 0 | } | 562 | | } | 563 | | | 564 | | // Track the max height to speed up lookups | 565 | 7 | let mut max_height = self.hot_data.max_height.load(Ordering::Relaxed); | 566 | 7 | while height > max_height { | 567 | 1 | match self.hot_data.max_height.compare_exchange_weak( | 568 | 1 | max_height, | 569 | 1 | height, | 570 | 1 | Ordering::Relaxed, | 571 | 1 | Ordering::Relaxed, | 572 | 1 | ) { | 573 | 1 | Ok(_) => break, | 574 | 0 | Err(h) => max_height = h, | 575 | | } | 576 | | } | 577 | 7 | height | 578 | 7 | } |
|
579 | | |
580 | | /// If we encounter a deleted node while searching, help with the deletion |
581 | | /// by attempting to unlink the node from the list. |
582 | | /// |
583 | | /// If the unlinking is successful then this function returns the next node |
584 | | /// with which the search should continue on the current level. |
585 | | #[cold] |
586 | 16 | unsafe fn help_unlink<'a>( |
587 | 16 | &'a self, |
588 | 16 | pred: &'a Atomic<Node<K, V>>, |
589 | 16 | curr: &'a Node<K, V>, |
590 | 16 | succ: Shared<'a, Node<K, V>>, |
591 | 16 | guard: &'a Guard, |
592 | 16 | ) -> Option<Shared<'a, Node<K, V>>> { |
593 | 16 | // If `succ` is marked, that means `curr` is removed. Let's try |
594 | 16 | // unlinking it from the skip list at this level. |
595 | 16 | match pred.compare_exchange( |
596 | 16 | Shared::from(curr as *const _), |
597 | 16 | succ.with_tag(0), |
598 | 16 | Ordering::Release, |
599 | 16 | Ordering::Relaxed, |
600 | 16 | guard, |
601 | 16 | ) { |
602 | 16 | Ok(_) => { |
603 | 16 | curr.decrement(guard); |
604 | 16 | Some(succ.with_tag(0)) |
605 | | } |
606 | 0 | Err(_) => None, |
607 | | } |
608 | 16 | } Unexecuted instantiation: <crossbeam_skiplist::base::SkipList<i32, i32>>::help_unlink Unexecuted instantiation: <crossbeam_skiplist::base::SkipList<i32, ()>>::help_unlink Unexecuted instantiation: <crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::help_unlink <crossbeam_skiplist::base::SkipList<i32, i32>>::help_unlink Line | Count | Source | 586 | 16 | unsafe fn help_unlink<'a>( | 587 | 16 | &'a self, | 588 | 16 | pred: &'a Atomic<Node<K, V>>, | 589 | 16 | curr: &'a Node<K, V>, | 590 | 16 | succ: Shared<'a, Node<K, V>>, | 591 | 16 | guard: &'a Guard, | 592 | 16 | ) -> Option<Shared<'a, Node<K, V>>> { | 593 | 16 | // If `succ` is marked, that means `curr` is removed. Let's try | 594 | 16 | // unlinking it from the skip list at this level. | 595 | 16 | match pred.compare_exchange( | 596 | 16 | Shared::from(curr as *const _), | 597 | 16 | succ.with_tag(0), | 598 | 16 | Ordering::Release, | 599 | 16 | Ordering::Relaxed, | 600 | 16 | guard, | 601 | 16 | ) { | 602 | 16 | Ok(_) => { | 603 | 16 | curr.decrement(guard); | 604 | 16 | Some(succ.with_tag(0)) | 605 | | } | 606 | 0 | Err(_) => None, | 607 | | } | 608 | 16 | } |
|
609 | | |
610 | | /// Returns the successor of a node. |
611 | | /// |
612 | | /// This will keep searching until a non-deleted node is found. If a deleted |
613 | | /// node is reached then a search is performed using the given key. |
614 | 353 | fn next_node<'a>( |
615 | 353 | &'a self, |
616 | 353 | pred: &'a Tower<K, V>, |
617 | 353 | lower_bound: Bound<&K>, |
618 | 353 | guard: &'a Guard, |
619 | 353 | ) -> Option<&'a Node<K, V>> { |
620 | 353 | unsafe { |
621 | 353 | // Load the level 0 successor of the current node. |
622 | 353 | let mut curr = pred[0].load_consume(guard); |
623 | 353 | |
624 | 353 | // If `curr` is marked, that means `pred` is removed and we have to use |
625 | 353 | // a key search. |
626 | 353 | if curr.tag() == 1 { |
627 | 8 | return self.search_bound(lower_bound, false, guard); |
628 | 345 | } |
629 | | |
630 | 345 | while let Some(c301 ) = curr.as_ref() { |
631 | 301 | let succ = c.tower[0].load_consume(guard); |
632 | 301 | |
633 | 301 | if succ.tag() == 1 { |
634 | 0 | if let Some(c) = self.help_unlink(&pred[0], c, succ, guard) { |
635 | | // On success, continue searching through the current level. |
636 | 0 | curr = c; |
637 | | continue; |
638 | | } else { |
639 | | // On failure, we cannot do anything reasonable to continue |
640 | | // searching from the current position. Restart the search. |
641 | 0 | return self.search_bound(lower_bound, false, guard); |
642 | | } |
643 | 301 | } |
644 | 301 | |
645 | 301 | return Some(c); |
646 | | } |
647 | | |
648 | 44 | None |
649 | | } |
650 | 353 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::next_node Line | Count | Source | 614 | 135 | fn next_node<'a>( | 615 | 135 | &'a self, | 616 | 135 | pred: &'a Tower<K, V>, | 617 | 135 | lower_bound: Bound<&K>, | 618 | 135 | guard: &'a Guard, | 619 | 135 | ) -> Option<&'a Node<K, V>> { | 620 | 135 | unsafe { | 621 | 135 | // Load the level 0 successor of the current node. | 622 | 135 | let mut curr = pred[0].load_consume(guard); | 623 | 135 | | 624 | 135 | // If `curr` is marked, that means `pred` is removed and we have to use | 625 | 135 | // a key search. | 626 | 135 | if curr.tag() == 1 { | 627 | 1 | return self.search_bound(lower_bound, false, guard); | 628 | 134 | } | 629 | | | 630 | 134 | while let Some(c123 ) = curr.as_ref() { | 631 | 123 | let succ = c.tower[0].load_consume(guard); | 632 | 123 | | 633 | 123 | if succ.tag() == 1 { | 634 | 0 | if let Some(c) = self.help_unlink(&pred[0], c, succ, guard) { | 635 | | // On success, continue searching through the current level. | 636 | 0 | curr = c; | 637 | | continue; | 638 | | } else { | 639 | | // On failure, we cannot do anything reasonable to continue | 640 | | // searching from the current position. Restart the search. | 641 | 0 | return self.search_bound(lower_bound, false, guard); | 642 | | } | 643 | 123 | } | 644 | 123 | | 645 | 123 | return Some(c); | 646 | | } | 647 | | | 648 | 11 | None | 649 | | } | 650 | 135 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::next_node Line | Count | Source | 614 | 218 | fn next_node<'a>( | 615 | 218 | &'a self, | 616 | 218 | pred: &'a Tower<K, V>, | 617 | 218 | lower_bound: Bound<&K>, | 618 | 218 | guard: &'a Guard, | 619 | 218 | ) -> Option<&'a Node<K, V>> { | 620 | 218 | unsafe { | 621 | 218 | // Load the level 0 successor of the current node. | 622 | 218 | let mut curr = pred[0].load_consume(guard); | 623 | 218 | | 624 | 218 | // If `curr` is marked, that means `pred` is removed and we have to use | 625 | 218 | // a key search. | 626 | 218 | if curr.tag() == 1 { | 627 | 7 | return self.search_bound(lower_bound, false, guard); | 628 | 211 | } | 629 | | | 630 | 211 | while let Some(c178 ) = curr.as_ref() { | 631 | 178 | let succ = c.tower[0].load_consume(guard); | 632 | 178 | | 633 | 178 | if succ.tag() == 1 { | 634 | 0 | if let Some(c) = self.help_unlink(&pred[0], c, succ, guard) { | 635 | | // On success, continue searching through the current level. | 636 | 0 | curr = c; | 637 | | continue; | 638 | | } else { | 639 | | // On failure, we cannot do anything reasonable to continue | 640 | | // searching from the current position. Restart the search. | 641 | 0 | return self.search_bound(lower_bound, false, guard); | 642 | | } | 643 | 178 | } | 644 | 178 | | 645 | 178 | return Some(c); | 646 | | } | 647 | | | 648 | 33 | None | 649 | | } | 650 | 218 | } |
|
651 | | |
652 | | /// Searches for first/last node that is greater/less/equal to a key in the skip list. |
653 | | /// |
654 | | /// If `upper_bound == true`: the last node less than (or equal to) the key. |
655 | | /// |
656 | | /// If `upper_bound == false`: the first node greater than (or equal to) the key. |
657 | | /// |
658 | | /// This is unsafe because the returned nodes are bound to the lifetime of |
659 | | /// the `SkipList`, not the `Guard`. |
660 | 183 | fn search_bound<'a, Q>( |
661 | 183 | &'a self, |
662 | 183 | bound: Bound<&Q>, |
663 | 183 | upper_bound: bool, |
664 | 183 | guard: &'a Guard, |
665 | 183 | ) -> Option<&'a Node<K, V>> |
666 | 183 | where |
667 | 183 | K: Borrow<Q>, |
668 | 183 | Q: Ord + ?Sized, |
669 | 183 | { |
670 | | unsafe { |
671 | 183 | 'search: loop { |
672 | 183 | // The current level we're at. |
673 | 183 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); |
674 | | |
675 | | // Fast loop to skip empty tower levels. |
676 | 195 | while level >= 1 |
677 | 193 | && self.head[level - 1] |
678 | 193 | .load(Ordering::Relaxed, guard) |
679 | 193 | .is_null() |
680 | 12 | { |
681 | 12 | level -= 1; |
682 | 12 | } |
683 | | |
684 | | // The current best node |
685 | 184 | let mut result = None; |
686 | 184 | |
687 | 184 | // The predecessor node |
688 | 184 | let mut pred = &*self.head; |
689 | | |
690 | 571 | while level >= 1 { |
691 | 388 | level -= 1; |
692 | 388 | |
693 | 388 | // Two adjacent nodes at the current level. |
694 | 388 | let mut curr = pred[level].load_consume(guard); |
695 | 388 | |
696 | 388 | // If `curr` is marked, that means `pred` is removed and we have to restart the |
697 | 388 | // search. |
698 | 388 | if curr.tag() == 1 { |
699 | | continue 'search; |
700 | 388 | } |
701 | | |
702 | | // Iterate through the current level until we reach a node with a key greater |
703 | | // than or equal to `key`. |
704 | 785 | while let Some(c708 ) = curr.as_ref() { |
705 | 708 | let succ = c.tower[level].load_consume(guard); |
706 | 708 | |
707 | 708 | if succ.tag() == 1 { |
708 | 9 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { |
709 | | // On success, continue searching through the current level. |
710 | 9 | curr = c; |
711 | | continue; |
712 | | } else { |
713 | | // On failure, we cannot do anything reasonable to continue |
714 | | // searching from the current position. Restart the search. |
715 | | continue 'search; |
716 | | } |
717 | 699 | } |
718 | 699 | |
719 | 699 | // If `curr` contains a key that is greater than (or equal) to `key`, we're |
720 | 699 | // done with this level. |
721 | 699 | // |
722 | 699 | // The condition determines whether we should stop the search. For the upper |
723 | 699 | // bound, we return the last node before the condition became true. For the |
724 | 699 | // lower bound, we return the first node after the condition became true. |
725 | 699 | if upper_bound { |
726 | 212 | if !below_upper_bound(&bound, c.key.borrow()) { |
727 | 80 | break; |
728 | 132 | } |
729 | 132 | result = Some(c); |
730 | 487 | } else if above_lower_bound(&bound, c.key.borrow()) { |
731 | 230 | result = Some(c); |
732 | 230 | break; |
733 | 256 | } |
734 | | |
735 | | // Move one step forward. |
736 | 388 | pred = &c.tower; |
737 | 388 | curr = succ; |
738 | | } |
739 | | } |
740 | | |
741 | 183 | return result; |
742 | 183 | } |
743 | 183 | } |
744 | 183 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::search_bound::<i32> Line | Count | Source | 660 | 45 | fn search_bound<'a, Q>( | 661 | 45 | &'a self, | 662 | 45 | bound: Bound<&Q>, | 663 | 45 | upper_bound: bool, | 664 | 45 | guard: &'a Guard, | 665 | 45 | ) -> Option<&'a Node<K, V>> | 666 | 45 | where | 667 | 45 | K: Borrow<Q>, | 668 | 45 | Q: Ord + ?Sized, | 669 | 45 | { | 670 | | unsafe { | 671 | 45 | 'search: loop { | 672 | 45 | // The current level we're at. | 673 | 45 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 674 | | | 675 | | // Fast loop to skip empty tower levels. | 676 | 46 | while level >= 1 | 677 | 46 | && self.head[level - 1] | 678 | 46 | .load(Ordering::Relaxed, guard) | 679 | 46 | .is_null() | 680 | 1 | { | 681 | 1 | level -= 1; | 682 | 1 | } | 683 | | | 684 | | // The current best node | 685 | 45 | let mut result = None; | 686 | 45 | | 687 | 45 | // The predecessor node | 688 | 45 | let mut pred = &*self.head; | 689 | | | 690 | 178 | while level >= 1 { | 691 | 133 | level -= 1; | 692 | 133 | | 693 | 133 | // Two adjacent nodes at the current level. | 694 | 133 | let mut curr = pred[level].load_consume(guard); | 695 | 133 | | 696 | 133 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 697 | 133 | // search. | 698 | 133 | if curr.tag() == 1 { | 699 | | continue 'search; | 700 | 133 | } | 701 | | | 702 | | // Iterate through the current level until we reach a node with a key greater | 703 | | // than or equal to `key`. | 704 | 235 | while let Some(c208 ) = curr.as_ref() { | 705 | 208 | let succ = c.tower[level].load_consume(guard); | 706 | 208 | | 707 | 208 | if succ.tag() == 1 { | 708 | 0 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 709 | | // On success, continue searching through the current level. | 710 | 0 | curr = c; | 711 | | continue; | 712 | | } else { | 713 | | // On failure, we cannot do anything reasonable to continue | 714 | | // searching from the current position. Restart the search. | 715 | | continue 'search; | 716 | | } | 717 | 208 | } | 718 | 208 | | 719 | 208 | // If `curr` contains a key that is greater than (or equal) to `key`, we're | 720 | 208 | // done with this level. | 721 | 208 | // | 722 | 208 | // The condition determines whether we should stop the search. For the upper | 723 | 208 | // bound, we return the last node before the condition became true. For the | 724 | 208 | // lower bound, we return the first node after the condition became true. | 725 | 208 | if upper_bound { | 726 | 54 | if !below_upper_bound(&bound, c.key.borrow()) { | 727 | 26 | break; | 728 | 28 | } | 729 | 28 | result = Some(c); | 730 | 154 | } else if above_lower_bound(&bound, c.key.borrow()) { | 731 | 80 | result = Some(c); | 732 | 80 | break; | 733 | 74 | } | 734 | | | 735 | | // Move one step forward. | 736 | 102 | pred = &c.tower; | 737 | 102 | curr = succ; | 738 | | } | 739 | | } | 740 | | | 741 | 45 | return result; | 742 | 45 | } | 743 | 45 | } | 744 | 45 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::SkipList<i32, ()>>::search_bound::<i32> Unexecuted instantiation: <crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::search_bound::<base::drops::Key> <crossbeam_skiplist::base::SkipList<i32, i32>>::search_bound::<i32> Line | Count | Source | 660 | 138 | fn search_bound<'a, Q>( | 661 | 138 | &'a self, | 662 | 138 | bound: Bound<&Q>, | 663 | 138 | upper_bound: bool, | 664 | 138 | guard: &'a Guard, | 665 | 138 | ) -> Option<&'a Node<K, V>> | 666 | 138 | where | 667 | 138 | K: Borrow<Q>, | 668 | 138 | Q: Ord + ?Sized, | 669 | 138 | { | 670 | | unsafe { | 671 | 138 | 'search: loop { | 672 | 138 | // The current level we're at. | 673 | 138 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 674 | | | 675 | | // Fast loop to skip empty tower levels. | 676 | 149 | while level >= 1 | 677 | 147 | && self.head[level - 1] | 678 | 147 | .load(Ordering::Relaxed, guard) | 679 | 147 | .is_null() | 680 | 11 | { | 681 | 11 | level -= 1; | 682 | 11 | } | 683 | | | 684 | | // The current best node | 685 | 139 | let mut result = None; | 686 | 139 | | 687 | 139 | // The predecessor node | 688 | 139 | let mut pred = &*self.head; | 689 | | | 690 | 393 | while level >= 1 { | 691 | 255 | level -= 1; | 692 | 255 | | 693 | 255 | // Two adjacent nodes at the current level. | 694 | 255 | let mut curr = pred[level].load_consume(guard); | 695 | 255 | | 696 | 255 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 697 | 255 | // search. | 698 | 255 | if curr.tag() == 1 { | 699 | | continue 'search; | 700 | 255 | } | 701 | | | 702 | | // Iterate through the current level until we reach a node with a key greater | 703 | | // than or equal to `key`. | 704 | 550 | while let Some(c500 ) = curr.as_ref() { | 705 | 500 | let succ = c.tower[level].load_consume(guard); | 706 | 500 | | 707 | 500 | if succ.tag() == 1 { | 708 | 9 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 709 | | // On success, continue searching through the current level. | 710 | 9 | curr = c; | 711 | | continue; | 712 | | } else { | 713 | | // On failure, we cannot do anything reasonable to continue | 714 | | // searching from the current position. Restart the search. | 715 | | continue 'search; | 716 | | } | 717 | 491 | } | 718 | 491 | | 719 | 491 | // If `curr` contains a key that is greater than (or equal) to `key`, we're | 720 | 491 | // done with this level. | 721 | 491 | // | 722 | 491 | // The condition determines whether we should stop the search. For the upper | 723 | 491 | // bound, we return the last node before the condition became true. For the | 724 | 491 | // lower bound, we return the first node after the condition became true. | 725 | 491 | if upper_bound { | 726 | 158 | if !below_upper_bound(&bound, c.key.borrow()) { | 727 | 54 | break; | 728 | 104 | } | 729 | 104 | result = Some(c); | 730 | 333 | } else if above_lower_bound(&bound, c.key.borrow()) { | 731 | 150 | result = Some(c); | 732 | 150 | break; | 733 | 182 | } | 734 | | | 735 | | // Move one step forward. | 736 | 286 | pred = &c.tower; | 737 | 286 | curr = succ; | 738 | | } | 739 | | } | 740 | | | 741 | 138 | return result; | 742 | 138 | } | 743 | 138 | } | 744 | 138 | } |
|
745 | | |
746 | | /// Searches for a key in the skip list and returns a list of all adjacent nodes. |
747 | 196 | fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> |
748 | 196 | where |
749 | 196 | K: Borrow<Q>, |
750 | 196 | Q: Ord + ?Sized, |
751 | 196 | { |
752 | | unsafe { |
753 | 196 | 'search: loop { |
754 | 196 | // The result of this search. |
755 | 196 | let mut result = Position { |
756 | 196 | found: None, |
757 | 196 | left: [&*self.head; MAX_HEIGHT], |
758 | 196 | right: [Shared::null(); MAX_HEIGHT], |
759 | 196 | }; |
760 | 196 | |
761 | 196 | // The current level we're at. |
762 | 196 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); |
763 | | |
764 | | // Fast loop to skip empty tower levels. |
765 | 233 | while level >= 1 |
766 | 206 | && self.head[level - 1] |
767 | 206 | .load(Ordering::Relaxed, guard) |
768 | 206 | .is_null() |
769 | 37 | { |
770 | 37 | level -= 1; |
771 | 37 | } |
772 | | |
773 | | // The predecessor node |
774 | 198 | let mut pred = &*self.head; |
775 | | |
776 | 440 | while level >= 1 { |
777 | 241 | level -= 1; |
778 | 241 | |
779 | 241 | // Two adjacent nodes at the current level. |
780 | 241 | let mut curr = pred[level].load_consume(guard); |
781 | 241 | |
782 | 241 | // If `curr` is marked, that means `pred` is removed and we have to restart the |
783 | 241 | // search. |
784 | 241 | if curr.tag() == 1 { |
785 | | continue 'search; |
786 | 241 | } |
787 | | |
788 | | // Iterate through the current level until we reach a node with a key greater |
789 | | // than or equal to `key`. |
790 | 557 | while let Some(c477 ) = curr.as_ref() { |
791 | 477 | let succ = c.tower[level].load_consume(guard); |
792 | 477 | |
793 | 477 | if succ.tag() == 1 { |
794 | 7 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { |
795 | | // On success, continue searching through the current level. |
796 | 7 | curr = c; |
797 | | continue; |
798 | | } else { |
799 | | // On failure, we cannot do anything reasonable to continue |
800 | | // searching from the current position. Restart the search. |
801 | | continue 'search; |
802 | | } |
803 | 470 | } |
804 | 470 | |
805 | 470 | // If `curr` contains a key that is greater than or equal to `key`, we're |
806 | 470 | // done with this level. |
807 | 470 | match c.key.borrow().cmp(key) { |
808 | 470 | cmp::Ordering::Greater => break116 , |
809 | | cmp::Ordering::Equal => { |
810 | 46 | result.found = Some(c); |
811 | 46 | break; |
812 | | } |
813 | 309 | cmp::Ordering::Less => {} |
814 | 309 | } |
815 | 309 | |
816 | 309 | // Move one step forward. |
817 | 309 | pred = &c.tower; |
818 | 309 | curr = succ; |
819 | | } |
820 | | |
821 | | // Store the position at the current level into the result. |
822 | 242 | result.left[level] = pred; |
823 | 242 | result.right[level] = curr; |
824 | | } |
825 | | |
826 | 199 | return result; |
827 | 199 | } |
828 | 199 | } |
829 | 199 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::search_position::<i32> Line | Count | Source | 747 | 24 | fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> | 748 | 24 | where | 749 | 24 | K: Borrow<Q>, | 750 | 24 | Q: Ord + ?Sized, | 751 | 24 | { | 752 | | unsafe { | 753 | 24 | 'search: loop { | 754 | 24 | // The result of this search. | 755 | 24 | let mut result = Position { | 756 | 24 | found: None, | 757 | 24 | left: [&*self.head; MAX_HEIGHT], | 758 | 24 | right: [Shared::null(); MAX_HEIGHT], | 759 | 24 | }; | 760 | 24 | | 761 | 24 | // The current level we're at. | 762 | 24 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 763 | | | 764 | | // Fast loop to skip empty tower levels. | 765 | 28 | while level >= 1 | 766 | 25 | && self.head[level - 1] | 767 | 25 | .load(Ordering::Relaxed, guard) | 768 | 25 | .is_null() | 769 | 4 | { | 770 | 4 | level -= 1; | 771 | 4 | } | 772 | | | 773 | | // The predecessor node | 774 | 24 | let mut pred = &*self.head; | 775 | | | 776 | 53 | while level >= 1 { | 777 | 29 | level -= 1; | 778 | 29 | | 779 | 29 | // Two adjacent nodes at the current level. | 780 | 29 | let mut curr = pred[level].load_consume(guard); | 781 | 29 | | 782 | 29 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 783 | 29 | // search. | 784 | 29 | if curr.tag() == 1 { | 785 | | continue 'search; | 786 | 29 | } | 787 | | | 788 | | // Iterate through the current level until we reach a node with a key greater | 789 | | // than or equal to `key`. | 790 | 74 | while let Some(c56 ) = curr.as_ref() { | 791 | 56 | let succ = c.tower[level].load_consume(guard); | 792 | 56 | | 793 | 56 | if succ.tag() == 1 { | 794 | 0 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 795 | | // On success, continue searching through the current level. | 796 | 0 | curr = c; | 797 | | continue; | 798 | | } else { | 799 | | // On failure, we cannot do anything reasonable to continue | 800 | | // searching from the current position. Restart the search. | 801 | | continue 'search; | 802 | | } | 803 | 56 | } | 804 | 56 | | 805 | 56 | // If `curr` contains a key that is greater than or equal to `key`, we're | 806 | 56 | // done with this level. | 807 | 56 | match c.key.borrow().cmp(key) { | 808 | 56 | cmp::Ordering::Greater => break6 , | 809 | | cmp::Ordering::Equal => { | 810 | 5 | result.found = Some(c); | 811 | 5 | break; | 812 | | } | 813 | 45 | cmp::Ordering::Less => {} | 814 | 45 | } | 815 | 45 | | 816 | 45 | // Move one step forward. | 817 | 45 | pred = &c.tower; | 818 | 45 | curr = succ; | 819 | | } | 820 | | | 821 | | // Store the position at the current level into the result. | 822 | 29 | result.left[level] = pred; | 823 | 29 | result.right[level] = curr; | 824 | | } | 825 | | | 826 | 24 | return result; | 827 | 24 | } | 828 | 24 | } | 829 | 24 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::search_position::<i32> Line | Count | Source | 747 | 3 | fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> | 748 | 3 | where | 749 | 3 | K: Borrow<Q>, | 750 | 3 | Q: Ord + ?Sized, | 751 | 3 | { | 752 | | unsafe { | 753 | 3 | 'search: loop { | 754 | 3 | // The result of this search. | 755 | 3 | let mut result = Position { | 756 | 3 | found: None, | 757 | 3 | left: [&*self.head; MAX_HEIGHT], | 758 | 3 | right: [Shared::null(); MAX_HEIGHT], | 759 | 3 | }; | 760 | 3 | | 761 | 3 | // The current level we're at. | 762 | 3 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 763 | | | 764 | | // Fast loop to skip empty tower levels. | 765 | 4 | while level >= 1 | 766 | 3 | && self.head[level - 1] | 767 | 3 | .load(Ordering::Relaxed, guard) | 768 | 3 | .is_null() | 769 | 1 | { | 770 | 1 | level -= 1; | 771 | 1 | } | 772 | | | 773 | | // The predecessor node | 774 | 3 | let mut pred = &*self.head; | 775 | | | 776 | 5 | while level >= 1 { | 777 | 2 | level -= 1; | 778 | 2 | | 779 | 2 | // Two adjacent nodes at the current level. | 780 | 2 | let mut curr = pred[level].load_consume(guard); | 781 | 2 | | 782 | 2 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 783 | 2 | // search. | 784 | 2 | if curr.tag() == 1 { | 785 | | continue 'search; | 786 | 2 | } | 787 | | | 788 | | // Iterate through the current level until we reach a node with a key greater | 789 | | // than or equal to `key`. | 790 | 5 | while let Some(c3 ) = curr.as_ref() { | 791 | 3 | let succ = c.tower[level].load_consume(guard); | 792 | 3 | | 793 | 3 | if succ.tag() == 1 { | 794 | 0 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 795 | | // On success, continue searching through the current level. | 796 | 0 | curr = c; | 797 | | continue; | 798 | | } else { | 799 | | // On failure, we cannot do anything reasonable to continue | 800 | | // searching from the current position. Restart the search. | 801 | | continue 'search; | 802 | | } | 803 | 3 | } | 804 | 3 | | 805 | 3 | // If `curr` contains a key that is greater than or equal to `key`, we're | 806 | 3 | // done with this level. | 807 | 3 | match c.key.borrow().cmp(key) { | 808 | 3 | cmp::Ordering::Greater => break0 , | 809 | | cmp::Ordering::Equal => { | 810 | 0 | result.found = Some(c); | 811 | 0 | break; | 812 | | } | 813 | 3 | cmp::Ordering::Less => {} | 814 | 3 | } | 815 | 3 | | 816 | 3 | // Move one step forward. | 817 | 3 | pred = &c.tower; | 818 | 3 | curr = succ; | 819 | | } | 820 | | | 821 | | // Store the position at the current level into the result. | 822 | 2 | result.left[level] = pred; | 823 | 2 | result.right[level] = curr; | 824 | | } | 825 | | | 826 | 3 | return result; | 827 | 3 | } | 828 | 3 | } | 829 | 3 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::search_position::<base::drops::Key> Line | Count | Source | 747 | 8 | fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> | 748 | 8 | where | 749 | 8 | K: Borrow<Q>, | 750 | 8 | Q: Ord + ?Sized, | 751 | 8 | { | 752 | | unsafe { | 753 | 8 | 'search: loop { | 754 | 8 | // The result of this search. | 755 | 8 | let mut result = Position { | 756 | 8 | found: None, | 757 | 8 | left: [&*self.head; MAX_HEIGHT], | 758 | 8 | right: [Shared::null(); MAX_HEIGHT], | 759 | 8 | }; | 760 | 8 | | 761 | 8 | // The current level we're at. | 762 | 8 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 763 | | | 764 | | // Fast loop to skip empty tower levels. | 765 | 9 | while level >= 1 | 766 | 8 | && self.head[level - 1] | 767 | 8 | .load(Ordering::Relaxed, guard) | 768 | 8 | .is_null() | 769 | 1 | { | 770 | 1 | level -= 1; | 771 | 1 | } | 772 | | | 773 | | // The predecessor node | 774 | 8 | let mut pred = &*self.head; | 775 | | | 776 | 16 | while level >= 1 { | 777 | 8 | level -= 1; | 778 | 8 | | 779 | 8 | // Two adjacent nodes at the current level. | 780 | 8 | let mut curr = pred[level].load_consume(guard); | 781 | 8 | | 782 | 8 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 783 | 8 | // search. | 784 | 8 | if curr.tag() == 1 { | 785 | | continue 'search; | 786 | 8 | } | 787 | | | 788 | | // Iterate through the current level until we reach a node with a key greater | 789 | | // than or equal to `key`. | 790 | 21 | while let Some(c19 ) = curr.as_ref() { | 791 | 19 | let succ = c.tower[level].load_consume(guard); | 792 | 19 | | 793 | 19 | if succ.tag() == 1 { | 794 | 0 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 795 | | // On success, continue searching through the current level. | 796 | 0 | curr = c; | 797 | | continue; | 798 | | } else { | 799 | | // On failure, we cannot do anything reasonable to continue | 800 | | // searching from the current position. Restart the search. | 801 | | continue 'search; | 802 | | } | 803 | 19 | } | 804 | 19 | | 805 | 19 | // If `curr` contains a key that is greater than or equal to `key`, we're | 806 | 19 | // done with this level. | 807 | 19 | match c.key.borrow().cmp(key) { | 808 | 19 | cmp::Ordering::Greater => break5 , | 809 | | cmp::Ordering::Equal => { | 810 | 1 | result.found = Some(c); | 811 | 1 | break; | 812 | | } | 813 | 13 | cmp::Ordering::Less => {} | 814 | 13 | } | 815 | 13 | | 816 | 13 | // Move one step forward. | 817 | 13 | pred = &c.tower; | 818 | 13 | curr = succ; | 819 | | } | 820 | | | 821 | | // Store the position at the current level into the result. | 822 | 8 | result.left[level] = pred; | 823 | 8 | result.right[level] = curr; | 824 | | } | 825 | | | 826 | 8 | return result; | 827 | 8 | } | 828 | 8 | } | 829 | 8 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::search_position::<i32> Line | Count | Source | 747 | 161 | fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> | 748 | 161 | where | 749 | 161 | K: Borrow<Q>, | 750 | 161 | Q: Ord + ?Sized, | 751 | 161 | { | 752 | | unsafe { | 753 | 161 | 'search: loop { | 754 | 161 | // The result of this search. | 755 | 161 | let mut result = Position { | 756 | 161 | found: None, | 757 | 161 | left: [&*self.head; MAX_HEIGHT], | 758 | 161 | right: [Shared::null(); MAX_HEIGHT], | 759 | 161 | }; | 760 | 161 | | 761 | 161 | // The current level we're at. | 762 | 161 | let mut level = self.hot_data.max_height.load(Ordering::Relaxed); | 763 | | | 764 | | // Fast loop to skip empty tower levels. | 765 | 192 | while level >= 1 | 766 | 170 | && self.head[level - 1] | 767 | 170 | .load(Ordering::Relaxed, guard) | 768 | 170 | .is_null() | 769 | 31 | { | 770 | 31 | level -= 1; | 771 | 31 | } | 772 | | | 773 | | // The predecessor node | 774 | 163 | let mut pred = &*self.head; | 775 | | | 776 | 366 | while level >= 1 { | 777 | 202 | level -= 1; | 778 | 202 | | 779 | 202 | // Two adjacent nodes at the current level. | 780 | 202 | let mut curr = pred[level].load_consume(guard); | 781 | 202 | | 782 | 202 | // If `curr` is marked, that means `pred` is removed and we have to restart the | 783 | 202 | // search. | 784 | 202 | if curr.tag() == 1 { | 785 | | continue 'search; | 786 | 202 | } | 787 | | | 788 | | // Iterate through the current level until we reach a node with a key greater | 789 | | // than or equal to `key`. | 790 | 457 | while let Some(c399 ) = curr.as_ref() { | 791 | 399 | let succ = c.tower[level].load_consume(guard); | 792 | 399 | | 793 | 399 | if succ.tag() == 1 { | 794 | 7 | if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) { | 795 | | // On success, continue searching through the current level. | 796 | 7 | curr = c; | 797 | | continue; | 798 | | } else { | 799 | | // On failure, we cannot do anything reasonable to continue | 800 | | // searching from the current position. Restart the search. | 801 | | continue 'search; | 802 | | } | 803 | 392 | } | 804 | 392 | | 805 | 392 | // If `curr` contains a key that is greater than or equal to `key`, we're | 806 | 392 | // done with this level. | 807 | 392 | match c.key.borrow().cmp(key) { | 808 | 392 | cmp::Ordering::Greater => break105 , | 809 | | cmp::Ordering::Equal => { | 810 | 40 | result.found = Some(c); | 811 | 40 | break; | 812 | | } | 813 | 248 | cmp::Ordering::Less => {} | 814 | 248 | } | 815 | 248 | | 816 | 248 | // Move one step forward. | 817 | 248 | pred = &c.tower; | 818 | 248 | curr = succ; | 819 | | } | 820 | | | 821 | | // Store the position at the current level into the result. | 822 | 203 | result.left[level] = pred; | 823 | 203 | result.right[level] = curr; | 824 | | } | 825 | | | 826 | 164 | return result; | 827 | 164 | } | 828 | 164 | } | 829 | 164 | } |
|
830 | | |
831 | | /// Inserts an entry with the specified `key` and `value`. |
832 | | /// |
833 | | /// If `replace` is `true`, then any existing entry with this key will first be removed. |
834 | 152 | fn insert_internal( |
835 | 152 | &self, |
836 | 152 | key: K, |
837 | 152 | value: V, |
838 | 152 | replace: bool, |
839 | 152 | guard: &Guard, |
840 | 152 | ) -> RefEntry<'_, K, V> { |
841 | 152 | self.check_guard(guard); |
842 | 152 | |
843 | 152 | unsafe { |
844 | 152 | // Rebind the guard to the lifetime of self. This is a bit of a |
845 | 152 | // hack but it allows us to return references that are not bound to |
846 | 152 | // the lifetime of the guard. |
847 | 152 | let guard = &*(guard as *const _); |
848 | | |
849 | | let mut search; |
850 | 156 | loop { |
851 | 156 | // First try searching for the key. |
852 | 156 | // Note that the `Ord` implementation for `K` may panic during the search. |
853 | 156 | search = self.search_position(&key, guard); |
854 | | |
855 | 156 | let r9 = match search.found { |
856 | 9 | Some(r) => r, |
857 | 147 | None => break, |
858 | | }; |
859 | | |
860 | 9 | if replace { |
861 | | // If a node with the key was found and we should replace it, mark its tower |
862 | | // and then repeat the search. |
863 | 8 | if r.mark_tower() { |
864 | 4 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
865 | 4 | }0 |
866 | | } else { |
867 | | // If a node with the key was found and we're not going to replace it, let's |
868 | | // try returning it as an entry. |
869 | 1 | if let Some(e) = RefEntry::try_acquire(self, r) { |
870 | 1 | return e; |
871 | 0 | } |
872 | 0 |
|
873 | 0 | // If we couldn't increment the reference count, that means someone has just |
874 | 0 | // now removed the node. |
875 | 0 | break; |
876 | | } |
877 | | } |
878 | | |
879 | | // Create a new node. |
880 | 147 | let height = self.random_height(); |
881 | 147 | let (node, n) = { |
882 | 147 | // The reference count is initially two to account for: |
883 | 147 | // 1. The entry that will be returned. |
884 | 147 | // 2. The link at the level 0 of the tower. |
885 | 147 | let n = Node::<K, V>::alloc(height, 2); |
886 | 147 | |
887 | 147 | // Write the key and the value into the node. |
888 | 147 | ptr::write(&mut (*n).key, key); |
889 | 147 | ptr::write(&mut (*n).value, value); |
890 | 147 | |
891 | 147 | (Shared::<Node<K, V>>::from(n as *const _), &*n) |
892 | 147 | }; |
893 | 147 | |
894 | 147 | // Optimistically increment `len`. |
895 | 147 | self.hot_data.len.fetch_add(1, Ordering::Relaxed); |
896 | | |
897 | 147 | loop { |
898 | 147 | // Set the lowest successor of `n` to `search.right[0]`. |
899 | 147 | n.tower[0].store(search.right[0], Ordering::Relaxed); |
900 | 147 | |
901 | 147 | // Try installing the new node into the skip list (at level 0). |
902 | 147 | // TODO(Amanieu): can we use release ordering here? |
903 | 147 | if search.left[0][0] |
904 | 147 | .compare_exchange( |
905 | 147 | search.right[0], |
906 | 147 | node, |
907 | 147 | Ordering::SeqCst, |
908 | 147 | Ordering::SeqCst, |
909 | 147 | guard, |
910 | 147 | ) |
911 | 147 | .is_ok() |
912 | | { |
913 | 152 | break; |
914 | 18.4E | } |
915 | 18.4E | |
916 | 18.4E | // We failed. Let's search for the key and try again. |
917 | 18.4E | { |
918 | 18.4E | // Create a guard that destroys the new node in case search panics. |
919 | 18.4E | let sg = scopeguard::guard((), |_| { |
920 | | Node::finalize(node.as_raw()); |
921 | 18.4E | }); |
922 | 18.4E | search = self.search_position(&n.key, guard); |
923 | 18.4E | mem::forget(sg); |
924 | 18.4E | } |
925 | | |
926 | 18.4E | if let Some(r) = search.found { |
927 | 18.4E | if replace { |
928 | | // If a node with the key was found and we should replace it, mark its |
929 | | // tower and then repeat the search. |
930 | 0 | if r.mark_tower() { |
931 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
932 | 0 | } |
933 | | } else { |
934 | | // If a node with the key was found and we're not going to replace it, |
935 | | // let's try returning it as an entry. |
936 | 18.4E | if let Some(e) = RefEntry::try_acquire(self, r) { |
937 | | // Destroy the new node. |
938 | 18.4E | Node::finalize(node.as_raw()); |
939 | 18.4E | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
940 | 18.4E | |
941 | 18.4E | return e; |
942 | 0 | } |
943 | | |
944 | | // If we couldn't increment the reference count, that means someone has |
945 | | // just now removed the node. |
946 | | } |
947 | 0 | } |
948 | | } |
949 | | |
950 | | // The new node was successfully installed. Let's create an entry associated with it. |
951 | 152 | let entry = RefEntry { |
952 | 152 | parent: self, |
953 | 152 | node: n, |
954 | 152 | }; |
955 | | |
956 | | // Build the rest of the tower above level 0. |
957 | 152 | 'build: for level30 in 1..height { |
958 | | loop { |
959 | | // Obtain the predecessor and successor at the current level. |
960 | 30 | let pred = search.left[level]; |
961 | 30 | let succ = search.right[level]; |
962 | 30 | |
963 | 30 | // Load the current value of the pointer in the tower at this level. |
964 | 30 | // TODO(Amanieu): can we use relaxed ordering here? |
965 | 30 | let next = n.tower[level].load(Ordering::SeqCst, guard); |
966 | 30 | |
967 | 30 | // If the current pointer is marked, that means another thread is already |
968 | 30 | // removing the node we've just inserted. In that case, let's just stop |
969 | 30 | // building the tower. |
970 | 30 | if next.tag() == 1 { |
971 | 0 | break 'build; |
972 | 30 | } |
973 | 30 | |
974 | 30 | // When searching for `key` and traversing the skip list from the highest level |
975 | 30 | // to the lowest, it is possible to observe a node with an equal key at higher |
976 | 30 | // levels and then find it missing at the lower levels if it gets removed |
977 | 30 | // during traversal. Even worse, it is possible to observe completely different |
978 | 30 | // nodes with the exact same key at different levels. |
979 | 30 | // |
980 | 30 | // Linking the new node to a dead successor with an equal key could create |
981 | 30 | // subtle corner cases that would require special care. It's much easier to |
982 | 30 | // simply prohibit linking two nodes with equal keys. |
983 | 30 | // |
984 | 30 | // If the successor has the same key as the new node, that means it is marked |
985 | 30 | // as removed and should be unlinked from the skip list. In that case, let's |
986 | 30 | // repeat the search to make sure it gets unlinked and try again. |
987 | 30 | // |
988 | 30 | // If this comparison or the following search panics, we simply stop building |
989 | 30 | // the tower without breaking any invariants. Note that building higher levels |
990 | 30 | // is completely optional. Only the lowest level really matters, and all the |
991 | 30 | // higher levels are there just to make searching faster. |
992 | 30 | if succ.as_ref().map(|s| &s.key2 ) == Some(&n.key) { |
993 | 0 | search = self.search_position(&n.key, guard); |
994 | | continue; |
995 | 30 | } |
996 | 30 | |
997 | 30 | // Change the pointer at the current level from `next` to `succ`. If this CAS |
998 | 30 | // operation fails, that means another thread has marked the pointer and we |
999 | 30 | // should stop building the tower. |
1000 | 30 | // TODO(Amanieu): can we use release ordering here? |
1001 | 30 | if n.tower[level] |
1002 | 30 | .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard) |
1003 | 30 | .is_err() |
1004 | | { |
1005 | 0 | break 'build; |
1006 | 30 | } |
1007 | 30 | |
1008 | 30 | // Increment the reference count. The current value will always be at least 1 |
1009 | 30 | // because we are holding `entry`. |
1010 | 30 | n.refs_and_height |
1011 | 30 | .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); |
1012 | 30 | |
1013 | 30 | // Try installing the new node at the current level. |
1014 | 30 | // TODO(Amanieu): can we use release ordering here? |
1015 | 30 | if pred[level] |
1016 | 30 | .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard) |
1017 | 30 | .is_ok() |
1018 | | { |
1019 | | // Success! Continue on the next level. |
1020 | 30 | break; |
1021 | 0 | } |
1022 | 0 |
|
1023 | 0 | // Installation failed. Decrement the reference count. |
1024 | 0 | (*n).refs_and_height |
1025 | 0 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); |
1026 | 0 |
|
1027 | 0 | // We don't have the most up-to-date search results. Repeat the search. |
1028 | 0 | // |
1029 | 0 | // If this search panics, we simply stop building the tower without breaking |
1030 | 0 | // any invariants. Note that building higher levels is completely optional. |
1031 | 0 | // Only the lowest level really matters, and all the higher levels are there |
1032 | 0 | // just to make searching faster. |
1033 | 0 | search = self.search_position(&n.key, guard); |
1034 | | } |
1035 | | } |
1036 | | |
1037 | | // If any pointer in the tower is marked, that means our node is in the process of |
1038 | | // removal or already removed. It is possible that another thread (either partially or |
1039 | | // completely) removed the new node while we were building the tower, and just after |
1040 | | // that we installed the new node at one of the higher levels. In order to undo that |
1041 | | // installation, we must repeat the search, which will unlink the new node at that |
1042 | | // level. |
1043 | | // TODO(Amanieu): can we use relaxed ordering here? |
1044 | 153 | if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { |
1045 | 0 | self.search_bound(Bound::Included(&n.key), false, guard); |
1046 | 153 | } |
1047 | | |
1048 | | // Finally, return the new entry. |
1049 | 153 | entry |
1050 | | } |
1051 | 149 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::insert_internal Line | Count | Source | 834 | 20 | fn insert_internal( | 835 | 20 | &self, | 836 | 20 | key: K, | 837 | 20 | value: V, | 838 | 20 | replace: bool, | 839 | 20 | guard: &Guard, | 840 | 20 | ) -> RefEntry<'_, K, V> { | 841 | 20 | self.check_guard(guard); | 842 | 20 | | 843 | 20 | unsafe { | 844 | 20 | // Rebind the guard to the lifetime of self. This is a bit of a | 845 | 20 | // hack but it allows us to return references that are not bound to | 846 | 20 | // the lifetime of the guard. | 847 | 20 | let guard = &*(guard as *const _); | 848 | | | 849 | | let mut search; | 850 | 20 | loop { | 851 | 20 | // First try searching for the key. | 852 | 20 | // Note that the `Ord` implementation for `K` may panic during the search. | 853 | 20 | search = self.search_position(&key, guard); | 854 | | | 855 | 20 | let r0 = match search.found { | 856 | 0 | Some(r) => r, | 857 | 20 | None => break, | 858 | | }; | 859 | | | 860 | 0 | if replace { | 861 | | // If a node with the key was found and we should replace it, mark its tower | 862 | | // and then repeat the search. | 863 | 0 | if r.mark_tower() { | 864 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 865 | 0 | } | 866 | | } else { | 867 | | // If a node with the key was found and we're not going to replace it, let's | 868 | | // try returning it as an entry. | 869 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 870 | 0 | return e; | 871 | 0 | } | 872 | 0 |
| 873 | 0 | // If we couldn't increment the reference count, that means someone has just | 874 | 0 | // now removed the node. | 875 | 0 | break; | 876 | | } | 877 | | } | 878 | | | 879 | | // Create a new node. | 880 | 20 | let height = self.random_height(); | 881 | 20 | let (node, n) = { | 882 | 20 | // The reference count is initially two to account for: | 883 | 20 | // 1. The entry that will be returned. | 884 | 20 | // 2. The link at the level 0 of the tower. | 885 | 20 | let n = Node::<K, V>::alloc(height, 2); | 886 | 20 | | 887 | 20 | // Write the key and the value into the node. | 888 | 20 | ptr::write(&mut (*n).key, key); | 889 | 20 | ptr::write(&mut (*n).value, value); | 890 | 20 | | 891 | 20 | (Shared::<Node<K, V>>::from(n as *const _), &*n) | 892 | 20 | }; | 893 | 20 | | 894 | 20 | // Optimistically increment `len`. | 895 | 20 | self.hot_data.len.fetch_add(1, Ordering::Relaxed); | 896 | | | 897 | 20 | loop { | 898 | 20 | // Set the lowest successor of `n` to `search.right[0]`. | 899 | 20 | n.tower[0].store(search.right[0], Ordering::Relaxed); | 900 | 20 | | 901 | 20 | // Try installing the new node into the skip list (at level 0). | 902 | 20 | // TODO(Amanieu): can we use release ordering here? | 903 | 20 | if search.left[0][0] | 904 | 20 | .compare_exchange( | 905 | 20 | search.right[0], | 906 | 20 | node, | 907 | 20 | Ordering::SeqCst, | 908 | 20 | Ordering::SeqCst, | 909 | 20 | guard, | 910 | 20 | ) | 911 | 20 | .is_ok() | 912 | | { | 913 | 20 | break; | 914 | 0 | } | 915 | 0 |
| 916 | 0 | // We failed. Let's search for the key and try again. | 917 | 0 | { | 918 | 0 | // Create a guard that destroys the new node in case search panics. | 919 | 0 | let sg = scopeguard::guard((), |_| { | 920 | | Node::finalize(node.as_raw()); | 921 | 0 | }); | 922 | 0 | search = self.search_position(&n.key, guard); | 923 | 0 | mem::forget(sg); | 924 | 0 | } | 925 | | | 926 | 0 | if let Some(r) = search.found { | 927 | 0 | if replace { | 928 | | // If a node with the key was found and we should replace it, mark its | 929 | | // tower and then repeat the search. | 930 | 0 | if r.mark_tower() { | 931 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 932 | 0 | } | 933 | | } else { | 934 | | // If a node with the key was found and we're not going to replace it, | 935 | | // let's try returning it as an entry. | 936 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 937 | | // Destroy the new node. | 938 | 0 | Node::finalize(node.as_raw()); | 939 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 940 | 0 |
| 941 | 0 | return e; | 942 | 0 | } | 943 | | | 944 | | // If we couldn't increment the reference count, that means someone has | 945 | | // just now removed the node. | 946 | | } | 947 | 0 | } | 948 | | } | 949 | | | 950 | | // The new node was successfully installed. Let's create an entry associated with it. | 951 | 20 | let entry = RefEntry { | 952 | 20 | parent: self, | 953 | 20 | node: n, | 954 | 20 | }; | 955 | | | 956 | | // Build the rest of the tower above level 0. | 957 | 20 | 'build: for level4 in 1..height { | 958 | | loop { | 959 | | // Obtain the predecessor and successor at the current level. | 960 | 4 | let pred = search.left[level]; | 961 | 4 | let succ = search.right[level]; | 962 | 4 | | 963 | 4 | // Load the current value of the pointer in the tower at this level. | 964 | 4 | // TODO(Amanieu): can we use relaxed ordering here? | 965 | 4 | let next = n.tower[level].load(Ordering::SeqCst, guard); | 966 | 4 | | 967 | 4 | // If the current pointer is marked, that means another thread is already | 968 | 4 | // removing the node we've just inserted. In that case, let's just stop | 969 | 4 | // building the tower. | 970 | 4 | if next.tag() == 1 { | 971 | 0 | break 'build; | 972 | 4 | } | 973 | 4 | | 974 | 4 | // When searching for `key` and traversing the skip list from the highest level | 975 | 4 | // to the lowest, it is possible to observe a node with an equal key at higher | 976 | 4 | // levels and then find it missing at the lower levels if it gets removed | 977 | 4 | // during traversal. Even worse, it is possible to observe completely different | 978 | 4 | // nodes with the exact same key at different levels. | 979 | 4 | // | 980 | 4 | // Linking the new node to a dead successor with an equal key could create | 981 | 4 | // subtle corner cases that would require special care. It's much easier to | 982 | 4 | // simply prohibit linking two nodes with equal keys. | 983 | 4 | // | 984 | 4 | // If the successor has the same key as the new node, that means it is marked | 985 | 4 | // as removed and should be unlinked from the skip list. In that case, let's | 986 | 4 | // repeat the search to make sure it gets unlinked and try again. | 987 | 4 | // | 988 | 4 | // If this comparison or the following search panics, we simply stop building | 989 | 4 | // the tower without breaking any invariants. Note that building higher levels | 990 | 4 | // is completely optional. Only the lowest level really matters, and all the | 991 | 4 | // higher levels are there just to make searching faster. | 992 | 4 | if succ.as_ref().map(|s| &s.key) == Some(&n.key) { | 993 | 0 | search = self.search_position(&n.key, guard); | 994 | | continue; | 995 | 4 | } | 996 | 4 | | 997 | 4 | // Change the pointer at the current level from `next` to `succ`. If this CAS | 998 | 4 | // operation fails, that means another thread has marked the pointer and we | 999 | 4 | // should stop building the tower. | 1000 | 4 | // TODO(Amanieu): can we use release ordering here? | 1001 | 4 | if n.tower[level] | 1002 | 4 | .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard) | 1003 | 4 | .is_err() | 1004 | | { | 1005 | 0 | break 'build; | 1006 | 4 | } | 1007 | 4 | | 1008 | 4 | // Increment the reference count. The current value will always be at least 1 | 1009 | 4 | // because we are holding `entry`. | 1010 | 4 | n.refs_and_height | 1011 | 4 | .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); | 1012 | 4 | | 1013 | 4 | // Try installing the new node at the current level. | 1014 | 4 | // TODO(Amanieu): can we use release ordering here? | 1015 | 4 | if pred[level] | 1016 | 4 | .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard) | 1017 | 4 | .is_ok() | 1018 | | { | 1019 | | // Success! Continue on the next level. | 1020 | 4 | break; | 1021 | 0 | } | 1022 | 0 |
| 1023 | 0 | // Installation failed. Decrement the reference count. | 1024 | 0 | (*n).refs_and_height | 1025 | 0 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); | 1026 | 0 |
| 1027 | 0 | // We don't have the most up-to-date search results. Repeat the search. | 1028 | 0 | // | 1029 | 0 | // If this search panics, we simply stop building the tower without breaking | 1030 | 0 | // any invariants. Note that building higher levels is completely optional. | 1031 | 0 | // Only the lowest level really matters, and all the higher levels are there | 1032 | 0 | // just to make searching faster. | 1033 | 0 | search = self.search_position(&n.key, guard); | 1034 | | } | 1035 | | } | 1036 | | | 1037 | | // If any pointer in the tower is marked, that means our node is in the process of | 1038 | | // removal or already removed. It is possible that another thread (either partially or | 1039 | | // completely) removed the new node while we were building the tower, and just after | 1040 | | // that we installed the new node at one of the higher levels. In order to undo that | 1041 | | // installation, we must repeat the search, which will unlink the new node at that | 1042 | | // level. | 1043 | | // TODO(Amanieu): can we use relaxed ordering here? | 1044 | 20 | if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { | 1045 | 0 | self.search_bound(Bound::Included(&n.key), false, guard); | 1046 | 20 | } | 1047 | | | 1048 | | // Finally, return the new entry. | 1049 | 20 | entry | 1050 | | } | 1051 | 20 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::insert_internal Line | Count | Source | 834 | 3 | fn insert_internal( | 835 | 3 | &self, | 836 | 3 | key: K, | 837 | 3 | value: V, | 838 | 3 | replace: bool, | 839 | 3 | guard: &Guard, | 840 | 3 | ) -> RefEntry<'_, K, V> { | 841 | 3 | self.check_guard(guard); | 842 | 3 | | 843 | 3 | unsafe { | 844 | 3 | // Rebind the guard to the lifetime of self. This is a bit of a | 845 | 3 | // hack but it allows us to return references that are not bound to | 846 | 3 | // the lifetime of the guard. | 847 | 3 | let guard = &*(guard as *const _); | 848 | | | 849 | | let mut search; | 850 | 3 | loop { | 851 | 3 | // First try searching for the key. | 852 | 3 | // Note that the `Ord` implementation for `K` may panic during the search. | 853 | 3 | search = self.search_position(&key, guard); | 854 | | | 855 | 3 | let r0 = match search.found { | 856 | 0 | Some(r) => r, | 857 | 3 | None => break, | 858 | | }; | 859 | | | 860 | 0 | if replace { | 861 | | // If a node with the key was found and we should replace it, mark its tower | 862 | | // and then repeat the search. | 863 | 0 | if r.mark_tower() { | 864 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 865 | 0 | } | 866 | | } else { | 867 | | // If a node with the key was found and we're not going to replace it, let's | 868 | | // try returning it as an entry. | 869 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 870 | 0 | return e; | 871 | 0 | } | 872 | 0 |
| 873 | 0 | // If we couldn't increment the reference count, that means someone has just | 874 | 0 | // now removed the node. | 875 | 0 | break; | 876 | | } | 877 | | } | 878 | | | 879 | | // Create a new node. | 880 | 3 | let height = self.random_height(); | 881 | 3 | let (node, n) = { | 882 | 3 | // The reference count is initially two to account for: | 883 | 3 | // 1. The entry that will be returned. | 884 | 3 | // 2. The link at the level 0 of the tower. | 885 | 3 | let n = Node::<K, V>::alloc(height, 2); | 886 | 3 | | 887 | 3 | // Write the key and the value into the node. | 888 | 3 | ptr::write(&mut (*n).key, key); | 889 | 3 | ptr::write(&mut (*n).value, value); | 890 | 3 | | 891 | 3 | (Shared::<Node<K, V>>::from(n as *const _), &*n) | 892 | 3 | }; | 893 | 3 | | 894 | 3 | // Optimistically increment `len`. | 895 | 3 | self.hot_data.len.fetch_add(1, Ordering::Relaxed); | 896 | | | 897 | 3 | loop { | 898 | 3 | // Set the lowest successor of `n` to `search.right[0]`. | 899 | 3 | n.tower[0].store(search.right[0], Ordering::Relaxed); | 900 | 3 | | 901 | 3 | // Try installing the new node into the skip list (at level 0). | 902 | 3 | // TODO(Amanieu): can we use release ordering here? | 903 | 3 | if search.left[0][0] | 904 | 3 | .compare_exchange( | 905 | 3 | search.right[0], | 906 | 3 | node, | 907 | 3 | Ordering::SeqCst, | 908 | 3 | Ordering::SeqCst, | 909 | 3 | guard, | 910 | 3 | ) | 911 | 3 | .is_ok() | 912 | | { | 913 | 3 | break; | 914 | 0 | } | 915 | 0 |
| 916 | 0 | // We failed. Let's search for the key and try again. | 917 | 0 | { | 918 | 0 | // Create a guard that destroys the new node in case search panics. | 919 | 0 | let sg = scopeguard::guard((), |_| { | 920 | | Node::finalize(node.as_raw()); | 921 | 0 | }); | 922 | 0 | search = self.search_position(&n.key, guard); | 923 | 0 | mem::forget(sg); | 924 | 0 | } | 925 | | | 926 | 0 | if let Some(r) = search.found { | 927 | 0 | if replace { | 928 | | // If a node with the key was found and we should replace it, mark its | 929 | | // tower and then repeat the search. | 930 | 0 | if r.mark_tower() { | 931 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 932 | 0 | } | 933 | | } else { | 934 | | // If a node with the key was found and we're not going to replace it, | 935 | | // let's try returning it as an entry. | 936 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 937 | | // Destroy the new node. | 938 | 0 | Node::finalize(node.as_raw()); | 939 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 940 | 0 |
| 941 | 0 | return e; | 942 | 0 | } | 943 | | | 944 | | // If we couldn't increment the reference count, that means someone has | 945 | | // just now removed the node. | 946 | | } | 947 | 0 | } | 948 | | } | 949 | | | 950 | | // The new node was successfully installed. Let's create an entry associated with it. | 951 | 3 | let entry = RefEntry { | 952 | 3 | parent: self, | 953 | 3 | node: n, | 954 | 3 | }; | 955 | | | 956 | | // Build the rest of the tower above level 0. | 957 | 3 | 'build: for level0 in 1..height { | 958 | | loop { | 959 | | // Obtain the predecessor and successor at the current level. | 960 | 0 | let pred = search.left[level]; | 961 | 0 | let succ = search.right[level]; | 962 | 0 |
| 963 | 0 | // Load the current value of the pointer in the tower at this level. | 964 | 0 | // TODO(Amanieu): can we use relaxed ordering here? | 965 | 0 | let next = n.tower[level].load(Ordering::SeqCst, guard); | 966 | 0 |
| 967 | 0 | // If the current pointer is marked, that means another thread is already | 968 | 0 | // removing the node we've just inserted. In that case, let's just stop | 969 | 0 | // building the tower. | 970 | 0 | if next.tag() == 1 { | 971 | 0 | break 'build; | 972 | 0 | } | 973 | 0 |
| 974 | 0 | // When searching for `key` and traversing the skip list from the highest level | 975 | 0 | // to the lowest, it is possible to observe a node with an equal key at higher | 976 | 0 | // levels and then find it missing at the lower levels if it gets removed | 977 | 0 | // during traversal. Even worse, it is possible to observe completely different | 978 | 0 | // nodes with the exact same key at different levels. | 979 | 0 | // | 980 | 0 | // Linking the new node to a dead successor with an equal key could create | 981 | 0 | // subtle corner cases that would require special care. It's much easier to | 982 | 0 | // simply prohibit linking two nodes with equal keys. | 983 | 0 | // | 984 | 0 | // If the successor has the same key as the new node, that means it is marked | 985 | 0 | // as removed and should be unlinked from the skip list. In that case, let's | 986 | 0 | // repeat the search to make sure it gets unlinked and try again. | 987 | 0 | // | 988 | 0 | // If this comparison or the following search panics, we simply stop building | 989 | 0 | // the tower without breaking any invariants. Note that building higher levels | 990 | 0 | // is completely optional. Only the lowest level really matters, and all the | 991 | 0 | // higher levels are there just to make searching faster. | 992 | 0 | if succ.as_ref().map(|s| &s.key) == Some(&n.key) { | 993 | 0 | search = self.search_position(&n.key, guard); | 994 | | continue; | 995 | 0 | } | 996 | 0 |
| 997 | 0 | // Change the pointer at the current level from `next` to `succ`. If this CAS | 998 | 0 | // operation fails, that means another thread has marked the pointer and we | 999 | 0 | // should stop building the tower. | 1000 | 0 | // TODO(Amanieu): can we use release ordering here? | 1001 | 0 | if n.tower[level] | 1002 | 0 | .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard) | 1003 | 0 | .is_err() | 1004 | | { | 1005 | 0 | break 'build; | 1006 | 0 | } | 1007 | 0 |
| 1008 | 0 | // Increment the reference count. The current value will always be at least 1 | 1009 | 0 | // because we are holding `entry`. | 1010 | 0 | n.refs_and_height | 1011 | 0 | .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); | 1012 | 0 |
| 1013 | 0 | // Try installing the new node at the current level. | 1014 | 0 | // TODO(Amanieu): can we use release ordering here? | 1015 | 0 | if pred[level] | 1016 | 0 | .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard) | 1017 | 0 | .is_ok() | 1018 | | { | 1019 | | // Success! Continue on the next level. | 1020 | 0 | break; | 1021 | 0 | } | 1022 | 0 |
| 1023 | 0 | // Installation failed. Decrement the reference count. | 1024 | 0 | (*n).refs_and_height | 1025 | 0 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); | 1026 | 0 |
| 1027 | 0 | // We don't have the most up-to-date search results. Repeat the search. | 1028 | 0 | // | 1029 | 0 | // If this search panics, we simply stop building the tower without breaking | 1030 | 0 | // any invariants. Note that building higher levels is completely optional. | 1031 | 0 | // Only the lowest level really matters, and all the higher levels are there | 1032 | 0 | // just to make searching faster. | 1033 | 0 | search = self.search_position(&n.key, guard); | 1034 | | } | 1035 | | } | 1036 | | | 1037 | | // If any pointer in the tower is marked, that means our node is in the process of | 1038 | | // removal or already removed. It is possible that another thread (either partially or | 1039 | | // completely) removed the new node while we were building the tower, and just after | 1040 | | // that we installed the new node at one of the higher levels. In order to undo that | 1041 | | // installation, we must repeat the search, which will unlink the new node at that | 1042 | | // level. | 1043 | | // TODO(Amanieu): can we use relaxed ordering here? | 1044 | 3 | if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { | 1045 | 0 | self.search_bound(Bound::Included(&n.key), false, guard); | 1046 | 3 | } | 1047 | | | 1048 | | // Finally, return the new entry. | 1049 | 3 | entry | 1050 | | } | 1051 | 3 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::insert_internal Line | Count | Source | 834 | 7 | fn insert_internal( | 835 | 7 | &self, | 836 | 7 | key: K, | 837 | 7 | value: V, | 838 | 7 | replace: bool, | 839 | 7 | guard: &Guard, | 840 | 7 | ) -> RefEntry<'_, K, V> { | 841 | 7 | self.check_guard(guard); | 842 | 7 | | 843 | 7 | unsafe { | 844 | 7 | // Rebind the guard to the lifetime of self. This is a bit of a | 845 | 7 | // hack but it allows us to return references that are not bound to | 846 | 7 | // the lifetime of the guard. | 847 | 7 | let guard = &*(guard as *const _); | 848 | | | 849 | | let mut search; | 850 | 7 | loop { | 851 | 7 | // First try searching for the key. | 852 | 7 | // Note that the `Ord` implementation for `K` may panic during the search. | 853 | 7 | search = self.search_position(&key, guard); | 854 | | | 855 | 7 | let r0 = match search.found { | 856 | 0 | Some(r) => r, | 857 | 7 | None => break, | 858 | | }; | 859 | | | 860 | 0 | if replace { | 861 | | // If a node with the key was found and we should replace it, mark its tower | 862 | | // and then repeat the search. | 863 | 0 | if r.mark_tower() { | 864 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 865 | 0 | } | 866 | | } else { | 867 | | // If a node with the key was found and we're not going to replace it, let's | 868 | | // try returning it as an entry. | 869 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 870 | 0 | return e; | 871 | 0 | } | 872 | 0 |
| 873 | 0 | // If we couldn't increment the reference count, that means someone has just | 874 | 0 | // now removed the node. | 875 | 0 | break; | 876 | | } | 877 | | } | 878 | | | 879 | | // Create a new node. | 880 | 7 | let height = self.random_height(); | 881 | 7 | let (node, n) = { | 882 | 7 | // The reference count is initially two to account for: | 883 | 7 | // 1. The entry that will be returned. | 884 | 7 | // 2. The link at the level 0 of the tower. | 885 | 7 | let n = Node::<K, V>::alloc(height, 2); | 886 | 7 | | 887 | 7 | // Write the key and the value into the node. | 888 | 7 | ptr::write(&mut (*n).key, key); | 889 | 7 | ptr::write(&mut (*n).value, value); | 890 | 7 | | 891 | 7 | (Shared::<Node<K, V>>::from(n as *const _), &*n) | 892 | 7 | }; | 893 | 7 | | 894 | 7 | // Optimistically increment `len`. | 895 | 7 | self.hot_data.len.fetch_add(1, Ordering::Relaxed); | 896 | | | 897 | 7 | loop { | 898 | 7 | // Set the lowest successor of `n` to `search.right[0]`. | 899 | 7 | n.tower[0].store(search.right[0], Ordering::Relaxed); | 900 | 7 | | 901 | 7 | // Try installing the new node into the skip list (at level 0). | 902 | 7 | // TODO(Amanieu): can we use release ordering here? | 903 | 7 | if search.left[0][0] | 904 | 7 | .compare_exchange( | 905 | 7 | search.right[0], | 906 | 7 | node, | 907 | 7 | Ordering::SeqCst, | 908 | 7 | Ordering::SeqCst, | 909 | 7 | guard, | 910 | 7 | ) | 911 | 7 | .is_ok() | 912 | | { | 913 | 7 | break; | 914 | 0 | } | 915 | 0 |
| 916 | 0 | // We failed. Let's search for the key and try again. | 917 | 0 | { | 918 | 0 | // Create a guard that destroys the new node in case search panics. | 919 | 0 | let sg = scopeguard::guard((), |_| { | 920 | | Node::finalize(node.as_raw()); | 921 | 0 | }); | 922 | 0 | search = self.search_position(&n.key, guard); | 923 | 0 | mem::forget(sg); | 924 | 0 | } | 925 | | | 926 | 0 | if let Some(r) = search.found { | 927 | 0 | if replace { | 928 | | // If a node with the key was found and we should replace it, mark its | 929 | | // tower and then repeat the search. | 930 | 0 | if r.mark_tower() { | 931 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 932 | 0 | } | 933 | | } else { | 934 | | // If a node with the key was found and we're not going to replace it, | 935 | | // let's try returning it as an entry. | 936 | 0 | if let Some(e) = RefEntry::try_acquire(self, r) { | 937 | | // Destroy the new node. | 938 | 0 | Node::finalize(node.as_raw()); | 939 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 940 | 0 |
| 941 | 0 | return e; | 942 | 0 | } | 943 | | | 944 | | // If we couldn't increment the reference count, that means someone has | 945 | | // just now removed the node. | 946 | | } | 947 | 0 | } | 948 | | } | 949 | | | 950 | | // The new node was successfully installed. Let's create an entry associated with it. | 951 | 7 | let entry = RefEntry { | 952 | 7 | parent: self, | 953 | 7 | node: n, | 954 | 7 | }; | 955 | | | 956 | | // Build the rest of the tower above level 0. | 957 | 7 | 'build: for level1 in 1..height { | 958 | | loop { | 959 | | // Obtain the predecessor and successor at the current level. | 960 | 1 | let pred = search.left[level]; | 961 | 1 | let succ = search.right[level]; | 962 | 1 | | 963 | 1 | // Load the current value of the pointer in the tower at this level. | 964 | 1 | // TODO(Amanieu): can we use relaxed ordering here? | 965 | 1 | let next = n.tower[level].load(Ordering::SeqCst, guard); | 966 | 1 | | 967 | 1 | // If the current pointer is marked, that means another thread is already | 968 | 1 | // removing the node we've just inserted. In that case, let's just stop | 969 | 1 | // building the tower. | 970 | 1 | if next.tag() == 1 { | 971 | 0 | break 'build; | 972 | 1 | } | 973 | 1 | | 974 | 1 | // When searching for `key` and traversing the skip list from the highest level | 975 | 1 | // to the lowest, it is possible to observe a node with an equal key at higher | 976 | 1 | // levels and then find it missing at the lower levels if it gets removed | 977 | 1 | // during traversal. Even worse, it is possible to observe completely different | 978 | 1 | // nodes with the exact same key at different levels. | 979 | 1 | // | 980 | 1 | // Linking the new node to a dead successor with an equal key could create | 981 | 1 | // subtle corner cases that would require special care. It's much easier to | 982 | 1 | // simply prohibit linking two nodes with equal keys. | 983 | 1 | // | 984 | 1 | // If the successor has the same key as the new node, that means it is marked | 985 | 1 | // as removed and should be unlinked from the skip list. In that case, let's | 986 | 1 | // repeat the search to make sure it gets unlinked and try again. | 987 | 1 | // | 988 | 1 | // If this comparison or the following search panics, we simply stop building | 989 | 1 | // the tower without breaking any invariants. Note that building higher levels | 990 | 1 | // is completely optional. Only the lowest level really matters, and all the | 991 | 1 | // higher levels are there just to make searching faster. | 992 | 1 | if succ.as_ref().map(|s| &s.key) == Some(&n.key) { | 993 | 0 | search = self.search_position(&n.key, guard); | 994 | | continue; | 995 | 1 | } | 996 | 1 | | 997 | 1 | // Change the pointer at the current level from `next` to `succ`. If this CAS | 998 | 1 | // operation fails, that means another thread has marked the pointer and we | 999 | 1 | // should stop building the tower. | 1000 | 1 | // TODO(Amanieu): can we use release ordering here? | 1001 | 1 | if n.tower[level] | 1002 | 1 | .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard) | 1003 | 1 | .is_err() | 1004 | | { | 1005 | 0 | break 'build; | 1006 | 1 | } | 1007 | 1 | | 1008 | 1 | // Increment the reference count. The current value will always be at least 1 | 1009 | 1 | // because we are holding `entry`. | 1010 | 1 | n.refs_and_height | 1011 | 1 | .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); | 1012 | 1 | | 1013 | 1 | // Try installing the new node at the current level. | 1014 | 1 | // TODO(Amanieu): can we use release ordering here? | 1015 | 1 | if pred[level] | 1016 | 1 | .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard) | 1017 | 1 | .is_ok() | 1018 | | { | 1019 | | // Success! Continue on the next level. | 1020 | 1 | break; | 1021 | 0 | } | 1022 | 0 |
| 1023 | 0 | // Installation failed. Decrement the reference count. | 1024 | 0 | (*n).refs_and_height | 1025 | 0 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); | 1026 | 0 |
| 1027 | 0 | // We don't have the most up-to-date search results. Repeat the search. | 1028 | 0 | // | 1029 | 0 | // If this search panics, we simply stop building the tower without breaking | 1030 | 0 | // any invariants. Note that building higher levels is completely optional. | 1031 | 0 | // Only the lowest level really matters, and all the higher levels are there | 1032 | 0 | // just to make searching faster. | 1033 | 0 | search = self.search_position(&n.key, guard); | 1034 | | } | 1035 | | } | 1036 | | | 1037 | | // If any pointer in the tower is marked, that means our node is in the process of | 1038 | | // removal or already removed. It is possible that another thread (either partially or | 1039 | | // completely) removed the new node while we were building the tower, and just after | 1040 | | // that we installed the new node at one of the higher levels. In order to undo that | 1041 | | // installation, we must repeat the search, which will unlink the new node at that | 1042 | | // level. | 1043 | | // TODO(Amanieu): can we use relaxed ordering here? | 1044 | 7 | if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { | 1045 | 0 | self.search_bound(Bound::Included(&n.key), false, guard); | 1046 | 7 | } | 1047 | | | 1048 | | // Finally, return the new entry. | 1049 | 7 | entry | 1050 | | } | 1051 | 7 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::insert_internal Line | Count | Source | 834 | 122 | fn insert_internal( | 835 | 122 | &self, | 836 | 122 | key: K, | 837 | 122 | value: V, | 838 | 122 | replace: bool, | 839 | 122 | guard: &Guard, | 840 | 122 | ) -> RefEntry<'_, K, V> { | 841 | 122 | self.check_guard(guard); | 842 | 122 | | 843 | 122 | unsafe { | 844 | 122 | // Rebind the guard to the lifetime of self. This is a bit of a | 845 | 122 | // hack but it allows us to return references that are not bound to | 846 | 122 | // the lifetime of the guard. | 847 | 122 | let guard = &*(guard as *const _); | 848 | | | 849 | | let mut search; | 850 | 126 | loop { | 851 | 126 | // First try searching for the key. | 852 | 126 | // Note that the `Ord` implementation for `K` may panic during the search. | 853 | 126 | search = self.search_position(&key, guard); | 854 | | | 855 | 126 | let r9 = match search.found { | 856 | 9 | Some(r) => r, | 857 | 117 | None => break, | 858 | | }; | 859 | | | 860 | 9 | if replace { | 861 | | // If a node with the key was found and we should replace it, mark its tower | 862 | | // and then repeat the search. | 863 | 8 | if r.mark_tower() { | 864 | 4 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 865 | 4 | }0 | 866 | | } else { | 867 | | // If a node with the key was found and we're not going to replace it, let's | 868 | | // try returning it as an entry. | 869 | 1 | if let Some(e) = RefEntry::try_acquire(self, r) { | 870 | 1 | return e; | 871 | 0 | } | 872 | 0 |
| 873 | 0 | // If we couldn't increment the reference count, that means someone has just | 874 | 0 | // now removed the node. | 875 | 0 | break; | 876 | | } | 877 | | } | 878 | | | 879 | | // Create a new node. | 880 | 117 | let height = self.random_height(); | 881 | 117 | let (node, n) = { | 882 | 117 | // The reference count is initially two to account for: | 883 | 117 | // 1. The entry that will be returned. | 884 | 117 | // 2. The link at the level 0 of the tower. | 885 | 117 | let n = Node::<K, V>::alloc(height, 2); | 886 | 117 | | 887 | 117 | // Write the key and the value into the node. | 888 | 117 | ptr::write(&mut (*n).key, key); | 889 | 117 | ptr::write(&mut (*n).value, value); | 890 | 117 | | 891 | 117 | (Shared::<Node<K, V>>::from(n as *const _), &*n) | 892 | 117 | }; | 893 | 117 | | 894 | 117 | // Optimistically increment `len`. | 895 | 117 | self.hot_data.len.fetch_add(1, Ordering::Relaxed); | 896 | | | 897 | 117 | loop { | 898 | 117 | // Set the lowest successor of `n` to `search.right[0]`. | 899 | 117 | n.tower[0].store(search.right[0], Ordering::Relaxed); | 900 | 117 | | 901 | 117 | // Try installing the new node into the skip list (at level 0). | 902 | 117 | // TODO(Amanieu): can we use release ordering here? | 903 | 117 | if search.left[0][0] | 904 | 117 | .compare_exchange( | 905 | 117 | search.right[0], | 906 | 117 | node, | 907 | 117 | Ordering::SeqCst, | 908 | 117 | Ordering::SeqCst, | 909 | 117 | guard, | 910 | 117 | ) | 911 | 117 | .is_ok() | 912 | | { | 913 | 122 | break; | 914 | 18.4E | } | 915 | 18.4E | | 916 | 18.4E | // We failed. Let's search for the key and try again. | 917 | 18.4E | { | 918 | 18.4E | // Create a guard that destroys the new node in case search panics. | 919 | 18.4E | let sg = scopeguard::guard((), |_| { | 920 | | Node::finalize(node.as_raw()); | 921 | 18.4E | }); | 922 | 18.4E | search = self.search_position(&n.key, guard); | 923 | 18.4E | mem::forget(sg); | 924 | 18.4E | } | 925 | | | 926 | 18.4E | if let Some(r) = search.found { | 927 | 18.4E | if replace { | 928 | | // If a node with the key was found and we should replace it, mark its | 929 | | // tower and then repeat the search. | 930 | 0 | if r.mark_tower() { | 931 | 0 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 932 | 0 | } | 933 | | } else { | 934 | | // If a node with the key was found and we're not going to replace it, | 935 | | // let's try returning it as an entry. | 936 | 18.4E | if let Some(e) = RefEntry::try_acquire(self, r) { | 937 | | // Destroy the new node. | 938 | 18.4E | Node::finalize(node.as_raw()); | 939 | 18.4E | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 940 | 18.4E | | 941 | 18.4E | return e; | 942 | 0 | } | 943 | | | 944 | | // If we couldn't increment the reference count, that means someone has | 945 | | // just now removed the node. | 946 | | } | 947 | 0 | } | 948 | | } | 949 | | | 950 | | // The new node was successfully installed. Let's create an entry associated with it. | 951 | 122 | let entry = RefEntry { | 952 | 122 | parent: self, | 953 | 122 | node: n, | 954 | 122 | }; | 955 | | | 956 | | // Build the rest of the tower above level 0. | 957 | 122 | 'build: for level25 in 1..height { | 958 | | loop { | 959 | | // Obtain the predecessor and successor at the current level. | 960 | 25 | let pred = search.left[level]; | 961 | 25 | let succ = search.right[level]; | 962 | 25 | | 963 | 25 | // Load the current value of the pointer in the tower at this level. | 964 | 25 | // TODO(Amanieu): can we use relaxed ordering here? | 965 | 25 | let next = n.tower[level].load(Ordering::SeqCst, guard); | 966 | 25 | | 967 | 25 | // If the current pointer is marked, that means another thread is already | 968 | 25 | // removing the node we've just inserted. In that case, let's just stop | 969 | 25 | // building the tower. | 970 | 25 | if next.tag() == 1 { | 971 | 0 | break 'build; | 972 | 25 | } | 973 | 25 | | 974 | 25 | // When searching for `key` and traversing the skip list from the highest level | 975 | 25 | // to the lowest, it is possible to observe a node with an equal key at higher | 976 | 25 | // levels and then find it missing at the lower levels if it gets removed | 977 | 25 | // during traversal. Even worse, it is possible to observe completely different | 978 | 25 | // nodes with the exact same key at different levels. | 979 | 25 | // | 980 | 25 | // Linking the new node to a dead successor with an equal key could create | 981 | 25 | // subtle corner cases that would require special care. It's much easier to | 982 | 25 | // simply prohibit linking two nodes with equal keys. | 983 | 25 | // | 984 | 25 | // If the successor has the same key as the new node, that means it is marked | 985 | 25 | // as removed and should be unlinked from the skip list. In that case, let's | 986 | 25 | // repeat the search to make sure it gets unlinked and try again. | 987 | 25 | // | 988 | 25 | // If this comparison or the following search panics, we simply stop building | 989 | 25 | // the tower without breaking any invariants. Note that building higher levels | 990 | 25 | // is completely optional. Only the lowest level really matters, and all the | 991 | 25 | // higher levels are there just to make searching faster. | 992 | 25 | if succ.as_ref().map(|s| &s.key) == Some(&n.key) { | 993 | 0 | search = self.search_position(&n.key, guard); | 994 | | continue; | 995 | 25 | } | 996 | 25 | | 997 | 25 | // Change the pointer at the current level from `next` to `succ`. If this CAS | 998 | 25 | // operation fails, that means another thread has marked the pointer and we | 999 | 25 | // should stop building the tower. | 1000 | 25 | // TODO(Amanieu): can we use release ordering here? | 1001 | 25 | if n.tower[level] | 1002 | 25 | .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard) | 1003 | 25 | .is_err() | 1004 | | { | 1005 | 0 | break 'build; | 1006 | 25 | } | 1007 | 25 | | 1008 | 25 | // Increment the reference count. The current value will always be at least 1 | 1009 | 25 | // because we are holding `entry`. | 1010 | 25 | n.refs_and_height | 1011 | 25 | .fetch_add(1 << HEIGHT_BITS, Ordering::Relaxed); | 1012 | 25 | | 1013 | 25 | // Try installing the new node at the current level. | 1014 | 25 | // TODO(Amanieu): can we use release ordering here? | 1015 | 25 | if pred[level] | 1016 | 25 | .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard) | 1017 | 25 | .is_ok() | 1018 | | { | 1019 | | // Success! Continue on the next level. | 1020 | 25 | break; | 1021 | 0 | } | 1022 | 0 |
| 1023 | 0 | // Installation failed. Decrement the reference count. | 1024 | 0 | (*n).refs_and_height | 1025 | 0 | .fetch_sub(1 << HEIGHT_BITS, Ordering::Relaxed); | 1026 | 0 |
| 1027 | 0 | // We don't have the most up-to-date search results. Repeat the search. | 1028 | 0 | // | 1029 | 0 | // If this search panics, we simply stop building the tower without breaking | 1030 | 0 | // any invariants. Note that building higher levels is completely optional. | 1031 | 0 | // Only the lowest level really matters, and all the higher levels are there | 1032 | 0 | // just to make searching faster. | 1033 | 0 | search = self.search_position(&n.key, guard); | 1034 | | } | 1035 | | } | 1036 | | | 1037 | | // If any pointer in the tower is marked, that means our node is in the process of | 1038 | | // removal or already removed. It is possible that another thread (either partially or | 1039 | | // completely) removed the new node while we were building the tower, and just after | 1040 | | // that we installed the new node at one of the higher levels. In order to undo that | 1041 | | // installation, we must repeat the search, which will unlink the new node at that | 1042 | | // level. | 1043 | | // TODO(Amanieu): can we use relaxed ordering here? | 1044 | 123 | if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { | 1045 | 0 | self.search_bound(Bound::Included(&n.key), false, guard); | 1046 | 123 | } | 1047 | | | 1048 | | // Finally, return the new entry. | 1049 | 123 | entry | 1050 | | } | 1051 | 119 | } |
|
1052 | | } |
1053 | | |
1054 | | impl<K, V> SkipList<K, V> |
1055 | | where |
1056 | | K: Ord + Send + 'static, |
1057 | | V: Send + 'static, |
1058 | | { |
1059 | | /// Inserts a `key`-`value` pair into the skip list and returns the new entry. |
1060 | | /// |
1061 | | /// If there is an existing entry with this key, it will be removed before inserting the new |
1062 | | /// one. |
1063 | 151 | pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { |
1064 | 151 | self.insert_internal(key, value, true, guard) |
1065 | 151 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::insert Line | Count | Source | 1063 | 20 | pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { | 1064 | 20 | self.insert_internal(key, value, true, guard) | 1065 | 20 | } |
<crossbeam_skiplist::base::SkipList<i32, ()>>::insert Line | Count | Source | 1063 | 3 | pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { | 1064 | 3 | self.insert_internal(key, value, true, guard) | 1065 | 3 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::insert Line | Count | Source | 1063 | 7 | pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { | 1064 | 7 | self.insert_internal(key, value, true, guard) | 1065 | 7 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::insert Line | Count | Source | 1063 | 121 | pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { | 1064 | 121 | self.insert_internal(key, value, true, guard) | 1065 | 121 | } |
|
1066 | | |
1067 | | /// Removes an entry with the specified `key` from the map and returns it. |
1068 | 42 | pub fn remove<Q>(&self, key: &Q, guard: &Guard) -> Option<RefEntry<'_, K, V>> |
1069 | 42 | where |
1070 | 42 | K: Borrow<Q>, |
1071 | 42 | Q: Ord + ?Sized, |
1072 | 42 | { |
1073 | 42 | self.check_guard(guard); |
1074 | 42 | |
1075 | 42 | unsafe { |
1076 | 42 | // Rebind the guard to the lifetime of self. This is a bit of a |
1077 | 42 | // hack but it allows us to return references that are not bound to |
1078 | 42 | // the lifetime of the guard. |
1079 | 42 | let guard = &*(guard as *const _); |
1080 | | |
1081 | 42 | loop { |
1082 | 42 | // Try searching for the key. |
1083 | 42 | let search = self.search_position(key, guard); |
1084 | | |
1085 | 42 | let n30 = search.found?12 ; |
1086 | | |
1087 | | // First try incrementing the reference count because we have to return the node as |
1088 | | // an entry. If this fails, repeat the search. |
1089 | 30 | let entry = match RefEntry::try_acquire(self, n) { |
1090 | 30 | Some(e) => e, |
1091 | 30 | None => continue, |
1092 | 30 | }; |
1093 | 30 | |
1094 | 30 | // Try removing the node by marking its tower. |
1095 | 30 | if n.mark_tower() { |
1096 | | // Success! Decrement `len`. |
1097 | 30 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
1098 | | |
1099 | | // Unlink the node at each level of the skip list. We could do this by simply |
1100 | | // repeating the search, but it's usually faster to unlink it manually using |
1101 | | // the `left` and `right` lists. |
1102 | 38 | for level in (0..n.height())30 .rev() { |
1103 | | // TODO(Amanieu): can we use relaxed ordering here? |
1104 | 38 | let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0); |
1105 | 38 | |
1106 | 38 | // Try linking the predecessor and successor at this level. |
1107 | 38 | // TODO(Amanieu): can we use release ordering here? |
1108 | 38 | if search.left[level][level] |
1109 | 38 | .compare_exchange( |
1110 | 38 | Shared::from(n as *const _), |
1111 | 38 | succ, |
1112 | 38 | Ordering::SeqCst, |
1113 | 38 | Ordering::SeqCst, |
1114 | 38 | guard, |
1115 | 38 | ) |
1116 | 38 | .is_ok() |
1117 | 38 | { |
1118 | 38 | // Success! Decrement the reference count. |
1119 | 38 | n.decrement(guard); |
1120 | 38 | } else { |
1121 | | // Failed! Just repeat the search to completely unlink the node. |
1122 | 0 | self.search_bound(Bound::Included(key), false, guard); |
1123 | 0 | break; |
1124 | | } |
1125 | | } |
1126 | | |
1127 | 30 | return Some(entry); |
1128 | 0 | } |
1129 | | } |
1130 | | } |
1131 | 42 | } <crossbeam_skiplist::base::SkipList<i32, i32>>::remove::<i32> Line | Count | Source | 1068 | 4 | pub fn remove<Q>(&self, key: &Q, guard: &Guard) -> Option<RefEntry<'_, K, V>> | 1069 | 4 | where | 1070 | 4 | K: Borrow<Q>, | 1071 | 4 | Q: Ord + ?Sized, | 1072 | 4 | { | 1073 | 4 | self.check_guard(guard); | 1074 | 4 | | 1075 | 4 | unsafe { | 1076 | 4 | // Rebind the guard to the lifetime of self. This is a bit of a | 1077 | 4 | // hack but it allows us to return references that are not bound to | 1078 | 4 | // the lifetime of the guard. | 1079 | 4 | let guard = &*(guard as *const _); | 1080 | | | 1081 | 4 | loop { | 1082 | 4 | // Try searching for the key. | 1083 | 4 | let search = self.search_position(key, guard); | 1084 | | | 1085 | 4 | let n = search.found?0 ; | 1086 | | | 1087 | | // First try incrementing the reference count because we have to return the node as | 1088 | | // an entry. If this fails, repeat the search. | 1089 | 4 | let entry = match RefEntry::try_acquire(self, n) { | 1090 | 4 | Some(e) => e, | 1091 | 4 | None => continue, | 1092 | 4 | }; | 1093 | 4 | | 1094 | 4 | // Try removing the node by marking its tower. | 1095 | 4 | if n.mark_tower() { | 1096 | | // Success! Decrement `len`. | 1097 | 4 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 1098 | | | 1099 | | // Unlink the node at each level of the skip list. We could do this by simply | 1100 | | // repeating the search, but it's usually faster to unlink it manually using | 1101 | | // the `left` and `right` lists. | 1102 | 5 | for level in (0..n.height())4 .rev() { | 1103 | | // TODO(Amanieu): can we use relaxed ordering here? | 1104 | 5 | let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0); | 1105 | 5 | | 1106 | 5 | // Try linking the predecessor and successor at this level. | 1107 | 5 | // TODO(Amanieu): can we use release ordering here? | 1108 | 5 | if search.left[level][level] | 1109 | 5 | .compare_exchange( | 1110 | 5 | Shared::from(n as *const _), | 1111 | 5 | succ, | 1112 | 5 | Ordering::SeqCst, | 1113 | 5 | Ordering::SeqCst, | 1114 | 5 | guard, | 1115 | 5 | ) | 1116 | 5 | .is_ok() | 1117 | 5 | { | 1118 | 5 | // Success! Decrement the reference count. | 1119 | 5 | n.decrement(guard); | 1120 | 5 | } else { | 1121 | | // Failed! Just repeat the search to completely unlink the node. | 1122 | 0 | self.search_bound(Bound::Included(key), false, guard); | 1123 | 0 | break; | 1124 | | } | 1125 | | } | 1126 | | | 1127 | 4 | return Some(entry); | 1128 | 0 | } | 1129 | | } | 1130 | | } | 1131 | 4 | } |
<crossbeam_skiplist::base::SkipList<i32, i32>>::remove::<i32> Line | Count | Source | 1068 | 37 | pub fn remove<Q>(&self, key: &Q, guard: &Guard) -> Option<RefEntry<'_, K, V>> | 1069 | 37 | where | 1070 | 37 | K: Borrow<Q>, | 1071 | 37 | Q: Ord + ?Sized, | 1072 | 37 | { | 1073 | 37 | self.check_guard(guard); | 1074 | 37 | | 1075 | 37 | unsafe { | 1076 | 37 | // Rebind the guard to the lifetime of self. This is a bit of a | 1077 | 37 | // hack but it allows us to return references that are not bound to | 1078 | 37 | // the lifetime of the guard. | 1079 | 37 | let guard = &*(guard as *const _); | 1080 | | | 1081 | 37 | loop { | 1082 | 37 | // Try searching for the key. | 1083 | 37 | let search = self.search_position(key, guard); | 1084 | | | 1085 | 37 | let n25 = search.found?12 ; | 1086 | | | 1087 | | // First try incrementing the reference count because we have to return the node as | 1088 | | // an entry. If this fails, repeat the search. | 1089 | 25 | let entry = match RefEntry::try_acquire(self, n) { | 1090 | 25 | Some(e) => e, | 1091 | 25 | None => continue, | 1092 | 25 | }; | 1093 | 25 | | 1094 | 25 | // Try removing the node by marking its tower. | 1095 | 25 | if n.mark_tower() { | 1096 | | // Success! Decrement `len`. | 1097 | 25 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 1098 | | | 1099 | | // Unlink the node at each level of the skip list. We could do this by simply | 1100 | | // repeating the search, but it's usually faster to unlink it manually using | 1101 | | // the `left` and `right` lists. | 1102 | 32 | for level in (0..n.height())25 .rev() { | 1103 | | // TODO(Amanieu): can we use relaxed ordering here? | 1104 | 32 | let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0); | 1105 | 32 | | 1106 | 32 | // Try linking the predecessor and successor at this level. | 1107 | 32 | // TODO(Amanieu): can we use release ordering here? | 1108 | 32 | if search.left[level][level] | 1109 | 32 | .compare_exchange( | 1110 | 32 | Shared::from(n as *const _), | 1111 | 32 | succ, | 1112 | 32 | Ordering::SeqCst, | 1113 | 32 | Ordering::SeqCst, | 1114 | 32 | guard, | 1115 | 32 | ) | 1116 | 32 | .is_ok() | 1117 | 32 | { | 1118 | 32 | // Success! Decrement the reference count. | 1119 | 32 | n.decrement(guard); | 1120 | 32 | } else { | 1121 | | // Failed! Just repeat the search to completely unlink the node. | 1122 | 0 | self.search_bound(Bound::Included(key), false, guard); | 1123 | 0 | break; | 1124 | | } | 1125 | | } | 1126 | | | 1127 | 25 | return Some(entry); | 1128 | 0 | } | 1129 | | } | 1130 | | } | 1131 | 37 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value>>::remove::<base::drops::Key> Line | Count | Source | 1068 | 1 | pub fn remove<Q>(&self, key: &Q, guard: &Guard) -> Option<RefEntry<'_, K, V>> | 1069 | 1 | where | 1070 | 1 | K: Borrow<Q>, | 1071 | 1 | Q: Ord + ?Sized, | 1072 | 1 | { | 1073 | 1 | self.check_guard(guard); | 1074 | 1 | | 1075 | 1 | unsafe { | 1076 | 1 | // Rebind the guard to the lifetime of self. This is a bit of a | 1077 | 1 | // hack but it allows us to return references that are not bound to | 1078 | 1 | // the lifetime of the guard. | 1079 | 1 | let guard = &*(guard as *const _); | 1080 | | | 1081 | 1 | loop { | 1082 | 1 | // Try searching for the key. | 1083 | 1 | let search = self.search_position(key, guard); | 1084 | | | 1085 | 1 | let n = search.found?0 ; | 1086 | | | 1087 | | // First try incrementing the reference count because we have to return the node as | 1088 | | // an entry. If this fails, repeat the search. | 1089 | 1 | let entry = match RefEntry::try_acquire(self, n) { | 1090 | 1 | Some(e) => e, | 1091 | 1 | None => continue, | 1092 | 1 | }; | 1093 | 1 | | 1094 | 1 | // Try removing the node by marking its tower. | 1095 | 1 | if n.mark_tower() { | 1096 | | // Success! Decrement `len`. | 1097 | 1 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); | 1098 | | | 1099 | | // Unlink the node at each level of the skip list. We could do this by simply | 1100 | | // repeating the search, but it's usually faster to unlink it manually using | 1101 | | // the `left` and `right` lists. | 1102 | 1 | for level in (0..n.height()).rev() { | 1103 | | // TODO(Amanieu): can we use relaxed ordering here? | 1104 | 1 | let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0); | 1105 | 1 | | 1106 | 1 | // Try linking the predecessor and successor at this level. | 1107 | 1 | // TODO(Amanieu): can we use release ordering here? | 1108 | 1 | if search.left[level][level] | 1109 | 1 | .compare_exchange( | 1110 | 1 | Shared::from(n as *const _), | 1111 | 1 | succ, | 1112 | 1 | Ordering::SeqCst, | 1113 | 1 | Ordering::SeqCst, | 1114 | 1 | guard, | 1115 | 1 | ) | 1116 | 1 | .is_ok() | 1117 | 1 | { | 1118 | 1 | // Success! Decrement the reference count. | 1119 | 1 | n.decrement(guard); | 1120 | 1 | } else { | 1121 | | // Failed! Just repeat the search to completely unlink the node. | 1122 | 0 | self.search_bound(Bound::Included(key), false, guard); | 1123 | 0 | break; | 1124 | | } | 1125 | | } | 1126 | | | 1127 | 1 | return Some(entry); | 1128 | 0 | } | 1129 | | } | 1130 | | } | 1131 | 1 | } |
|
1132 | | |
1133 | | /// Removes an entry from the front of the skip list. |
1134 | | pub fn pop_front(&self, guard: &Guard) -> Option<RefEntry<'_, K, V>> { |
1135 | | self.check_guard(guard); |
1136 | | loop { |
1137 | | let e = self.front(guard)?; |
1138 | | if let Some(e) = e.pin() { |
1139 | | if e.remove(guard) { |
1140 | | return Some(e); |
1141 | | } |
1142 | | } |
1143 | | } |
1144 | | } |
1145 | | |
1146 | | /// Removes an entry from the back of the skip list. |
1147 | | pub fn pop_back(&self, guard: &Guard) -> Option<RefEntry<'_, K, V>> { |
1148 | | self.check_guard(guard); |
1149 | | loop { |
1150 | | let e = self.back(guard)?; |
1151 | | if let Some(e) = e.pin() { |
1152 | | if e.remove(guard) { |
1153 | | return Some(e); |
1154 | | } |
1155 | | } |
1156 | | } |
1157 | | } |
1158 | | |
1159 | | /// Iterates over the map and removes every entry. |
1160 | 1 | pub fn clear(&self, guard: &mut Guard) { |
1161 | 1 | self.check_guard(guard); |
1162 | | |
1163 | | /// Number of steps after which we repin the current thread and unlink removed nodes. |
1164 | | const BATCH_SIZE: usize = 100; |
1165 | | |
1166 | | loop { |
1167 | | { |
1168 | | // Search for the first entry in order to unlink all the preceding entries |
1169 | | // we have removed. |
1170 | | // |
1171 | | // By unlinking nodes in batches we make sure that the final search doesn't |
1172 | | // unlink all nodes at once, which could keep the current thread pinned for a |
1173 | | // long time. |
1174 | 1 | let mut entry = self.lower_bound(Bound::Unbounded, guard); |
1175 | | |
1176 | 8 | for _ in 0..BATCH_SIZE { |
1177 | | // Stop if we have reached the end of the list. |
1178 | 8 | let e7 = match entry { |
1179 | 8 | None => return1 , |
1180 | 7 | Some(e) => e, |
1181 | 7 | }; |
1182 | 7 | |
1183 | 7 | // Before removing the current entry, first obtain the following one. |
1184 | 7 | let next = e.next(); |
1185 | 7 | |
1186 | 7 | // Try removing the current entry. |
1187 | 7 | if e.node.mark_tower() { |
1188 | 7 | // Success! Decrement `len`. |
1189 | 7 | self.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
1190 | 7 | }0 |
1191 | | |
1192 | 7 | entry = next; |
1193 | | } |
1194 | | } |
1195 | | |
1196 | | // Repin the current thread because we don't want to keep it pinned in the same |
1197 | | // epoch for a too long time. |
1198 | 0 | guard.repin(); |
1199 | | } |
1200 | 1 | } |
1201 | | } |
1202 | | |
1203 | | impl<K, V> Drop for SkipList<K, V> { |
1204 | 25 | fn drop(&mut self) { |
1205 | 25 | unsafe { |
1206 | 25 | let mut node = self.head[0] |
1207 | 25 | .load(Ordering::Relaxed, epoch::unprotected()) |
1208 | 25 | .as_ref(); |
1209 | | |
1210 | | // Iterate through the whole skip list and destroy every node. |
1211 | 130 | while let Some(n105 ) = node { |
1212 | 105 | // Unprotected loads are okay because this function is the only one currently using |
1213 | 105 | // the skip list. |
1214 | 105 | let next = n.tower[0] |
1215 | 105 | .load(Ordering::Relaxed, epoch::unprotected()) |
1216 | 105 | .as_ref(); |
1217 | 105 | |
1218 | 105 | // Deallocate every node. |
1219 | 105 | Node::finalize(n); |
1220 | 105 | |
1221 | 105 | node = next; |
1222 | 105 | } |
1223 | | } |
1224 | 25 | } <crossbeam_skiplist::base::SkipList<i32, i32> as core::ops::drop::Drop>::drop Line | Count | Source | 1204 | 3 | fn drop(&mut self) { | 1205 | 3 | unsafe { | 1206 | 3 | let mut node = self.head[0] | 1207 | 3 | .load(Ordering::Relaxed, epoch::unprotected()) | 1208 | 3 | .as_ref(); | 1209 | | | 1210 | | // Iterate through the whole skip list and destroy every node. | 1211 | 19 | while let Some(n16 ) = node { | 1212 | 16 | // Unprotected loads are okay because this function is the only one currently using | 1213 | 16 | // the skip list. | 1214 | 16 | let next = n.tower[0] | 1215 | 16 | .load(Ordering::Relaxed, epoch::unprotected()) | 1216 | 16 | .as_ref(); | 1217 | 16 | | 1218 | 16 | // Deallocate every node. | 1219 | 16 | Node::finalize(n); | 1220 | 16 | | 1221 | 16 | node = next; | 1222 | 16 | } | 1223 | | } | 1224 | 3 | } |
<crossbeam_skiplist::base::SkipList<i32, ()> as core::ops::drop::Drop>::drop Line | Count | Source | 1204 | 1 | fn drop(&mut self) { | 1205 | 1 | unsafe { | 1206 | 1 | let mut node = self.head[0] | 1207 | 1 | .load(Ordering::Relaxed, epoch::unprotected()) | 1208 | 1 | .as_ref(); | 1209 | | | 1210 | | // Iterate through the whole skip list and destroy every node. | 1211 | 4 | while let Some(n3 ) = node { | 1212 | 3 | // Unprotected loads are okay because this function is the only one currently using | 1213 | 3 | // the skip list. | 1214 | 3 | let next = n.tower[0] | 1215 | 3 | .load(Ordering::Relaxed, epoch::unprotected()) | 1216 | 3 | .as_ref(); | 1217 | 3 | | 1218 | 3 | // Deallocate every node. | 1219 | 3 | Node::finalize(n); | 1220 | 3 | | 1221 | 3 | node = next; | 1222 | 3 | } | 1223 | | } | 1224 | 1 | } |
<crossbeam_skiplist::base::SkipList<i32, i32> as core::ops::drop::Drop>::drop Line | Count | Source | 1204 | 19 | fn drop(&mut self) { | 1205 | 19 | unsafe { | 1206 | 19 | let mut node = self.head[0] | 1207 | 19 | .load(Ordering::Relaxed, epoch::unprotected()) | 1208 | 19 | .as_ref(); | 1209 | | | 1210 | | // Iterate through the whole skip list and destroy every node. | 1211 | 99 | while let Some(n80 ) = node { | 1212 | 80 | // Unprotected loads are okay because this function is the only one currently using | 1213 | 80 | // the skip list. | 1214 | 80 | let next = n.tower[0] | 1215 | 80 | .load(Ordering::Relaxed, epoch::unprotected()) | 1216 | 80 | .as_ref(); | 1217 | 80 | | 1218 | 80 | // Deallocate every node. | 1219 | 80 | Node::finalize(n); | 1220 | 80 | | 1221 | 80 | node = next; | 1222 | 80 | } | 1223 | | } | 1224 | 19 | } |
<crossbeam_skiplist::base::SkipList<alloc::string::String, alloc::boxed::Box<i32>> as core::ops::drop::Drop>::drop Line | Count | Source | 1204 | 1 | fn drop(&mut self) { | 1205 | 1 | unsafe { | 1206 | 1 | let mut node = self.head[0] | 1207 | 1 | .load(Ordering::Relaxed, epoch::unprotected()) | 1208 | 1 | .as_ref(); | 1209 | | | 1210 | | // Iterate through the whole skip list and destroy every node. | 1211 | 1 | while let Some(n0 ) = node { | 1212 | 0 | // Unprotected loads are okay because this function is the only one currently using | 1213 | 0 | // the skip list. | 1214 | 0 | let next = n.tower[0] | 1215 | 0 | .load(Ordering::Relaxed, epoch::unprotected()) | 1216 | 0 | .as_ref(); | 1217 | 0 |
| 1218 | 0 | // Deallocate every node. | 1219 | 0 | Node::finalize(n); | 1220 | 0 |
| 1221 | 0 | node = next; | 1222 | 0 | } | 1223 | | } | 1224 | 1 | } |
<crossbeam_skiplist::base::SkipList<base::drops::Key, base::drops::Value> as core::ops::drop::Drop>::drop Line | Count | Source | 1204 | 1 | fn drop(&mut self) { | 1205 | 1 | unsafe { | 1206 | 1 | let mut node = self.head[0] | 1207 | 1 | .load(Ordering::Relaxed, epoch::unprotected()) | 1208 | 1 | .as_ref(); | 1209 | | | 1210 | | // Iterate through the whole skip list and destroy every node. | 1211 | 7 | while let Some(n6 ) = node { | 1212 | 6 | // Unprotected loads are okay because this function is the only one currently using | 1213 | 6 | // the skip list. | 1214 | 6 | let next = n.tower[0] | 1215 | 6 | .load(Ordering::Relaxed, epoch::unprotected()) | 1216 | 6 | .as_ref(); | 1217 | 6 | | 1218 | 6 | // Deallocate every node. | 1219 | 6 | Node::finalize(n); | 1220 | 6 | | 1221 | 6 | node = next; | 1222 | 6 | } | 1223 | | } | 1224 | 1 | } |
|
1225 | | } |
1226 | | |
1227 | | impl<K, V> fmt::Debug for SkipList<K, V> |
1228 | | where |
1229 | | K: Ord + fmt::Debug, |
1230 | | V: fmt::Debug, |
1231 | | { |
1232 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1233 | | f.pad("SkipList { .. }") |
1234 | | } |
1235 | | } |
1236 | | |
1237 | | impl<K, V> IntoIterator for SkipList<K, V> { |
1238 | | type Item = (K, V); |
1239 | | type IntoIter = IntoIter<K, V>; |
1240 | | |
1241 | 1 | fn into_iter(self) -> IntoIter<K, V> { |
1242 | 1 | unsafe { |
1243 | 1 | // Load the front node. |
1244 | 1 | // |
1245 | 1 | // Unprotected loads are okay because this function is the only one currently using |
1246 | 1 | // the skip list. |
1247 | 1 | let front = self.head[0] |
1248 | 1 | .load(Ordering::Relaxed, epoch::unprotected()) |
1249 | 1 | .as_raw(); |
1250 | | |
1251 | | // Clear the skip list by setting all pointers in head to null. |
1252 | 32 | for level in 0..MAX_HEIGHT { |
1253 | 32 | self.head[level].store(Shared::null(), Ordering::Relaxed); |
1254 | 32 | } |
1255 | | |
1256 | 1 | IntoIter { |
1257 | 1 | node: front as *mut Node<K, V>, |
1258 | 1 | } |
1259 | 1 | } |
1260 | 1 | } |
1261 | | } |
1262 | | |
1263 | | /// An entry in a skip list, protected by a `Guard`. |
1264 | | /// |
1265 | | /// The lifetimes of the key and value are the same as that of the `Guard` |
1266 | | /// used when creating the `Entry` (`'g`). This lifetime is also constrained to |
1267 | | /// not outlive the `SkipList`. |
1268 | | pub struct Entry<'a: 'g, 'g, K, V> { |
1269 | | parent: &'a SkipList<K, V>, |
1270 | | node: &'g Node<K, V>, |
1271 | | guard: &'g Guard, |
1272 | | } |
1273 | | |
1274 | | impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> { |
1275 | | /// Returns `true` if the entry is removed from the skip list. |
1276 | 4 | pub fn is_removed(&self) -> bool { |
1277 | 4 | self.node.is_removed() |
1278 | 4 | } |
1279 | | |
1280 | | /// Returns a reference to the key. |
1281 | 55 | pub fn key(&self) -> &'g K { |
1282 | 55 | &self.node.key |
1283 | 55 | } |
1284 | | |
1285 | | /// Returns a reference to the value. |
1286 | 188 | pub fn value(&self) -> &'g V { |
1287 | 188 | &self.node.value |
1288 | 188 | } |
1289 | | |
1290 | | /// Returns a reference to the parent `SkipList` |
1291 | | pub fn skiplist(&self) -> &'a SkipList<K, V> { |
1292 | | self.parent |
1293 | | } |
1294 | | |
1295 | | /// Attempts to pin the entry with a reference count, ensuring that it |
1296 | | /// remains accessible even after the `Guard` is dropped. |
1297 | | /// |
1298 | | /// This method may return `None` if the reference count is already 0 and |
1299 | | /// the node has been queued for deletion. |
1300 | 31 | pub fn pin(&self) -> Option<RefEntry<'a, K, V>> { |
1301 | 31 | unsafe { RefEntry::try_acquire(self.parent, self.node) } |
1302 | 31 | } |
1303 | | } |
1304 | | |
1305 | | impl<'a: 'g, 'g, K, V> Entry<'a, 'g, K, V> |
1306 | | where |
1307 | | K: Ord + Send + 'static, |
1308 | | V: Send + 'static, |
1309 | | { |
1310 | | /// Removes the entry from the skip list. |
1311 | | /// |
1312 | | /// Returns `true` if this call removed the entry and `false` if it was already removed. |
1313 | 8 | pub fn remove(&self) -> bool { |
1314 | 8 | // Try marking the tower. |
1315 | 8 | if self.node.mark_tower() { |
1316 | | // Success - the entry is removed. Now decrement `len`. |
1317 | 8 | self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
1318 | 8 | |
1319 | 8 | // Search for the key to unlink the node from the skip list. |
1320 | 8 | self.parent |
1321 | 8 | .search_bound(Bound::Included(&self.node.key), false, self.guard); |
1322 | 8 | |
1323 | 8 | true |
1324 | | } else { |
1325 | 0 | false |
1326 | | } |
1327 | 8 | } |
1328 | | } |
1329 | | |
1330 | | impl<'a: 'g, 'g, K, V> Clone for Entry<'a, 'g, K, V> { |
1331 | | fn clone(&self) -> Entry<'a, 'g, K, V> { |
1332 | | Entry { |
1333 | | parent: self.parent, |
1334 | | node: self.node, |
1335 | | guard: self.guard, |
1336 | | } |
1337 | | } |
1338 | | } |
1339 | | |
1340 | | impl<K, V> fmt::Debug for Entry<'_, '_, K, V> |
1341 | | where |
1342 | | K: fmt::Debug, |
1343 | | V: fmt::Debug, |
1344 | | { |
1345 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1346 | | f.debug_tuple("Entry") |
1347 | | .field(self.key()) |
1348 | | .field(self.value()) |
1349 | | .finish() |
1350 | | } |
1351 | | } |
1352 | | |
1353 | | impl<'a: 'g, 'g, K, V> Entry<'a, 'g, K, V> |
1354 | | where |
1355 | | K: Ord, |
1356 | | { |
1357 | | /// Moves to the next entry in the skip list. |
1358 | 13 | pub fn move_next(&mut self) -> bool { |
1359 | 13 | match self.next() { |
1360 | 13 | None => false2 , |
1361 | 11 | Some(n) => { |
1362 | 11 | *self = n; |
1363 | 11 | true |
1364 | | } |
1365 | | } |
1366 | 13 | } |
1367 | | |
1368 | | /// Returns the next entry in the skip list. |
1369 | 24 | pub fn next(&self) -> Option<Entry<'a, 'g, K, V>> { |
1370 | 24 | let n20 = self.parent.next_node( |
1371 | 24 | &self.node.tower, |
1372 | 24 | Bound::Excluded(&self.node.key), |
1373 | 24 | self.guard, |
1374 | 24 | )?4 ; |
1375 | 20 | Some(Entry { |
1376 | 20 | parent: self.parent, |
1377 | 20 | node: n, |
1378 | 20 | guard: self.guard, |
1379 | 20 | }) |
1380 | 24 | } |
1381 | | |
1382 | | /// Moves to the previous entry in the skip list. |
1383 | 6 | pub fn move_prev(&mut self) -> bool { |
1384 | 6 | match self.prev() { |
1385 | 6 | None => false1 , |
1386 | 5 | Some(n) => { |
1387 | 5 | *self = n; |
1388 | 5 | true |
1389 | | } |
1390 | | } |
1391 | 6 | } |
1392 | | |
1393 | | /// Returns the previous entry in the skip list. |
1394 | 10 | pub fn prev(&self) -> Option<Entry<'a, 'g, K, V>> { |
1395 | 10 | let n8 = self |
1396 | 10 | .parent |
1397 | 10 | .search_bound(Bound::Excluded(&self.node.key), true, self.guard)?2 ; |
1398 | 8 | Some(Entry { |
1399 | 8 | parent: self.parent, |
1400 | 8 | node: n, |
1401 | 8 | guard: self.guard, |
1402 | 8 | }) |
1403 | 10 | } |
1404 | | } |
1405 | | |
1406 | | /// A reference-counted entry in a skip list. |
1407 | | /// |
1408 | | /// You *must* call `release` to free this type, otherwise the node will be |
1409 | | /// leaked. This is because releasing the entry requires a `Guard`. |
1410 | | pub struct RefEntry<'a, K, V> { |
1411 | | parent: &'a SkipList<K, V>, |
1412 | | node: &'a Node<K, V>, |
1413 | | } |
1414 | | |
1415 | | impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { |
1416 | | /// Returns `true` if the entry is removed from the skip list. |
1417 | | pub fn is_removed(&self) -> bool { |
1418 | | self.node.is_removed() |
1419 | | } |
1420 | | |
1421 | | /// Returns a reference to the key. |
1422 | 141 | pub fn key(&self) -> &K { |
1423 | 141 | &self.node.key |
1424 | 141 | } |
1425 | | |
1426 | | /// Returns a reference to the value. |
1427 | 134 | pub fn value(&self) -> &V { |
1428 | 134 | &self.node.value |
1429 | 134 | } <crossbeam_skiplist::base::RefEntry<i32, i32>>::value Line | Count | Source | 1427 | 131 | pub fn value(&self) -> &V { | 1428 | 131 | &self.node.value | 1429 | 131 | } |
<crossbeam_skiplist::base::RefEntry<i32, i32>>::value Line | Count | Source | 1427 | 3 | pub fn value(&self) -> &V { | 1428 | 3 | &self.node.value | 1429 | 3 | } |
|
1430 | | |
1431 | | /// Returns a reference to the parent `SkipList` |
1432 | | pub fn skiplist(&self) -> &'a SkipList<K, V> { |
1433 | | self.parent |
1434 | | } |
1435 | | |
1436 | | /// Releases the reference on the entry. |
1437 | 8 | pub fn release(self, guard: &Guard) { |
1438 | 8 | self.parent.check_guard(guard); |
1439 | 8 | unsafe { self.node.decrement(guard) } |
1440 | 8 | } |
1441 | | |
1442 | | /// Releases the reference of the entry, pinning the thread only when |
1443 | | /// the reference count of the node becomes 0. |
1444 | 169 | pub fn release_with_pin<F>(self, pin: F) |
1445 | 169 | where |
1446 | 169 | F: FnOnce() -> Guard, |
1447 | 169 | { |
1448 | 169 | unsafe { self.node.decrement_with_pin(self.parent, pin) } |
1449 | 169 | } <crossbeam_skiplist::base::RefEntry<i32, i32>>::release_with_pin::<crossbeam_epoch::default::pin> Line | Count | Source | 1444 | 166 | pub fn release_with_pin<F>(self, pin: F) | 1445 | 166 | where | 1446 | 166 | F: FnOnce() -> Guard, | 1447 | 166 | { | 1448 | 166 | unsafe { self.node.decrement_with_pin(self.parent, pin) } | 1449 | 166 | } |
<crossbeam_skiplist::base::RefEntry<i32, ()>>::release_with_pin::<crossbeam_epoch::default::pin> Line | Count | Source | 1444 | 3 | pub fn release_with_pin<F>(self, pin: F) | 1445 | 3 | where | 1446 | 3 | F: FnOnce() -> Guard, | 1447 | 3 | { | 1448 | 3 | unsafe { self.node.decrement_with_pin(self.parent, pin) } | 1449 | 3 | } |
|
1450 | | |
1451 | | /// Tries to create a new `RefEntry` by incrementing the reference count of |
1452 | | /// a node. |
1453 | 192 | unsafe fn try_acquire( |
1454 | 192 | parent: &'a SkipList<K, V>, |
1455 | 192 | node: &Node<K, V>, |
1456 | 192 | ) -> Option<RefEntry<'a, K, V>> { |
1457 | 192 | if node.try_increment() { |
1458 | 192 | Some(RefEntry { |
1459 | 192 | parent, |
1460 | 192 | |
1461 | 192 | // We re-bind the lifetime of the node here to that of the skip |
1462 | 192 | // list since we now hold a reference to it. |
1463 | 192 | node: &*(node as *const _), |
1464 | 192 | }) |
1465 | | } else { |
1466 | 0 | None |
1467 | | } |
1468 | 192 | } <crossbeam_skiplist::base::RefEntry<i32, i32>>::try_acquire Line | Count | Source | 1453 | 165 | unsafe fn try_acquire( | 1454 | 165 | parent: &'a SkipList<K, V>, | 1455 | 165 | node: &Node<K, V>, | 1456 | 165 | ) -> Option<RefEntry<'a, K, V>> { | 1457 | 165 | if node.try_increment() { | 1458 | 165 | Some(RefEntry { | 1459 | 165 | parent, | 1460 | 165 | | 1461 | 165 | // We re-bind the lifetime of the node here to that of the skip | 1462 | 165 | // list since we now hold a reference to it. | 1463 | 165 | node: &*(node as *const _), | 1464 | 165 | }) | 1465 | | } else { | 1466 | 0 | None | 1467 | | } | 1468 | 165 | } |
Unexecuted instantiation: <crossbeam_skiplist::base::RefEntry<i32, ()>>::try_acquire <crossbeam_skiplist::base::RefEntry<base::drops::Key, base::drops::Value>>::try_acquire Line | Count | Source | 1453 | 1 | unsafe fn try_acquire( | 1454 | 1 | parent: &'a SkipList<K, V>, | 1455 | 1 | node: &Node<K, V>, | 1456 | 1 | ) -> Option<RefEntry<'a, K, V>> { | 1457 | 1 | if node.try_increment() { | 1458 | 1 | Some(RefEntry { | 1459 | 1 | parent, | 1460 | 1 | | 1461 | 1 | // We re-bind the lifetime of the node here to that of the skip | 1462 | 1 | // list since we now hold a reference to it. | 1463 | 1 | node: &*(node as *const _), | 1464 | 1 | }) | 1465 | | } else { | 1466 | 0 | None | 1467 | | } | 1468 | 1 | } |
<crossbeam_skiplist::base::RefEntry<i32, i32>>::try_acquire Line | Count | Source | 1453 | 26 | unsafe fn try_acquire( | 1454 | 26 | parent: &'a SkipList<K, V>, | 1455 | 26 | node: &Node<K, V>, | 1456 | 26 | ) -> Option<RefEntry<'a, K, V>> { | 1457 | 26 | if node.try_increment() { | 1458 | 26 | Some(RefEntry { | 1459 | 26 | parent, | 1460 | 26 | | 1461 | 26 | // We re-bind the lifetime of the node here to that of the skip | 1462 | 26 | // list since we now hold a reference to it. | 1463 | 26 | node: &*(node as *const _), | 1464 | 26 | }) | 1465 | | } else { | 1466 | 0 | None | 1467 | | } | 1468 | 26 | } |
|
1469 | | } |
1470 | | |
1471 | | impl<K, V> RefEntry<'_, K, V> |
1472 | | where |
1473 | | K: Ord + Send + 'static, |
1474 | | V: Send + 'static, |
1475 | | { |
1476 | | /// Removes the entry from the skip list. |
1477 | | /// |
1478 | | /// Returns `true` if this call removed the entry and `false` if it was already removed. |
1479 | | pub fn remove(&self, guard: &Guard) -> bool { |
1480 | | self.parent.check_guard(guard); |
1481 | | |
1482 | | // Try marking the tower. |
1483 | | if self.node.mark_tower() { |
1484 | | // Success - the entry is removed. Now decrement `len`. |
1485 | | self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed); |
1486 | | |
1487 | | // Search for the key to unlink the node from the skip list. |
1488 | | self.parent |
1489 | | .search_bound(Bound::Included(&self.node.key), false, guard); |
1490 | | |
1491 | | true |
1492 | | } else { |
1493 | | false |
1494 | | } |
1495 | | } |
1496 | | } |
1497 | | |
1498 | | impl<'a, K, V> Clone for RefEntry<'a, K, V> { |
1499 | 142 | fn clone(&self) -> RefEntry<'a, K, V> { |
1500 | 142 | unsafe { |
1501 | 142 | // Incrementing will always succeed since we're already holding a reference to the node. |
1502 | 142 | Node::try_increment(self.node); |
1503 | 142 | } |
1504 | 142 | RefEntry { |
1505 | 142 | parent: self.parent, |
1506 | 142 | node: self.node, |
1507 | 142 | } |
1508 | 142 | } |
1509 | | } |
1510 | | |
1511 | | impl<K, V> fmt::Debug for RefEntry<'_, K, V> |
1512 | | where |
1513 | | K: fmt::Debug, |
1514 | | V: fmt::Debug, |
1515 | | { |
1516 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1517 | | f.debug_tuple("RefEntry") |
1518 | | .field(self.key()) |
1519 | | .field(self.value()) |
1520 | | .finish() |
1521 | | } |
1522 | | } |
1523 | | |
1524 | | impl<'a, K, V> RefEntry<'a, K, V> |
1525 | | where |
1526 | | K: Ord, |
1527 | | { |
1528 | | /// Moves to the next entry in the skip list. |
1529 | | pub fn move_next(&mut self, guard: &Guard) -> bool { |
1530 | | match self.next(guard) { |
1531 | | None => false, |
1532 | | Some(e) => { |
1533 | | mem::replace(self, e).release(guard); |
1534 | | true |
1535 | | } |
1536 | | } |
1537 | | } |
1538 | | |
1539 | | /// Returns the next entry in the skip list. |
1540 | 132 | pub fn next(&self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1541 | 132 | self.parent.check_guard(guard); |
1542 | 132 | unsafe { |
1543 | 132 | let mut n = self.node; |
1544 | | loop { |
1545 | 132 | n = self |
1546 | 132 | .parent |
1547 | 132 | .next_node(&n.tower, Bound::Excluded(&n.key), guard)?11 ; |
1548 | 121 | if let Some(e) = RefEntry::try_acquire(self.parent, n) { |
1549 | 121 | return Some(e); |
1550 | 0 | } |
1551 | | } |
1552 | | } |
1553 | 132 | } |
1554 | | /// Moves to the previous entry in the skip list. |
1555 | | pub fn move_prev(&mut self, guard: &Guard) -> bool { |
1556 | | match self.prev(guard) { |
1557 | | None => false, |
1558 | | Some(e) => { |
1559 | | mem::replace(self, e).release(guard); |
1560 | | true |
1561 | | } |
1562 | | } |
1563 | | } |
1564 | | |
1565 | | /// Returns the previous entry in the skip list. |
1566 | 10 | pub fn prev(&self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1567 | 10 | self.parent.check_guard(guard); |
1568 | 10 | unsafe { |
1569 | 10 | let mut n = self.node; |
1570 | | loop { |
1571 | 10 | n = self |
1572 | 10 | .parent |
1573 | 10 | .search_bound(Bound::Excluded(&n.key), true, guard)?1 ; |
1574 | 9 | if let Some(e) = RefEntry::try_acquire(self.parent, n) { |
1575 | 9 | return Some(e); |
1576 | 0 | } |
1577 | | } |
1578 | | } |
1579 | 10 | } |
1580 | | } |
1581 | | |
1582 | | /// An iterator over the entries of a `SkipList`. |
1583 | | pub struct Iter<'a: 'g, 'g, K, V> { |
1584 | | parent: &'a SkipList<K, V>, |
1585 | | head: Option<&'g Node<K, V>>, |
1586 | | tail: Option<&'g Node<K, V>>, |
1587 | | guard: &'g Guard, |
1588 | | } |
1589 | | |
1590 | | impl<'a: 'g, 'g, K: 'a, V: 'a> Iterator for Iter<'a, 'g, K, V> |
1591 | | where |
1592 | | K: Ord, |
1593 | | { |
1594 | | type Item = Entry<'a, 'g, K, V>; |
1595 | | |
1596 | 78 | fn next(&mut self) -> Option<Entry<'a, 'g, K, V>> { |
1597 | 78 | self.head = match self.head { |
1598 | 58 | Some(n) => self |
1599 | 58 | .parent |
1600 | 58 | .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), |
1601 | 20 | None => self |
1602 | 20 | .parent |
1603 | 20 | .next_node(&self.parent.head, Bound::Unbounded, self.guard), |
1604 | | }; |
1605 | 78 | if let (Some(h0 ), Some(t0 )) = (self.head, self.tail) { |
1606 | 0 | if h.key >= t.key { |
1607 | 0 | self.head = None; |
1608 | 0 | self.tail = None; |
1609 | 0 | } |
1610 | 78 | } |
1611 | 78 | self.head.map(|n| Entry { |
1612 | 58 | parent: self.parent, |
1613 | 58 | node: n, |
1614 | 58 | guard: self.guard, |
1615 | 78 | }) |
1616 | 78 | } |
1617 | | } |
1618 | | |
1619 | | impl<'a: 'g, 'g, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, 'g, K, V> |
1620 | | where |
1621 | | K: Ord, |
1622 | | { |
1623 | 11 | fn next_back(&mut self) -> Option<Entry<'a, 'g, K, V>> { |
1624 | 11 | self.tail = match self.tail { |
1625 | 10 | Some(n) => self |
1626 | 10 | .parent |
1627 | 10 | .search_bound(Bound::Excluded(&n.key), true, self.guard), |
1628 | 1 | None => self.parent.search_bound(Bound::Unbounded, true, self.guard), |
1629 | | }; |
1630 | 11 | if let (Some(h0 ), Some(t0 )) = (self.head, self.tail) { |
1631 | 0 | if h.key >= t.key { |
1632 | 0 | self.head = None; |
1633 | 0 | self.tail = None; |
1634 | 0 | } |
1635 | 11 | } |
1636 | 11 | self.tail.map(|n| Entry { |
1637 | 10 | parent: self.parent, |
1638 | 10 | node: n, |
1639 | 10 | guard: self.guard, |
1640 | 11 | }) |
1641 | 11 | } |
1642 | | } |
1643 | | |
1644 | | impl<K, V> fmt::Debug for Iter<'_, '_, K, V> |
1645 | | where |
1646 | | K: fmt::Debug, |
1647 | | V: fmt::Debug, |
1648 | | { |
1649 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1650 | | f.debug_struct("Iter") |
1651 | | .field("head", &self.head.map(|n| (&n.key, &n.value))) |
1652 | | .field("tail", &self.tail.map(|n| (&n.key, &n.value))) |
1653 | | .finish() |
1654 | | } |
1655 | | } |
1656 | | |
1657 | | /// An iterator over reference-counted entries of a `SkipList`. |
1658 | | pub struct RefIter<'a, K, V> { |
1659 | | parent: &'a SkipList<K, V>, |
1660 | | head: Option<RefEntry<'a, K, V>>, |
1661 | | tail: Option<RefEntry<'a, K, V>>, |
1662 | | } |
1663 | | |
1664 | | impl<K, V> fmt::Debug for RefIter<'_, K, V> |
1665 | | where |
1666 | | K: fmt::Debug, |
1667 | | V: fmt::Debug, |
1668 | | { |
1669 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1670 | | let mut d = f.debug_struct("RefIter"); |
1671 | | match &self.head { |
1672 | | None => d.field("head", &None::<(&K, &V)>), |
1673 | | Some(e) => d.field("head", &(e.key(), e.value())), |
1674 | | }; |
1675 | | match &self.tail { |
1676 | | None => d.field("tail", &None::<(&K, &V)>), |
1677 | | Some(e) => d.field("tail", &(e.key(), e.value())), |
1678 | | }; |
1679 | | d.finish() |
1680 | | } |
1681 | | } |
1682 | | |
1683 | | impl<'a, K: 'a, V: 'a> RefIter<'a, K, V> |
1684 | | where |
1685 | | K: Ord, |
1686 | | { |
1687 | | /// TODO |
1688 | 24 | pub fn next(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1689 | 24 | self.parent.check_guard(guard); |
1690 | 24 | self.head = match self.head { |
1691 | 21 | Some(ref e) => { |
1692 | 21 | let next_head = e.next(guard); |
1693 | 21 | unsafe { |
1694 | 21 | e.node.decrement(guard); |
1695 | 21 | } |
1696 | 21 | next_head |
1697 | | } |
1698 | 3 | None => try_pin_loop(|| self.parent.front(guard)), |
1699 | | }; |
1700 | 24 | let mut finished = false; |
1701 | 24 | if let (&Some(ref h0 ), &Some(ref t0 )) = (&self.head, &self.tail) { |
1702 | 0 | if h.key() >= t.key() { |
1703 | 0 | finished = true; |
1704 | 0 | } |
1705 | 24 | } |
1706 | 24 | if finished { |
1707 | 0 | self.head = None; |
1708 | 0 | self.tail = None; |
1709 | 24 | } |
1710 | 24 | self.head.clone() |
1711 | 24 | } |
1712 | | |
1713 | | /// TODO |
1714 | 11 | pub fn next_back(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1715 | 11 | self.parent.check_guard(guard); |
1716 | 11 | self.tail = match self.tail { |
1717 | 10 | Some(ref e) => { |
1718 | 10 | let next_tail = e.prev(guard); |
1719 | 10 | unsafe { |
1720 | 10 | e.node.decrement(guard); |
1721 | 10 | } |
1722 | 10 | next_tail |
1723 | | } |
1724 | 1 | None => try_pin_loop(|| self.parent.back(guard)), |
1725 | | }; |
1726 | 11 | let mut finished = false; |
1727 | 11 | if let (&Some(ref h0 ), &Some(ref t0 )) = (&self.head, &self.tail) { |
1728 | 0 | if h.key() >= t.key() { |
1729 | 0 | finished = true; |
1730 | 0 | } |
1731 | 11 | } |
1732 | 11 | if finished { |
1733 | 0 | self.head = None; |
1734 | 0 | self.tail = None; |
1735 | 11 | } |
1736 | 11 | self.tail.clone() |
1737 | 11 | } |
1738 | | } |
1739 | | |
1740 | | /// An iterator over a subset of entries of a `SkipList`. |
1741 | | pub struct Range<'a: 'g, 'g, Q, R, K, V> |
1742 | | where |
1743 | | K: Ord + Borrow<Q>, |
1744 | | R: RangeBounds<Q>, |
1745 | | Q: Ord + ?Sized, |
1746 | | { |
1747 | | parent: &'a SkipList<K, V>, |
1748 | | head: Option<&'g Node<K, V>>, |
1749 | | tail: Option<&'g Node<K, V>>, |
1750 | | range: R, |
1751 | | guard: &'g Guard, |
1752 | | _marker: PhantomData<fn() -> Q>, // covariant over `Q` |
1753 | | } |
1754 | | |
1755 | | impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a> Iterator for Range<'a, 'g, Q, R, K, V> |
1756 | | where |
1757 | | K: Ord + Borrow<Q>, |
1758 | | R: RangeBounds<Q>, |
1759 | | Q: Ord + ?Sized, |
1760 | | { |
1761 | | type Item = Entry<'a, 'g, K, V>; |
1762 | | |
1763 | 144 | fn next(&mut self) -> Option<Entry<'a, 'g, K, V>> { |
1764 | 144 | self.head = match self.head { |
1765 | 111 | Some(n) => self |
1766 | 111 | .parent |
1767 | 111 | .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), |
1768 | 33 | None => self |
1769 | 33 | .parent |
1770 | 33 | .search_bound(self.range.start_bound(), false, self.guard), |
1771 | | }; |
1772 | 144 | if let Some(h130 ) = self.head { |
1773 | 130 | let bound = match self.tail { |
1774 | 0 | Some(t) => Bound::Excluded(t.key.borrow()), |
1775 | 130 | None => self.range.end_bound(), |
1776 | | }; |
1777 | 130 | if !below_upper_bound(&bound, h.key.borrow()) { |
1778 | 19 | self.head = None; |
1779 | 19 | self.tail = None; |
1780 | 111 | } |
1781 | 14 | } |
1782 | 144 | self.head.map(|n| Entry { |
1783 | 111 | parent: self.parent, |
1784 | 111 | node: n, |
1785 | 111 | guard: self.guard, |
1786 | 144 | }) <crossbeam_skiplist::base::Range<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>), i32, i32> as core::iter::traits::iterator::Iterator>::next::{closure#0} Line | Count | Source | 1782 | 101 | self.head.map(|n| Entry { | 1783 | 101 | parent: self.parent, | 1784 | 101 | node: n, | 1785 | 101 | guard: self.guard, | 1786 | 101 | }) |
<crossbeam_skiplist::base::Range<i32, core::ops::range::RangeFull, i32, i32> as core::iter::traits::iterator::Iterator>::next::{closure#0} Line | Count | Source | 1782 | 10 | self.head.map(|n| Entry { | 1783 | 10 | parent: self.parent, | 1784 | 10 | node: n, | 1785 | 10 | guard: self.guard, | 1786 | 10 | }) |
|
1787 | 144 | } <crossbeam_skiplist::base::Range<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>), i32, i32> as core::iter::traits::iterator::Iterator>::next Line | Count | Source | 1763 | 133 | fn next(&mut self) -> Option<Entry<'a, 'g, K, V>> { | 1764 | 133 | self.head = match self.head { | 1765 | 101 | Some(n) => self | 1766 | 101 | .parent | 1767 | 101 | .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), | 1768 | 32 | None => self | 1769 | 32 | .parent | 1770 | 32 | .search_bound(self.range.start_bound(), false, self.guard), | 1771 | | }; | 1772 | 133 | if let Some(h120 ) = self.head { | 1773 | 120 | let bound = match self.tail { | 1774 | 0 | Some(t) => Bound::Excluded(t.key.borrow()), | 1775 | 120 | None => self.range.end_bound(), | 1776 | | }; | 1777 | 120 | if !below_upper_bound(&bound, h.key.borrow()) { | 1778 | 19 | self.head = None; | 1779 | 19 | self.tail = None; | 1780 | 101 | } | 1781 | 13 | } | 1782 | 133 | self.head.map(|n| Entry { | 1783 | | parent: self.parent, | 1784 | | node: n, | 1785 | | guard: self.guard, | 1786 | 133 | }) | 1787 | 133 | } |
<crossbeam_skiplist::base::Range<i32, core::ops::range::RangeFull, i32, i32> as core::iter::traits::iterator::Iterator>::next Line | Count | Source | 1763 | 11 | fn next(&mut self) -> Option<Entry<'a, 'g, K, V>> { | 1764 | 11 | self.head = match self.head { | 1765 | 10 | Some(n) => self | 1766 | 10 | .parent | 1767 | 10 | .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), | 1768 | 1 | None => self | 1769 | 1 | .parent | 1770 | 1 | .search_bound(self.range.start_bound(), false, self.guard), | 1771 | | }; | 1772 | 11 | if let Some(h10 ) = self.head { | 1773 | 10 | let bound = match self.tail { | 1774 | 0 | Some(t) => Bound::Excluded(t.key.borrow()), | 1775 | 10 | None => self.range.end_bound(), | 1776 | | }; | 1777 | 10 | if !below_upper_bound(&bound, h.key.borrow()) { | 1778 | 0 | self.head = None; | 1779 | 0 | self.tail = None; | 1780 | 10 | } | 1781 | 1 | } | 1782 | 11 | self.head.map(|n| Entry { | 1783 | | parent: self.parent, | 1784 | | node: n, | 1785 | | guard: self.guard, | 1786 | 11 | }) | 1787 | 11 | } |
|
1788 | | } |
1789 | | |
1790 | | impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a> DoubleEndedIterator for Range<'a, 'g, Q, R, K, V> |
1791 | | where |
1792 | | K: Ord + Borrow<Q>, |
1793 | | R: RangeBounds<Q>, |
1794 | | Q: Ord + ?Sized, |
1795 | | { |
1796 | | fn next_back(&mut self) -> Option<Entry<'a, 'g, K, V>> { |
1797 | | self.tail = match self.tail { |
1798 | | Some(n) => self |
1799 | | .parent |
1800 | | .search_bound(Bound::Excluded(&n.key.borrow()), true, self.guard), |
1801 | | None => self |
1802 | | .parent |
1803 | | .search_bound(self.range.end_bound(), true, self.guard), |
1804 | | }; |
1805 | | if let Some(t) = self.tail { |
1806 | | let bound = match self.head { |
1807 | | Some(h) => Bound::Excluded(h.key.borrow()), |
1808 | | None => self.range.start_bound(), |
1809 | | }; |
1810 | | if !above_lower_bound(&bound, t.key.borrow()) { |
1811 | | self.head = None; |
1812 | | self.tail = None; |
1813 | | } |
1814 | | } |
1815 | | self.tail.map(|n| Entry { |
1816 | | parent: self.parent, |
1817 | | node: n, |
1818 | | guard: self.guard, |
1819 | | }) |
1820 | | } |
1821 | | } |
1822 | | |
1823 | | impl<Q, R, K, V> fmt::Debug for Range<'_, '_, Q, R, K, V> |
1824 | | where |
1825 | | K: Ord + Borrow<Q> + fmt::Debug, |
1826 | | V: fmt::Debug, |
1827 | | R: RangeBounds<Q> + fmt::Debug, |
1828 | | Q: Ord + ?Sized, |
1829 | | { |
1830 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1831 | | f.debug_struct("Range") |
1832 | | .field("range", &self.range) |
1833 | | .field("head", &self.head) |
1834 | | .field("tail", &self.tail) |
1835 | | .finish() |
1836 | | } |
1837 | | } |
1838 | | |
1839 | | /// An iterator over reference-counted subset of entries of a `SkipList`. |
1840 | | pub struct RefRange<'a, Q, R, K, V> |
1841 | | where |
1842 | | K: Ord + Borrow<Q>, |
1843 | | R: RangeBounds<Q>, |
1844 | | Q: Ord + ?Sized, |
1845 | | { |
1846 | | parent: &'a SkipList<K, V>, |
1847 | | pub(crate) head: Option<RefEntry<'a, K, V>>, |
1848 | | pub(crate) tail: Option<RefEntry<'a, K, V>>, |
1849 | | pub(crate) range: R, |
1850 | | _marker: PhantomData<fn() -> Q>, // covariant over `Q` |
1851 | | } |
1852 | | |
1853 | | unsafe impl<Q, R, K, V> Send for RefRange<'_, Q, R, K, V> |
1854 | | where |
1855 | | K: Ord + Borrow<Q>, |
1856 | | R: RangeBounds<Q>, |
1857 | | Q: Ord + ?Sized, |
1858 | | { |
1859 | | } |
1860 | | |
1861 | | unsafe impl<Q, R, K, V> Sync for RefRange<'_, Q, R, K, V> |
1862 | | where |
1863 | | K: Ord + Borrow<Q>, |
1864 | | R: RangeBounds<Q>, |
1865 | | Q: Ord + ?Sized, |
1866 | | { |
1867 | | } |
1868 | | |
1869 | | impl<Q, R, K, V> fmt::Debug for RefRange<'_, Q, R, K, V> |
1870 | | where |
1871 | | K: Ord + Borrow<Q> + fmt::Debug, |
1872 | | V: fmt::Debug, |
1873 | | R: RangeBounds<Q> + fmt::Debug, |
1874 | | Q: Ord + ?Sized, |
1875 | | { |
1876 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1877 | | f.debug_struct("RefRange") |
1878 | | .field("range", &self.range) |
1879 | | .field("head", &self.head) |
1880 | | .field("tail", &self.tail) |
1881 | | .finish() |
1882 | | } |
1883 | | } |
1884 | | |
1885 | | impl<'a, Q, R, K: 'a, V: 'a> RefRange<'a, Q, R, K, V> |
1886 | | where |
1887 | | K: Ord + Borrow<Q>, |
1888 | | R: RangeBounds<Q>, |
1889 | | Q: Ord + ?Sized, |
1890 | | { |
1891 | | /// TODO |
1892 | 144 | pub fn next(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1893 | 144 | self.parent.check_guard(guard); |
1894 | 144 | self.head = match self.head { |
1895 | 111 | Some(ref e) => e.next(guard), |
1896 | 33 | None => try_pin_loop(|| self.parent.lower_bound(self.range.start_bound(), guard)), <crossbeam_skiplist::base::RefRange<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>), i32, i32>>::next::{closure#0} Line | Count | Source | 1896 | 32 | None => try_pin_loop(|| self.parent.lower_bound(self.range.start_bound(), guard)), |
<crossbeam_skiplist::base::RefRange<i32, core::ops::range::RangeFull, i32, i32>>::next::{closure#0} Line | Count | Source | 1896 | 1 | None => try_pin_loop(|| self.parent.lower_bound(self.range.start_bound(), guard)), |
|
1897 | | }; |
1898 | 144 | let mut finished = false; |
1899 | 144 | if let Some(ref h130 ) = self.head { |
1900 | 130 | let bound = match self.tail { |
1901 | 0 | Some(ref t) => Bound::Excluded(t.key().borrow()), |
1902 | 130 | None => self.range.end_bound(), |
1903 | | }; |
1904 | 130 | if !below_upper_bound(&bound, h.key().borrow()) { |
1905 | 19 | finished = true; |
1906 | 111 | } |
1907 | 14 | } |
1908 | 144 | if finished { |
1909 | 19 | self.head = None; |
1910 | 19 | self.tail = None; |
1911 | 125 | } |
1912 | 144 | self.head.clone() |
1913 | 144 | } <crossbeam_skiplist::base::RefRange<i32, core::ops::range::RangeFull, i32, i32>>::next Line | Count | Source | 1892 | 11 | pub fn next(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { | 1893 | 11 | self.parent.check_guard(guard); | 1894 | 11 | self.head = match self.head { | 1895 | 10 | Some(ref e) => e.next(guard), | 1896 | 1 | None => try_pin_loop(|| self.parent.lower_bound(self.range.start_bound(), guard)), | 1897 | | }; | 1898 | 11 | let mut finished = false; | 1899 | 11 | if let Some(ref h10 ) = self.head { | 1900 | 10 | let bound = match self.tail { | 1901 | 0 | Some(ref t) => Bound::Excluded(t.key().borrow()), | 1902 | 10 | None => self.range.end_bound(), | 1903 | | }; | 1904 | 10 | if !below_upper_bound(&bound, h.key().borrow()) { | 1905 | 0 | finished = true; | 1906 | 10 | } | 1907 | 1 | } | 1908 | 11 | if finished { | 1909 | 0 | self.head = None; | 1910 | 0 | self.tail = None; | 1911 | 11 | } | 1912 | 11 | self.head.clone() | 1913 | 11 | } |
<crossbeam_skiplist::base::RefRange<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>), i32, i32>>::next Line | Count | Source | 1892 | 133 | pub fn next(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { | 1893 | 133 | self.parent.check_guard(guard); | 1894 | 133 | self.head = match self.head { | 1895 | 101 | Some(ref e) => e.next(guard), | 1896 | 32 | None => try_pin_loop(|| self.parent.lower_bound(self.range.start_bound(), guard)), | 1897 | | }; | 1898 | 133 | let mut finished = false; | 1899 | 133 | if let Some(ref h120 ) = self.head { | 1900 | 120 | let bound = match self.tail { | 1901 | 0 | Some(ref t) => Bound::Excluded(t.key().borrow()), | 1902 | 120 | None => self.range.end_bound(), | 1903 | | }; | 1904 | 120 | if !below_upper_bound(&bound, h.key().borrow()) { | 1905 | 19 | finished = true; | 1906 | 101 | } | 1907 | 13 | } | 1908 | 133 | if finished { | 1909 | 19 | self.head = None; | 1910 | 19 | self.tail = None; | 1911 | 114 | } | 1912 | 133 | self.head.clone() | 1913 | 133 | } |
|
1914 | | |
1915 | | /// TODO: docs |
1916 | | pub fn next_back(&mut self, guard: &Guard) -> Option<RefEntry<'a, K, V>> { |
1917 | | self.parent.check_guard(guard); |
1918 | | self.tail = match self.tail { |
1919 | | Some(ref e) => e.prev(guard), |
1920 | | None => try_pin_loop(|| self.parent.upper_bound(self.range.start_bound(), guard)), |
1921 | | }; |
1922 | | let mut finished = false; |
1923 | | if let Some(ref t) = self.tail { |
1924 | | let bound = match self.head { |
1925 | | Some(ref h) => Bound::Excluded(h.key().borrow()), |
1926 | | None => self.range.end_bound(), |
1927 | | }; |
1928 | | if !above_lower_bound(&bound, t.key().borrow()) { |
1929 | | finished = true; |
1930 | | } |
1931 | | } |
1932 | | if finished { |
1933 | | self.head = None; |
1934 | | self.tail = None; |
1935 | | } |
1936 | | self.tail.clone() |
1937 | | } |
1938 | | } |
1939 | | |
1940 | | /// An owning iterator over the entries of a `SkipList`. |
1941 | | pub struct IntoIter<K, V> { |
1942 | | /// The current node. |
1943 | | /// |
1944 | | /// All preceding nods have already been destroyed. |
1945 | | node: *mut Node<K, V>, |
1946 | | } |
1947 | | |
1948 | | impl<K, V> Drop for IntoIter<K, V> { |
1949 | | fn drop(&mut self) { |
1950 | | // Iterate through the whole chain and destroy every node. |
1951 | 1 | while !self.node.is_null() { |
1952 | 0 | unsafe { |
1953 | 0 | // Unprotected loads are okay because this function is the only one currently using |
1954 | 0 | // the skip list. |
1955 | 0 | let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected()); |
1956 | 0 |
|
1957 | 0 | // We can safely do this without deferring because references to |
1958 | 0 | // keys & values that we give out never outlive the SkipList. |
1959 | 0 | Node::finalize(self.node); |
1960 | 0 |
|
1961 | 0 | self.node = next.as_raw() as *mut Node<K, V>; |
1962 | 0 | } |
1963 | | } |
1964 | 1 | } |
1965 | | } |
1966 | | |
1967 | | impl<K, V> Iterator for IntoIter<K, V> { |
1968 | | type Item = (K, V); |
1969 | | |
1970 | 8 | fn next(&mut self) -> Option<(K, V)> { |
1971 | 8 | loop { |
1972 | 8 | // Have we reached the end of the skip list? |
1973 | 8 | if self.node.is_null() { |
1974 | 1 | return None; |
1975 | 7 | } |
1976 | 7 | |
1977 | 7 | unsafe { |
1978 | 7 | // Take the key and value out of the node. |
1979 | 7 | let key = ptr::read(&(*self.node).key); |
1980 | 7 | let value = ptr::read(&(*self.node).value); |
1981 | 7 | |
1982 | 7 | // Get the next node in the skip list. |
1983 | 7 | // |
1984 | 7 | // Unprotected loads are okay because this function is the only one currently using |
1985 | 7 | // the skip list. |
1986 | 7 | let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected()); |
1987 | 7 | |
1988 | 7 | // Deallocate the current node and move to the next one. |
1989 | 7 | Node::dealloc(self.node); |
1990 | 7 | self.node = next.as_raw() as *mut Node<K, V>; |
1991 | 7 | |
1992 | 7 | // The current node may be marked. If it is, it's been removed from the skip list |
1993 | 7 | // and we should just skip it. |
1994 | 7 | if next.tag() == 0 { |
1995 | 7 | return Some((key, value)); |
1996 | 0 | } |
1997 | | } |
1998 | | } |
1999 | 8 | } |
2000 | | } |
2001 | | |
2002 | | impl<K, V> fmt::Debug for IntoIter<K, V> { |
2003 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
2004 | | f.pad("IntoIter { .. }") |
2005 | | } |
2006 | | } |
2007 | | |
2008 | | /// Helper function to retry an operation until pinning succeeds or `None` is |
2009 | | /// returned. |
2010 | 37 | pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>> |
2011 | 37 | where |
2012 | 37 | F: FnMut() -> Option<Entry<'a, 'g, K, V>>, |
2013 | 37 | { |
2014 | | loop { |
2015 | 37 | if let Some(e31 ) = f()?6 .pin() { |
2016 | 31 | return Some(e); |
2017 | 0 | } |
2018 | | } |
2019 | 37 | } crossbeam_skiplist::base::try_pin_loop::<<crossbeam_skiplist::base::RefRange<i32, core::ops::range::RangeFull, i32, i32>>::next::{closure#0}, i32, i32> Line | Count | Source | 2010 | 1 | pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>> | 2011 | 1 | where | 2012 | 1 | F: FnMut() -> Option<Entry<'a, 'g, K, V>>, | 2013 | 1 | { | 2014 | | loop { | 2015 | 1 | if let Some(e) = f()?0 .pin() { | 2016 | 1 | return Some(e); | 2017 | 0 | } | 2018 | | } | 2019 | 1 | } |
crossbeam_skiplist::base::try_pin_loop::<<crossbeam_skiplist::base::RefIter<i32, i32>>::next_back::{closure#0}, i32, i32> Line | Count | Source | 2010 | 1 | pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>> | 2011 | 1 | where | 2012 | 1 | F: FnMut() -> Option<Entry<'a, 'g, K, V>>, | 2013 | 1 | { | 2014 | | loop { | 2015 | 1 | if let Some(e) = f()?0 .pin() { | 2016 | 1 | return Some(e); | 2017 | 0 | } | 2018 | | } | 2019 | 1 | } |
crossbeam_skiplist::base::try_pin_loop::<<crossbeam_skiplist::base::RefRange<i32, (core::ops::range::Bound<&i32>, core::ops::range::Bound<&i32>), i32, i32>>::next::{closure#0}, i32, i32> Line | Count | Source | 2010 | 32 | pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>> | 2011 | 32 | where | 2012 | 32 | F: FnMut() -> Option<Entry<'a, 'g, K, V>>, | 2013 | 32 | { | 2014 | | loop { | 2015 | 32 | if let Some(e26 ) = f()?6 .pin() { | 2016 | 26 | return Some(e); | 2017 | 0 | } | 2018 | | } | 2019 | 32 | } |
crossbeam_skiplist::base::try_pin_loop::<<crossbeam_skiplist::base::RefIter<i32, i32>>::next::{closure#0}, i32, i32> Line | Count | Source | 2010 | 3 | pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option<RefEntry<'a, K, V>> | 2011 | 3 | where | 2012 | 3 | F: FnMut() -> Option<Entry<'a, 'g, K, V>>, | 2013 | 3 | { | 2014 | | loop { | 2015 | 3 | if let Some(e) = f()?0 .pin() { | 2016 | 3 | return Some(e); | 2017 | 0 | } | 2018 | | } | 2019 | 3 | } |
|
2020 | | |
2021 | | /// Helper function to check if a value is above a lower bound |
2022 | 487 | fn above_lower_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { |
2023 | 487 | match *bound { |
2024 | 487 | Bound::Unbounded => true57 , |
2025 | 261 | Bound::Included(key) => other >= key, |
2026 | 169 | Bound::Excluded(key) => other > key, |
2027 | | } |
2028 | 487 | } crossbeam_skiplist::base::above_lower_bound::<i32> Line | Count | Source | 2022 | 154 | fn above_lower_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { | 2023 | 154 | match *bound { | 2024 | 154 | Bound::Unbounded => true27 , | 2025 | 62 | Bound::Included(key) => other >= key, | 2026 | 65 | Bound::Excluded(key) => other > key, | 2027 | | } | 2028 | 154 | } |
Unexecuted instantiation: crossbeam_skiplist::base::above_lower_bound::<i32> Unexecuted instantiation: crossbeam_skiplist::base::above_lower_bound::<base::drops::Key> crossbeam_skiplist::base::above_lower_bound::<i32> Line | Count | Source | 2022 | 333 | fn above_lower_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { | 2023 | 333 | match *bound { | 2024 | 333 | Bound::Unbounded => true30 , | 2025 | 199 | Bound::Included(key) => other >= key, | 2026 | 104 | Bound::Excluded(key) => other > key, | 2027 | | } | 2028 | 333 | } |
|
2029 | | |
2030 | | /// Helper function to check if a value is below an upper bound |
2031 | 471 | fn below_upper_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { |
2032 | 471 | match *bound { |
2033 | 471 | Bound::Unbounded => true117 , |
2034 | 117 | Bound::Included(key) => other <= key, |
2035 | 237 | Bound::Excluded(key) => other < key, |
2036 | | } |
2037 | 471 | } crossbeam_skiplist::base::below_upper_bound::<i32> Line | Count | Source | 2031 | 184 | fn below_upper_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { | 2032 | 184 | match *bound { | 2033 | 184 | Bound::Unbounded => true51 , | 2034 | 43 | Bound::Included(key) => other <= key, | 2035 | 90 | Bound::Excluded(key) => other < key, | 2036 | | } | 2037 | 184 | } |
Unexecuted instantiation: crossbeam_skiplist::base::below_upper_bound::<i32> crossbeam_skiplist::base::below_upper_bound::<i32> Line | Count | Source | 2031 | 287 | fn below_upper_bound<T: Ord + ?Sized>(bound: &Bound<&T>, other: &T) -> bool { | 2032 | 287 | match *bound { | 2033 | 287 | Bound::Unbounded => true66 , | 2034 | 74 | Bound::Included(key) => other <= key, | 2035 | 147 | Bound::Excluded(key) => other < key, | 2036 | | } | 2037 | 287 | } |
Unexecuted instantiation: crossbeam_skiplist::base::below_upper_bound::<base::drops::Key> |