Coverage Report

Created: 2021-01-22 16:54

crossbeam-utils/src/sync/sharded_lock.rs
Line | Count | Source
  1 |       | use std::cell::UnsafeCell;
  2 |       | use std::collections::HashMap;
  3 |       | use std::fmt;
  4 |       | use std::marker::PhantomData;
  5 |       | use std::mem;
  6 |       | use std::ops::{Deref, DerefMut};
  7 |       | use std::panic::{RefUnwindSafe, UnwindSafe};
  8 |       | use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult};
  9 |       | use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
 10 |       | use std::thread::{self, ThreadId};
 11 |       |
 12 |       | use crate::CachePadded;
 13 |       | use lazy_static::lazy_static;
 14 |       |
 15 |       | /// The number of shards per sharded lock. Must be a power of two.
 16 |       | const NUM_SHARDS: usize = 8;
 17 |       |
 18 |       | /// A shard containing a single reader-writer lock.
 19 |       | struct Shard {
 20 |       |     /// The inner reader-writer lock.
 21 |       |     lock: RwLock<()>,
 22 |       |
 23 |       |     /// The write-guard keeping this shard locked.
 24 |       |     ///
 25 |       |     /// Write operations will lock each shard and store the guard here. These guards get dropped at
 26 |       |     /// the same time the big guard is dropped.
 27 |       |     write_guard: UnsafeCell<Option<RwLockWriteGuard<'static, ()>>>,
 28 |       | }
 29 |       |
 30 |       | /// A sharded reader-writer lock.
 31 |       | ///
 32 |       | /// This lock is equivalent to [`RwLock`], except read operations are faster and write operations
 33 |       | /// are slower.
 34 |       | ///
 35 |       | /// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a
 36 |       | /// single cache line. Read operations will pick one of the shards depending on the current thread
 37 |       | /// and lock it. Write operations need to lock all shards in succession.
 38 |       | ///
 39 |       | /// By splitting the lock into shards, concurrent read operations will in most cases choose
 40 |       | /// different shards and thus update different cache lines, which is good for scalability. However,
 41 |       | /// write operations need to do more work and are therefore slower than usual.
 42 |       | ///
 43 |       | /// The priority policy of the lock is dependent on the underlying operating system's
 44 |       | /// implementation, and this type does not guarantee that any particular policy will be used.
 45 |       | ///
 46 |       | /// # Poisoning
 47 |       | ///
 48 |       | /// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be
 49 |       | /// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any
 50 |       | /// read operation, the lock will not be poisoned.
 51 |       | ///
 52 |       | /// # Examples
 53 |       | ///
 54 |       | /// ```
 55 |       | /// use crossbeam_utils::sync::ShardedLock;
 56 |       | ///
 57 |       | /// let lock = ShardedLock::new(5);
 58 |       | ///
 59 |       | /// // Any number of read locks can be held at once.
 60 |       | /// {
 61 |       | ///     let r1 = lock.read().unwrap();
 62 |       | ///     let r2 = lock.read().unwrap();
 63 |       | ///     assert_eq!(*r1, 5);
 64 |       | ///     assert_eq!(*r2, 5);
 65 |       | /// } // Read locks are dropped at this point.
 66 |       | ///
 67 |       | /// // However, only one write lock may be held.
 68 |       | /// {
 69 |       | ///     let mut w = lock.write().unwrap();
 70 |       | ///     *w += 1;
 71 |       | ///     assert_eq!(*w, 6);
 72 |       | /// } // Write lock is dropped here.
 73 |       | /// ```
 74 |       | ///
 75 |       | /// [`RwLock`]: std::sync::RwLock
 76 |       | pub struct ShardedLock<T: ?Sized> {
 77 |       |     /// A list of locks protecting the internal data.
 78 |       |     shards: Box<[CachePadded<Shard>]>,
 79 |       |
 80 |       |     /// The internal data.
 81 |       |     value: UnsafeCell<T>,
 82 |       | }
 83 |       |
 84 |       | unsafe impl<T: ?Sized + Send> Send for ShardedLock<T> {}
 85 |       | unsafe impl<T: ?Sized + Send + Sync> Sync for ShardedLock<T> {}
 86 |       |
 87 |       | impl<T: ?Sized> UnwindSafe for ShardedLock<T> {}
 88 |       | impl<T: ?Sized> RefUnwindSafe for ShardedLock<T> {}
 89 |       |
 90 |       | impl<T> ShardedLock<T> {
 91 |       |     /// Creates a new sharded reader-writer lock.
 92 |       |     ///
 93 |       |     /// # Examples
 94 |       |     ///
 95 |       |     /// ```
 96 |       |     /// use crossbeam_utils::sync::ShardedLock;
 97 |       |     ///
 98 |       |     /// let lock = ShardedLock::new(5);
 99 |       |     /// ```
100 |    14 |     pub fn new(value: T) -> ShardedLock<T> {
101 |    14 |         ShardedLock {
102 |    14 |             shards: (0..NUM_SHARDS)
103 |   113 |                 .map(|_| {
104 |   113 |                     CachePadded::new(Shard {
105 |   113 |                         lock: RwLock::new(()),
106 |   113 |                         write_guard: UnsafeCell::new(None),
107 |   113 |                     })
108 |   113 |                 })
Instantiations of <ShardedLock<T>>::new::{closure#0} (lines 103-108), counts per type parameter:
    sharded_lock::NonCopy: 32, [i32; 3]: 8, (): 16, i32: 33, sharded_lock::test_into_inner_drop::Foo: 8, isize: 16
109 |    14 |                 .collect::<Box<[_]>>(),
110 |    14 |             value: UnsafeCell::new(value),
111 |    14 |         }
112 |    14 |     }
Instantiations of <ShardedLock<T>>::new (lines 100-112), call counts per type parameter:
    [i32; 3]: 1, sharded_lock::test_into_inner_drop::Foo: 1, i32: 4, sharded_lock::NonCopy: 4, isize: 2, (): 2
113 |       |
114 |       |     /// Consumes this lock, returning the underlying data.
115 |       |     ///
116 |       |     /// # Errors
117 |       |     ///
118 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
119 |       |     /// operation panics.
120 |       |     ///
121 |       |     /// # Examples
122 |       |     ///
123 |       |     /// ```
124 |       |     /// use crossbeam_utils::sync::ShardedLock;
125 |       |     ///
126 |       |     /// let lock = ShardedLock::new(String::new());
127 |       |     /// {
128 |       |     ///     let mut s = lock.write().unwrap();
129 |       |     ///     *s = "modified".to_owned();
130 |       |     /// }
131 |       |     /// assert_eq!(lock.into_inner().unwrap(), "modified");
132 |       |     /// ```
133 |     4 |     pub fn into_inner(self) -> LockResult<T> {
134 |     4 |         let is_poisoned = self.is_poisoned();
135 |     4 |         let inner = self.value.into_inner();
136 |     4 |
137 |     4 |         if is_poisoned {
138 |     1 |             Err(PoisonError::new(inner))
139 |       |         } else {
140 |     3 |             Ok(inner)
141 |       |         }
142 |     4 |     }
Instantiations of <ShardedLock<T>>::into_inner (lines 133-142), call counts per type parameter:
    sharded_lock::NonCopy: 3 (1 poisoned, 2 ok), sharded_lock::test_into_inner_drop::Foo: 1 (ok)
143 |       | }
144 |       |
145 |       | impl<T: ?Sized> ShardedLock<T> {
146 |       |     /// Returns `true` if the lock is poisoned.
147 |       |     ///
148 |       |     /// If another thread can still access the lock, it may become poisoned at any time. A `false`
149 |       |     /// result should not be trusted without additional synchronization.
150 |       |     ///
151 |       |     /// # Examples
152 |       |     ///
153 |       |     /// ```
154 |       |     /// use crossbeam_utils::sync::ShardedLock;
155 |       |     /// use std::sync::Arc;
156 |       |     /// use std::thread;
157 |       |     ///
158 |       |     /// let lock = Arc::new(ShardedLock::new(0));
159 |       |     /// let c_lock = lock.clone();
160 |       |     ///
161 |       |     /// let _ = thread::spawn(move || {
162 |       |     ///     let _lock = c_lock.write().unwrap();
163 |       |     ///     panic!(); // the lock gets poisoned
164 |       |     /// }).join();
165 |       |     /// assert_eq!(lock.is_poisoned(), true);
166 |       |     /// ```
167 |    10 |     pub fn is_poisoned(&self) -> bool {
168 |    10 |         self.shards[0].lock.is_poisoned()
169 |    10 |     }
Instantiations of <ShardedLock<T>>::is_poisoned (lines 167-169), call counts per type parameter:
    sharded_lock::NonCopy: 7, i32: 2, sharded_lock::test_into_inner_drop::Foo: 1
170 |       |
171 |       |     /// Returns a mutable reference to the underlying data.
172 |       |     ///
173 |       |     /// Since this call borrows the lock mutably, no actual locking needs to take place.
174 |       |     ///
175 |       |     /// # Errors
176 |       |     ///
177 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
178 |       |     /// operation panics.
179 |       |     ///
180 |       |     /// # Examples
181 |       |     ///
182 |       |     /// ```
183 |       |     /// use crossbeam_utils::sync::ShardedLock;
184 |       |     ///
185 |       |     /// let mut lock = ShardedLock::new(0);
186 |       |     /// *lock.get_mut().unwrap() = 10;
187 |       |     /// assert_eq!(*lock.read().unwrap(), 10);
188 |       |     /// ```
189 |     2 |     pub fn get_mut(&mut self) -> LockResult<&mut T> {
190 |     2 |         let is_poisoned = self.is_poisoned();
191 |     2 |         let inner = unsafe { &mut *self.value.get() };
192 |     2 |
193 |     2 |         if is_poisoned {
194 |     1 |             Err(PoisonError::new(inner))
195 |       |         } else {
196 |     1 |             Ok(inner)
197 |       |         }
198 |     2 |     }
199 |       |
200 |       |     /// Attempts to acquire this lock with shared read access.
201 |       |     ///
202 |       |     /// If the access could not be granted at this time, an error is returned. Otherwise, a guard
203 |       |     /// is returned which will release the shared access when it is dropped. This method does not
204 |       |     /// provide any guarantees with respect to the ordering of whether contentious readers or
205 |       |     /// writers will acquire the lock first.
206 |       |     ///
207 |       |     /// # Errors
208 |       |     ///
209 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
210 |       |     /// operation panics.
211 |       |     ///
212 |       |     /// # Examples
213 |       |     ///
214 |       |     /// ```
215 |       |     /// use crossbeam_utils::sync::ShardedLock;
216 |       |     ///
217 |       |     /// let lock = ShardedLock::new(1);
218 |       |     ///
219 |       |     /// match lock.try_read() {
220 |       |     ///     Ok(n) => assert_eq!(*n, 1),
221 |       |     ///     Err(_) => unreachable!(),
222 |       |     /// };
223 |       |     /// ```
224 |     0 |     pub fn try_read(&self) -> TryLockResult<ShardedLockReadGuard<'_, T>> {
225 |     0 |         // Take the current thread index and map it to a shard index. Thread indices will tend to
226 |     0 |         // distribute shards among threads equally, thus reducing contention due to read-locking.
227 |     0 |         let current_index = current_index().unwrap_or(0);
228 |     0 |         let shard_index = current_index & (self.shards.len() - 1);
229 |     0 |
230 |     0 |         match self.shards[shard_index].lock.try_read() {
231 |     0 |             Ok(guard) => Ok(ShardedLockReadGuard {
232 |     0 |                 lock: self,
233 |     0 |                 _guard: guard,
234 |     0 |                 _marker: PhantomData,
235 |     0 |             }),
236 |     0 |             Err(TryLockError::Poisoned(err)) => {
237 |     0 |                 let guard = ShardedLockReadGuard {
238 |     0 |                     lock: self,
239 |     0 |                     _guard: err.into_inner(),
240 |     0 |                     _marker: PhantomData,
241 |     0 |                 };
242 |     0 |                 Err(TryLockError::Poisoned(PoisonError::new(guard)))
243 |       |             }
244 |     0 |             Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock),
245 |       |         }
246 |     0 |     }
247 |       |
248 |       |     /// Locks with shared read access, blocking the current thread until it can be acquired.
249 |       |     ///
250 |       |     /// The calling thread will be blocked until there are no more writers which hold the lock.
251 |       |     /// There may be other readers currently inside the lock when this method returns. This method
252 |       |     /// does not provide any guarantees with respect to the ordering of whether contentious readers
253 |       |     /// or writers will acquire the lock first.
254 |       |     ///
255 |       |     /// Returns a guard which will release the shared access when dropped.
256 |       |     ///
257 |       |     /// # Errors
258 |       |     ///
259 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
260 |       |     /// operation panics.
261 |       |     ///
262 |       |     /// # Panics
263 |       |     ///
264 |       |     /// This method might panic when called if the lock is already held by the current thread.
265 |       |     ///
266 |       |     /// # Examples
267 |       |     ///
268 |       |     /// ```
269 |       |     /// use crossbeam_utils::sync::ShardedLock;
270 |       |     /// use std::sync::Arc;
271 |       |     /// use std::thread;
272 |       |     ///
273 |       |     /// let lock = Arc::new(ShardedLock::new(1));
274 |       |     /// let c_lock = lock.clone();
275 |       |     ///
276 |       |     /// let n = lock.read().unwrap();
277 |       |     /// assert_eq!(*n, 1);
278 |       |     ///
279 |       |     /// thread::spawn(move || {
280 |       |     ///     let r = c_lock.read();
281 |       |     ///     assert!(r.is_ok());
282 |       |     /// }).join().unwrap();
283 |       |     /// ```
284 | 8.93k |     pub fn read(&self) -> LockResult<ShardedLockReadGuard<'_, T>> {
285 | 8.93k |         // Take the current thread index and map it to a shard index. Thread indices will tend to
286 | 8.93k |         // distribute shards among threads equally, thus reducing contention due to read-locking.
287 | 8.93k |         let current_index = current_index().unwrap_or(0);
288 | 8.93k |         let shard_index = current_index & (self.shards.len() - 1);
289 | 8.93k |
290 | 8.93k |         match self.shards[shard_index].lock.read() {
291 | 8.93k |             Ok(guard) => Ok(ShardedLockReadGuard {
292 | 8.93k |                 lock: self,
293 | 8.93k |                 _guard: guard,
294 | 8.93k |                 _marker: PhantomData,
295 | 8.93k |             }),
296 |     1 |             Err(err) => Err(PoisonError::new(ShardedLockReadGuard {
297 |     1 |                 lock: self,
298 |     1 |                 _guard: err.into_inner(),
299 |     1 |                 _marker: PhantomData,
300 |     1 |             })),
301 |       |         }
302 | 8.93k |     }
Instantiations of <ShardedLock<T>>::read (lines 284-302), call counts per type parameter:
    (): 8.92k, isize: 2, [i32]: 1, i32: 10 (9 ok, 1 poisoned)
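
As the doc comment on lines 35-41 explains, a read locks only the one shard chosen from the caller's thread index, while a write must hold every shard. A simplified model of that read path, using plain std::sync::RwLock shards in place of the crate's cache-padded ones (names here are illustrative, not the crate's internals):

    use std::sync::{RwLock, RwLockReadGuard};

    const NUM_SHARDS: usize = 8;

    // Read path in miniature: pick a shard from a per-thread index and
    // read-lock only that shard; a writer would have to lock all of them.
    fn read_shard(
        shards: &[RwLock<()>; NUM_SHARDS],
        thread_index: usize,
    ) -> RwLockReadGuard<'_, ()> {
        let shard_index = thread_index & (NUM_SHARDS - 1);
        shards[shard_index].read().unwrap()
    }

    fn main() {
        let shards: [RwLock<()>; NUM_SHARDS] = Default::default();
        // Readers with different thread indices end up in different shards,
        // so they do not contend on the same lock (or the same cache line).
        let _g0 = read_shard(&shards, 0);
        let _g1 = read_shard(&shards, 1);
    }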
303 |       |
304 |       |     /// Attempts to acquire this lock with exclusive write access.
305 |       |     ///
306 |       |     /// If the access could not be granted at this time, an error is returned. Otherwise, a guard
307 |       |     /// is returned which will release the exclusive access when it is dropped. This method does
308 |       |     /// not provide any guarantees with respect to the ordering of whether contentious readers or
309 |       |     /// writers will acquire the lock first.
310 |       |     ///
311 |       |     /// # Errors
312 |       |     ///
313 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
314 |       |     /// operation panics.
315 |       |     ///
316 |       |     /// # Examples
317 |       |     ///
318 |       |     /// ```
319 |       |     /// use crossbeam_utils::sync::ShardedLock;
320 |       |     ///
321 |       |     /// let lock = ShardedLock::new(1);
322 |       |     ///
323 |       |     /// let n = lock.read().unwrap();
324 |       |     /// assert_eq!(*n, 1);
325 |       |     ///
326 |       |     /// assert!(lock.try_write().is_err());
327 |       |     /// ```
328 |     1 |     pub fn try_write(&self) -> TryLockResult<ShardedLockWriteGuard<'_, T>> {
329 |     1 |         let mut poisoned = false;
330 |     1 |         let mut blocked = None;
331 |       |
332 |       |         // Write-lock each shard in succession.
333 |     2 |         for (i, shard) in self.shards.iter().enumerate() {
334 |     2 |             let guard = match shard.lock.try_write() {
335 |     1 |                 Ok(guard) => guard,
336 |     0 |                 Err(TryLockError::Poisoned(err)) => {
337 |     0 |                     poisoned = true;
338 |     0 |                     err.into_inner()
339 |       |                 }
340 |       |                 Err(TryLockError::WouldBlock) => {
341 |     1 |                     blocked = Some(i);
342 |     1 |                     break;
343 |       |                 }
344 |       |             };
345 |       |
346 |       |             // Store the guard into the shard.
347 |     1 |             unsafe {
348 |     1 |                 let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
349 |     1 |                 let dest: *mut _ = shard.write_guard.get();
350 |     1 |                 *dest = Some(guard);
351 |     1 |             }
352 |       |         }
353 |       |
354 |     1 |         if let Some(i) = blocked {
355 |       |             // Unlock the shards in reverse order of locking.
356 |     1 |             for shard in self.shards[0..i].iter().rev() {
357 |     1 |                 unsafe {
358 |     1 |                     let dest: *mut _ = shard.write_guard.get();
359 |     1 |                     let guard = mem::replace(&mut *dest, None);
360 |     1 |                     drop(guard);
361 |     1 |                 }
362 |       |             }
363 |     1 |             Err(TryLockError::WouldBlock)
364 |     0 |         } else if poisoned {
365 |     0 |             let guard = ShardedLockWriteGuard {
366 |     0 |                 lock: self,
367 |     0 |                 _marker: PhantomData,
368 |     0 |             };
369 |     0 |             Err(TryLockError::Poisoned(PoisonError::new(guard)))
370 |       |         } else {
371 |     0 |             Ok(ShardedLockWriteGuard {
372 |     0 |                 lock: self,
373 |     0 |                 _marker: PhantomData,
374 |     0 |             })
375 |       |         }
376 |     1 |     }
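
try_write (lines 328-376) is an acquire-all-or-roll-back loop: shards are try-locked in index order, and on the first WouldBlock the guards taken so far are dropped in reverse order before the error is reported. A minimal sketch of the same pattern over plain RwLocks (illustrative; poisoning is folded into the failure case):

    use std::sync::{RwLock, RwLockWriteGuard};

    // Try to write-lock every lock in order. On contention, release the
    // guards acquired so far (last-acquired first) and report failure.
    fn try_write_all<'a>(locks: &'a [RwLock<()>]) -> Option<Vec<RwLockWriteGuard<'a, ()>>> {
        let mut guards = Vec::with_capacity(locks.len());
        for lock in locks {
            match lock.try_write() {
                Ok(guard) => guards.push(guard),
                Err(_) => {
                    // Pop so the most recently acquired guard is dropped first.
                    while guards.pop().is_some() {}
                    return None;
                }
            }
        }
        Some(guards)
    }

    fn main() {
        let locks = vec![RwLock::new(()), RwLock::new(())];
        {
            let _r = locks[1].read().unwrap();
            // One lock is read-locked, so acquiring all of them fails...
            assert!(try_write_all(&locks).is_none());
        }
        // ...and succeeds once the reader is gone.
        assert!(try_write_all(&locks).is_some());
    }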
377 |       |
378 |       |     /// Locks with exclusive write access, blocking the current thread until it can be acquired.
379 |       |     ///
380 |       |     /// The calling thread will be blocked until there are no more writers which hold the lock.
381 |       |     /// There may be other readers currently inside the lock when this method returns. This method
382 |       |     /// does not provide any guarantees with respect to the ordering of whether contentious readers
383 |       |     /// or writers will acquire the lock first.
384 |       |     ///
385 |       |     /// Returns a guard which will release the exclusive access when dropped.
386 |       |     ///
387 |       |     /// # Errors
388 |       |     ///
389 |       |     /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
390 |       |     /// operation panics.
391 |       |     ///
392 |       |     /// # Panics
393 |       |     ///
394 |       |     /// This method might panic when called if the lock is already held by the current thread.
395 |       |     ///
396 |       |     /// # Examples
397 |       |     ///
398 |       |     /// ```
399 |       |     /// use crossbeam_utils::sync::ShardedLock;
400 |       |     ///
401 |       |     /// let lock = ShardedLock::new(1);
402 |       |     ///
403 |       |     /// let mut n = lock.write().unwrap();
404 |       |     /// *n = 2;
405 |       |     ///
406 |       |     /// assert!(lock.try_read().is_err());
407 |       |     /// ```
408 |   968 |     pub fn write(&self) -> LockResult<ShardedLockWriteGuard<'_, T>> {
409 |   968 |         let mut poisoned = false;
410 |       |
411 |       |         // Write-lock each shard in succession.
412 | 7.72k |         for shard in self.shards.iter() {
413 | 7.72k |             let guard = match shard.lock.write() {
414 | 7.71k |                 Ok(guard) => guard,
415 |     8 |                 Err(err) => {
416 |     8 |                     poisoned = true;
417 |     8 |                     err.into_inner()
418 |       |                 }
419 |       |             };
420 |       |
421 |       |             // Store the guard into the shard.
422 | 7.72k |             unsafe {
423 | 7.72k |                 let guard: RwLockWriteGuard<'_, ()> = guard;
424 | 7.72k |                 let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
425 | 7.72k |                 let dest: *mut _ = shard.write_guard.get();
426 | 7.72k |                 *dest = Some(guard);
427 | 7.72k |             }
428 |       |         }
429 |       |
430 |   968 |         if poisoned {
431 |     1 |             Err(PoisonError::new(ShardedLockWriteGuard {
432 |     1 |                 lock: self,
433 |     1 |                 _marker: PhantomData,
434 |     1 |             }))
435 |       |         } else {
436 |   967 |             Ok(ShardedLockWriteGuard {
437 |   967 |                 lock: self,
438 |   967 |                 _marker: PhantomData,
439 |   967 |             })
440 |       |         }
441 |   968 |     }
Instantiations of <ShardedLock<T>>::write (lines 408-441), call counts per type parameter:
    i32: 5 (4 ok, 1 poisoned; the per-shard Err arm on line 415 ran 8 times), [i32]: 1, sharded_lock::NonCopy: 2, isize: 1, (): 959
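
Per the # Poisoning section (lines 46-50), a panic while a write guard is held poisons the lock, and later calls surface that through PoisonError. A small usage sketch built only from the ShardedLock API documented in this file:

    use crossbeam_utils::sync::ShardedLock;
    use std::sync::Arc;
    use std::thread;

    fn main() {
        let lock = Arc::new(ShardedLock::new(0));
        let c_lock = Arc::clone(&lock);

        // Panicking while the write guard is alive poisons the lock.
        let _ = thread::spawn(move || {
            let _guard = c_lock.write().unwrap();
            panic!("poison the lock");
        })
        .join();

        assert!(lock.is_poisoned());
        // The data is still reachable even when the lock is poisoned.
        let value = match lock.read() {
            Ok(guard) => *guard,
            Err(poisoned) => *poisoned.into_inner(),
        };
        assert_eq!(value, 0);
    }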
442 |       | }
443 |       |
444 |       | impl<T: ?Sized + fmt::Debug> fmt::Debug for ShardedLock<T> {
445 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
446 |     0 |         match self.try_read() {
447 |     0 |             Ok(guard) => f
448 |     0 |                 .debug_struct("ShardedLock")
449 |     0 |                 .field("data", &&*guard)
450 |     0 |                 .finish(),
451 |     0 |             Err(TryLockError::Poisoned(err)) => f
452 |     0 |                 .debug_struct("ShardedLock")
453 |     0 |                 .field("data", &&**err.get_ref())
454 |     0 |                 .finish(),
455 |       |             Err(TryLockError::WouldBlock) => {
456 |       |                 struct LockedPlaceholder;
457 |       |                 impl fmt::Debug for LockedPlaceholder {
458 |     0 |                     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
459 |     0 |                         f.write_str("<locked>")
460 |     0 |                     }
461 |       |                 }
462 |     0 |                 f.debug_struct("ShardedLock")
463 |     0 |                     .field("data", &LockedPlaceholder)
464 |     0 |                     .finish()
465 |       |             }
466 |       |         }
467 |     0 |     }
468 |       | }
469 |       |
470 |       | impl<T: Default> Default for ShardedLock<T> {
471 |     0 |     fn default() -> ShardedLock<T> {
472 |     0 |         ShardedLock::new(Default::default())
473 |     0 |     }
474 |       | }
475 |       |
476 |       | impl<T> From<T> for ShardedLock<T> {
477 |     0 |     fn from(t: T) -> Self {
478 |     0 |         ShardedLock::new(t)
479 |     0 |     }
480 |       | }
481 |       |
482 |       | /// A guard used to release the shared read access of a [`ShardedLock`] when dropped.
483 |       | pub struct ShardedLockReadGuard<'a, T: ?Sized> {
484 |       |     lock: &'a ShardedLock<T>,
485 |       |     _guard: RwLockReadGuard<'a, ()>,
486 |       |     _marker: PhantomData<RwLockReadGuard<'a, T>>,
487 |       | }
488 |       |
489 |       | unsafe impl<T: ?Sized + Sync> Sync for ShardedLockReadGuard<'_, T> {}
490 |       |
491 |       | impl<T: ?Sized> Deref for ShardedLockReadGuard<'_, T> {
492 |       |     type Target = T;
493 |       |
494 |     9 |     fn deref(&self) -> &T {
495 |     9 |         unsafe { &*self.lock.value.get() }
496 |     9 |     }
Instantiations of <ShardedLockReadGuard<T> as Deref>::deref (lines 494-496), call counts per type parameter:
    i32: 7, [i32]: 1, isize: 1
497 |       | }
498 |       |
499 |       | impl<T: fmt::Debug> fmt::Debug for ShardedLockReadGuard<'_, T> {
500 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
501 |     0 |         f.debug_struct("ShardedLockReadGuard")
502 |     0 |             .field("lock", &self.lock)
503 |     0 |             .finish()
504 |     0 |     }
505 |       | }
506 |       |
507 |       | impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockReadGuard<'_, T> {
508 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
509 |     0 |         (**self).fmt(f)
510 |     0 |     }
511 |       | }
512 |       |
513 |       | /// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped.
514 |       | pub struct ShardedLockWriteGuard<'a, T: ?Sized> {
515 |       |     lock: &'a ShardedLock<T>,
516 |       |     _marker: PhantomData<RwLockWriteGuard<'a, T>>,
517 |       | }
518 |       |
519 |       | unsafe impl<T: ?Sized + Sync> Sync for ShardedLockWriteGuard<'_, T> {}
520 |       |
521 |       | impl<T: ?Sized> Drop for ShardedLockWriteGuard<'_, T> {
522 |   968 |     fn drop(&mut self) {
523 |       |         // Unlock the shards in reverse order of locking.
524 | 7.74k |         for shard in self.lock.shards.iter().rev() {
525 | 7.74k |             unsafe {
526 | 7.74k |                 let dest: *mut _ = shard.write_guard.get();
527 | 7.74k |                 let guard = mem::replace(&mut *dest, None);
528 | 7.74k |                 drop(guard);
529 | 7.74k |             }
530 |       |         }
531 |   968 |     }
Instantiations of <ShardedLockWriteGuard<T> as Drop>::drop (lines 522-531), call counts per type parameter:
    [i32]: 1, i32: 5, (): 959, isize: 1, sharded_lock::NonCopy: 2
532 |       | }
533 |       |
534 |       | impl<T: fmt::Debug> fmt::Debug for ShardedLockWriteGuard<'_, T> {
535 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
536 |     0 |         f.debug_struct("ShardedLockWriteGuard")
537 |     0 |             .field("lock", &self.lock)
538 |     0 |             .finish()
539 |     0 |     }
540 |       | }
541 |       |
542 |       | impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockWriteGuard<'_, T> {
543 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
544 |     0 |         (**self).fmt(f)
545 |     0 |     }
546 |       | }
547 |       |
548 |       | impl<T: ?Sized> Deref for ShardedLockWriteGuard<'_, T> {
549 |       |     type Target = T;
550 |       |
551 |    11 |     fn deref(&self) -> &T {
552 |    11 |         unsafe { &*self.lock.value.get() }
553 |    11 |     }
554 |       | }
555 |       |
556 |       | impl<T: ?Sized> DerefMut for ShardedLockWriteGuard<'_, T> {
557 |    22 |     fn deref_mut(&mut self) -> &mut T {
558 |    22 |         unsafe { &mut *self.lock.value.get() }
559 |    22 |     }
Instantiations of <ShardedLockWriteGuard<T> as DerefMut>::deref_mut (lines 557-559), call counts per type parameter:
    i32: 20, isize: 1, [i32]: 1
560 |       | }
561 |       |
562 |       | /// Returns a `usize` that identifies the current thread.
563 |       | ///
564 |       | /// Each thread is associated with an 'index'. While there are no particular guarantees, indices
565 |       | /// usually tend to be consecutive numbers between 0 and the number of running threads.
566 |       | ///
567 |       | /// Since this function accesses TLS, `None` might be returned if the current thread's TLS is
568 |       | /// tearing down.
569 |       | #[inline]
570 | 8.91k | fn current_index() -> Option<usize> {
571 | 8.91k |     REGISTRATION.try_with(|reg| reg.index).ok()
572 | 8.91k | }

Unexecuted instantiation: <<crossbeam_utils::sync::sharded_lock::ShardedLock<_> as core::fmt::Debug>::fmt::LockedPlaceholder as core::fmt::Debug>::fmt
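
current_index (lines 570-572) uses LocalKey::try_with rather than with because the thread-local REGISTRATION may already be destroyed while the thread's TLS is tearing down; callers then fall back to shard 0 via unwrap_or(0). A tiny sketch of that try_with pattern with a hypothetical thread-local (not the crate's):

    use std::cell::Cell;

    thread_local! {
        // Hypothetical per-thread value standing in for REGISTRATION.
        static SLOT: Cell<usize> = Cell::new(7);
    }

    // Same shape as current_index(): Ok(value) while TLS is alive,
    // Err(AccessError) during teardown, mapped to None by .ok().
    fn current_slot() -> Option<usize> {
        SLOT.try_with(|slot| slot.get()).ok()
    }

    fn main() {
        // On a live thread this succeeds; None would only show up when a
        // destructor runs after SLOT has already been dropped.
        assert_eq!(current_slot(), Some(7));
    }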
573 |       |
574 |       | /// The global registry keeping track of registered threads and indices.
575 |       | struct ThreadIndices {
576 |       |     /// Mapping from `ThreadId` to thread index.
577 |       |     mapping: HashMap<ThreadId, usize>,
578 |       |
579 |       |     /// A list of free indices.
580 |       |     free_list: Vec<usize>,
581 |       |
582 |       |     /// The next index to allocate if the free list is empty.
583 |       |     next_index: usize,
584 |       | }
585 |       |
586 |       | lazy_static! {
587 |       |     static ref THREAD_INDICES: Mutex<ThreadIndices> = Mutex::new(ThreadIndices {
588 |       |         mapping: HashMap::new(),
589 |       |         free_list: Vec::new(),
590 |       |         next_index: 0,
591 |       |     });
592 |       | }
593 |       |
594 |       | /// A registration of a thread with an index.
595 |       | ///
596 |       | /// When dropped, unregisters the thread and frees the reserved index.
597 |       | struct Registration {
598 |       |     index: usize,
599 |       |     thread_id: ThreadId,
600 |       | }
601 |       |
602 |       | impl Drop for Registration {
603 |    31 |     fn drop(&mut self) {
604 |    31 |         let mut indices = THREAD_INDICES.lock().unwrap();
605 |    31 |         indices.mapping.remove(&self.thread_id);
606 |    31 |         indices.free_list.push(self.index);
607 |    31 |     }

Unexecuted instantiation: <crossbeam_utils::sync::sharded_lock::Registration as core::ops::drop::Drop>::drop
608 |       | }
609 |       |
610 |       | thread_local! {
611 |       |     static REGISTRATION: Registration = {
612 |       |         let thread_id = thread::current().id();
613 |       |         let mut indices = THREAD_INDICES.lock().unwrap();
614 |       |
615 |       |         let index = match indices.free_list.pop() {
616 |       |             Some(i) => i,
617 |       |             None => {
618 |       |                 let i = indices.next_index;
619 |       |                 indices.next_index += 1;
620 |       |                 i
621 |       |             }
622 |       |         };
623 |       |         indices.mapping.insert(thread_id, index);
624 |       |
625 |       |         Registration {
626 |       |             index,
627 |       |             thread_id,
628 |       |         }
629 |       |     };
630 |       | }
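
The registry above (lines 574-630) hands each thread a small index and recycles it through free_list when the Registration is dropped, which keeps indices roughly within 0..number-of-running-threads and therefore spread evenly over the shards. A standalone sketch of the same allocate-and-recycle scheme (illustrative names, simplified to a bare Mutex without lazy_static or thread_local):

    use std::collections::HashMap;
    use std::sync::Mutex;
    use std::thread::{self, ThreadId};

    // Simplified stand-in for ThreadIndices: indices come from the free list
    // first and only then from a fresh counter.
    struct Indices {
        mapping: HashMap<ThreadId, usize>,
        free_list: Vec<usize>,
        next_index: usize,
    }

    impl Indices {
        fn register(&mut self, id: ThreadId) -> usize {
            let index = match self.free_list.pop() {
                Some(i) => i,
                None => {
                    let i = self.next_index;
                    self.next_index += 1;
                    i
                }
            };
            self.mapping.insert(id, index);
            index
        }

        fn unregister(&mut self, id: ThreadId) {
            if let Some(index) = self.mapping.remove(&id) {
                self.free_list.push(index);
            }
        }
    }

    fn main() {
        let indices = Mutex::new(Indices {
            mapping: HashMap::new(),
            free_list: Vec::new(),
            next_index: 0,
        });

        let id = thread::current().id();
        let first = indices.lock().unwrap().register(id);
        indices.lock().unwrap().unregister(id);
        // The freed index is reused instead of growing next_index.
        let second = indices.lock().unwrap().register(id);
        assert_eq!(first, second);
    }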