#![allow(unsafe_code)]
//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
//! mostly in dealing with its bucket "pointers".
use super::{equivalent, get_hash, Bucket, Entry, HashValue, IndexMapCore, VacantEntry};
use core::fmt;
use core::mem::replace;
use hashbrown::raw::RawTable;
type RawBucket = hashbrown::raw::Bucket<usize>;
/// Inserts many entries into a raw table without reallocating.
///
/// ***Panics*** if there is not sufficient capacity already.
pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        // SAFETY: we asserted that sufficient capacity exists for all entries.
        unsafe {
            indices.insert_no_grow(entry.hash.get(), indices.len());
        }
    }
}
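
// An illustrative, test-only sketch of the contract documented on
// `insert_bulk_no_grow`: the caller reserves capacity up front, so the unsafe
// `insert_no_grow` calls never need to reallocate. The module name and the
// hash values here are placeholders chosen for the example, not part of the
// real map internals.
#[cfg(test)]
mod insert_no_grow_sketch {
    use hashbrown::raw::RawTable;

    #[test]
    fn reserve_then_insert_without_growing() {
        let hashes: [u64; 4] = [10, 20, 30, 40];
        let mut indices: RawTable<usize> = RawTable::with_capacity(hashes.len());
        assert!(indices.capacity() - indices.len() >= hashes.len());
        for (i, &hash) in hashes.iter().enumerate() {
            // SAFETY: capacity for all four values was reserved just above.
            unsafe {
                indices.insert_no_grow(hash, i);
            }
        }
        // Every stored index can be found again by its hash.
        for (i, &hash) in hashes.iter().enumerate() {
            assert_eq!(indices.get(hash, |&stored| stored == i), Some(&i));
        }
    }
}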
pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
impl fmt::Debug for DebugIndices<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: we're not letting any of the buckets escape this function
        let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) };
        f.debug_list().entries(indices).finish()
    }
}
impl<K, V> IndexMapCore<K, V> {
    /// Sweep the whole table to erase indices start..end
    pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
        // SAFETY: we're not letting any of the buckets escape this function
        unsafe {
            let offset = end - start;
            for bucket in self.indices.iter() {
                let i = bucket.as_mut();
                if *i >= end {
                    *i -= offset;
                } else if *i >= start {
                    self.indices.erase(bucket);
                }
            }
        }
    }
    /// Search for a key in the table and return `Ok(entry_index)` if found.
    /// Otherwise, insert the key and return `Err(new_index)`.
    ///
    /// Note that hashbrown may resize the table to reserve space for insertion,
    /// even before checking if it's already present, so this is somewhat biased
    /// towards new items.
    pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
    where
        K: Eq,
    {
        let hash = hash.get();
        let eq = equivalent(key, &self.entries);
        let hasher = get_hash(&self.entries);
        // SAFETY: We're not mutating between find and read/insert.
        unsafe {
            match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
                Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
                Err(slot) => {
                    let index = self.indices.len();
                    self.indices.insert_in_slot(hash, slot, index);
                    Err(index)
                }
            }
        }
    }
    pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        match self.indices.find(hash.get(), eq) {
            // SAFETY: The entry is created with a live raw bucket, at the same time
            // we have a &mut reference to the map, so it can not be modified further.
            Some(raw_bucket) => Entry::Occupied(OccupiedEntry {
                map: self,
                raw_bucket,
                key,
            }),
            None => Entry::Vacant(VacantEntry {
                map: self,
                hash,
                key,
            }),
        }
    }
    pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
        // SAFETY: we're not letting any of the buckets escape this function,
        // only the item references that are appropriately bound to `&mut self`.
        unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
    }
    /// Return the raw bucket for the given index
    fn find_index(&self, index: usize) -> RawBucket {
        // We'll get a "nice" bounds-check from indexing `self.entries`,
        // and then we expect to find it in the table as well.
        let hash = self.entries[index].hash.get();
        self.indices
            .find(hash, move |&i| i == index)
            .expect("index not found")
    }
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // SAFETY: Can't take two `get_mut` references from one table, so we
        // must use raw buckets to do the swap. This is still safe because we
        // are locally sure they won't dangle, and we write them individually.
        unsafe {
            let raw_bucket_a = self.find_index(a);
            let raw_bucket_b = self.find_index(b);
            *raw_bucket_a.as_mut() = b;
            *raw_bucket_b.as_mut() = a;
        }
        self.entries.swap(a, b);
    }
}
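
// Illustrative, test-only sketches of two patterns used in the impl above,
// shown on a bare `RawTable<usize>` and a plain array rather than on
// `IndexMapCore`. The helper name and the values are assumptions made for the
// example only.
#[cfg(test)]
mod raw_pattern_sketches {
    use hashbrown::raw::RawTable;

    // The probe-then-insert pattern behind `find_or_insert`: a single lookup
    // either yields an existing bucket (`Ok`) or a slot to fill (`Err`).
    fn find_or_insert_index(
        indices: &mut RawTable<usize>,
        hashes: &[u64],
        hash: u64,
    ) -> Result<usize, usize> {
        let eq = |&i: &usize| hashes[i] == hash;
        let hasher = |&i: &usize| hashes[i];
        // SAFETY: the probed bucket or slot is used immediately, with no other
        // mutation of the table in between (the same argument as above).
        unsafe {
            match indices.find_or_find_insert_slot(hash, eq, hasher) {
                Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
                Err(slot) => {
                    let index = indices.len();
                    indices.insert_in_slot(hash, slot, index);
                    Err(index)
                }
            }
        }
    }

    #[test]
    fn probe_then_insert() {
        let hashes: [u64; 2] = [111, 222]; // stand-ins for entry hashes
        let mut indices: RawTable<usize> = RawTable::with_capacity(hashes.len());
        assert_eq!(find_or_insert_index(&mut indices, &hashes, 111), Err(0)); // newly inserted
        assert_eq!(find_or_insert_index(&mut indices, &hashes, 111), Ok(0)); // found existing
        assert_eq!(find_or_insert_index(&mut indices, &hashes, 222), Err(1)); // newly inserted
    }

    // The index arithmetic of `erase_indices_sweep`: indices in `start..end`
    // are dropped, and larger indices shift down by the width of that range.
    #[test]
    fn erase_sweep_arithmetic() {
        let (start, end) = (2usize, 4usize);
        let offset = end - start;
        let stored = [0usize, 1, 2, 3, 4, 5];
        let swept = stored
            .iter()
            .filter(|&&i| i < start || i >= end)
            .map(|&i| if i >= end { i - offset } else { i });
        assert!(swept.eq([0usize, 1, 2, 3]));
    }
}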
/// A view into an occupied entry in an `IndexMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
// SAFETY: The lifetime of the map reference also constrains the raw bucket,
// which is essentially a raw pointer into the map indices.
pub struct OccupiedEntry<'a, K, V> {
    map: &'a mut IndexMapCore<K, V>,
    raw_bucket: RawBucket,
    key: K,
}
// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
// SAFETY: `&self` only accesses the bucket to read it.
unsafe impl<K: Sync, V: Sync> Sync for OccupiedEntry<'_, K, V> {}
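
// A test-only sketch of how an `OccupiedEntry` is reached and used through the
// public `Entry` API, assuming the crate's `std` default hasher is enabled as
// in the crate's own tests; the module name is a placeholder.
#[cfg(test)]
mod occupied_entry_sketch {
    use crate::map::Entry;
    use crate::IndexMap;

    #[test]
    fn occupied_entry_reads_key_value_and_index() {
        let mut map: IndexMap<&str, i32> = IndexMap::new();
        map.insert("a", 1);
        map.insert("b", 2);
        match map.entry("b") {
            Entry::Occupied(entry) => {
                assert_eq!(entry.key(), &"b");
                assert_eq!(entry.get(), &2);
                assert_eq!(entry.index(), 1);
            }
            Entry::Vacant(_) => unreachable!("\"b\" was just inserted"),
        }
    }
}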
// The parent module also adds methods that don't threaten the unsafe encapsulation.
impl<'a, K, V> OccupiedEntry<'a, K, V> {
    /// Gets a reference to the entry's key in the map.
    ///
    /// Note that this is not the key that was used to find the entry. There may be an observable
    /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
    /// extra fields or the memory address of an allocation.
    pub fn key(&self) -> &K {
        &self.map.entries[self.index()].key
    }
    /// Gets a reference to the entry's value in the map.
    pub fn get(&self) -> &V {
        &self.map.entries[self.index()].value
    }
    /// Gets a mutable reference to the entry's value in the map.
    ///
    /// If you need a reference which may outlive the destruction of the
    /// `Entry` value, see `into_mut`.
    pub fn get_mut(&mut self) -> &mut V {
        let index = self.index();
        &mut self.map.entries[index].value
    }
    /// Put the new key in the occupied entry's key slot
    pub(crate) fn replace_key(self) -> K {
        let index = self.index();
        let old_key = &mut self.map.entries[index].key;
        replace(old_key, self.key)
    }
    /// Return the index of the key-value pair
    #[inline]
    pub fn index(&self) -> usize {
        // SAFETY: we have &mut map keeping the bucket stable
        unsafe { *self.raw_bucket.as_ref() }
    }
    /// Converts into a mutable reference to the entry's value in the map,
    /// with a lifetime bound to the map itself.
    pub fn into_mut(self) -> &'a mut V {
        let index = self.index();
        &mut self.map.entries[index].value
    }
    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
    /// last element of the map and popping it off. **This perturbs
    /// the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
    pub fn swap_remove_entry(self) -> (K, V) {
        // SAFETY: This is safe because it can only happen once (self is consumed)
        // and map.indices have not been modified since entry construction
        let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
        self.map.swap_remove_finish(index)
    }
    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// Like `Vec::remove`, the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_remove_entry(self) -> (K, V) {
        // SAFETY: This is safe because it can only happen once (self is consumed)
        // and map.indices have not been modified since entry construction
        let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
        self.map.shift_remove_finish(index)
    }
}
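
// A test-only sketch of the removal semantics documented on `swap_remove_entry`
// and `shift_remove_entry`, shown through the public `IndexMap` API (again
// assuming the `std` default hasher); the keys and values are arbitrary.
#[cfg(test)]
mod remove_order_sketch {
    use crate::IndexMap;

    fn sample() -> IndexMap<&'static str, i32> {
        let mut map = IndexMap::new();
        map.insert("a", 1);
        map.insert("b", 2);
        map.insert("c", 3);
        map.insert("d", 4);
        map
    }

    #[test]
    fn swap_remove_perturbs_order() {
        // The last pair ("d") is swapped into "b"'s old position.
        let mut map = sample();
        assert_eq!(map.swap_remove("b"), Some(2));
        assert!(map.keys().copied().eq(["a", "d", "c"]));
    }

    #[test]
    fn shift_remove_preserves_order() {
        // Everything after "b" shifts down by one, keeping relative order.
        let mut map = sample();
        assert_eq!(map.shift_remove("b"), Some(2));
        assert!(map.keys().copied().eq(["a", "c", "d"]));
    }
}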