//! An `SCAllocator` that can allocate fixed-size objects.
use crate::*;
/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create an arbitrary boolean expression
/// * cast said expression to a usize
/// * use that value to index into the array created above
///
/// # Source
/// <https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time>
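///
/// # Example
/// The same indexing trick written inline (a minimal sketch):
/// ```
/// let (a, b) = (3usize, 7usize);
/// // `(a > b) as usize` is 0 when `a` is already the minimum, 1 otherwise.
/// assert_eq!([a, b][(a > b) as usize], 3);
/// ```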
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
[a, b][(a > b) as usize]
}
/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
core::cmp::min(a, b)
}
/// A slab allocator allocates elements of a fixed size.
///
/// It maintains three internal lists of `MappedPages8k`
/// from which it can allocate memory.
///
/// * `empty_slabs`: A list of pages with zero live allocations in them.
/// * `slabs`: A list of pages that are partially allocated and still have room for more.
/// * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`; however, if that list is empty,
/// we try to reclaim a page from `empty_slabs` before returning an out-of-memory
/// error. If a page becomes full after an allocation, we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`,
/// or from `slabs` to `empty_slabs`, after an object is deallocated.
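///
/// A sketch of how pages move between the three lists:
///
/// ```text
/// refill() --> empty_slabs --allocate()--> slabs --(page becomes full)--> full_slabs
///              empty_slabs <--(page drained)-- slabs <--deallocate()-- full_slabs
/// ```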
pub struct SCAllocator {
/// Maximum possible allocation size for this `SCAllocator`.
pub(crate) size: usize,
/// Keeps track of successful allocations.
pub(crate) allocation_count: usize,
/// Keeps track of the number of `MappedPages8k` in the heap.
pub(crate) page_count: usize,
/// Maximum number of objects per page.
pub(crate) obj_per_page: usize,
/// Keeps track of the empty pages in the heap.
pub(crate) empty_count: usize,
/// List to hold empty `MappedPages8k` (nothing allocated in these).
pub(crate) empty_slabs: Vec<MappedPages8k>,
/// List to hold partially used `MappedPages8k` (some objects allocated, but the pages are not full).
pub(crate) slabs: Vec<MappedPages8k>,
/// List to hold full `MappedPages8k` (everything allocated in these, so there is no need to search them).
pub(crate) full_slabs: Vec<MappedPages8k>,
}
/// Creates an instance of an `SCAllocator`. We do this in a macro because the
/// code is reused by both const and non-const constructor functions
/// (e.g. `new_sc_allocator!(64)` builds an allocator for 64-byte objects).
macro_rules! new_sc_allocator {
($size:expr) => {
SCAllocator {
size: $size,
allocation_count: 0,
page_count: 0,
obj_per_page: cmin((MappedPages8k::SIZE - MappedPages8k::METADATA_SIZE) / $size, 8 * 64),
empty_count: 0,
empty_slabs: Vec::with_capacity(Self::MAX_PAGE_LIST_SIZE),
slabs: Vec::with_capacity(Self::MAX_PAGE_LIST_SIZE),
full_slabs: Vec::with_capacity(Self::MAX_PAGE_LIST_SIZE)
}
};
}
impl SCAllocator {
const _REBALANCE_COUNT: usize = 10_000;
/// The maximum number of pages the SCAllocator can hold across its three lists.
pub const MAX_PAGE_LIST_SIZE: usize = 122*10; // 1220 pages * 8 KiB ≈ 10 MiB
/// Creates a new SCAllocator and initializes the page lists to have a capacity of `MAX_PAGE_LIST_SIZE`.
/// After initialization, the length of the list won't exceed the capacity.
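///
/// # Example
/// A minimal sketch (the 64-byte size class below is an arbitrary choice):
/// ```rust,ignore
/// let mut sc = SCAllocator::new(64);
/// // With 8 KiB pages, each page can hold roughly
/// // (MappedPages8k::SIZE - MappedPages8k::METADATA_SIZE) / 64 objects.
/// ```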
pub fn new(size: usize) -> SCAllocator {
new_sc_allocator!(size)
}
/// Add a page to the partial list.
fn insert_partial(&mut self, new_page: MappedPages8k) {
self.slabs.push(new_page);
// Keep the most recently used page at the front of the list:
// `swap_remove(0)` moves the just-pushed page into slot 0 and
// returns the old front page, which we re-push to the back.
if self.slabs.len() > 1 {
let mp = self.slabs.swap_remove(0);
self.slabs.push(mp);
}
}
/// Add page to empty list.
fn insert_empty(&mut self, new_page: MappedPages8k) {
self.empty_slabs.push(new_page);
self.empty_count += 1;
}
/// Add page to full list.
fn insert_full(&mut self, new_page: MappedPages8k) {
self.full_slabs.push(new_page);
// Keep the most recently used page at the front of the list:
// `swap_remove(0)` moves the just-pushed page into slot 0 and
// returns the old front page, which we re-push to the back.
if self.full_slabs.len() > 1 {
let mp = self.full_slabs.swap_remove(0);
self.full_slabs.push(mp);
}
}
/// Remove a page from the empty list, if one is available.
fn remove_empty(&mut self) -> Option<MappedPages8k> {
self.empty_slabs.pop().map(|mp| {self.empty_count -= 1; mp} )
}
/// Remove the page at index `id` from the partial list.
fn remove_partial(&mut self, id: usize) -> MappedPages8k {
self.slabs.swap_remove(id)
}
/// Remove the page at index `id` from the full list.
fn remove_full(&mut self, id: usize) -> MappedPages8k {
self.full_slabs.swap_remove(id)
}
/// Move a page from `slabs` to `empty_slabs`.
fn move_partial_to_empty(&mut self, id: usize) {
let page = self.remove_partial(id);
self.insert_empty(page);
}
/// Move a page from `slabs` to `full_slabs`.
fn move_partial_to_full(&mut self, id: usize) {
let page = self.remove_partial(id);
self.insert_full(page);
}
/// Move a page from `full_slabs` to `slabs`.
fn move_full_to_partial(&mut self, id: usize) {
let page = self.remove_full(id);
self.insert_partial(page);
}
/// Tries to allocate a block of memory with respect to the `layout`.
/// Searches the partially allocated slab pages and returns a null pointer
/// if none of them has a suitable spot (the caller then falls back to the
/// empty page list).
///
/// # Arguments
/// * `sc_layout`: This is not the original layout but one adjusted to the
/// SCAllocator size (>= original).
fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
// TODO: Do we really need to check multiple slab pages (due to alignment)?
// If not, we can get away with a singly-linked list and have 8 more bytes
// for the bitfield in an ObjectPage.
let mut need_to_move = false;
let mut ret_ptr = ptr::null_mut();
let mut list_id = 0;
for (id, slab_page) in self.slabs.iter_mut().enumerate() {
let page = slab_page.as_objectpage8k_mut();
let ptr = page.allocate(sc_layout);
if !ptr.is_null() {
if page.is_full() {
need_to_move = true;
list_id = id;
// trace!("move {:p} partial -> full", page);
}
self.allocation_count += 1;
ret_ptr = ptr;
break;
}
}
if need_to_move {
self.move_partial_to_full(list_id);
}
// // Periodically rebalance page-lists (since dealloc can't do it for us)
// if self.allocation_count % SCAllocator::<P>::REBALANCE_COUNT == 0 {
// self.check_page_assignments();
// }
ret_ptr
}
/// Refill the SCAllocator with a page.
///
/// # Warning
/// The heap can hold at most `MAX_PAGE_LIST_SIZE` pages; once that limit is
/// reached, any further `refill` call returns an error.
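///
/// # Example
/// A sketch, where `acquire_8k_mapped_pages()` stands in for whatever source of
/// `MappedPages8k` the caller has (hypothetical; not part of this crate):
/// ```rust,ignore
/// let mp: MappedPages8k = acquire_8k_mapped_pages()?;
/// sc.refill(mp, heap_id)?;
/// ```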
pub fn refill(&mut self, mut mp: MappedPages8k, heap_id: usize) -> Result<(), &'static str> {
if self.page_count >= Self::MAX_PAGE_LIST_SIZE {
error!("Page limit ({} pages) of SCAllocator has been reached!", Self::MAX_PAGE_LIST_SIZE);
return Err("Page limit of SCAllocator has been reached!");
}
let page = mp.as_objectpage8k_mut();
page.bitfield_mut().initialize(self.size, MappedPages8k::SIZE - MappedPages8k::METADATA_SIZE);
page.heap_id = heap_id;
// trace!("adding page to SCAllocator {:p}", page);
self.insert_empty(mp);
self.page_count += 1;
Ok(())
}
/// Returns an empty page from the allocator if available.
pub fn retrieve_empty_page(&mut self) -> Option<MappedPages8k> {
self.remove_empty().map(|mp| {self.page_count -= 1; mp} )
}
/// Allocates a block of memory described by `layout`.
///
/// Returns a pointer to a valid region of memory or an error.
///
/// The function may also move pages between lists
/// (empty -> partial or partial -> full).
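///
/// # Example
/// A minimal sketch:
/// ```rust,ignore
/// let layout = Layout::from_size_align(64, 8).unwrap();
/// let ptr = sc.allocate(layout)?;
/// ```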
pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, &'static str> {
// trace!(
// "SCAllocator({}) is trying to allocate {:?}, {}",
// self.size,
// layout,
// MappedPages8k::SIZE - CACHE_LINE_SIZE
// );
assert!(layout.size() <= self.size);
assert!(self.size <= (MappedPages8k::SIZE - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
// Try to allocate from the partial slabs;
// if that fails, check if we have an empty page and allocate from there.
let mut ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
if let Some(mut empty_page) = self.remove_empty() {
ptr = empty_page.as_objectpage8k_mut().allocate(layout);
debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");
// trace!("move {:p} empty -> partial", empty_page.start_address());
// Move the previously empty page to the partial pages list.
self.insert_partial(empty_page);
}
}
NonNull::new(ptr).ok_or("AllocationError::OutOfMemory")
}
/// Deallocates a previously allocated `ptr` described by `layout`.
///
/// May return an error if an invalid `layout` is provided.
/// The function may also move internal slab pages between lists
/// (partial -> empty or full -> partial).
///
/// # Safety
/// The caller must ensure that the `ptr` argument was returned from [`Self::allocate()`]
/// and that the `layout` argument is correct.
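///
/// # Example
/// A sketch of the paired usage; `layout` must describe the original allocation:
/// ```rust,ignore
/// let layout = Layout::from_size_align(64, 8).unwrap();
/// let ptr = sc.allocate(layout)?;
/// unsafe { sc.deallocate(ptr, layout) }?;
/// ```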
pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), &'static str> {
assert!(layout.size() <= self.size);
assert!(self.size <= (MappedPages8k::SIZE - CACHE_LINE_SIZE));
// trace!(
// "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
// self.size,
// ptr,
// layout,
// MappedPages8k::SIZE
// );
let page_vaddr = VirtualAddress::new((ptr.as_ptr() as usize) & !(MappedPages8k::SIZE - 1))
.ok_or("pointer to deallocate was an invalid virtual address")?;
// Figure out which page we are on and retrieve a reference to it
let new_layout = Layout::from_size_align_unchecked(self.size, layout.align());
let (ret, slab_page_is_empty, slab_page_was_full, list_id) = {
// find slab page from partial slabs
let mut page = self.slabs.iter_mut().enumerate()
.find(|(_id,mp)| mp.start_address() == page_vaddr);
// if it was not in the partial slabs then it should be in the full slabs
if page.is_none() {
page = self.full_slabs.iter_mut().enumerate()
.find(|(_id,mp)| mp.start_address() == page_vaddr)
}
let (list_id, mapped_page) = page.ok_or("could not find page to deallocate from")?;
let slab_page = mapped_page.as_objectpage8k_mut();
let slab_page_was_full = slab_page.is_full();
let ret = slab_page.deallocate(ptr, new_layout);
debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
(ret, slab_page.is_empty(self.obj_per_page), slab_page_was_full, list_id)
};
if slab_page_is_empty {
// We need to move it from self.slabs -> self.empty_slabs
// trace!("move {:p} partial -> empty", page_vaddr);
self.move_partial_to_empty(list_id);
} else if slab_page_was_full {
// We need to move it from self.full_slabs -> self.slabs
// trace!("move {:p} full -> partial", page_vaddr);
self.move_full_to_partial(list_id);
}
ret
}
}