hook up slabs and general allocator to Db methods

soruh 2023-07-28 02:18:24 +02:00
parent 1525b3f466
commit 43f85073ef
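
The change routes all allocation through two new `Db` methods, `allocate` and `free`, which dispatch to a size-matched slab when one is registered and fall back to the general-purpose allocator otherwise; `Db::new` is split into `Db::create` (clears the file and registers slab sizes up front) and `Db::open` (maps an existing file). A minimal usage sketch, assuming the API exactly as it appears in the diff below; the `fn sketch` wrapper and the size 100 are illustrative, and the `tempfile` and `&[4, 16]` usage mirrors the updated tests:

    // Sketch only: `Db`, `FileRange`, `Db::create`, `allocate`, and `free`
    // are taken from the diff below; error handling is elided.
    fn sketch() {
        // Register 4- and 16-byte slabs at creation time, as the updated
        // `it_works` test does.
        let mut db = Db::create(tempfile::tempfile().unwrap(), &[4, 16]);

        // Size matches a registered slab: served by the slab allocator.
        let a: FileRange = db.allocate(16);
        // No slab of this size: handled by the general-purpose allocator.
        let b: FileRange = db.allocate(100);

        db.free(a); // freed back to the 16-byte slab
        db.free(b); // freed via the general-purpose allocator
    }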


@@ -247,6 +247,7 @@ impl Db {
         unsafe { self.modify_range_unchecked(Self::header_ptr().range(size_of::<Header>() as u64)) }
     }
 
+    // NOTE: only allowed before any data of `size` has been allocated
     fn add_slab(&mut self, size: u32) -> SlabPointer {
         let allocator_state = self.header().allocator_state;
         let slab = allocator_state.slabs.add_slab(self, size);
@@ -254,7 +255,8 @@ impl Db {
         slab
     }
 
-    pub fn ensure_slab(&mut self, size: u32) -> SlabPointer {
+    // NOTE: only allowed before any data of `size` has been allocated
+    fn ensure_slab(&mut self, size: u32) -> SlabPointer {
         self.slabs
             .get(&size)
             .copied()
@@ -392,11 +394,10 @@ impl Db {
         PagePointer::nth((len / PAGE_SIZE).try_into().unwrap())
     }
 
-    pub fn new(file: File) -> Self {
-        let len = file.metadata().unwrap().len();
-        if len == 0 {
+    pub fn create(file: File, slabs: &[u32]) -> Self {
+        // clear file
+        file.set_len(0).unwrap();
         file.set_len(PAGE_SIZE).unwrap();
-        }
 
         let map = unsafe { MmapMut::map_mut(&file) }.unwrap();
@@ -410,22 +411,9 @@ impl Db {
             slabs: BTreeMap::new(),
         };
 
-        let allocator_state = db.header().allocator_state;
-
-        let mut slabs = BTreeMap::new();
-        for slab in allocator_state.slabs.iter(&db) {
-            let size = slab.size(&db);
-            slabs.insert(size, slab);
-        }
-        db.slabs = slabs;
-
         unsafe {
-            if len == 0 {
             *db.header_mut() = Header::default();
-                db.init_allocator();
-            }
+            db.init_allocator(slabs);
         }
 
         let _ = db.state.swap(Arc::new(Snapshot {
@@ -436,17 +424,65 @@ impl Db {
         db
     }
 
-    unsafe fn init_allocator(&mut self) {
+    pub fn open(file: File) -> Self {
+        let map = unsafe { MmapMut::map_mut(&file) }.unwrap();
+
+        let mut db = Self {
+            state: Arc::new(AtomicArc::new(Arc::new(Snapshot {
+                root: FilePointer::null(),
+                map: unsafe { Mmap::map(&file).unwrap() },
+            }))),
+            file,
+            map,
+            slabs: BTreeMap::new(),
+        };
+
+        let _ = db.state.swap(Arc::new(Snapshot {
+            root: db.header().root,
+            map: unsafe { Mmap::map(&db.file).unwrap() },
+        }));
+
+        db
+    }
+
+    unsafe fn init_allocator(&mut self, slabs: &[u32]) {
         let allocator_state = self.header().allocator_state;
         allocator_state.slabs.init(
             self,
             (PAGE_SIZE - size_of::<Header>() as u64).try_into().unwrap(),
         );
+
+        for &size in slabs {
+            self.ensure_slab(size);
+        }
     }
 
     fn end_of_file(&self) -> FilePointer {
         FilePointer::null() + self.file.metadata().unwrap().len()
     }
 
+    fn get_slab(&self, size: u64) -> Option<SlabPointer> {
+        u32::try_from(size)
+            .ok()
+            .and_then(|size| self.slabs.get(&size))
+            .copied()
+    }
+
+    pub fn allocate(&mut self, size: u64) -> FileRange {
+        if let Some(slab) = self.get_slab(size) {
+            slab.alloc(self)
+        } else {
+            Db::general_purpose_allocator().allocate(self, size)
+        }
+    }
+
+    pub fn free(&mut self, range: FileRange) {
+        if let Some(slab) = self.get_slab(range.len()) {
+            slab.free(self, range)
+        } else {
+            Db::general_purpose_allocator().free(self, range)
+        }
+    }
 }
 
 #[cfg(test)]
@@ -454,8 +490,6 @@ mod tests {
     use std::io::Write;
     use std::process::Stdio;
 
-    use rand::{thread_rng, Rng};
-
     use super::*;
 
     #[derive(Debug, Clone, Copy)]
     enum Operation {
@@ -464,7 +498,7 @@ mod tests {
     }
 
     fn causes_fragmentation(sequence: &[Operation], print: bool) -> bool {
-        let mut db = Db::new(tempfile::tempfile().unwrap());
+        let mut db = Db::create(tempfile::tempfile().unwrap(), &[]);
 
         let allocator = Db::general_purpose_allocator();
 
         let mut ranges = Vec::new();
@@ -618,14 +652,12 @@ mod tests {
     #[test]
     fn it_works() {
-        let mut db = Db::new(tempfile::tempfile().unwrap());
+        let mut db = Db::create(tempfile::tempfile().unwrap(), &[4, 16]);
 
         fn alloc_and_free_many<const N: usize>(db: &mut Db, n: u64) {
-            let slab = db.add_slab(N as u32);
-
             let mut ranges = Vec::new();
             for i in 1..n {
-                let range = slab.alloc(db);
+                let range = db.allocate(N as u64);
 
                 let data = [0; N].map(|_| i as u8);
@@ -637,7 +669,7 @@ mod tests {
             }
 
             for range in ranges.into_iter().rev() {
-                slab.free(db, range);
+                db.free(range);
             }
         }
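
For context, a hedged sketch of how the split constructors divide responsibilities: `Db::create` truncates the file and initializes the allocator with the given slab sizes, while `Db::open` maps an existing file as-is. Everything except `Db::create`, `Db::open`, `allocate`, and `free` is an assumption here; the path name is illustrative, and the file must be opened read+write because the diff maps it with `MmapMut::map_mut`:

    use std::fs::OpenOptions;

    fn open_after_create() {
        // First run: initialize the file, registering slab sizes up front
        // (per the NOTE comments above, slabs may only be set up before any
        // data of that size has been allocated).
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open("example.db") // illustrative path
            .unwrap();
        let mut db = Db::create(file, &[4, 16]);
        let r = db.allocate(4);
        db.free(r);
        drop(db);

        // Later run: map the existing file without clearing it.
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .open("example.db")
            .unwrap();
        let _db = Db::open(file);
    }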