add smaller slabs

soruh 2023-07-25 17:10:32 +02:00
parent 2f3c46136b
commit 42eda592ff
2 changed files with 290 additions and 81 deletions

View File

@@ -2,10 +2,11 @@ use std::mem::size_of;
 use zerocopy::{AsBytes, FromBytes, Unaligned};
 
-use crate::{Db, FilePointer, FileRange, Header, PAGE_SIZE, U32};
+use crate::{Db, FilePointer, FileRange, PagePointer, PAGE_SIZE, U16, U32};
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 enum SlabKind {
-    SplitFreeList,
+    SingleBytes,
     RelativeFreeList,
     AbsoluteFreeList,
 }
@@ -13,8 +14,8 @@ enum SlabKind {
 impl SlabKind {
     fn for_size(size: u32) -> Self {
         if size == 1 {
-            Self::SplitFreeList
-        } else if size < 8 {
+            Self::SingleBytes
+        } else if size < size_of::<FilePointer>() as u32 {
             Self::RelativeFreeList
         } else if (size as u64) <= PAGE_SIZE {
             Self::AbsoluteFreeList
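The boundaries in `for_size` encode what each strategy can store: a free slot must hold its own free-list link, so slots of 2..=7 bytes can only fit a page-relative `U16` offset (`RelativeFreeList`), slots of 8 bytes and up fit a full `FilePointer` (`AbsoluteFreeList`), and 1-byte slots fit neither (`SingleBytes`, still unimplemented). A sketch of the mapping as a hypothetical test inside this module, relying on `size_of::<FilePointer>()` being 8:

    #[test]
    fn size_classes() {
        assert_eq!(SlabKind::for_size(1), SlabKind::SingleBytes);
        assert_eq!(SlabKind::for_size(2), SlabKind::RelativeFreeList);
        assert_eq!(SlabKind::for_size(7), SlabKind::RelativeFreeList);
        // from 8 bytes up, a slot can hold an absolute FilePointer
        assert_eq!(SlabKind::for_size(8), SlabKind::AbsoluteFreeList);
        assert_eq!(SlabKind::for_size(PAGE_SIZE as u32), SlabKind::AbsoluteFreeList);
    }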
@@ -60,6 +61,28 @@ pub struct SlabListHeader {
 #[repr(transparent)]
 pub struct SlabListPointer(pub FilePointer);
 
+pub struct SlabListIterator<'db> {
+    position: u32,
+    db: &'db Db,
+    ptr: SlabListPointer,
+}
+
+impl<'db> Iterator for SlabListIterator<'db> {
+    type Item = SlabPointer;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(res) = self.ptr.get(self.db, self.position) {
+            self.position += 1;
+            Some(res)
+        } else if let Some(next) = self.ptr.next(self.db) {
+            self.ptr = next;
+            self.next()
+        } else {
+            None
+        }
+    }
+}
+
 impl SlabListHeader {
     pub fn capacity(&self) -> u32 {
         (self.size.get() - size_of::<SlabListHeader>() as u32) / size_of::<Slab>() as u32
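For the arithmetic above: with the unaligned layouts in this file, `SlabListHeader` is 16 bytes (`next` 8 + `size` 4 + `len` 4) and `Slab` is 12 bytes (`head` 8 + `size` 4), so one 4096-byte list page holds 340 slab descriptors. A sketch of the math:

    #[test]
    fn list_page_capacity() {
        // assuming SlabListHeader = 16 bytes and Slab = 12 bytes, as laid out above
        let capacity = (4096 - 16) / 12;
        assert_eq!(capacity, 340); // Slab descriptors per 4096-byte list page
    }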
@@ -67,49 +90,68 @@ impl SlabListHeader {
 }
 
 impl SlabListPointer {
-    pub fn set_next(&self, db: &mut Db, next: SlabListPointer) {
-        db.write(self.0, next);
+    pub fn next(self, db: &Db) -> Option<SlabListPointer> {
+        let ptr: SlabListPointer = self.read_header(db).next;
+        (!ptr.0.is_null()).then_some(ptr)
     }
 
-    pub fn set_len(&self, db: &mut Db, len: u32) {
-        db.write(self.0 + size_of::<SlabListPointer>() as u64, U32::from(len));
+    fn read_header(self, db: &Db) -> SlabListHeader {
+        unsafe { db.read(self.0) }
     }
 
-    pub fn init(&self, db: &mut Db, size: u32) {
-        db.write(
-            self.0,
-            SlabListHeader {
-                next: SlabListPointer(FilePointer::null()),
-                size: size.into(),
-                len: 0.into(),
-            },
-        );
+    fn modify_header(self, db: &mut Db) -> &mut SlabListHeader {
+        unsafe { db.modify(self.0) }
     }
 
-    pub fn ptr(&self, db: &Db, i: u32) -> FilePointer {
-        let this: SlabListHeader = db.read(self.0);
-        assert!(i < this.len.get());
-        self.0 + size_of::<SlabListHeader>() as u64 + i as u64 * size_of::<Slab>() as u64
+    pub fn set_next(self, db: &mut Db, next: SlabListPointer) {
+        self.modify_header(db).next = next;
     }
 
-    pub fn write(&self, db: &mut Db, i: u32, value: Slab) {
-        let ptr = self.ptr(db, i);
-        db.write(ptr, value);
+    pub fn set_len(self, db: &mut Db, len: u32) {
+        self.modify_header(db).len = U32::from(len);
     }
 
-    pub fn get(&self, db: &Db, i: u32) -> SlabPointer {
-        let ptr = self.ptr(db, i);
-        SlabPointer(ptr)
+    pub fn init(self, db: &mut Db, size: u32) {
+        *self.modify_header(db) = SlabListHeader {
+            next: SlabListPointer(FilePointer::null()),
+            size: size.into(),
+            len: 0.into(),
+        };
     }
 
-    pub fn add(&self, db: &mut Db, slab_size: u32) -> SlabPointer {
-        let this: SlabListHeader = db.read(self.0);
+    pub fn ptr(self, db: &Db, i: u32) -> Option<FilePointer> {
+        let this = self.read_header(db);
+        (i < this.len.get()).then(|| {
+            self.0 + size_of::<SlabListHeader>() as u64 + i as u64 * size_of::<Slab>() as u64
+        })
+    }
+
+    pub fn write(self, db: &mut Db, i: u32, value: Slab) {
+        let ptr = self.ptr(db, i).unwrap();
+        unsafe { db.write(ptr, value) };
+    }
+
+    pub fn get(self, db: &Db, i: u32) -> Option<SlabPointer> {
+        self.ptr(db, i).map(SlabPointer)
+    }
+
+    pub fn iter(self, db: &Db) -> SlabListIterator {
+        SlabListIterator {
+            position: 0,
+            db,
+            ptr: self,
+        }
+    }
+
+    pub fn add(self, db: &mut Db, slab_size: u32) -> SlabPointer {
+        let this = self.read_header(db);
 
         let capacity = this.capacity();
         let SlabListHeader { mut next, len, .. } = this;
 
         if len.get() >= capacity {
-            if next.0 == FilePointer::null() {
-                next = SlabListPointer(db.add_pages(1));
+            if next.0.is_null() {
+                next = SlabListPointer(db.add_pages(1).start());
                 next.init(db, PAGE_SIZE as u32);
                 self.set_next(db, next);
             }
@@ -128,7 +170,7 @@ impl SlabListPointer {
             },
         );
 
-        SlabPointer(self.ptr(db, len))
+        SlabPointer(self.ptr(db, len).unwrap())
     }
 }
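Taken together, `add` appends a descriptor (chaining a fresh page once the current one is at capacity) and `iter` walks every descriptor across the chain. Hypothetical usage, assuming a `Db` named `db` and its `SlabListPointer` in `slabs`:

    // sketch: register a slab class, then enumerate all registered slabs
    let _slab: SlabPointer = slabs.add(&mut db, 4); // descriptor for 4-byte allocations
    let count = slabs.iter(&db).count();
    assert!(count >= 1);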
@@ -139,51 +181,169 @@ pub struct Slab {
     size: U32,
 }
 
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned)]
+#[repr(C)]
+struct RelativeFreeListHeader {
+    next_page: PagePointer,
+    first: U16,
+}
+
+impl RelativeFreeListHeader {
+    const DATA_SIZE: u32 = PAGE_SIZE as u32 - size_of::<Self>() as u32;
+
+    fn capacity(size: u32) -> u32 {
+        debug_assert_eq!(SlabKind::for_size(size), SlabKind::RelativeFreeList);
+
+        Self::DATA_SIZE / size
+    }
+}
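`RelativeFreeListHeader` costs only 6 bytes (a 4-byte `PagePointer` plus a 2-byte `U16`), so `DATA_SIZE` = 4096 − 6 = 4090 bytes of slots per page. A sketch of the resulting per-page capacities:

    #[test]
    fn relative_free_list_capacity() {
        // header = next_page (U32, 4 bytes) + first (U16, 2 bytes) = 6 bytes
        let data_size = 4096 - 6; // RelativeFreeListHeader::DATA_SIZE
        assert_eq!(data_size / 2, 2045); // 2-byte slots per page
        assert_eq!(data_size / 4, 1022); // 4-byte slots per page
        assert_eq!(data_size / 7, 584); // 7-byte slots per page
    }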
 #[derive(Clone, Copy, FromBytes, AsBytes, Unaligned)]
 #[repr(transparent)]
 pub struct SlabPointer(FilePointer);
 
 impl SlabPointer {
-    pub fn read(&self, db: &Db) -> Slab {
-        db.read(self.0)
+    fn read(&self, db: &Db) -> Slab {
+        unsafe { db.read(self.0) }
     }
 
-    pub fn get(&self, db: &mut Db) -> FileRange {
-        let Slab { head, size } = self.read(db);
+    fn modify<'db>(&self, db: &'db mut Db) -> &'db mut Slab {
+        unsafe { db.modify(self.0) }
+    }
+
+    pub fn alloc(&self, db: &mut Db) -> FileRange {
+        let Slab { mut head, size } = self.read(db);
+
+        if head.is_null() {
+            head = self.allocate_page(db);
+        }
 
         let size = size.get();
         match SlabKind::for_size(size) {
-            SlabKind::SplitFreeList => todo!(),
-            SlabKind::RelativeFreeList => todo!(),
-            SlabKind::AbsoluteFreeList => {
-                let mut next = head;
-
-                if next == FilePointer::null() {
-                    next = self.allocate_page(db);
-                }
-
-                let new_next = db.read(next);
-                self.set_next(db, new_next);
-
-                next.range(size as u64)
+            SlabKind::SingleBytes => todo!(),
+            SlabKind::RelativeFreeList => {
+                let (page, offset) = head.page_offset();
+                assert_eq!(offset, 0);
+
+                let RelativeFreeListHeader { first, .. } = unsafe { db.read(page.start()) };
+
+                // the page should never be full if it's in the free list
+                assert_ne!(first.get(), 0);
+
+                let ptr = FilePointer::from_page_offset(page, first.get());
+                let next: U16 = unsafe { db.read(ptr) };
+
+                let header = unsafe { db.modify::<RelativeFreeListHeader>(page.start()) };
+                header.first = next;
+
+                if next.get() == 0 {
+                    // page is full
+                    let next_page = header.next_page;
+                    header.next_page = PagePointer::null();
+                    self.modify(db).head = next_page.start();
+                }
+
+                ptr
+            }
+            SlabKind::AbsoluteFreeList => {
+                let next = unsafe { db.read(head) };
+                self.set_head(db, next);
+                head
             }
         }
+        .range(size as u64)
     }
+
+    pub fn free(&self, db: &mut Db, range: FileRange) {
+        let Slab { head, size } = self.read(db);
+
+        assert_eq!(range.len(), size.get() as u64);
+        let size = size.get();
+
+        match SlabKind::for_size(size) {
+            SlabKind::SingleBytes => todo!(),
+            SlabKind::RelativeFreeList => {
+                let (page, offset) = range.start.page_offset();
+
+                let RelativeFreeListHeader { first, .. } = unsafe { db.read(page.start()) };
+
+                // update the next pointer of the new element in the free list
+                unsafe { db.write(range.start, first) };
+
+                let header = unsafe { db.modify::<RelativeFreeListHeader>(page.start()) };
+
+                // point to the new element
+                header.first = offset.into();
+
+                if first.get() == 0 {
+                    // page was full
+                    let (head_page, offset) = head.page_offset();
+                    assert_eq!(offset, 0);
+
+                    header.next_page = head_page;
+                    self.modify(db).head = page.start();
+                }
+            }
+            SlabKind::AbsoluteFreeList => {
+                unsafe { db.write(range.start, head) };
+                self.set_head(db, range.start);
+            }
+        }
+    }
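Both paths lean on one invariant: offset 0 is where the page header itself sits, so it can never name a slot and doubles as the list terminator. When `alloc` pops the last slot it leaves `first == 0` and unlinks the page; when `free` finds `first == 0` it knows the page was full and re-links it. A hypothetical helper making the sentinel explicit:

    // sketch (hypothetical helper, not in the commit)
    fn page_is_full(header: &RelativeFreeListHeader) -> bool {
        // offset 0 is the header itself, so it doubles as the list terminator
        header.first.get() == 0
    }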
 
-    pub fn set_next(&self, db: &mut Db, next: FilePointer) {
-        db.write(self.0, next);
+    pub fn set_head(&self, db: &mut Db, next: FilePointer) {
+        self.modify(db).head = next;
     }
 
     pub fn allocate_page(&self, db: &mut Db) -> FilePointer {
         let Slab { head, size } = self.read(db);
         let size = size.get();
+        println!("allocate_page({size})");
 
         match SlabKind::for_size(size) {
-            SlabKind::SplitFreeList => todo!(),
-            SlabKind::RelativeFreeList => todo!(),
+            SlabKind::SingleBytes => todo!(),
+            SlabKind::RelativeFreeList => {
+                let page = db.add_pages(1);
+
+                let (next_page, offset) = head.page_offset();
+                assert_eq!(offset, 0);
+
+                let capacity = RelativeFreeListHeader::capacity(size);
+                // slots start immediately after the page header,
+                // matching the DATA_SIZE used by `capacity`
+                let data_offset = size_of::<RelativeFreeListHeader>() as u16;
+
+                unsafe {
+                    db.write(
+                        page.start(),
+                        RelativeFreeListHeader {
+                            next_page,
+                            first: data_offset.into(),
+                        },
+                    )
+                };
+
+                let mut offset = 0;
+                for i in (0..capacity).rev() {
+                    let next = data_offset + (i * size) as u16;
+                    unsafe {
+                        db.write(FilePointer::from_page_offset(page, next), U16::from(offset))
+                    };
+                    offset = next;
+                }
+
+                self.set_head(db, page.start());
+
+                page.start()
+            }
             SlabKind::AbsoluteFreeList => {
                 let n = PAGE_SIZE / size as u64;
@@ -191,12 +351,12 @@ impl SlabPointer {
 
                 let mut next = head;
                 for i in (0..n).rev() {
-                    let current = page + i * size as u64;
-                    db.write(current, next);
+                    let current = page.start() + i * size as u64;
+                    unsafe { db.write(current, next) };
                     next = current;
                 }
 
-                self.set_next(db, next);
+                self.set_head(db, next);
 
                 next
             }
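To see the whole relative-free-list lifecycle in one place, here is a self-contained in-memory model of a single page (a sketch that mirrors the logic above, not the crate's API): the header's `first` field and the leading `u16` of each free slot form an intrusive list of page-relative offsets, terminated by 0.

    // standalone model of one relative-free-list page (sketch, not the crate API)
    const PAGE_SIZE: usize = 4096;
    const HEADER: usize = 6; // next_page (u32) + first (u16)

    struct Page([u8; PAGE_SIZE]);

    impl Page {
        fn first(&self) -> u16 {
            u16::from_le_bytes([self.0[4], self.0[5]])
        }
        fn set_first(&mut self, v: u16) {
            self.0[4..6].copy_from_slice(&v.to_le_bytes());
        }
        fn slot_next(&self, off: u16) -> u16 {
            let off = off as usize;
            u16::from_le_bytes([self.0[off], self.0[off + 1]])
        }
        fn set_slot_next(&mut self, off: u16, v: u16) {
            let off = off as usize;
            self.0[off..off + 2].copy_from_slice(&v.to_le_bytes());
        }

        // format: chain every slot, the last one pointing at the 0 terminator
        fn init(size: usize) -> Page {
            let mut page = Page([0; PAGE_SIZE]);
            let capacity = (PAGE_SIZE - HEADER) / size;
            let mut next = 0u16; // 0 = end of list (the header lives there)
            for i in (0..capacity).rev() {
                let off = (HEADER + i * size) as u16;
                page.set_slot_next(off, next);
                next = off;
            }
            page.set_first(next);
            page
        }

        // pop the first free slot, or None if the page is full
        fn alloc(&mut self) -> Option<u16> {
            let off = self.first();
            (off != 0).then(|| {
                self.set_first(self.slot_next(off));
                off
            })
        }

        // push a slot back onto the front of the list
        fn free(&mut self, off: u16) {
            self.set_slot_next(off, self.first());
            self.set_first(off);
        }
    }

    fn main() {
        let mut page = Page::init(4);
        let a = page.alloc().unwrap();
        assert_eq!(a, 6); // the first slot sits right after the 6-byte header
        let b = page.alloc().unwrap();
        assert_eq!(b, 10);
        page.free(a);
        assert_eq!(page.alloc(), Some(a)); // LIFO reuse
    }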

View File

@@ -12,18 +12,51 @@ const PAGE_SIZE: u64 = 4096;
 type U64 = zerocopy::byteorder::U64<LE>;
 type U32 = zerocopy::byteorder::U32<LE>;
+type U16 = zerocopy::byteorder::U16<LE>;
 
 #[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug, Hash, PartialEq, Eq)]
 #[repr(transparent)]
 pub struct FilePointer(U64);
 
 impl FilePointer {
-    fn page(n: u64) -> Self {
-        Self((n * PAGE_SIZE).into())
+    fn page(self) -> PagePointer {
+        PagePointer(u32::try_from(self.0.get() / PAGE_SIZE).unwrap().into())
+    }
+
+    fn page_offset(self) -> (PagePointer, u16) {
+        (self.page(), (self.0.get() % PAGE_SIZE) as u16)
+    }
+
+    fn from_page_offset(page: PagePointer, offset: u16) -> Self {
+        debug_assert!(
+            offset < PAGE_SIZE as u16,
+            "offset 0x{offset:x} out of page bounds (0..0x{PAGE_SIZE:x})"
+        );
+
+        page.start() + offset as u64
     }
 
     fn null() -> Self {
         Self(U64::ZERO)
     }
 
     fn is_null(self) -> bool {
         self == Self::null()
     }
 }
 
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct PagePointer(U32);
+
+impl PagePointer {
+    fn start(self) -> FilePointer {
+        FilePointer((self.0.get() as u64 * PAGE_SIZE).into())
+    }
+
+    fn range(self) -> FileRange {
+        self.start().range(PAGE_SIZE)
+    }
+
+    fn nth(n: u32) -> Self {
+        Self(n.into())
+    }
+
+    fn null() -> Self {
+        Self::nth(0)
+    }
+}
 
 #[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug, PartialEq, Eq)]
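A `PagePointer` is just a page index, which is what lets it fit in 4 bytes (and the relative free-list header in 6) where a byte-granular `FilePointer` needs 8; conversions multiply or divide by `PAGE_SIZE`. A sketch of the round trip, assuming module-internal access since these helpers are private:

    // sketch: round-tripping between byte pointers and (page, offset) pairs
    let page = PagePointer::nth(3);
    let ptr = FilePointer::from_page_offset(page, 0x10);
    assert_eq!(ptr.page_offset(), (page, 0x10)); // back to (page index 3, offset 16)
    assert_eq!(page.start().page_offset(), (page, 0)); // a page start has offset 0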
@@ -75,7 +108,7 @@ impl Default for Header {
             root: FilePointer::null(),
             allocator_state: AllocatorState {
                 general: FreeList::empty(),
-                slabs: SlabListPointer(FilePointer::page(0) + size_of::<Header>() as u64),
+                slabs: SlabListPointer(FilePointer::null() + size_of::<Header>() as u64),
             },
         }
     }
@@ -203,7 +236,10 @@ impl Db {
         self.map.flush().unwrap();
 
         // update root pointer and immediately flush
-        self.write(Self::root_ptr(), new_root);
+        unsafe {
+            self.write(Self::root_ptr(), new_root);
+        }
+
         self.map
             .flush_range(Self::root_ptr().0.get() as usize, size_of::<FilePointer>())
             .unwrap();
@@ -215,41 +251,41 @@ impl Db {
         }))
     }
 
-    fn read<T: FromBytes>(&self, at: FilePointer) -> T {
+    unsafe fn read<T: FromBytes>(&self, at: FilePointer) -> T {
         self.read_range(at.range(size_of::<T>() as u64))
     }
 
-    fn read_range<T: FromBytes>(&self, range: FileRange) -> T {
+    unsafe fn read_range<T: FromBytes>(&self, range: FileRange) -> T {
         LayoutVerified::<_, T>::new(&self.map[range.as_range()])
             .unwrap()
             .read()
     }
 
-    fn write<T: AsBytes>(&mut self, at: FilePointer, data: T) {
+    unsafe fn write<T: AsBytes>(&mut self, at: FilePointer, data: T) {
         self.write_range(at.range(size_of::<T>() as u64), data)
     }
 
-    fn write_range<T: AsBytes>(&mut self, range: FileRange, data: T) {
+    unsafe fn write_range<T: AsBytes>(&mut self, range: FileRange, data: T) {
         LayoutVerified::<_, T>::new(&mut self.map[range.as_range()])
             .unwrap()
             .write(data)
     }
 
-    fn modify<T: FromBytes + AsBytes>(&mut self, at: FilePointer) -> &mut T {
+    unsafe fn modify<T: FromBytes + AsBytes>(&mut self, at: FilePointer) -> &mut T {
         self.modify_range(at.range(size_of::<T>() as u64))
     }
 
-    fn modify_range<T: FromBytes + AsBytes>(&mut self, range: FileRange) -> &mut T {
+    unsafe fn modify_range<T: FromBytes + AsBytes>(&mut self, range: FileRange) -> &mut T {
         LayoutVerified::<_, T>::new(&mut self.map[range.as_range()])
             .unwrap()
             .into_mut()
     }
 
-    fn reference<T: FromBytes + AsBytes>(&self, at: FilePointer) -> &T {
+    unsafe fn reference<T: FromBytes + AsBytes>(&self, at: FilePointer) -> &T {
         self.reference_range(at.range(size_of::<T>() as u64))
     }
 
-    fn reference_range<T: FromBytes + AsBytes>(&self, range: FileRange) -> &T {
+    unsafe fn reference_range<T: FromBytes + AsBytes>(&self, range: FileRange) -> &T {
         LayoutVerified::<_, T>::new(&self.map[range.as_range()])
             .unwrap()
             .into_ref()
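Marking the whole accessor family `unsafe` makes the contract explicit: `LayoutVerified` only validates the size and alignment of the chosen byte range, so the caller must guarantee that the range actually holds a valid `T` — reading stale or wrong-typed bytes would silently produce garbage. A sketch of the discipline this pushes to call sites:

    // sketch: each call site now carries the safety argument
    // SAFETY: `self.0` points at a SlabListHeader written by `init`,
    // and no other typed view of these bytes is live.
    let header: SlabListHeader = unsafe { db.read(self.0) };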
@@ -264,13 +300,11 @@ impl Db {
         unsafe { Mmap::map(&self.file) }.unwrap()
     }
 
-    fn add_pages(&mut self, n: u64) -> FilePointer {
+    fn add_pages(&mut self, n: u64) -> PagePointer {
         let len = self.file.metadata().unwrap().len();
         self.file.set_len(len + PAGE_SIZE * n).unwrap();
         self.remap();
 
-        FilePointer::null() + len
+        PagePointer::nth((len / PAGE_SIZE).try_into().unwrap())
     }
 
     pub fn new(file: File) -> Self {
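`add_pages` now hands back the index of the first appended page rather than its byte offset; callers convert with `.start()` when they need a `FilePointer`. A sketch of the arithmetic:

    // sketch: a file currently holding two pages (8192 bytes) grows by one;
    // the returned PagePointer indexes the first newly added page
    let len: u64 = 8192; // file length before set_len
    let page = PagePointer::nth((len / PAGE_SIZE) as u32); // page index 2
    assert_eq!(page.start().0.get(), 2 * PAGE_SIZE); // byte offset 8192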
@@ -291,11 +325,13 @@ impl Db {
             header: Header::default(),
         };
 
-        if len == 0 {
-            db.init_allocator();
-            db.write(Self::header_ptr(), db.header);
-        } else {
-            db.header = db.read(Self::header_ptr());
+        unsafe {
+            if len == 0 {
+                db.init_allocator();
+                db.write(Self::header_ptr(), db.header);
+            } else {
+                db.header = db.read(Self::header_ptr());
+            }
         }
 
         let _ = db.state.swap(Arc::new(Snapshot {
@@ -306,7 +342,7 @@ impl Db {
         db
     }
 
-    fn init_allocator(&mut self) {
+    unsafe fn init_allocator(&mut self) {
         let allocator_state = self.header.allocator_state;
         allocator_state.slabs.init(
             self,
@@ -326,18 +362,31 @@ mod tests {
     fn it_works() {
         let mut db = Db::new(tempfile::tempfile().unwrap());
 
-        let slab = db.add_slab(16);
+        fn alloc_and_free_many<const N: usize>(db: &mut Db, n: u64) {
+            let slab = db.add_slab(N as u32);
 
-        for i in 1..520 {
-            let range = slab.get(&mut db);
-            let start = range.start.0.get();
-            dbg!(start);
-            let data = [0; 16].map(|_| i as u8);
-            db.write_range(range, data);
+            let mut ranges = Vec::new();
+            for i in 1..n {
+                let range = slab.alloc(db);
+                let data = [0; N].map(|_| i as u8);
+                unsafe {
+                    db.write_range(range, data);
+                }
+                ranges.push(range);
+            }
+
+            for range in ranges.into_iter().rev() {
+                slab.free(db, range);
+            }
         }
 
+        alloc_and_free_many::<1>(&mut db, 3 * PAGE_SIZE);
+        // alloc_and_free_many::<4>(&mut db, PAGE_SIZE / 4 * 3);
+        // alloc_and_free_many::<16>(&mut db, PAGE_SIZE / 16 * 3);
+
         let mut child = std::process::Command::new("hexdump")
             .arg("-C")
             .stdin(Stdio::piped())