use offset_of! for ptr offsets
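
This replaces hand-computed byte offsets (size_of arithmetic and hardcoded
constants) with memoffset's offset_of! when deriving typed file pointers to
struct fields. A minimal sketch of the primitive this builds on (the example
struct is illustrative, not from this repo):

use memoffset::offset_of;

#[repr(C)]
struct Example {
    a: u64,
    b: u8,
}

fn main() {
    // offset_of! yields the byte offset of a field within its struct,
    // replacing manual size_of::<...>() arithmetic that silently breaks
    // when fields are added or reordered
    assert_eq!(offset_of!(Example, a), 0);
    assert_eq!(offset_of!(Example, b), 8);
}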

soruh 2023-08-03 02:09:14 +02:00
parent d92eda3ea4
commit 53441e014e
3 changed files with 88 additions and 42 deletions

Cargo.toml

@@ -7,7 +7,8 @@ edition = "2021"
[dependencies]
memmap = "0.7.0"
zerocopy = "0.7.0-alpha.5"
memoffset = "0.9.0"
zerocopy = { version = "0.7.0-alpha.5", features = ["alloc"] }
[dev-dependencies]
rand = "0.8.5"
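(Presumably the new "alloc" feature gates `FromZeroes::new_box_zeroed`, which the `field_ptr!` macro added below uses; `memoffset` provides the `offset_of!` macro itself.)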

src/allocator.rs

@@ -1,5 +1,6 @@
use std::mem::size_of;
use memoffset::offset_of;
use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned};
use crate::{Db, FilePointer, FileRange, PagePointer, RawFilePointer, PAGE_SIZE, U16, U32, U64};
@@ -31,14 +32,24 @@ impl SlabKind {
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
#[repr(C)]
pub struct AllocatorState {
pub general: RawFilePointer,
pub general: FilePointer<FreeListBlock>,
pub slabs: SlabListPointer,
}
impl FilePointer<AllocatorState> {
pub fn general_ptr(self) -> FilePointer<FilePointer<FreeListBlock>> {
field_ptr!(self, AllocatorState, general)
}
pub fn slabs_ptr(self) -> FilePointer<SlabListPointer> {
field_ptr!(self, AllocatorState, slabs)
}
}
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
#[repr(transparent)]
pub struct GeneralPurposeAllocator {
pub head_ptr: FilePointer<FreeListBlock>,
pub head_ptr: FilePointer<FilePointer<FreeListBlock>>,
}
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
@@ -46,36 +57,37 @@ pub struct GeneralPurposeAllocator {
pub struct FreeListBlock {
next: FilePointer<FreeListBlock>,
size: u8,
full_size: U64,
}
impl FilePointer<FreeListBlock> {
pub fn next_ptr(self) -> FilePointer<FilePointer<FreeListBlock>> {
FilePointer::new(self.into_raw())
FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, next) as u64)
}
pub fn size_start_ptr(self) -> FilePointer<u8> {
FilePointer::new(self.into_raw() + size_of::<FilePointer<FreeListBlock>>() as u64)
pub fn size_ptr(self) -> FilePointer<u8> {
FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, size) as u64)
}
pub fn size_end_ptr(self) -> FilePointer<U64> {
FilePointer::new(self.into_raw() + size_of::<FreeListBlock>() as u64)
pub fn full_size_ptr(self) -> FilePointer<U64> {
FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, full_size) as u64)
}
}
impl GeneralPurposeAllocator {
const SIZE_MASK: u8 = 0b1000_0000;
const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64;
const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64 - size_of::<U64>() as u64;
pub fn size<R>(db: &Db<R>, head: FilePointer<FreeListBlock>) -> u64 {
// println!("get size({head:?})");
let first_byte: u8 = unsafe { db.read(head.size_start_ptr()) };
let first_byte: u8 = unsafe { db.read(head.size_ptr()) };
let size = if first_byte & Self::SIZE_MASK == 0 {
// small size (can fit in 7 bits)
first_byte as u64
} else {
// large size
unsafe { db.read::<U64>(head.size_end_ptr()) }.get()
unsafe { db.read(head.full_size_ptr()) }.get()
};
Self::MIN_ALLOCATION_SIZE + size
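
To make the size encoding concrete, a hedged standalone sketch (SIZE_MASK mirrors the code; `decode_stored_size` is a hypothetical helper, and the constant 9 assumes the unaligned zerocopy layout of `FreeListBlock`): sizes up to 127 are stored inline in the low 7 bits of the first byte; otherwise the high bit is set as a flag and the real value lives in the `full_size` field, which small blocks never materialize (which is why `MIN_ALLOCATION_SIZE` now subtracts `size_of::<U64>()`). The stored value is a delta above `MIN_ALLOCATION_SIZE`, which `size()` adds back:

const SIZE_MASK: u8 = 0b1000_0000;
// assumption: size_of::<FreeListBlock>() - size_of::<U64>() = 17 - 8 = 9
const MIN_ALLOCATION_SIZE: u64 = 9;

fn decode_stored_size(first_byte: u8, full_size: u64) -> u64 {
    let stored = if first_byte & SIZE_MASK == 0 {
        // small size: the byte itself holds the value (7 usable bits)
        first_byte as u64
    } else {
        // large size: the first byte is only a flag
        full_size
    };
    MIN_ALLOCATION_SIZE + stored
}

fn main() {
    assert_eq!(decode_stored_size(5, 0), 14); // small: 9 + 5
    assert_eq!(decode_stored_size(SIZE_MASK, 1000), 1009); // large
}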
@@ -87,11 +99,11 @@ impl GeneralPurposeAllocator {
if size <= (u8::MAX & !Self::SIZE_MASK) as u64 {
// small size (can fit in 7 bits)
debug_assert_eq!(size as u8 & Self::SIZE_MASK, 0);
unsafe { db.write(head.size_start_ptr(), size as u8) };
unsafe { db.write(head.size_ptr(), size as u8) };
} else {
unsafe {
db.write(head.size_start_ptr(), Self::SIZE_MASK);
db.write::<U64>(head.size_end_ptr(), size.into());
db.write(head.size_ptr(), Self::SIZE_MASK);
db.write(head.full_size_ptr(), size.into());
}
}
}
@@ -99,13 +111,13 @@ impl GeneralPurposeAllocator {
fn clear<R>(db: &mut Db<R>, ptr: FilePointer<FreeListBlock>) -> RawFilePointer {
unsafe {
db.write(ptr.next_ptr(), FilePointer::null());
let first_byte: u8 = db.read(ptr.size_start_ptr());
let first_byte: u8 = db.read(ptr.size_ptr());
// clear first size byte
db.write(ptr.size_start_ptr(), 0);
db.write(ptr.size_ptr(), 0);
if first_byte & Self::SIZE_MASK != 0 {
// larger block. clear full size field
db.write(ptr.size_end_ptr(), 0.into());
db.write(ptr.full_size_ptr(), 0.into());
}
}
@@ -140,7 +152,11 @@ impl GeneralPurposeAllocator {
// we need space to store the free list entry
let needed_size = expected_size.max(Self::MIN_ALLOCATION_SIZE);
let head = self.head_ptr;
// while this pointer doesn't technically point to a `FreeListBlock` (it points to the head pointer),
// as long as it is used at most in the `prev` position it will never be allocated and will only ever
// be written to in order to set the pointer to the first element. We use `empty_list` to make sure
// we never write into it incorrectly.
let head = self.head_ptr.cast::<FreeListBlock>();
// if the first element is replaced update the head pointer
let mut prevprev = FilePointer::<FreeListBlock>::null();
@@ -152,7 +168,7 @@ impl GeneralPurposeAllocator {
while !next.is_null() && !Self::can_allocate_into(needed_size, Self::size(db, next)) {
prevprev = prev;
prev = next;
next = unsafe { db.read(next) }.next;
next = unsafe { db.read(next.next_ptr()) };
}
// dbg!(next, Self::size(db, next));
@@ -248,7 +264,8 @@ impl GeneralPurposeAllocator {
let mut size = range.len().max(Self::MIN_ALLOCATION_SIZE);
let mut start = FilePointer::<FreeListBlock>::new(range.start);
let head = self.head_ptr;
// see `allocate` for reasoning why this is okay
let head = self.head_ptr.cast::<FreeListBlock>();
let mut prevprev = FilePointer::null();
let mut prev = head;

View File

@@ -10,6 +10,22 @@ use std::{
sync::Arc,
};
macro_rules! field_ptr {
($base_ptr: expr, $type: ty, $field: ident) => {{
let base: FilePointer<$type> = $base_ptr;
let res = FilePointer::new(base.into_raw() + offset_of!($type, $field) as u64);
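// this block never executes; it only forces the compiler to unify the
// pointee type of `res` with the actual type of `$field`, so a mistyped
// field becomes a compile error instead of a silently wrong `FilePointer`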
if false {
let ptr: Box<$type> = <$type as ::zerocopy::FromZeroes>::new_box_zeroed();
let mut addr = ::core::ptr::addr_of!(ptr.$field);
addr = res.typed_null_ptr();
}
res
}};
}
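(A hedged reading of the trick: `offset_of!` alone would let the macro's result be inferred as a `FilePointer` to any type; the never-run `if false` block pins it to the field's declared type, so e.g. `field_ptr!(p, AllocatorState, general)` can only type-check as `FilePointer<FilePointer<FreeListBlock>>`.)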
mod allocator;
mod atomic_arc;
mod mapped;
@@ -18,6 +34,7 @@ mod transaction;
use allocator::{AllocatorState, GeneralPurposeAllocator, SlabListPointer, SlabPointer};
use atomic_arc::AtomicArc;
use memmap::{Mmap, MmapMut};
use memoffset::offset_of;
use transaction::TransactionHandle;
use zerocopy::{AsBytes, FromBytes, FromZeroes, Ref, Unaligned, LE};
@@ -107,6 +124,15 @@ impl<T> FilePointer<T> {
pub fn into_raw(self) -> RawFilePointer {
self.inner
}
pub fn cast<U>(self) -> FilePointer<U> {
FilePointer::new(self.into_raw())
}
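// only used by `field_ptr!` for its never-executed type check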
#[doc(hidden)]
pub fn typed_null_ptr(self) -> *const T {
std::ptr::null()
}
}
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned, Hash, PartialEq, Eq)]
@@ -226,7 +252,7 @@ impl Default for Header {
magic: *b"cool db format 1",
root: RawFilePointer::null(),
allocator_state: AllocatorState {
general: RawFilePointer::null(),
general: FilePointer::null(),
slabs: SlabListPointer(FilePointer::new(
RawFilePointer::null() + size_of::<Header>() as u64,
)),
@@ -274,11 +300,31 @@ struct SnapshotAndFreeList<R> {
to_free: Vec<FileRange>,
}
impl FilePointer<Header> {
fn root_ptr<R>(self) -> FilePointer<FilePointer<R>> {
field_ptr!(self, Header, root).cast::<FilePointer<R>>()
}
fn allocator_state_ptr(self) -> FilePointer<AllocatorState> {
field_ptr!(self, Header, allocator_state)
}
}
impl<R> Db<R> {
fn root(&self) -> FilePointer<R> {
FilePointer::new(self.header().root)
}
fn header_ptr() -> FilePointer<Header> {
FilePointer::null()
}
fn general_purpose_allocator() -> GeneralPurposeAllocator {
GeneralPurposeAllocator {
head_ptr: Self::header_ptr().allocator_state_ptr().general_ptr(),
}
}
fn header(&self) -> &Header {
unsafe { self.reference_range_unchecked(Self::header_ptr().range()) }
}
@@ -337,24 +383,6 @@ impl<R> Db<R> {
self.snapshots = snapshots;
}
fn header_ptr() -> FilePointer<Header> {
FilePointer::new(RawFilePointer(0.into()))
}
fn root_ptr() -> FilePointer<FilePointer<R>> {
FilePointer::new(RawFilePointer(16.into()))
}
fn allocator_state_ptr() -> RawFilePointer {
RawFilePointer((size_of::<Header>() as u64 - size_of::<AllocatorState>() as u64).into())
}
fn general_purpose_allocator() -> GeneralPurposeAllocator {
GeneralPurposeAllocator {
head_ptr: FilePointer::new(Self::allocator_state_ptr()),
}
}
pub fn create_reader(&self) -> Reader<R> {
Reader {
state: self.state.clone(),
@@ -370,12 +398,12 @@ impl<R> Db<R> {
// update root pointer and immediately flush
unsafe {
self.write(Self::root_ptr(), new_root);
self.write(Self::header_ptr().root_ptr(), new_root);
}
self.map
.flush_range(
Self::root_ptr().into_raw().0.get() as usize,
Self::header_ptr().root_ptr::<R>().into_raw().0.get() as usize,
size_of::<RawFilePointer>(),
)
.unwrap();
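
For a sense of what the refactor buys, a minimal standalone model of the pattern (simplified and hypothetical: the real `FilePointer` wraps a `RawFilePointer` and carries zerocopy bounds): the root pointer's location, hardcoded as `RawFilePointer(16)` in the removed code above, now falls out of the `Header` layout.

use std::marker::PhantomData;
use memoffset::offset_of;

#[derive(Clone, Copy)]
struct FilePointer<T> {
    offset: u64,
    _marker: PhantomData<T>,
}

impl<T> FilePointer<T> {
    fn new(offset: u64) -> Self {
        Self { offset, _marker: PhantomData }
    }
}

#[repr(C)]
struct Header {
    magic: [u8; 16],
    root: u64, // stands in for RawFilePointer
}

impl FilePointer<Header> {
    // the root pointer's offset is derived from Header's layout
    // instead of the old hardcoded 16
    fn root_ptr(self) -> FilePointer<u64> {
        FilePointer::new(self.offset + offset_of!(Header, root) as u64)
    }
}

fn main() {
    let header = FilePointer::<Header>::new(0); // the header sits at file offset 0
    assert_eq!(header.root_ptr().offset, 16);
}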
@@ -626,7 +654,7 @@ mod tests {
fn fragmentation<R>(db: &mut Db<R>, print: bool) -> usize {
let allocator = Db::<()>::general_purpose_allocator();
let mut next = unsafe { db.read(allocator.head_ptr.next_ptr()) };
let mut next = unsafe { db.read(allocator.head_ptr) };
let mut n = 0;
while !next.is_null() {