From 53441e014e3473dce82784fe169d2e7190684118 Mon Sep 17 00:00:00 2001
From: soruh
Date: Thu, 3 Aug 2023 02:09:14 +0200
Subject: [PATCH] use offset_of! for ptr offsets

---
 Cargo.toml       |  3 +-
 src/allocator.rs | 55 +++++++++++++++++++++++-------------
 src/lib.rs       | 72 +++++++++++++++++++++++++++++++++---------------
 3 files changed, 88 insertions(+), 42 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 29e62ce..8dc5504 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,8 @@ edition = "2021"
 
 [dependencies]
 memmap = "0.7.0"
-zerocopy = "0.7.0-alpha.5"
+memoffset = "0.9.0"
+zerocopy = { version = "0.7.0-alpha.5", features = ["alloc"] }
 
 [dev-dependencies]
 rand = "0.8.5"
diff --git a/src/allocator.rs b/src/allocator.rs
index 34f28d8..0bb8586 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -1,5 +1,6 @@
 use std::mem::size_of;
 
+use memoffset::offset_of;
 use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned};
 
 use crate::{Db, FilePointer, FileRange, PagePointer, RawFilePointer, PAGE_SIZE, U16, U32, U64};
@@ -31,14 +32,24 @@ impl SlabKind {
 #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
 #[repr(C)]
 pub struct AllocatorState {
-    pub general: RawFilePointer,
+    pub general: FilePointer<FreeListBlock>,
     pub slabs: SlabListPointer,
 }
 
+impl FilePointer<AllocatorState> {
+    pub fn general_ptr(self) -> FilePointer<FilePointer<FreeListBlock>> {
+        field_ptr!(self, AllocatorState, general)
+    }
+
+    pub fn slabs_ptr(self) -> FilePointer<SlabListPointer> {
+        field_ptr!(self, AllocatorState, slabs)
+    }
+}
+
 #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
 #[repr(transparent)]
 pub struct GeneralPurposeAllocator {
-    pub head_ptr: FilePointer<FreeListBlock>,
+    pub head_ptr: FilePointer<FilePointer<FreeListBlock>>,
 }
 
 #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
 #[repr(C)]
@@ -46,36 +57,37 @@ pub struct GeneralPurposeAllocator {
 pub struct FreeListBlock {
     next: FilePointer<FreeListBlock>,
     size: u8,
+    full_size: U64,
 }
 
 impl FilePointer<FreeListBlock> {
     pub fn next_ptr(self) -> FilePointer<FilePointer<FreeListBlock>> {
-        FilePointer::new(self.into_raw())
+        FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, next) as u64)
     }
 
-    pub fn size_start_ptr(self) -> FilePointer<u8> {
-        FilePointer::new(self.into_raw() + size_of::<FilePointer<FreeListBlock>>() as u64)
+    pub fn size_ptr(self) -> FilePointer<u8> {
+        FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, size) as u64)
     }
 
-    pub fn size_end_ptr(self) -> FilePointer<U64> {
-        FilePointer::new(self.into_raw() + size_of::<FreeListBlock>() as u64)
+    pub fn full_size_ptr(self) -> FilePointer<U64> {
+        FilePointer::new(self.into_raw() + offset_of!(FreeListBlock, full_size) as u64)
     }
 }
 
 impl GeneralPurposeAllocator {
     const SIZE_MASK: u8 = 0b1000_0000;
-    const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64;
+    const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64 - size_of::<U64>() as u64;
 
     pub fn size(db: &Db, head: FilePointer<FreeListBlock>) -> u64 {
         // println!("get size({head:?})");
-        let first_byte: u8 = unsafe { db.read(head.size_start_ptr()) };
+        let first_byte: u8 = unsafe { db.read(head.size_ptr()) };
 
         let size = if first_byte & Self::SIZE_MASK == 0 {
             // small size (can fit in 7bits)
             first_byte as u64
         } else {
             // large size
-            unsafe { db.read::<U64>(head.size_end_ptr()) }.get()
+            unsafe { db.read(head.full_size_ptr()) }.get()
         };
 
         Self::MIN_ALLOCATION_SIZE + size
@@ -87,11 +99,11 @@ impl GeneralPurposeAllocator {
         if size <= (u8::MAX & !Self::SIZE_MASK) as u64 {
             // small size (can fit in 7bits)
             debug_assert_eq!(size as u8 & Self::SIZE_MASK, 0);
-            unsafe { db.write(head.size_start_ptr(), size as u8) };
+            unsafe { db.write(head.size_ptr(), size as u8) };
         } else {
             unsafe {
-                db.write(head.size_start_ptr(), Self::SIZE_MASK);
-                db.write::<U64>(head.size_end_ptr(), size.into());
+                db.write(head.size_ptr(), Self::SIZE_MASK);
+                db.write(head.full_size_ptr(), size.into());
             }
         }
     }
@@ -99,13 +111,13 @@ impl GeneralPurposeAllocator {
     fn clear(db: &mut Db, ptr: FilePointer<FreeListBlock>) -> RawFilePointer {
         unsafe {
             db.write(ptr.next_ptr(), FilePointer::null());
 
-            let first_byte: u8 = db.read(ptr.size_start_ptr());
+            let first_byte: u8 = db.read(ptr.size_ptr());
             // clear first size byte
-            db.write(ptr.size_start_ptr(), 0);
+            db.write(ptr.size_ptr(), 0);
 
             if first_byte & Self::SIZE_MASK != 0 {
                 // larger block. clear full size field
-                db.write(ptr.size_end_ptr(), 0.into());
+                db.write(ptr.full_size_ptr(), 0.into());
             }
         }
@@ -140,7 +152,11 @@ impl GeneralPurposeAllocator {
         // we need space to store the free list entry
        let needed_size = expected_size.max(Self::MIN_ALLOCATION_SIZE);
 
-        let head = self.head_ptr;
+        // While this pointer doesn't technically point to a `FreeListBlock` (it points to the head
+        // pointer), as long as it is at most in the `prev` position it will never be allocated and is
+        // only ever written to in order to set the pointer to the first element. We use `empty_list`
+        // to make sure we never write into it incorrectly.
+        let head = self.head_ptr.cast::<FreeListBlock>();
 
         // if the first element is replaced update the head pointer
         let mut prevprev = FilePointer::<FreeListBlock>::null();
@@ -152,7 +168,7 @@ impl GeneralPurposeAllocator {
         while !next.is_null() && !Self::can_allocate_into(needed_size, Self::size(db, next)) {
             prevprev = prev;
             prev = next;
-            next = unsafe { db.read(next) }.next;
+            next = unsafe { db.read(next.next_ptr()) };
         }
 
         // dbg!(next, Self::size(db, next));
@@ -248,7 +264,8 @@ impl GeneralPurposeAllocator {
         let mut size = range.len().max(Self::MIN_ALLOCATION_SIZE);
         let mut start = FilePointer::<FreeListBlock>::new(range.start);
 
-        let head = self.head_ptr;
+        // see `allocate` for reasoning why this is okay
+        let head = self.head_ptr.cast::<FreeListBlock>();
 
         let mut prevprev = FilePointer::null();
         let mut prev = head;
diff --git a/src/lib.rs b/src/lib.rs
index 612eada..6645588 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -10,6 +10,22 @@ use std::{
     sync::Arc,
 };
 
+macro_rules! field_ptr {
+    ($base_ptr: expr, $type: ty, $field: ident) => {{
+        let base: FilePointer<$type> = $base_ptr;
+
+        let res = FilePointer::new(base.into_raw() + offset_of!($type, $field) as u64);
+
+        if false {
+            let ptr: Box<$type> = <$type as ::zerocopy::FromZeroes>::new_box_zeroed();
+            let mut addr = ::core::ptr::addr_of!(ptr.$field);
+            addr = res.typed_null_ptr();
+        }
+
+        res
+    }};
+}
+
 mod allocator;
 mod atomic_arc;
 mod mapped;
@@ -18,6 +34,7 @@ mod transaction;
 use allocator::{AllocatorState, GeneralPurposeAllocator, SlabListPointer, SlabPointer};
 use atomic_arc::AtomicArc;
 use memmap::{Mmap, MmapMut};
+use memoffset::offset_of;
 use transaction::TransactionHandle;
 use zerocopy::{AsBytes, FromBytes, FromZeroes, Ref, Unaligned, LE};
 
@@ -107,6 +124,15 @@ impl<T> FilePointer<T> {
     pub fn into_raw(self) -> RawFilePointer {
         self.inner
     }
+
+    pub fn cast<U>(self) -> FilePointer<U> {
+        FilePointer::new(self.into_raw())
+    }
+
+    #[doc(hidden)]
+    pub fn typed_null_ptr(self) -> *const T {
+        std::ptr::null()
+    }
 }
 
 #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned, Hash, PartialEq, Eq)]
@@ -226,7 +252,7 @@ impl Default for Header {
             magic: *b"cool db format 1",
             root: RawFilePointer::null(),
             allocator_state: AllocatorState {
-                general: RawFilePointer::null(),
+                general: FilePointer::null(),
                 slabs: SlabListPointer(FilePointer::new(
                     RawFilePointer::null() + size_of::<Header>() as u64,
                )),
@@ -274,11 +300,31 @@ struct SnapshotAndFreeList {
     to_free: Vec<FileRange>,
 }
 
+impl FilePointer<Header> {
+    fn root_ptr<T>(self) -> FilePointer<FilePointer<T>> {
+        field_ptr!(self, Header, root).cast::<FilePointer<T>>()
+    }
+
+    fn allocator_state_ptr(self) -> FilePointer<AllocatorState> {
+        field_ptr!(self, Header, allocator_state)
+    }
+}
+
 impl<T> Db<T> {
     fn root(&self) -> FilePointer<T> {
         FilePointer::new(self.header().root)
     }
 
+    fn header_ptr() -> FilePointer<Header> {
+        FilePointer::null()
+    }
+
+    fn general_purpose_allocator() -> GeneralPurposeAllocator {
+        GeneralPurposeAllocator {
+            head_ptr: Self::header_ptr().allocator_state_ptr().general_ptr(),
+        }
+    }
+
     fn header(&self) -> &Header {
         unsafe { self.reference_range_unchecked(Self::header_ptr().range()) }
     }
@@ -337,24 +383,6 @@ impl<T> Db<T> {
         self.snapshots = snapshots;
     }
 
-    fn header_ptr() -> FilePointer<Header> {
-        FilePointer::new(RawFilePointer(0.into()))
-    }
-
-    fn root_ptr() -> FilePointer<FilePointer<T>> {
-        FilePointer::new(RawFilePointer(16.into()))
-    }
-
-    fn allocator_state_ptr() -> RawFilePointer {
-        RawFilePointer((size_of::<Header>() as u64 - size_of::<AllocatorState>() as u64).into())
-    }
-
-    fn general_purpose_allocator() -> GeneralPurposeAllocator {
-        GeneralPurposeAllocator {
-            head_ptr: FilePointer::new(Self::allocator_state_ptr()),
-        }
-    }
-
     pub fn create_reader(&self) -> Reader {
         Reader {
             state: self.state.clone(),
         }
     }
@@ -370,12 +398,12 @@ impl<T> Db<T> {
 
         // update root pointer and immediately flush
         unsafe {
-            self.write(Self::root_ptr(), new_root);
+            self.write(Self::header_ptr().root_ptr(), new_root);
         }
 
         self.map
             .flush_range(
-                Self::root_ptr().into_raw().0.get() as usize,
+                Self::header_ptr().root_ptr::<T>().into_raw().0.get() as usize,
                 size_of::<RawFilePointer>(),
             )
             .unwrap();
@@ -626,7 +654,7 @@ mod tests {
     fn fragmentation(db: &mut Db, print: bool) -> usize {
         let allocator = Db::<()>::general_purpose_allocator();
 
-        let mut next = unsafe { db.read(allocator.head_ptr.next_ptr()) };
+        let mut next = unsafe { db.read(allocator.head_ptr) };
 
         let mut n = 0;
         while !next.is_null() {
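
Note (not part of the patch): below is a minimal, self-contained sketch of the idea behind the
field_ptr! macro introduced above, using memoffset::offset_of! (the dependency this patch adds).
The FilePtr and Header types here are simplified stand-ins for illustration only; they are not the
crate's real FilePointer/Header definitions.

    use core::marker::PhantomData;
    use memoffset::offset_of;

    /// A typed offset into the database file: just a byte offset plus a phantom type.
    #[derive(Clone, Copy)]
    struct FilePtr<T> {
        offset: u64,
        _marker: PhantomData<T>,
    }

    impl<T> FilePtr<T> {
        const fn new(offset: u64) -> Self {
            Self { offset, _marker: PhantomData }
        }
    }

    /// Simplified on-disk header layout (illustrative only).
    #[repr(C)]
    struct Header {
        magic: [u8; 16],
        root: u64,
    }

    /// Pointer to the `root` field, derived from the struct layout with `offset_of!`
    /// instead of a hand-maintained `size_of` sum.
    fn root_ptr(header: FilePtr<Header>) -> FilePtr<u64> {
        FilePtr::new(header.offset + offset_of!(Header, root) as u64)
    }

    fn main() {
        let header = FilePtr::<Header>::new(0);
        // With the layout above, `root` sits 16 bytes into the header.
        assert_eq!(root_ptr(header).offset, 16);
        println!("root field lives at byte {}", root_ptr(header).offset);
    }

The real field_ptr! macro additionally pins the pointee type at compile time: inside an `if false`
block it assigns `addr_of!(ptr.$field)` to the typed result pointer, so naming the wrong field type
fails to compile instead of silently producing a mis-typed file pointer.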