fix wrong manual pointer calculations

soruh 2023-08-03 01:08:05 +02:00
parent ac8bca3868
commit d92eda3ea4
2 changed files with 57 additions and 44 deletions
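For orientation: the new FilePointer<FreeListBlock> methods introduced below replace hand-computed offsets with one shared definition of the free-list block layout. A minimal sketch of that layout, assuming 8-byte little-endian file pointers (consistent with the old size_of::<RawFilePointer>() as u64 + 1 equalling the new size_of::<FreeListBlock>(), and with the db.read::<[u8; 9 + 8]> debug line in the diff); the struct and field names here are illustrative, not from the codebase:

// Mirror of the implied on-disk free-list block; the offsets follow from
// the helper methods in the diff below.
#[repr(C, packed)]
struct FreeListBlockSketch {
    next: u64, // offset 0: next_ptr() -> pointer to the next free block
    size: u8,  // offset 8: size_start_ptr() -> 7-bit size or SIZE_MASK flag
}
// offset 9 == size_of::<FreeListBlockSketch>(): size_end_ptr() -> a full U64
// size stored right after the block header, used only for large blocks.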

View File

@@ -48,33 +48,34 @@ pub struct FreeListBlock {
     size: u8,
 }
 
+impl FilePointer<FreeListBlock> {
+    pub fn next_ptr(self) -> FilePointer<FilePointer<FreeListBlock>> {
+        FilePointer::new(self.into_raw())
+    }
+
+    pub fn size_start_ptr(self) -> FilePointer<u8> {
+        FilePointer::new(self.into_raw() + size_of::<FilePointer<FreeListBlock>>() as u64)
+    }
+
+    pub fn size_end_ptr(self) -> FilePointer<U64> {
+        FilePointer::new(self.into_raw() + size_of::<FreeListBlock>() as u64)
+    }
+}
+
 impl GeneralPurposeAllocator {
     const SIZE_MASK: u8 = 0b1000_0000;
-    const MIN_ALLOCATION_SIZE: u64 = size_of::<RawFilePointer>() as u64 + 1;
-
-    pub(crate) fn next_ptr(
-        ptr: FilePointer<FreeListBlock>,
-    ) -> FilePointer<FilePointer<FreeListBlock>> {
-        FilePointer::new(ptr.into_raw())
-    }
-
-    fn first_byte_ptr(ptr: FilePointer<FreeListBlock>) -> FilePointer<u8> {
-        FilePointer::new(ptr.into_raw() + size_of::<RawFilePointer>() as u64)
-    }
-
-    fn size_ptr(ptr: FilePointer<FreeListBlock>) -> FilePointer<U64> {
-        FilePointer::new(ptr.into_raw() + size_of::<FreeListBlock>() as u64)
-    }
+    const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64;
 
     pub fn size<R>(db: &Db<R>, head: FilePointer<FreeListBlock>) -> u64 {
-        let first_byte: u8 = unsafe { db.read(Self::first_byte_ptr(head)) };
+        // println!("get size({head:?})");
+        let first_byte: u8 = unsafe { db.read(head.size_start_ptr()) };
 
         let size = if first_byte & Self::SIZE_MASK == 0 {
             // small size (can fit in 7bits)
             first_byte as u64
         } else {
             // large size
-            unsafe { db.read::<U64>(Self::size_ptr(head)) }.get()
+            unsafe { db.read::<U64>(head.size_end_ptr()) }.get()
         };
 
         Self::MIN_ALLOCATION_SIZE + size
@@ -86,25 +87,25 @@ impl GeneralPurposeAllocator {
         if size <= (u8::MAX & !Self::SIZE_MASK) as u64 {
             // small size (can fit in 7bits)
             debug_assert_eq!(size as u8 & Self::SIZE_MASK, 0);
-            unsafe { db.write(Self::first_byte_ptr(head), size as u8) };
+            unsafe { db.write(head.size_start_ptr(), size as u8) };
         } else {
             unsafe {
-                db.write(Self::first_byte_ptr(head), Self::SIZE_MASK);
-                db.write::<U64>(Self::size_ptr(head), size.into());
+                db.write(head.size_start_ptr(), Self::SIZE_MASK);
+                db.write::<U64>(head.size_end_ptr(), size.into());
             }
         }
     }
 
     fn clear<R>(db: &mut Db<R>, ptr: FilePointer<FreeListBlock>) -> RawFilePointer {
         unsafe {
-            db.write(Self::next_ptr(ptr), FilePointer::null());
-            let first_byte: u8 = db.read(Self::first_byte_ptr(ptr));
+            db.write(ptr.next_ptr(), FilePointer::null());
+            let first_byte: u8 = db.read(ptr.size_start_ptr());
 
             // clear first size byte
-            db.write(Self::first_byte_ptr(ptr), 0u8);
+            db.write(ptr.size_start_ptr(), 0);
 
             if first_byte & Self::SIZE_MASK != 0 {
                 // larger block. clear full size field
-                db.write(Self::size_ptr(ptr), U64::from(0));
+                db.write(ptr.size_end_ptr(), 0.into());
             }
         }
@@ -144,7 +145,7 @@ impl GeneralPurposeAllocator {
         // if the first element is replaced update the head pointer
         let mut prevprev = FilePointer::<FreeListBlock>::null();
         let mut prev = head;
-        let mut next: FilePointer<FreeListBlock> = unsafe { db.read(Self::next_ptr(head)) };
+        let mut next: FilePointer<FreeListBlock> = unsafe { db.read(head.next_ptr()) };
 
         let empty_list = next.is_null();
@@ -201,16 +202,16 @@ impl GeneralPurposeAllocator {
                 Self::set_size(db, remainder, extra_space);
 
                 // prev must be the current tail of the free list and the newly allocated space, being at the end of the file
                 // must be the last element of the free list to keep it sorted.
-                unsafe { db.write(Self::next_ptr(prev), remainder) };
+                unsafe { db.write(prev.next_ptr(), remainder) };
             } else {
-                unsafe { db.write(Self::next_ptr(prev), FilePointer::<FreeListBlock>::null()) };
+                unsafe { db.write(prev.next_ptr(), FilePointer::<FreeListBlock>::null()) };
             }
 
             start
         } else {
             let start = next;
-            let nextnext = unsafe { db.read(Self::next_ptr(start)) };
+            let nextnext = unsafe { db.read(start.next_ptr()) };
 
             let extra_space = Self::size(db, start) - needed_size;
@@ -224,13 +225,13 @@ impl GeneralPurposeAllocator {
                 Self::set_size(db, remainder, extra_space);
 
                 unsafe {
-                    db.write(Self::next_ptr(prev), remainder);
-                    db.write(Self::next_ptr(remainder), nextnext);
+                    db.write(prev.next_ptr(), remainder);
+                    db.write(remainder.next_ptr(), nextnext);
                 }
 
                 // println!("{:x?}", unsafe { db.read::<[u8; 9 + 8]>(remainder) });
             } else {
-                unsafe { db.write(Self::next_ptr(prev), nextnext) };
+                unsafe { db.write(prev.next_ptr(), nextnext) };
             }
 
             start
@@ -247,25 +248,23 @@ impl GeneralPurposeAllocator {
         let mut size = range.len().max(Self::MIN_ALLOCATION_SIZE);
         let mut start = FilePointer::<FreeListBlock>::new(range.start);
         let range = ();
 
         let head = self.head_ptr;
 
         let mut prevprev = FilePointer::null();
         let mut prev = head;
-        let mut next = unsafe { db.read(Self::next_ptr(head)) };
+        let mut next = unsafe { db.read(head.next_ptr()) };
 
         while !next.is_null() && next < start {
             prevprev = prev;
             prev = next;
-            next = unsafe { db.read(Self::next_ptr(next)) };
+            next = unsafe { db.read(next.next_ptr()) };
         }
 
         if start.into_raw() + size == next.into_raw() {
             // we can merge with the next range
             let nextlen = Self::size(db, next);
-            let nextnext = unsafe { db.read(Self::next_ptr(next)) };
+            let nextnext = unsafe { db.read(next.next_ptr()) };
 
             // println!("merging with next range {:?}", next.range(nextlen));
@@ -290,8 +289,8 @@ impl GeneralPurposeAllocator {
         }
 
         unsafe {
-            db.write(Self::next_ptr(prev), start);
-            db.write(Self::next_ptr(start), next);
+            db.write(prev.next_ptr(), start);
+            db.write(start.next_ptr(), next);
             Self::set_size(db, start, size)
         }
     }
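Taken together, size() and set_size() above implement a small/large length encoding: values up to 127 fit in the low seven bits of the first size byte, anything larger sets the high bit and stores the real value in a trailing U64. A standalone sketch of the scheme (hypothetical free functions for illustration, not part of the codebase):

const SIZE_MASK: u8 = 0b1000_0000;

// Split a size into the leading flag byte plus an optional 8-byte overflow.
fn encode_size(size: u64) -> (u8, Option<u64>) {
    if size <= (u8::MAX & !SIZE_MASK) as u64 {
        (size as u8, None) // small: fits in 7 bits
    } else {
        (SIZE_MASK, Some(size)) // large: flag byte, full value follows
    }
}

// Inverse: `overflow` is only consulted when the flag bit is set.
fn decode_size(first_byte: u8, overflow: u64) -> u64 {
    if first_byte & SIZE_MASK == 0 {
        first_byte as u64
    } else {
        overflow
    }
}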

View File

@@ -3,6 +3,7 @@ use std::{
     collections::{BTreeMap, VecDeque},
     fmt::Debug,
     fs::File,
+    hash::Hash,
     marker::PhantomData,
     mem::size_of,
     ops::Range,
@@ -26,7 +27,7 @@ type U64 = zerocopy::byteorder::U64<LE>;
 type U32 = zerocopy::byteorder::U32<LE>;
 type U16 = zerocopy::byteorder::U16<LE>;
 
-#[derive(FromBytes, FromZeroes, AsBytes, Unaligned, Hash)]
+#[derive(FromBytes, FromZeroes, AsBytes, Unaligned)]
 #[repr(transparent)]
 pub struct FilePointer<T> {
     inner: RawFilePointer,
@@ -70,6 +71,12 @@ impl<T> PartialEq for FilePointer<T> {
 impl<T> Eq for FilePointer<T> {}
 
+impl<T> Hash for FilePointer<T> {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.inner.hash(state);
+    }
+}
+
 impl<T> FilePointer<T> {
     fn from_range(range: FileRange) -> Self {
         assert_eq!(range.len(), size_of::<T>() as u64);
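The switch from #[derive(Hash)] to a manual impl is presumably about trait bounds: the derive would emit impl<T: Hash> Hash for FilePointer<T>, requiring every pointee type to be hashable, while the manual impl hashes only the raw pointer. A minimal reproduction of the difference, using illustrative stand-in types rather than the codebase's:

use std::hash::{Hash, Hasher};
use std::marker::PhantomData;

struct NotHash; // a pointee type without a Hash impl

struct Ptr<T> {
    raw: u64,
    _marker: PhantomData<T>,
}

// Deriving Hash would demand T: Hash; hashing only `raw` lifts that bound.
impl<T> Hash for Ptr<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.raw.hash(state);
    }
}

fn main() {
    let p = Ptr::<NotHash> { raw: 16, _marker: PhantomData };
    let mut h = std::collections::hash_map::DefaultHasher::new();
    p.hash(&mut h); // compiles even though NotHash does not implement Hash
}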
@@ -335,11 +342,11 @@ impl<R> Db<R> {
     }
 
     fn root_ptr() -> FilePointer<FilePointer<R>> {
-        FilePointer::new(RawFilePointer((size_of::<Header>() as u64).into()))
+        FilePointer::new(RawFilePointer(16.into()))
     }
 
     fn allocator_state_ptr() -> RawFilePointer {
-        RawFilePointer((size_of::<Header>() as u64 + size_of::<RawFilePointer>() as u64).into())
+        RawFilePointer((size_of::<Header>() as u64 - size_of::<AllocatorState>() as u64).into())
     }
 
     fn general_purpose_allocator() -> GeneralPurposeAllocator {
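These two offsets appear to be the pointer math the commit title refers to: the root pointer lives at a fixed offset of 16 inside the header, and the allocator state occupies the header's tail, so both addresses now fall within size_of::<Header>() instead of pointing past it. The layout this implies (a reconstruction; only the two offsets come from the code, everything else is assumed):

// bytes 0..16                         leading header fields (contents unknown here)
// byte  16                            root pointer       <- root_ptr()
// ...
// size_of::<Header>()
//   - size_of::<AllocatorState>() ..  allocator state    <- allocator_state_ptr()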
@@ -556,6 +563,10 @@ impl<R> Db<R> {
     // TODO: scrap the PAGE-wise allocation and make slab allocations allocations of the general allocator.
     pub fn allocate(&mut self, size: u64) -> FileRange {
         // println!("allocate({size})");
+        if size == 0 {
+            return RawFilePointer::null().range(0);
+        }
+
         if let Some(slab) = self.get_slab(size) {
             slab.alloc(self)
         } else {
@@ -564,6 +575,9 @@ impl<R> Db<R> {
     }
 
     pub fn free(&mut self, range: FileRange) {
+        if range.len() == 0 {
+            return;
+        }
         if let Some(slab) = self.get_slab(range.len()) {
             slab.free(self, range)
         } else {
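The two new guards make zero-sized allocations a matched pair: allocate(0) returns the null range without touching the slab or general-purpose allocators, and free() ignores empty ranges, so the round trip is a no-op. A hypothetical usage sketch, assuming an open db: Db<R>:

let empty = db.allocate(0); // null range, no file space consumed
assert_eq!(empty.len(), 0);
db.free(empty); // returns immediately via the len() == 0 guard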
@@ -612,7 +626,7 @@ mod tests {
     fn fragmentation<R>(db: &mut Db<R>, print: bool) -> usize {
         let allocator = Db::<()>::general_purpose_allocator();
 
-        let mut next = unsafe { db.read(GeneralPurposeAllocator::next_ptr(allocator.head_ptr)) };
+        let mut next = unsafe { db.read(allocator.head_ptr.next_ptr()) };
 
         let mut n = 0;
         while !next.is_null() {
@@ -620,7 +634,7 @@ mod tests {
             if print {
                 println!("\x1b[34m[{n}]\x1b[m {:?}", next.into_raw().range(size));
             }
-            next = unsafe { db.read(GeneralPurposeAllocator::next_ptr(next)) };
+            next = unsafe { db.read(next.next_ptr()) };
             n += 1;
         }
@@ -739,7 +753,7 @@ mod tests {
     }
 
     #[test]
-    fn it_works() {
+    fn allocator() {
         let mut db = Db::<()>::create(tempfile::tempfile().unwrap(), &[4, 7, 16]);
 
         let mut ranges = Vec::new();