From 2d9791db30805ded0172cc62ab27e1df902825b6 Mon Sep 17 00:00:00 2001
From: soruh
Date: Fri, 28 Jul 2023 01:40:36 +0200
Subject: [PATCH] implement general allocator

---
 Cargo.toml       |   1 +
 src/allocator.rs | 275 +++++++++++++++++++++++++++++++++++++++++++++--
 src/lib.rs       | 271 ++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 519 insertions(+), 28 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 4e63b6e..dd98bc1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,4 +10,5 @@ memmap = "0.7.0"
 zerocopy = "0.6.1"
 
 [dev-dependencies]
+rand = "0.8.5"
 tempfile = "3.7.0"
diff --git a/src/allocator.rs b/src/allocator.rs
index 0d83e12..a55d37e 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -2,7 +2,7 @@ use std::mem::size_of;
 
 use zerocopy::{AsBytes, FromBytes, Unaligned};
 
-use crate::{Db, FilePointer, FileRange, PagePointer, PAGE_SIZE, U16, U32};
+use crate::{Db, FilePointer, FileRange, PagePointer, PAGE_SIZE, U16, U32, U64};
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 enum SlabKind {
@@ -45,11 +45,265 @@ impl FreeList {
 #[derive(Clone, Copy, FromBytes, AsBytes, Unaligned)]
 #[repr(C)]
 pub struct AllocatorState {
-    pub general: FreeList,
+    pub general: FilePointer,
     pub slabs: SlabListPointer,
 }
 
 #[derive(Clone, Copy, FromBytes, AsBytes, Unaligned)]
+#[repr(transparent)]
+pub struct GeneralPurposeAllocator {
+    pub head_ptr: FilePointer,
+}
+
+impl GeneralPurposeAllocator {
+    const SIZE_MASK: u8 = 0b1000_0000;
+    const MIN_ALLOCATION_SIZE: u64 = size_of::<FilePointer>() as u64 + 1;
+
+    fn size_ptr(ptr: FilePointer) -> FilePointer {
+        ptr + size_of::<FilePointer>() as u64
+    }
+
+    pub fn size(db: &Db, head: FilePointer) -> u64 {
+        let first_byte: u8 = unsafe { db.read(Self::size_ptr(head)) };
+
+        let size = if first_byte & Self::SIZE_MASK == 0 {
+            // small size (can fit in 7 bits)
+            first_byte as u64
+        } else {
+            // large size
+            unsafe { db.read::<U64>(Self::size_ptr(head) + 1) }.get()
+        };
+
+        Self::MIN_ALLOCATION_SIZE + size
+    }
+
+    fn set_size(db: &mut Db, head: FilePointer, size: u64) {
+        assert!(size >= Self::MIN_ALLOCATION_SIZE);
+        let size = size - Self::MIN_ALLOCATION_SIZE;
+        if size <= (u8::MAX & !Self::SIZE_MASK) as u64 {
+            // small size (can fit in 7 bits)
+            debug_assert_eq!(size as u8 & Self::SIZE_MASK, 0);
+            unsafe { db.write(Self::size_ptr(head), size as u8) };
+        } else {
+            unsafe {
+                db.write(Self::size_ptr(head), Self::SIZE_MASK);
+                db.write::<U64>(Self::size_ptr(head) + 1, size.into());
+            }
+        }
+    }
+
+    fn clear(db: &mut Db, ptr: FilePointer) {
+        unsafe {
+            db.write(ptr, FilePointer::null());
+            let first_byte: u8 = db.read(Self::size_ptr(ptr));
+
+            if first_byte & Self::SIZE_MASK == 0 {
+                db.write(Self::size_ptr(ptr), 0u8);
+            } else {
+                db.write(Self::size_ptr(ptr), U64::from(0));
+            }
+        }
+    }
+
+    fn can_allocate_into(needed_size: u64, actual_size: u64) -> bool {
+        use std::cmp::Ordering::*;
+
+        match actual_size.cmp(&needed_size) {
+            Less => false,
+            Equal => true,
+            // leave space to insert the remaining space into the free list
+            Greater => actual_size >= needed_size + Self::MIN_ALLOCATION_SIZE,
+        }
+    }
+
+    fn needed_pages(size: u64) -> u64 {
+        let mut n_pages = div_round_up(size, PAGE_SIZE);
+        let extra_space = n_pages * PAGE_SIZE - size;
+
+        if extra_space != 0 && extra_space < Self::MIN_ALLOCATION_SIZE {
+            // the extra space in the allocated pages is too small to
+            // insert it into the free list, so allocate an additional page.
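+            // (illustration, assuming 8-byte pointers so MIN_ALLOCATION_SIZE
+            // is 9: a request of PAGE_SIZE - 4 would strand 4 trailing bytes,
+            // too few to hold a free-list entry, so a second page is taken
+            // and the whole remainder stays linkable.)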
+            n_pages += 1;
+        }
+
+        n_pages
+    }
+
+    pub fn allocate(self, db: &mut Db, expected_size: u64) -> FileRange {
+        // println!("allocate({expected_size}) {{\x1b[33m");
+        // we need space to store the free list entry
+        let needed_size = expected_size.max(Self::MIN_ALLOCATION_SIZE);
+
+        // dbg!(expected_size, needed_size);
+
+        // dbg!(needed_size);
+
+        let head = self.head_ptr;
+
+        // if the first element is replaced, update the head pointer
+        let mut prevprev = FilePointer::null();
+        let mut prev = head;
+        let mut next: FilePointer = unsafe { db.read(head) };
+
+        let empty_list = next.is_null();
+
+        while !next.is_null() && !Self::can_allocate_into(needed_size, Self::size(db, next)) {
+            prevprev = prev;
+            prev = next;
+            next = unsafe { db.read(next) };
+        }
+
+        // dbg!(next, Self::size(db, next));
+
+        let start = if next.is_null() {
+            let (prev, start, prev_free) = if !empty_list {
+                let prevlen = Self::size(db, prev);
+
+                if prev + prevlen == db.end_of_file() {
+                    // println!("free block at end of file {prev:?}");
+
+                    Self::clear(db, prev);
+
+                    (prevprev, prev, prevlen)
+                } else {
+                    (prev, db.end_of_file(), 0)
+                }
+            } else {
+                (prev, db.end_of_file(), 0)
+            };
+
+            // dbg!(prev, start, prev_free);
+
+            let still_needed = if prev_free > needed_size {
+                assert!(needed_size + Self::MIN_ALLOCATION_SIZE > prev_free);
+                needed_size + Self::MIN_ALLOCATION_SIZE - prev_free
+            } else {
+                needed_size - prev_free
+            };
+
+            let n_pages = Self::needed_pages(still_needed);
+            assert_ne!(n_pages, 0);
+
+            let page_start = db.add_pages(n_pages).start();
+
+            // dbg!(n_pages, page_start);
+
+            if prev_free == 0 {
+                assert_eq!(page_start, start);
+            }
+
+            let free_space = prev_free + PAGE_SIZE * n_pages;
+
+            let extra_space = free_space - needed_size;
+            if extra_space != 0 {
+                let remainder = start + needed_size;
+                Self::set_size(db, remainder, extra_space);
+                // prev must be the current tail of the free list, and the newly
+                // allocated space, being at the end of the file, must be the
+                // last element of the free list to keep it sorted.
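+                // (appending the remainder here keeps the list sorted by
+                // file offset, which free() relies on when it scans for an
+                // insertion point.)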
+                unsafe { db.write(prev, remainder) };
+            } else {
+                unsafe { db.write(prev, FilePointer::null()) };
+            }
+
+            start
+        } else {
+            let start = next;
+
+            let nextnext = unsafe { db.read::<FilePointer>(start) };
+
+            let extra_space = Self::size(db, start) - needed_size;
+
+            // dbg!(prev, nextnext, extra_space);
+
+            Self::clear(db, start);
+
+            if extra_space != 0 {
+                let remainder = start + needed_size;
+
+                // dbg!(remainder);
+
+                Self::set_size(db, remainder, extra_space);
+
+                unsafe {
+                    db.write(prev, remainder);
+                    db.write(remainder, nextnext);
+                }
+
+                // println!("{:x?}", unsafe { db.read::<[u8; 9 + 8]>(remainder) });
+            } else {
+                unsafe { db.write(prev, nextnext) };
+            }
+
+            start
+        };
+
+        Self::clear(db, start);
+
+        let res = start.range(expected_size);
+
+        // println!("\x1b[m}} -> {res:?}");
+
+        res
+    }
+
+    pub fn free(self, db: &mut Db, mut range: FileRange) {
+        // println!("free({range:?})");
+
+        let mut size = range.len().max(Self::MIN_ALLOCATION_SIZE);
+
+        let head = self.head_ptr;
+
+        let mut prevprev = FilePointer::null();
+        let mut prev = head;
+        let mut next: FilePointer = unsafe { db.read(head) };
+
+        while !next.is_null() && next < range.start {
+            prevprev = prev;
+            prev = next;
+            next = unsafe { db.read(next) };
+        }
+
+        if range.start + size == next {
+            // we can merge with the next range
+
+            let nextlen = Self::size(db, next);
+            let nextnext = unsafe { db.read(next) };
+
+            // println!("merging with next range {:?}", next.range(nextlen));
+
+            Self::clear(db, next);
+
+            next = nextnext;
+            size += nextlen;
+        }
+
+        // we can't merge with the head pointer
+        if prev != head && prev + Self::size(db, prev) == range.start {
+            // we can merge with the previous range
+
+            let prevlen = Self::size(db, prev);
+            // println!("merging with previous range {:?}", prev.range(prevlen));
+
+            Self::clear(db, prev);
+
+            range.start = prev;
+            prev = prevprev;
+            size += prevlen;
+        }
+
+        unsafe {
+            db.write(prev, range.start);
+            db.write(range.start, next);
+            Self::set_size(db, range.start, size)
+        }
+    }
+}
+
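+// integer ceiling division: div_round_up(1, PAGE_SIZE) == 1 and
+// div_round_up(PAGE_SIZE + 1, PAGE_SIZE) == 2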
+fn div_round_up(a: u64, b: u64) -> u64 {
+    (a + b - 1) / b
+}
+
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug)]
 #[repr(C)]
 pub struct SlabListHeader {
     next: SlabListPointer,
@@ -57,7 +311,7 @@ pub struct SlabListHeader {
     size: U32,
 }
 
-#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned)]
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug)]
 #[repr(transparent)]
 pub struct SlabListPointer(pub FilePointer);
 
@@ -144,8 +398,13 @@ impl SlabListPointer {
         }
     }
-    pub fn add(self, db: &mut Db, slab_size: u32) -> SlabPointer {
+    pub fn add_slab(self, db: &mut Db, slab_size: u32) -> SlabPointer {
+        println!("add_slab({slab_size})");
+
         let this = self.read_header(db);
+
+        dbg!(&this);
+
         let capacity = this.capacity();
         let SlabListHeader {
             mut next, len, ..
         } = this;
@@ -156,7 +415,7 @@ impl SlabListPointer {
             self.set_next(db, next);
         }
 
-            return next.add(db, slab_size);
+            return next.add_slab(db, slab_size);
         }
 
         let len = len.get();
@@ -220,7 +479,7 @@ impl SlabPointer {
         let size = size.get();
 
         match SlabKind::for_size(size) {
-            SlabKind::SingleBytes => todo!(),
+            SlabKind::SingleBytes => todo!("single byte slabs"),
             SlabKind::RelativeFreeList => {
                 let (page, offset) = head.page_offset();
                 assert_eq!(offset, 0);
@@ -264,7 +523,7 @@ impl SlabPointer {
         let size = size.get();
 
         match SlabKind::for_size(size) {
-            SlabKind::SingleBytes => todo!(),
+            SlabKind::SingleBytes => todo!("single byte slabs"),
             SlabKind::RelativeFreeList => {
                 let (page, offset) = range.start.page_offset();
 
@@ -308,7 +567,7 @@ impl SlabPointer {
         let size = size.get();
 
         match SlabKind::for_size(size) {
-            SlabKind::SingleBytes => todo!(),
+            SlabKind::SingleBytes => todo!("single byte slabs"),
             SlabKind::RelativeFreeList => {
                 let page = db.add_pages(1);
 
diff --git a/src/lib.rs b/src/lib.rs
index 0950fa4..9502e40 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,9 +1,12 @@
-use std::{borrow::BorrowMut, collections::HashMap, fs::File, mem::size_of, ops::Range, sync::Arc};
+use std::{
+    borrow::BorrowMut, collections::HashMap, fmt::Debug, fs::File, mem::size_of, ops::Range,
+    sync::Arc,
+};
 
 mod allocator;
 mod atomic_arc;
 
-use allocator::{AllocatorState, FreeList, SlabListPointer, SlabPointer};
+use allocator::{AllocatorState, FreeList, GeneralPurposeAllocator, SlabListPointer, SlabPointer};
 use atomic_arc::AtomicArc;
 use memmap::{Mmap, MmapMut};
 use zerocopy::{AsBytes, FromBytes, LayoutVerified, Unaligned, LE};
@@ -14,10 +17,28 @@
 type U64 = zerocopy::byteorder::U64<LE>;
 type U32 = zerocopy::byteorder::U32<LE>;
 type U16 = zerocopy::byteorder::U16<LE>;
 
-#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Hash, PartialEq, Eq)]
 #[repr(transparent)]
 pub struct FilePointer(U64);
 
+impl PartialOrd for FilePointer {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        self.0.get().partial_cmp(&other.0.get())
+    }
+}
+
+impl Ord for FilePointer {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.0.get().cmp(&other.0.get())
+    }
+}
+
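+// Debug now prints file offsets in hex, e.g. `0x1000` for a FilePointer and
+// `0x1000..0x1010` for a FileRange, which keeps free-list dumps readable.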
+impl Debug for FilePointer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "0x{:x}", self.0.get())
+    }
+}
+
 impl FilePointer {
     fn page(self) -> PagePointer {
         PagePointer(u32::try_from(self.0.get() / PAGE_SIZE).unwrap().into())
@@ -59,13 +80,19 @@ impl PagePointer {
     }
 }
 
-#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, Debug, PartialEq, Eq)]
+#[derive(Clone, Copy, FromBytes, AsBytes, Unaligned, PartialEq, Eq)]
 #[repr(C)]
 pub struct FileRange {
     start: FilePointer,
     len: U64,
 }
 
+impl Debug for FileRange {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:?}..{:?}", self.start, self.start + self.len.get())
+    }
+}
+
 impl std::ops::Add<u64> for FilePointer {
     type Output = Self;
 
@@ -107,7 +134,7 @@ impl Default for Header {
             magic: *b"cool db format 1",
             root: FilePointer::null(),
             allocator_state: AllocatorState {
-                general: FreeList::empty(),
+                general: FilePointer::null(),
                 slabs: SlabListPointer(FilePointer::null() + size_of::<Header>() as u64),
             },
         }
     }
@@ -126,7 +153,6 @@ pub struct Reader {
 pub struct Db {
     file: File,
     map: MmapMut,
-    header: Header,
     state: Arc<AtomicArc<Snapshot>>,
 }
 
@@ -205,9 +231,19 @@ impl<'t> TransactionHandle<'t> {
 }
 
 impl Db {
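+    // the header is no longer cached in `Db` itself; these accessors read it
+    // through the mapping so it always matches what is on disk.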
+    fn header(&self) -> &Header {
+        unsafe {
+            self.reference_range_unchecked(Self::header_ptr().range(size_of::<Header>() as u64))
+        }
+    }
+
+    fn header_mut(&mut self) -> &mut Header {
+        unsafe { self.modify_range_unchecked(Self::header_ptr().range(size_of::<Header>() as u64)) }
+    }
+
     pub fn add_slab(&mut self, size: u32) -> SlabPointer {
-        let allocator_state = self.header.allocator_state;
-        allocator_state.slabs.add(self, size)
+        let allocator_state = self.header().allocator_state;
+        allocator_state.slabs.add_slab(self, size)
     }
 
     fn transaction(f: impl FnOnce(TransactionHandle)) {
@@ -219,7 +255,17 @@ impl Db {
     }
 
     fn root_ptr() -> FilePointer {
-        FilePointer(16.into())
+        Self::header_ptr() + 16
+    }
+
+    fn allocator_state_ptr() -> FilePointer {
+        Self::root_ptr() + size_of::<FilePointer>() as u64
+    }
+
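+    // (assuming Self::header_ptr() is the start of the file, this lands on
+    // the `general` field of the header's AllocatorState: 16 bytes of magic,
+    // then the 8-byte root pointer.)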
+    fn general_purpose_allocator() -> GeneralPurposeAllocator {
+        GeneralPurposeAllocator {
+            head_ptr: Self::allocator_state_ptr(),
+        }
     }
 
     pub fn create_reader(&self) -> Reader {
@@ -251,41 +297,64 @@ impl Db {
         }))
     }
 
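+    // raw typed access into the mapping; the checked variants assert that
+    // the target range is non-null, and #[track_caller] makes a failed
+    // assert report the caller of the accessor rather than the accessor.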
+    #[track_caller]
     unsafe fn read<T: FromBytes>(&self, at: FilePointer) -> T {
         self.read_range(at.range(size_of::<T>() as u64))
     }
 
+    #[track_caller]
     unsafe fn read_range<T: FromBytes>(&self, range: FileRange) -> T {
+        assert!(!range.start.is_null(), "null pointer dereference");
         LayoutVerified::<_, T>::new(&self.map[range.as_range()])
             .unwrap()
             .read()
     }
 
+    #[track_caller]
     unsafe fn write<T: AsBytes>(&mut self, at: FilePointer, data: T) {
         self.write_range(at.range(size_of::<T>() as u64), data)
     }
 
+    #[track_caller]
     unsafe fn write_range<T: AsBytes>(&mut self, range: FileRange, data: T) {
+        assert!(!range.start.is_null(), "null pointer dereference");
         LayoutVerified::<_, T>::new(&mut self.map[range.as_range()])
             .unwrap()
             .write(data)
     }
 
+    #[track_caller]
     unsafe fn modify<T: FromBytes + AsBytes>(&mut self, at: FilePointer) -> &mut T {
         self.modify_range(at.range(size_of::<T>() as u64))
     }
 
+    #[track_caller]
     unsafe fn modify_range<T: FromBytes + AsBytes>(&mut self, range: FileRange) -> &mut T {
+        assert!(!range.start.is_null(), "null pointer dereference");
+        self.modify_range_unchecked(range)
+    }
+
+    unsafe fn modify_range_unchecked<T: FromBytes + AsBytes>(
+        &mut self,
+        range: FileRange,
+    ) -> &mut T {
         LayoutVerified::<_, T>::new(&mut self.map[range.as_range()])
             .unwrap()
             .into_mut()
     }
 
-    unsafe fn reference<T: FromBytes>(&self, at: FilePointer) -> &T {
+    #[track_caller]
+    unsafe fn reference<T: FromBytes>(&self, at: FilePointer) -> &T {
         self.reference_range(at.range(size_of::<T>() as u64))
     }
 
-    unsafe fn reference_range<T: FromBytes>(&self, range: FileRange) -> &T {
+    #[track_caller]
+    unsafe fn reference_range<T: FromBytes>(&self, range: FileRange) -> &T {
+        assert!(!range.start.is_null(), "null pointer dereference");
+        self.reference_range_unchecked(range)
+    }
+
+    unsafe fn reference_range_unchecked<T: FromBytes>(&self, range: FileRange) -> &T {
         LayoutVerified::<_, T>::new(&self.map[range.as_range()])
             .unwrap()
             .into_ref()
     }
 
@@ -301,6 +370,7 @@ impl Db {
     }
 
     fn add_pages(&mut self, n: u64) -> PagePointer {
+        // println!("adding {n} page{}", if n == 1 { "" } else { "s" });
         let len = self.file.metadata().unwrap().len();
         self.file.set_len(len + PAGE_SIZE * n).unwrap();
         self.remap();
@@ -322,20 +392,17 @@ impl Db {
             }))),
             file,
             map,
-            header: Header::default(),
         };
 
         unsafe {
             if len == 0 {
+                *db.header_mut() = Header::default();
                 db.init_allocator();
-                db.write(Self::header_ptr(), db.header);
-            } else {
-                db.header = db.read(Self::header_ptr());
             }
         }
 
         let _ = db.state.swap(Arc::new(Snapshot {
-            root: db.header.root,
+            root: db.header().root,
             map: unsafe { Mmap::map(&db.file).unwrap() },
         }));
 
@@ -343,12 +410,16 @@ impl Db {
     unsafe fn init_allocator(&mut self) {
-        let allocator_state = self.header.allocator_state;
+        let allocator_state = self.header().allocator_state;
         allocator_state.slabs.init(
             self,
            (PAGE_SIZE - size_of::<Header>() as u64).try_into().unwrap(),
        );
     }
+
+    fn end_of_file(&self) -> FilePointer {
+        FilePointer::null() + self.file.metadata().unwrap().len()
+    }
 }
 
 #[cfg(test)]
@@ -356,7 +427,167 @@ mod tests {
     use std::io::Write;
     use std::process::Stdio;
 
+    use rand::{thread_rng, Rng};
+
     use super::*;
 
+    #[derive(Debug, Clone, Copy)]
+    enum Operation {
+        Allocate { size: u64 },
+        Free { index: usize },
+    }
+
+    fn causes_fragmentation(sequence: &[Operation], print: bool) -> bool {
+        let mut db = Db::new(tempfile::tempfile().unwrap());
+        let allocator = Db::general_purpose_allocator();
+
+        let mut ranges = Vec::new();
+
+        for &operation in sequence {
+            match operation {
+                Operation::Allocate { size } => ranges.push(allocator.allocate(&mut db, size)),
+                Operation::Free { index } => {
+                    if ranges.get(index).is_some() {
+                        allocator.free(&mut db, ranges.remove(index))
+                    }
+                }
+            }
+        }
+
+        for range in ranges.drain(..) {
+            allocator.free(&mut db, range);
+        }
+
+        fragmentation(&mut db, print) > 1
+    }
+
+    fn fragmentation(db: &mut Db, print: bool) -> usize {
+        let allocator = Db::general_purpose_allocator();
+
+        let mut next = unsafe { db.read::<FilePointer>(allocator.head_ptr) };
+
+        let mut n = 0;
+        while !next.is_null() {
+            let size = GeneralPurposeAllocator::size(db, next);
+            if print {
+                println!("\x1b[34m[{n}]\x1b[m {:?}", next.range(size));
+            }
+            next = unsafe { db.read::<FilePointer>(next) };
+
+            n += 1;
+        }
+
+        n
+    }
+
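+    // a fully coalesced heap collapses back into a single free-list entry
+    // once everything has been freed, so more than one entry after the
+    // final drain indicates fragmentation.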
+    #[test]
+    fn debug_fragmentation() {
+        use Operation::*;
+        #[rustfmt::skip]
+        let mut sequence = vec![Allocate { size: 1946 }, Allocate { size: 3252 }, Free { index: 0 }, Allocate { size: 7391 }, Allocate { size: 394 }, Allocate { size: 3726 }, Allocate { size: 1429 }, Allocate { size: 3188 }, Allocate { size: 6375 }, Allocate { size: 4453 }, Allocate { size: 2514 }, Allocate { size: 4754 }, Allocate { size: 6785 }, Allocate { size: 2751 }, Allocate { size: 4107 }, Allocate { size: 3509 }, Allocate { size: 5897 }, Allocate { size: 7081 }, Allocate { size: 2419 }, Allocate { size: 5400 }, Allocate { size: 7135 }, Free { index: 14 }, Allocate { size: 2130 }, Free { index: 18 }, Allocate { size: 3450 }, Allocate { size: 1296 }, Allocate { size: 8091 }, Allocate { size: 4646 }, Allocate { size: 3891 }, Free { index: 0 }, Allocate { size: 1087 }, Allocate { size: 101 }, Allocate { size: 5353 }, Allocate { size: 3381 }, Allocate { size: 6869 }, Free { index: 1 }, Allocate { size: 3750 }, Allocate { size: 1398 }, Free { index: 22 }, Allocate { size: 18 }, Free { index: 25 }, Allocate { size: 642 }, Free { index: 4 }, Allocate { size: 4 }, Allocate { size: 1898 }, Allocate { size: 5259 }, Free { index: 26 }, Allocate { size: 3151 }, Allocate { size: 4989 }, Allocate { size: 6493 }, Allocate { size: 551 }, Allocate { size: 706 }, Allocate { size: 4161 }, Free { index: 16 }, Allocate { size: 3422 }, Allocate { size: 3011 }, Allocate { size: 5149 }, Allocate { size: 4687 }, Allocate { size: 5 }, Free { index: 34 }, Allocate { size: 191 }, Allocate { size: 2851 }, Allocate { size: 3597 }, Free { index: 28 }, Allocate { size: 7037 }, Allocate { size: 4660 }, Allocate { size: 194 }, Allocate { size: 5537 }, Allocate { size: 3242 }, Allocate { size: 6298 }, Allocate { size: 1239 }, Allocate { size: 7025 }, Allocate { size: 3563 }, Allocate { size: 5039 }, Free { index: 40 }, Allocate { size: 4549 }, Allocate { size: 5362 }, Allocate { size: 3510 }, Free { index: 31 }, Allocate { size: 226 }, Allocate { size: 6904 }, Allocate { size: 4150 }, Allocate { size: 4914 }, Allocate { size: 2330 }, Allocate { size: 2499 }, Allocate { size: 6677 }, Allocate { size: 95 }, Allocate { size: 3726 }, Allocate { size: 3258 }, Free { index: 2 }, Allocate { size: 2129 }, Allocate { size: 3674 }, Allocate { size: 1542 }, Allocate { size: 2210 }, Free { index: 21 }, Allocate { size: 3914 }, Allocate { size: 3108 }, Allocate { size: 1979 }, Allocate { size: 2677 }, Allocate { size: 8140 }, Allocate { size: 7573 }, Allocate { size: 121 }, Free { index: 59 }, Allocate { size: 6467 }, Allocate { size: 262 }, Allocate { size: 7711 }, Allocate { size: 2450 }, Allocate { size: 4351 }, Allocate { size: 4282 }, Free { index: 39 }, Allocate { size: 4050 }, Allocate { size: 67 }, Allocate { size: 5560 }, Free { index: 51 }, Allocate { size: 6038 }, Allocate { size: 555 }, Allocate { size: 1852 }, Free { index: 78 }, Allocate { size: 698 }];
+
+        let mut prev_sequence = sequence.clone();
+        while causes_fragmentation(&sequence, false) {
+            prev_sequence = sequence.clone();
+            sequence.pop();
+        }
+
+        println!("{prev_sequence:?}");
+
+        let mut sequence = prev_sequence.clone();
+        loop {
+            let mut removed_something = false;
+            let mut i = 0;
+            while i < sequence.len() {
+                let mut new_sequence = sequence.clone();
+
+                new_sequence.remove(i);
+
+                if causes_fragmentation(&new_sequence, false) {
+                    removed_something = true;
+                    println!("removed {i} ({:?})", sequence[i]);
+                    sequence = new_sequence;
+                } else {
+                    for item in &mut new_sequence {
+                        if let Operation::Free { index } = item {
+                            if *index > i {
+                                *index -= 1;
+                            }
+                        }
+                    }
+
+                    if causes_fragmentation(&new_sequence, false) {
+                        removed_something = true;
+                        println!("removed {i} ({:?}) after adjusting frees", sequence[i]);
+                        sequence = new_sequence;
+                    }
+                }
+
+                i += 1;
+            }
+
+            if !removed_something {
+                break;
+            }
+        }
+
+        loop {
+            let mut merged_something = false;
+            let mut i = 0;
+            while i < sequence.len() {
+                let mut new_sequence = sequence.clone();
+
+                let removed = new_sequence.remove(i);
+
+                if let Operation::Allocate { size: removed_size } = removed {
+                    if let Some(Operation::Allocate { size }) = new_sequence.get_mut(i) {
+                        *size += removed_size;
+                    }
+                }
+
+                if causes_fragmentation(&new_sequence, false) {
+                    merged_something = true;
+                    println!(
+                        "merged {} and {} ({:?} and {:?})",
+                        i,
+                        i + 1,
+                        sequence[i],
+                        sequence[i + 1]
+                    );
+                    sequence = new_sequence;
+                } else {
+                    for item in &mut new_sequence {
+                        if let Operation::Free { index } = item {
+                            if *index > i {
+                                *index -= 1;
+                            }
+                        }
+                    }
+
+                    if causes_fragmentation(&new_sequence, false) {
+                        merged_something = true;
+                        println!(
+                            "merged {} and {} ({:?} and {:?}) after adjusting frees",
+                            i,
+                            i + 1,
+                            sequence[i],
+                            sequence[i + 1]
+                        );
+                        sequence = new_sequence;
+                    }
+                }
+
+                i += 1;
+            }
+
+            if !merged_something {
+                break;
+            }
+        }
+
+        println!("{sequence:?}");
+
+        dbg!(causes_fragmentation(&sequence, true));
+    }
 
     #[test]
     fn it_works() {
@@ -383,9 +614,9 @@ mod tests {
             }
         }
 
-        alloc_and_free_many::<1>(&mut db, 3 * PAGE_SIZE);
-        // alloc_and_free_many::<4>(&mut db, PAGE_SIZE / 4 * 3);
-        // alloc_and_free_many::<16>(&mut db, PAGE_SIZE / 16 * 3);
+        // alloc_and_free_many::<1>(&mut db, 3 * PAGE_SIZE);
+        alloc_and_free_many::<4>(&mut db, PAGE_SIZE / 4 * 3);
+        alloc_and_free_many::<16>(&mut db, PAGE_SIZE / 16 * 3);
 
         let mut child = std::process::Command::new("hexdump")
             .arg("-C")