fixed general purpose allocator

soruh 2023-08-13 18:01:18 +02:00
parent a147865048
commit ffcc848b0f
2 changed files with 51 additions and 683 deletions

View File

@@ -76,7 +76,8 @@ impl FilePointer<FreeListBlock> {
impl GeneralPurposeAllocator {
const SIZE_MASK: u8 = 0b1000_0000;
const MIN_ALLOCATION_SIZE: u64 = size_of::<FreeListBlock>() as u64 - size_of::<U64>() as u64;
pub const MIN_ALLOCATION_SIZE: u64 =
size_of::<FreeListBlock>() as u64 - size_of::<U64>() as u64;
pub fn size<R>(db: &Db<R>, head: FilePointer<FreeListBlock>) -> u64 {
// println!("get size({head:?})");
@@ -152,7 +153,9 @@ impl GeneralPurposeAllocator {
// we need space to store the free list entry
let needed_size = expected_size.max(Self::MIN_ALLOCATION_SIZE);
// while this pointet doesn't technically point to a `FreeListBlock` (it points to the head pointer)
// dbg!(needed_size);
// while this pointer doesn't technically point to a `FreeListBlock` (it points to the head pointer)
// as long as it's at most in the `prev` position it will never be allocated and only ever be
// written to to set the pointer to the first element. We use `empty_list` to make sure we never
// write into it incorrectly.
@@ -163,7 +166,10 @@ impl GeneralPurposeAllocator {
let mut prev = head;
let mut next: FilePointer<FreeListBlock> = unsafe { db.read(head.next_ptr()) };
// println!("next: {next:?} | prev: {prev:?} | prevprev: {prevprev:?}");
let empty_list = next.is_null();
// dbg!(empty_list);
while !next.is_null() && !Self::can_allocate_into(needed_size, Self::size(db, next)) {
prevprev = prev;
@@ -171,6 +177,13 @@ impl GeneralPurposeAllocator {
next = unsafe { db.read(next.next_ptr()) };
}
let prev_is_head = prevprev.is_null();
// dbg!(Self::size(db, next));
// dbg!(prev_is_head);
// println!("next: {next:?} | prev: {prev:?} | prevprev: {prevprev:?}");
// dbg!(next, Self::size(db, next));
let start = if next.is_null() {
@@ -202,7 +215,9 @@ impl GeneralPurposeAllocator {
let n_pages = Self::needed_pages(still_needed);
assert_ne!(n_pages, 0);
let page_start = db.add_pages(n_pages).start();
let page = db.add_pages(n_pages);
let page_start = page.start();
// println!("allocate_general_pages({n_pages}, size={expected_size}) -> {}..{}", page.0.get(), page.0.get() + n_pages as u32);
// dbg!(n_pages, page_start);
@@ -231,7 +246,7 @@ impl GeneralPurposeAllocator {
let extra_space = Self::size(db, start) - needed_size;
// dbg!(prev, nextnext, extra_space);
// dbg!(start, prev, nextnext, extra_space);
if extra_space != 0 {
let remainder = FilePointer::<FreeListBlock>::new(start.into_raw() + needed_size);
@@ -245,7 +260,8 @@ impl GeneralPurposeAllocator {
db.write(remainder.next_ptr(), nextnext);
}
// println!("{:x?}", unsafe { db.read::<[u8; 9 + 8]>(remainder) });
// println!("{:x?}", unsafe { db.read(prev.cast::<[u8; 8]>()) });
// println!("{:x?}", unsafe { db.read(remainder.cast::<[u8; 9 + 8]>()) });
} else {
unsafe { db.write(prev.next_ptr(), nextnext) };
}
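
The hunks above sketch the allocator's `allocate` path: a first-fit walk of an intrusive free list whose head pointer acts as the sentinel `prev` described in the comment earlier, followed by either splitting the unused tail off the chosen block or unlinking the block entirely. Below is a minimal in-memory model of that pattern; the names, the exact-fit-or-leave-a-remainder rule for `can_allocate_into`, and the minimum allocation size are illustrative assumptions, not the crate's API.

```rust
// Minimal in-memory model of a first-fit free list with a sentinel head and
// split-on-allocate. Hypothetical sketch, not the crate's on-disk structures.

#[derive(Debug, Clone, Copy)]
struct FreeBlock {
    start: u64,          // offset of the free region
    size: u64,           // size of the free region
    next: Option<usize>, // index of the next free block, like the on-disk `next` pointer
}

struct FreeList {
    blocks: Vec<FreeBlock>,
    head: Option<usize>, // plays the role of the head pointer ("sentinel" prev)
}

// Assumption: smallest block that can still hold a free-list link.
const MIN_ALLOCATION_SIZE: u64 = 9;

impl FreeList {
    /// Assumed rule: a block is usable if it fits exactly or leaves a
    /// remainder large enough to stay on the free list.
    fn can_allocate_into(needed: u64, available: u64) -> bool {
        available == needed || available >= needed + MIN_ALLOCATION_SIZE
    }

    fn allocate(&mut self, expected_size: u64) -> Option<(u64, u64)> {
        let needed = expected_size.max(MIN_ALLOCATION_SIZE);

        // First-fit walk, remembering the link we came through: either the
        // head (the sentinel case) or the `next` field of the previous block.
        let mut prev: Option<usize> = None; // None = the head itself
        let mut cur = self.head;
        while let Some(i) = cur {
            if Self::can_allocate_into(needed, self.blocks[i].size) {
                break;
            }
            prev = Some(i);
            cur = self.blocks[i].next;
        }
        let i = cur?; // nothing fits; the real allocator grows the file instead

        let FreeBlock { start, size, next } = self.blocks[i];
        let extra = size - needed;
        let replacement = if extra != 0 {
            // Split: the unused tail becomes a new free-list entry.
            self.blocks.push(FreeBlock { start: start + needed, size: extra, next });
            Some(self.blocks.len() - 1)
        } else {
            // Exact fit: unlink the block.
            next
        };
        match prev {
            None => self.head = replacement, // writing "into the head" is always safe
            Some(p) => self.blocks[p].next = replacement,
        }
        Some((start, expected_size))
    }
}

fn main() {
    let mut list = FreeList {
        blocks: vec![FreeBlock { start: 4096, size: 64, next: None }],
        head: Some(0),
    };
    assert_eq!(list.allocate(16), Some((4096, 16))); // splits off a 48-byte remainder
    assert_eq!(list.blocks[list.head.unwrap()].size, 48);
}
```

Treating the head pointer as if it were a block's `next` field is what makes the `empty_list` and `prev_is_head` special cases in the real code safe: the head is only ever written through its `next` slot, never allocated from.
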
@@ -253,9 +269,7 @@ impl GeneralPurposeAllocator {
start
};
let start = Self::clear(db, start);
start.range(expected_size)
start.into_raw().range(expected_size)
}
pub fn free<R>(self, db: &mut Db<R>, range: FileRange) {
@@ -420,7 +434,7 @@ impl SlabListPointer {
}
pub fn add_slab<R>(self, db: &mut Db<R>, slab_size: u32) -> SlabPointer {
println!("add_slab({slab_size})");
// println!("add_slab({slab_size})");
let this = self.read_header(db);
@@ -469,7 +483,7 @@ pub struct RelativeFreeListHeader {
impl RelativeFreeListHeader {
const DATA_SIZE: u32 = PAGE_SIZE as u32 - size_of::<Self>() as u32;
fn capacity(size: u32) -> u32 {
pub fn capacity(size: u32) -> u32 {
debug_assert_eq!(SlabKind::for_size(size), SlabKind::RelativeFreeList);
Self::DATA_SIZE / size
}
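
`capacity` is just the per-page payload divided by the slot size. A self-contained illustration with assumed numbers (4 KiB pages and a 6-byte `RelativeFreeListHeader`; the crate's real `PAGE_SIZE` and header layout may differ):

```rust
// Illustrative numbers only: PAGE_SIZE and the header size are assumptions here.
const PAGE_SIZE: u32 = 4096;
const HEADER_SIZE: u32 = 6;
const DATA_SIZE: u32 = PAGE_SIZE - HEADER_SIZE;

fn capacity(size: u32) -> u32 {
    DATA_SIZE / size // integer division: any tail smaller than `size` is wasted
}

fn main() {
    assert_eq!(capacity(32), 127); // 4090 / 32
    assert_eq!(capacity(64), 63);  // 4090 / 64, 58 bytes of tail go unused
}
```
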
@@ -518,7 +532,7 @@ impl SlabPointer {
first.get(),
));
let next: U16 = unsafe { db.read(ptr) };
let next = unsafe { db.read(ptr) };
let header = unsafe { db.modify::<RelativeFreeListHeader>(start) };
@@ -593,22 +607,21 @@ impl SlabPointer {
pub fn allocate_page<R>(&self, db: &mut Db<R>) -> RawFilePointer {
let Slab { head, size } = self.read(db);
let page = db.add_pages(1);
println!("allocate_slab_page({size})");
// println!("allocate_slab_page({size}) -> {}", page.0.get());
let size = size.get();
match SlabKind::for_size(size) {
SlabKind::SingleBytes => todo!("single byte slabs"),
SlabKind::RelativeFreeList => {
let page = db.add_pages(1);
let (next_page, offset) = head.page_offset();
assert_eq!(offset, 0);
let capacity = RelativeFreeListHeader::capacity(size);
let data_offset = size_of::<Self>() as u16;
let data_offset = size_of::<RelativeFreeListHeader>() as u16;
unsafe {
db.write(
@@ -620,18 +633,20 @@ impl SlabPointer {
)
};
let mut offset = 0;
let mut prev = 0;
for i in (0..capacity).rev() {
let next = data_offset + (i * size) as u16;
let next = data_offset
.checked_add(u16::try_from(i * size).unwrap())
.unwrap();
unsafe {
db.write(
FilePointer::new(RawFilePointer::from_page_and_offset(page, next)),
U16::from(offset),
U16::from(prev),
)
};
offset = next;
prev = next;
}
self.set_head(db, page.start());
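
The rewritten loop threads the free list through the fresh page back to front: each slot's first two bytes hold the page-relative offset of the slot written in the previous iteration, so slot 0 ends up pointing at slot 1, slot 1 at slot 2, and the last slot holds 0 as the end marker, while the commit also makes the offset arithmetic checked. A standalone sketch of that construction over a plain byte buffer (page size, header size, and little-endian `u16` offsets are assumptions here):

```rust
// Sketch: initialize a page-relative free list in a byte buffer.
// PAGE_SIZE, the header size, and the slot layout are assumptions.
const PAGE_SIZE: usize = 4096;

fn init_relative_free_list(page: &mut [u8; PAGE_SIZE], slot_size: u16, header_size: u16) -> u16 {
    let data_offset = header_size;
    let capacity = (PAGE_SIZE as u16 - header_size) / slot_size;

    // Walk the slots back to front; each slot's first two bytes hold the
    // offset of the slot written in the previous iteration (0 = end of list).
    let mut prev: u16 = 0;
    for i in (0..capacity).rev() {
        let next = data_offset + i * slot_size;
        page[next as usize..next as usize + 2].copy_from_slice(&prev.to_le_bytes());
        prev = next;
    }
    prev // offset of the first free slot, to be stored in the page header
}

fn main() {
    let mut page = [0u8; PAGE_SIZE];
    let first = init_relative_free_list(&mut page, 32, 6);
    assert_eq!(first, 6); // head = first slot, right after the header
    let second = u16::from_le_bytes([page[6], page[7]]);
    assert_eq!(second, 38); // slot 0 links to slot 1 at offset 6 + 32
}
```
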
@@ -641,8 +656,6 @@ impl SlabPointer {
SlabKind::AbsoluteFreeList => {
let n = PAGE_SIZE / size as u64;
let page = db.add_pages(1);
let mut next = head;
for i in (0..n).rev() {
let current = page.start() + i * size as u64;

View File

@@ -31,6 +31,9 @@ mod atomic_arc;
mod mapped;
mod transaction;
#[cfg(test)]
mod tests;
use allocator::{AllocatorState, GeneralPurposeAllocator, SlabListPointer, SlabPointer};
use atomic_arc::AtomicArc;
use memmap::{Mmap, MmapMut};
@@ -167,7 +170,7 @@ impl RawFilePointer {
fn from_page_and_offset(page: PagePointer, offset: u16) -> Self {
debug_assert!(
offset < PAGE_SIZE as u16,
"offset 0x{offset:x} out for page bounds (0..0x{PAGE_SIZE:x})"
"offset 0x{offset:x} out of page bounds (0..0x{PAGE_SIZE:x})"
);
page.start() + offset as u64
}
@@ -183,6 +186,18 @@ impl RawFilePointer {
#[repr(transparent)]
pub struct PagePointer(U32);
impl PartialOrd for PagePointer {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.0.get().partial_cmp(&other.0.get())
}
}
impl Ord for PagePointer {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.0.get().cmp(&other.0.get())
}
}
impl PagePointer {
fn start(self) -> RawFilePointer {
RawFilePointer((self.0.get() as u64 * PAGE_SIZE).into())
@@ -434,7 +449,7 @@ impl<R> Db<R> {
#[track_caller]
unsafe fn copy_nonoverlapping(&mut self, from: FileRange, to: FileRange) {
let len = from.len();
println!("copy from {from:?} to {to:?} ({len})",);
// println!("copy from {from:?} to {to:?} ({len})",);
// intervals must be non-overlapping and of the same size
assert!(!from.as_range().contains(&(to.start().0.get() as usize)));
@@ -621,7 +636,6 @@ impl<R> Db<R> {
// TODO: scrap the PAGE-wise allocation and make slab allocations allocations of the general allocator.
pub fn allocate(&mut self, size: u64) -> FileRange {
// println!("allocate({size})");
if size == 0 {
return RawFilePointer::null().range(0);
}
@@ -643,662 +657,3 @@ impl<R> Db<R> {
}
}
}
#[cfg(test)]
mod tests {
use crate::allocator::{div_round_up, RelativeFreeListHeader, SlabKind};
use super::*;
use mapped::ReaderTrait;
use std::io::Write;
use std::ops::Shl;
use std::process::Stdio;
#[derive(Debug, Clone, Copy)]
enum Operation {
Allocate { size: u64 },
Free { index: usize },
}
fn causes_fragmentation(sequence: &[Operation], print: bool) -> bool {
let mut db = Db::<()>::create(tempfile::tempfile().unwrap(), &[]);
let allocator = Db::<()>::general_purpose_allocator();
let mut ranges = Vec::new();
for &operation in sequence {
match operation {
Operation::Allocate { size } => ranges.push(allocator.allocate(&mut db, size)),
Operation::Free { index } => {
if ranges.get(index).is_some() {
allocator.free(&mut db, ranges.remove(index))
}
}
}
}
for range in ranges.drain(..) {
allocator.free(&mut db, range);
}
fragmentation(&mut db, print) > 1
}
fn fragmentation<R>(db: &mut Db<R>, print: bool) -> usize {
let allocator = Db::<()>::general_purpose_allocator();
let mut next = unsafe { db.read(allocator.head_ptr) };
let mut n = 0;
while !next.is_null() {
let size = GeneralPurposeAllocator::size(db, next);
if print {
println!("\x1b[34m[{n}]\x1b[m {:?}", next.into_raw().range(size));
}
next = unsafe { db.read(next.next_ptr()) };
n += 1;
}
n
}
#[test]
fn debug_fragmentation() {
use Operation::*;
#[rustfmt::skip]
let mut sequence = vec![Allocate { size: 1946 }, Allocate { size: 3252 }, Free { index: 0 }, Allocate { size: 7391 }, Allocate { size: 394 }, Allocate { size: 3726 }, Allocate { size: 1429 }, Allocate { size: 3188 }, Allocate { size: 6375 }, Allocate { size: 4453 }, Allocate { size: 2514 }, Allocate { size: 4754 }, Allocate { size: 6785 }, Allocate { size: 2751 }, Allocate { size: 4107 }, Allocate { size: 3509 }, Allocate { size: 5897 }, Allocate { size: 7081 }, Allocate { size: 2419 }, Allocate { size: 5400 }, Allocate { size: 7135 }, Free { index: 14 }, Allocate { size: 2130 }, Free { index: 18 }, Allocate { size: 3450 }, Allocate { size: 1296 }, Allocate { size: 8091 }, Allocate { size: 4646 }, Allocate { size: 3891 }, Free { index: 0 }, Allocate { size: 1087 }, Allocate { size: 101 }, Allocate { size: 5353 }, Allocate { size: 3381 }, Allocate { size: 6869 }, Free { index: 1 }, Allocate { size: 3750 }, Allocate { size: 1398 }, Free { index: 22 }, Allocate { size: 18 }, Free { index: 25 }, Allocate { size: 642 }, Free { index: 4 }, Allocate { size: 4 }, Allocate { size: 1898 }, Allocate { size: 5259 }, Free { index: 26 }, Allocate { size: 3151 }, Allocate { size: 4989 }, Allocate { size: 6493 }, Allocate { size: 551 }, Allocate { size: 706 }, Allocate { size: 4161 }, Free { index: 16 }, Allocate { size: 3422 }, Allocate { size: 3011 }, Allocate { size: 5149 }, Allocate { size: 4687 }, Allocate { size: 5 }, Free { index: 34 }, Allocate { size: 191 }, Allocate { size: 2851 }, Allocate { size: 3597 }, Free { index: 28 }, Allocate { size: 7037 }, Allocate { size: 4660 }, Allocate { size: 194 }, Allocate { size: 5537 }, Allocate { size: 3242 }, Allocate { size: 6298 }, Allocate { size: 1239 }, Allocate { size: 7025 }, Allocate { size: 3563 }, Allocate { size: 5039 }, Free { index: 40 }, Allocate { size: 4549 }, Allocate { size: 5362 }, Allocate { size: 3510 }, Free { index: 31 }, Allocate { size: 226 }, Allocate { size: 6904 }, Allocate { size: 4150 }, Allocate { size: 4914 }, Allocate { size: 2330 }, Allocate { size: 2499 }, Allocate { size: 6677 }, Allocate { size: 95 }, Allocate { size: 3726 }, Allocate { size: 3258 }, Free { index: 2 }, Allocate { size: 2129 }, Allocate { size: 3674 }, Allocate { size: 1542 }, Allocate { size: 2210 }, Free { index: 21 }, Allocate { size: 3914 }, Allocate { size: 3108 }, Allocate { size: 1979 }, Allocate { size: 2677 }, Allocate { size: 8140 }, Allocate { size: 7573 }, Allocate { size: 121 }, Free { index: 59 }, Allocate { size: 6467 }, Allocate { size: 262 }, Allocate { size: 7711 }, Allocate { size: 2450 }, Allocate { size: 4351 }, Allocate { size: 4282 }, Free { index: 39 }, Allocate { size: 4050 }, Allocate { size: 67 }, Allocate { size: 5560 }, Free { index: 51 }, Allocate { size: 6038 }, Allocate { size: 555 }, Allocate { size: 1852 }, Free { index: 78 }, Allocate { size: 698 }];
let mut prev_sequence = sequence.clone();
while causes_fragmentation(&sequence, false) {
prev_sequence = sequence.clone();
sequence.pop();
}
println!("{prev_sequence:?}");
let mut sequence = prev_sequence.clone();
loop {
let mut removed_something = false;
let mut i = 0;
while i < sequence.len() {
let mut new_sequence = sequence.clone();
new_sequence.remove(i);
if causes_fragmentation(&new_sequence, false) {
removed_something = true;
println!("removed {i} ({:?})", sequence[i]);
sequence = new_sequence;
} else {
for item in &mut new_sequence {
if let Operation::Free { index } = item {
if *index > i {
*index -= 1;
}
}
}
if causes_fragmentation(&new_sequence, false) {
removed_something = true;
println!("removed {i} ({:?}) after adjusting frees", sequence[i]);
sequence = new_sequence;
}
}
i += 1;
}
if !removed_something {
break;
}
}
loop {
let mut merged_something = false;
let mut i = 0;
while i < sequence.len() {
let mut new_sequence = sequence.clone();
let removed = new_sequence.remove(i);
if let Operation::Allocate { size: removed_size } = removed {
if let Some(Operation::Allocate { size }) = new_sequence.get_mut(i) {
*size += removed_size;
}
}
if causes_fragmentation(&new_sequence, false) {
merged_something = true;
println!(
"merged {} and {} ({:?} and {:?})",
i,
i + 1,
sequence[i],
sequence[i + 1]
);
sequence = new_sequence;
} else {
for item in &mut new_sequence {
if let Operation::Free { index } = item {
if *index > i {
*index -= 1;
}
}
}
if causes_fragmentation(&new_sequence, false) {
merged_something = true;
println!(
"merged {} and {} ({:?} and {:?}) after adjusting frees",
i,
i + 1,
sequence[i],
sequence[i + 1]
);
sequence = new_sequence;
}
}
i += 1;
}
if !merged_something {
break;
}
}
println!("{sequence:?}");
dbg!(causes_fragmentation(&sequence, true));
}
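
The removal and merge passes in the test above amount to a hand-rolled test-case minimizer: keep applying single-step mutations to the operation sequence as long as fragmentation still reproduces, and stop once no mutation helps. The same idea as a small generic helper (hypothetical, and simpler than the test's version, which also re-indexes the `Free` operations):

```rust
// Greedy shrinker: repeatedly try single-element removals, keeping any
// candidate for which the failure predicate still holds.
fn shrink<T: Clone>(mut input: Vec<T>, still_fails: impl Fn(&[T]) -> bool) -> Vec<T> {
    loop {
        let mut changed = false;
        let mut i = 0;
        while i < input.len() {
            let mut candidate = input.clone();
            candidate.remove(i);
            if still_fails(candidate.as_slice()) {
                input = candidate; // keep the smaller reproducer, retry the same index
                changed = true;
            } else {
                i += 1;
            }
        }
        if !changed {
            return input; // no single removal reproduces the failure any more
        }
    }
}

fn main() {
    // Toy property: the sequence "fails" while it still sums to at least 10.
    let minimal = shrink(vec![1, 7, 2, 5, 3], |xs| xs.iter().sum::<i32>() >= 10);
    assert!(minimal.iter().sum::<i32>() >= 10);
    assert!(minimal.len() <= 3);
}
```
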
#[test]
fn allocator() {
let mut db = Db::<()>::create(tempfile::tempfile().unwrap(), &[4, 7, 16]);
let mut ranges = Vec::new();
for i in 0..10000 {
ranges.push(db.allocate(i % 32));
}
let n = ranges.len();
for range in ranges.drain(n / 4..n * 3 / 4) {
db.free(range);
}
for i in 0..10000 {
ranges.push(db.allocate((4 * i) % 32));
}
for range in ranges.drain(..) {
db.free(range);
}
// hexdump(db.map.as_bytes());
}
#[test]
fn transactions_work() {
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
#[repr(C)]
struct DataHeader {
generation: U64,
list: FilePointer<DataList>,
}
#[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
#[repr(C)]
struct DataList {
next: FilePointer<DataList>,
data: U64,
}
let mut db = Db::<DataHeader>::create(
tempfile::tempfile().unwrap(),
&[size_of::<DataHeader>() as u32, size_of::<DataList>() as u32],
);
let mut snapshots = VecDeque::new();
for i in 0..20 {
db.transaction(|transaction| {
let root = transaction.root();
let root = if !root.is_null() {
root
} else {
let (root, data) = transaction.allocate::<DataHeader>();
*data = DataHeader {
generation: 0.into(),
list: FilePointer::null(),
};
root
};
let &data = transaction.read(root);
assert_eq!(data.generation.get(), i);
let n = {
let mut next = data.list;
let mut n = 0;
while !next.is_null() {
next = transaction.read(next).next;
n += 1;
}
n
};
let next = if n >= 5 {
let next = transaction.read(data.list).next;
transaction.free(data.list);
next
} else {
data.list
};
let (elem_ptr, element) = transaction.allocate::<DataList>();
element.next = next;
element.data = i.into();
let (root, data) = transaction.modify(root);
data.list = elem_ptr;
data.generation = (i + 1).into();
root
});
snapshots.push_back(db.create_reader().state.get());
if snapshots.len() > 10 {
drop(snapshots.pop_front());
db.free_old_epochs()
}
validate_db(&db, |snaphot, coverage| {
coverage.set_allocated(snaphot.root.range());
let data = snaphot.read(snaphot.root);
let mut next = data.list;
while !next.is_null() {
coverage.set_allocated(next.range());
next = snaphot.read(next).next;
}
});
}
// TODO: allocate some variably sized strings
for (i, snapshot) in snapshots.iter().enumerate() {
let root = snapshot.read(snapshot.root);
assert_eq!(root.generation.get(), 1 + 10 + i as u64);
let mut items = Vec::new();
let mut ptr = root.list;
while !ptr.is_null() {
let element = snapshot.read(ptr);
items.push(element.data.get());
ptr = element.next;
}
assert_eq!(items.len(), 5);
assert_eq!(items[0], 10 + i as u64);
for (expected, &is) in items.iter().skip(1).rev().enumerate() {
assert_eq!(expected as u64, is);
}
}
drop(snapshots);
// hexdump(db.map.as_bytes());
db.free_old_epochs();
// hexdump(db.map.as_bytes());
}
#[repr(u8)]
#[derive(Clone, Copy)]
#[rustfmt::skip]
enum CoverageKind {
Unaccounted = 0b000,
Allocated = 0b001,
Free = 0b010,
Retired = 0b011,
SlabMetadata = 0b100,
FileMetadata = 0b101,
}
impl CoverageKind {
fn color(self) -> &'static str {
match self {
CoverageKind::Unaccounted => "31",
CoverageKind::Allocated => "32",
CoverageKind::Free => "34",
CoverageKind::Retired => "36",
CoverageKind::SlabMetadata => "35",
CoverageKind::FileMetadata => "93",
}
}
}
impl CoverageKind {
#[rustfmt::skip]
fn from_bits(a: bool, b: bool, c: bool) -> Self {
let res = match (a, b, c) {
(false, false, false) => Self::Unaccounted,
(false, false, true) => Self::Allocated,
(false, true, false) => Self::Free,
(false, true, true) => Self::Retired,
( true, false, false) => Self::SlabMetadata,
( true, false, true) => Self::FileMetadata,
_ => panic!(),
};
assert_eq!(res as u8, ((a as u8) << 2) + ((b as u8) << 1) + c as u8);
res
}
fn to_bits(self) -> (bool, bool, bool) {
(
self as u8 & 0b100 != 0,
self as u8 & 0b010 != 0,
self as u8 & 0b001 != 0,
)
}
}
struct CoverageMap {
data_0: Vec<u8>,
data_1: Vec<u8>,
data_2: Vec<u8>,
empty_bits: u8,
}
impl CoverageMap {
fn new(len: usize) -> Self {
let bits = div_round_up(len as u64, 8) as usize;
Self {
data_0: vec![0; bits],
data_1: vec![0; bits],
data_2: vec![0; bits],
empty_bits: (8 - len % 8) as u8,
}
}
#[must_use]
fn set(&mut self, i: usize, kind: CoverageKind) -> bool {
let i_byte = i / 8;
let i_bit = i % 8;
let mask = 1 << i_bit;
let (set_0, set_1, set_2) = kind.to_bits();
if i_byte >= self.data_0.len() {
return false;
}
let byte = self.data_0[i_byte] | self.data_1[i_byte] | self.data_2[i_byte];
if byte & mask != 0 {
return false;
}
self.data_0[i_byte] |= mask * set_0 as u8;
self.data_1[i_byte] |= mask * set_1 as u8;
self.data_2[i_byte] |= mask * set_2 as u8;
true
}
#[must_use]
fn try_set_range(&mut self, range: FileRange, kind: CoverageKind) -> bool {
range.as_range().all(|i| self.set(i, kind))
}
fn set_allocated(&mut self, range: FileRange) {
self.set_range(range, CoverageKind::Allocated);
}
fn set_range(&mut self, range: FileRange, kind: CoverageKind) {
assert!(
self.try_set_range(range, kind),
"possible allocator corruption"
)
}
fn all_covered(&self) -> bool {
let len = self.data_0.len();
for (i, ((&byte_0, &byte_1), &byte_2)) in self
.data_0
.iter()
.zip(self.data_1.iter())
.zip(self.data_2.iter())
.enumerate()
{
let byte = byte_0 | byte_1 | byte_2;
if i == len - 1 {
if byte != u8::MAX.overflowing_shl(self.empty_bits as u32).0 {
return false;
}
} else if byte != u8::MAX {
return false;
}
}
true
}
fn set_color(res: &mut String, color: &str) {
res.push_str("\x1b[");
res.push_str(color);
res.push('m');
}
fn print(&self) -> String {
let mut res = String::new();
let mut prev = "";
for (i, ((&byte_0, &byte_1), &byte_2)) in self
.data_0
.iter()
.zip(self.data_1.iter())
.zip(self.data_2.iter())
.enumerate()
{
let byte = byte_0 | byte_1 | byte_2;
fn all_equal(bits: u8) -> bool {
bits == 0 || bits == u8::MAX
}
let kind = if all_equal(byte_0) && all_equal(byte_1) && all_equal(byte_2) {
Some(CoverageKind::from_bits(
byte_0 & 1 == 1,
byte_1 & 1 == 1,
byte_2 & 1 == 1,
))
} else {
None
};
if i != 0 {
if i as u64 % (PAGE_SIZE / 8 / 8) == 0 {
Self::set_color(&mut res, "");
res.push('\n');
Self::set_color(&mut res, prev);
}
if i as u64 % (PAGE_SIZE / 8) == 0 {
Self::set_color(&mut res, "");
prev = "";
res.push_str(&"-".repeat((PAGE_SIZE / 8 / 8) as usize));
res.push('\n');
}
}
let color = kind.map(CoverageKind::color).unwrap_or("33");
if color != prev {
Self::set_color(&mut res, color);
}
prev = color;
res.push(char::from_u32(0x2800 + byte as u32).unwrap());
}
Self::set_color(&mut res, "");
res.push('\n');
res
}
fn assert_covered(&self) {
if !self.all_covered() {
panic!("Space in the file was lost\n{}", self.print());
}
}
}
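
`print` relies on the Unicode braille block starting at U+2800 and encoding one dot per bit, so every byte value maps to exactly one of the 256 braille characters. A tiny standalone illustration of that trick:

```rust
// Render a bitmap one byte per character using the braille block (U+2800..U+28FF).
// Each of the 256 dot patterns corresponds directly to one byte value.
fn braille(bytes: &[u8]) -> String {
    bytes
        .iter()
        .map(|&b| char::from_u32(0x2800 + b as u32).expect("always within the braille block"))
        .collect()
}

fn main() {
    // 0x00 = blank cell, 0xFF = all eight dots set.
    println!("{}", braille(&[0x00, 0x0F, 0xF0, 0xFF]));
}
```
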
#[test]
fn coverage_map_works() {
let mut coverage = CoverageMap::new(40);
assert!(!coverage.all_covered());
assert!(
coverage.try_set_range(RawFilePointer::null().range(20), CoverageKind::FileMetadata)
);
assert!(!coverage.all_covered());
assert!(coverage.try_set_range((RawFilePointer::null() + 20).range(20), CoverageKind::Free));
assert!(coverage.all_covered());
assert!(!coverage.try_set_range(
(RawFilePointer::null() + 40).range(8),
CoverageKind::Allocated
));
assert!(!coverage.try_set_range(
(RawFilePointer::null() + 50).range(10),
CoverageKind::Allocated
));
}
fn validate_db<R>(db: &Db<R>, f: impl FnOnce(&Snapshot<R>, &mut CoverageMap)) {
let mut coverage = CoverageMap::new(db.map.len());
let snapshot = &*db.state.get();
coverage.set_range(Db::<R>::header_ptr().range(), CoverageKind::FileMetadata);
// general purpose
{
let head = Db::<R>::header_ptr().allocator_state_ptr().general_ptr();
let mut next = *snapshot.read(head);
while !next.is_null() {
let size = GeneralPurposeAllocator::size(db, next);
coverage.set_range(next.into_raw().range(size), CoverageKind::Free);
next = *snapshot.read(next.next_ptr());
}
}
// slabs
{
let slabs = *snapshot.read(Db::<R>::header_ptr().allocator_state_ptr().slabs_ptr());
let mut next = Some(slabs);
while let Some(slabs) = next {
coverage.set_range(
slabs
.0
.into_raw()
.range(slabs.read_header(db).size() as u64),
CoverageKind::SlabMetadata,
);
next = slabs.next(db);
for slab in slabs.iter(db) {
let size = slab.size(db);
let head = slab.head(db);
match SlabKind::for_size(size) {
SlabKind::SingleBytes => todo!(),
SlabKind::RelativeFreeList => {
let (mut page, offset) = head.page_offset();
while !page.is_null() {
let header =
FilePointer::<RelativeFreeListHeader>::new(page.start());
coverage.set_range(header.range(), CoverageKind::SlabMetadata);
let header = snapshot.read(header);
page = header.next_page;
let mut next = header.first.get();
while next != 0 {
let next_ptr = FilePointer::<U16>::new(
RawFilePointer::from_page_and_offset(page, next),
);
coverage.set_range(
next_ptr.into_raw().range(size as u64),
CoverageKind::Free,
);
next = snapshot.read(next_ptr).get();
}
}
todo!();
}
SlabKind::AbsoluteFreeList => {
let mut next = head;
while !next.is_null() {
let next_ptr = FilePointer::<RawFilePointer>::new(next);
coverage.set_range(
next_ptr.into_raw().range(size as u64),
CoverageKind::Free,
);
next = *snapshot.read(next_ptr);
}
}
}
}
}
}
// retired objects
{
for SnapshotAndFreeList { to_free, .. } in &db.snapshots {
for &range in to_free {
coverage.set_range(range, CoverageKind::Retired);
}
}
}
f(snapshot, &mut coverage);
if !coverage.all_covered() {
print!("{}", coverage.print());
panic!("space in the file was lost...");
}
}
fn hexdump(bytes: &[u8]) {
let mut child = std::process::Command::new("hexdump")
.arg("-C")
.stdin(Stdio::piped())
.stdout(Stdio::inherit())
.spawn()
.unwrap();
let mut stdin = child.stdin.take().expect("failed to get stdin");
stdin.write_all(bytes).unwrap();
std::mem::drop(stdin);
child.wait().unwrap();
}
}