use crate::allocator::{div_round_up, FreeListBlock, RelativeFreeListHeader, SlabKind};
use crate::datastructures::queue::{self, Queue, QueueElement};

use super::*;
use mapped::ReaderTrait;
use rand::Rng;
use std::collections::BTreeSet;
use std::io::Write;
use std::ops::Shl;
use std::process::Stdio;

#[derive(Debug, Clone, Copy)]
enum Operation {
    Allocate { size: u64 },
    Free { index: usize },
}

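/// Replays `sequence` against a fresh database, frees every range that is still
/// live, and reports whether the general-purpose free list is left with more
/// than one block, i.e. whether the freed space failed to coalesce.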
fn causes_fragmentation(sequence: &[Operation], print: bool) -> bool {
    let mut db = Db::<()>::create(tempfile::tempfile().unwrap(), &[]);
    let allocator = Db::<()>::general_purpose_allocator();

    let mut ranges = Vec::new();

    for &operation in sequence {
        match operation {
            Operation::Allocate { size } => ranges.push(allocator.allocate(&mut db, size)),
            Operation::Free { index } => {
                if ranges.get(index).is_some() {
                    allocator.free(&mut db, ranges.remove(index))
                }
            }
        }

        // println!("{operation:?}");
        // validate_db(&db, |_snapshot, coverage| { for &range in &ranges { coverage.set_allocated(range); }});
    }

    for range in ranges.drain(..) {
        allocator.free(&mut db, range);
    }

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    fragmentation(&mut db, print) > 1
}

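/// Counts the blocks on the general-purpose allocator's free list, optionally
/// printing the range of each block.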
fn fragmentation<R>(db: &mut Db<R>, print: bool) -> usize {
    let allocator = Db::<()>::general_purpose_allocator();

    let mut next = unsafe { db.read(allocator.head_ptr) };

    let mut n = 0;
    while !next.is_null() {
        let size = GeneralPurposeAllocator::size(db, next);
        if print {
            println!("\x1b[34m[{n}]\x1b[m {:?}", next.into_raw().range(size));
        }
        next = unsafe { db.read(next.next_ptr()) };

        n += 1;
    }

    n
}

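/// Minimizes a fragmentation-causing operation sequence: first truncates it,
/// then repeatedly drops or merges operations (adjusting `Free` indices as
/// needed) while the remaining sequence still causes fragmentation.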
#[test]
#[ignore = "only used to brute force sequences that break the allocator"]
fn debug_fragmentation() {
    use Operation::*;
    #[rustfmt::skip]
let mut sequence = vec![Allocate { size: 1946 }, Allocate { size: 3252 }, Free { index: 0 }, Allocate { size: 7391 }, Allocate { size: 394 }, Allocate { size: 3726 }, Allocate { size: 1429 }, Allocate { size: 3188 }, Allocate { size: 6375 }, Allocate { size: 4453 }, Allocate { size: 2514 }, Allocate { size: 4754 }, Allocate { size: 6785 }, Allocate { size: 2751 }, Allocate { size: 4107 }, Allocate { size: 3509 }, Allocate { size: 5897 }, Allocate { size: 7081 }, Allocate { size: 2419 }, Allocate { size: 5400 }, Allocate { size: 7135 }, Free { index: 14 }, Allocate { size: 2130 }, Free { index: 18 }, Allocate { size: 3450 }, Allocate { size: 1296 }, Allocate { size: 8091 }, Allocate { size: 4646 }, Allocate { size: 3891 }, Free { index: 0 }, Allocate { size: 1087 }, Allocate { size: 101 }, Allocate { size: 5353 }, Allocate { size: 3381 }, Allocate { size: 6869 }, Free { index: 1 }, Allocate { size: 3750 }, Allocate { size: 1398 }, Free { index: 22 }, Allocate { size: 18 }, Free { index: 25 }, Allocate { size: 642 }, Free { index: 4 }, Allocate { size: 4 }, Allocate { size: 1898 }, Allocate { size: 5259 }, Free { index: 26 }, Allocate { size: 3151 }, Allocate { size: 4989 }, Allocate { size: 6493 }, Allocate { size: 551 }, Allocate { size: 706 }, Allocate { size: 4161 }, Free { index: 16 }, Allocate { size: 3422 }, Allocate { size: 3011 }, Allocate { size: 5149 }, Allocate { size: 4687 }, Allocate { size: 5 }, Free { index: 34 }, Allocate { size: 191 }, Allocate { size: 2851 }, Allocate { size: 3597 }, Free { index: 28 }, Allocate { size: 7037 }, Allocate { size: 4660 }, Allocate { size: 194 }, Allocate { size: 5537 }, Allocate { size: 3242 }, Allocate { size: 6298 }, Allocate { size: 1239 }, Allocate { size: 7025 }, Allocate { size: 3563 }, Allocate { size: 5039 }, Free { index: 40 }, Allocate { size: 4549 }, Allocate { size: 5362 }, Allocate { size: 3510 }, Free { index: 31 }, Allocate { size: 226 }, Allocate { size: 6904 }, Allocate { size: 4150 }, Allocate { size: 4914 }, Allocate { size: 2330 }, Allocate { size: 2499 }, Allocate { size: 6677 }, Allocate { size: 95 }, Allocate { size: 3726 }, Allocate { size: 3258 }, Free { index: 2 }, Allocate { size: 2129 }, Allocate { size: 3674 }, Allocate { size: 1542 }, Allocate { size: 2210 }, Free { index: 21 }, Allocate { size: 3914 }, Allocate { size: 3108 }, Allocate { size: 1979 }, Allocate { size: 2677 }, Allocate { size: 8140 }, Allocate { size: 7573 }, Allocate { size: 121 }, Free { index: 59 }, Allocate { size: 6467 }, Allocate { size: 262 }, Allocate { size: 7711 }, Allocate { size: 2450 }, Allocate { size: 4351 }, Allocate { size: 4282 }, Free { index: 39 }, Allocate { size: 4050 }, Allocate { size: 67 }, Allocate { size: 5560 }, Free { index: 51 }, Allocate { size: 6038 }, Allocate { size: 555 }, Allocate { size: 1852 }, Free { index: 78 }, Allocate { size: 698 }];

    let mut prev_sequence = sequence.clone();
    while causes_fragmentation(&sequence, false) {
        prev_sequence = sequence.clone();
        sequence.pop();
    }

    // println!("{prev_sequence:?}");

    let mut sequence = prev_sequence.clone();
    loop {
        let mut removed_something = false;
        let mut i = 0;
        while i < sequence.len() {
            let mut new_sequence = sequence.clone();

            new_sequence.remove(i);

            if causes_fragmentation(&new_sequence, false) {
                removed_something = true;
                println!("removed {i} ({:?})", sequence[i]);
                sequence = new_sequence;
            } else {
                for item in &mut new_sequence {
                    if let Operation::Free { index } = item {
                        if *index > i {
                            *index -= 1;
                        }
                    }
                }

                if causes_fragmentation(&new_sequence, false) {
                    removed_something = true;
                    println!("removed {i} ({:?}) after adjusting frees", sequence[i]);
                    sequence = new_sequence;
                }
            }

            i += 1;
        }

        if !removed_something {
            break;
        }
    }

    // println!("{sequence:?}");

    loop {
        let mut merged_something = false;
        let mut i = 0;
        while i < sequence.len() {
            let mut new_sequence = sequence.clone();

            let removed = new_sequence.remove(i);

            if let Operation::Allocate { size: removed_size } = removed {
                if let Some(Operation::Allocate { size }) = new_sequence.get_mut(i) {
                    *size += removed_size;
                }
            }

            if causes_fragmentation(&new_sequence, false) {
                merged_something = true;
                println!(
                    "merged {} and {} ({:?} and {:?})",
                    i,
                    i + 1,
                    sequence[i],
                    sequence[i + 1]
                );
                sequence = new_sequence;
            } else {
                for item in &mut new_sequence {
                    if let Operation::Free { index } = item {
                        if *index > i {
                            *index -= 1;
                        }
                    }
                }

                if causes_fragmentation(&new_sequence, false) {
                    merged_something = true;
                    println!(
                        "merged {} and {} ({:?} and {:?}) after adjusting frees",
                        i,
                        i + 1,
                        sequence[i],
                        sequence[i + 1]
                    );
                    sequence = new_sequence;
                }
            }

            i += 1;
        }

        if !merged_something {
            break;
        }
    }

    // println!("{sequence:?}");

    dbg!(causes_fragmentation(&sequence, true));
}

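/// Exercises the slab allocators and the general-purpose allocator with large
/// batches of allocations and frees of varying sizes, validating file coverage
/// after each phase.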
#[test]
fn allocator() {
    let mut db = Db::<()>::create(tempfile::tempfile().unwrap(), &[4, 7, 16, 18]);

    let mut ranges = Vec::new();

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    for i in 0..10000 {
        ranges.push(db.allocate(i % 32));
    }

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    let n = ranges.len();
    for range in ranges.drain(n / 4..n * 3 / 4) {
        db.free(range);
    }

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    for i in 0..10000 {
        ranges.push(db.allocate((4 * i) % 32));
    }

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    for range in ranges.drain(..) {
        db.free(range);
    }

    validate_db(&db, |_snapshot, coverage| {
        for &range in &ranges {
            coverage.set_allocated(range);
        }
    });

    // hexdump(db.map.as_bytes());
}

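/// Replaying this fixed operation sequence must leave the general-purpose free
/// list fully coalesced, i.e. it must not cause fragmentation.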
#[test]
fn no_fragmentation() {
    use Operation::*;
    #[rustfmt::skip]
let mut sequence = vec![Allocate { size: 1946 }, Allocate { size: 3252 }, Free { index: 0 }, Allocate { size: 7391 }, Allocate { size: 394 }, Allocate { size: 3726 }, Allocate { size: 1429 }, Allocate { size: 3188 }, Allocate { size: 6375 }, Allocate { size: 4453 }, Allocate { size: 2514 }, Allocate { size: 4754 }, Allocate { size: 6785 }, Allocate { size: 2751 }, Allocate { size: 4107 }, Allocate { size: 3509 }, Allocate { size: 5897 }, Allocate { size: 7081 }, Allocate { size: 2419 }, Allocate { size: 5400 }, Allocate { size: 7135 }, Free { index: 14 }, Allocate { size: 2130 }, Free { index: 18 }, Allocate { size: 3450 }, Allocate { size: 1296 }, Allocate { size: 8091 }, Allocate { size: 4646 }, Allocate { size: 3891 }, Free { index: 0 }, Allocate { size: 1087 }, Allocate { size: 101 }, Allocate { size: 5353 }, Allocate { size: 3381 }, Allocate { size: 6869 }, Free { index: 1 }, Allocate { size: 3750 }, Allocate { size: 1398 }, Free { index: 22 }, Allocate { size: 18 }, Free { index: 25 }, Allocate { size: 642 }, Free { index: 4 }, Allocate { size: 4 }, Allocate { size: 1898 }, Allocate { size: 5259 }, Free { index: 26 }, Allocate { size: 3151 }, Allocate { size: 4989 }, Allocate { size: 6493 }, Allocate { size: 551 }, Allocate { size: 706 }, Allocate { size: 4161 }, Free { index: 16 }, Allocate { size: 3422 }, Allocate { size: 3011 }, Allocate { size: 5149 }, Allocate { size: 4687 }, Allocate { size: 5 }, Free { index: 34 }, Allocate { size: 191 }, Allocate { size: 2851 }, Allocate { size: 3597 }, Free { index: 28 }, Allocate { size: 7037 }, Allocate { size: 4660 }, Allocate { size: 194 }, Allocate { size: 5537 }, Allocate { size: 3242 }, Allocate { size: 6298 }, Allocate { size: 1239 }, Allocate { size: 7025 }, Allocate { size: 3563 }, Allocate { size: 5039 }, Free { index: 40 }, Allocate { size: 4549 }, Allocate { size: 5362 }, Allocate { size: 3510 }, Free { index: 31 }, Allocate { size: 226 }, Allocate { size: 6904 }, Allocate { size: 4150 }, Allocate { size: 4914 }, Allocate { size: 2330 }, Allocate { size: 2499 }, Allocate { size: 6677 }, Allocate { size: 95 }, Allocate { size: 3726 }, Allocate { size: 3258 }, Free { index: 2 }, Allocate { size: 2129 }, Allocate { size: 3674 }, Allocate { size: 1542 }, Allocate { size: 2210 }, Free { index: 21 }, Allocate { size: 3914 }, Allocate { size: 3108 }, Allocate { size: 1979 }, Allocate { size: 2677 }, Allocate { size: 8140 }, Allocate { size: 7573 }, Allocate { size: 121 }, Free { index: 59 }, Allocate { size: 6467 }, Allocate { size: 262 }, Allocate { size: 7711 }, Allocate { size: 2450 }, Allocate { size: 4351 }, Allocate { size: 4282 }, Free { index: 39 }, Allocate { size: 4050 }, Allocate { size: 67 }, Allocate { size: 5560 }, Free { index: 51 }, Allocate { size: 6038 }, Allocate { size: 555 }, Allocate { size: 1852 }, Free { index: 78 }, Allocate { size: 698 }];
    assert!(!causes_fragmentation(&sequence, true));
}

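/// Runs 20 transactions that each prepend an element to a linked list (capping
/// its length at 5) and bump a generation counter, keeping the last 10
/// snapshots alive and checking that every retained snapshot still sees a
/// consistent generation and list.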
#[test]
fn transactions_work() {
    #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
    #[repr(C)]
    struct DataHeader {
        generation: U64,
        list: FilePointer<DataList>,
    }

    #[derive(Clone, Copy, FromBytes, FromZeroes, AsBytes, Unaligned)]
    #[repr(C)]
    struct DataList {
        next: FilePointer<DataList>,
        data: U64,
    }

    let mut db = Db::<DataHeader>::create(
        tempfile::tempfile().unwrap(),
        &[size_of::<DataHeader>() as u32, size_of::<DataList>() as u32],
    );

    let mut snapshots = VecDeque::new();

    for i in 0..20 {
        db.transaction(|transaction| {
            let root = transaction.root();

            let root = if !root.is_null() {
                root
            } else {
                let (root, data) = transaction.allocate::<DataHeader>();

                *data = DataHeader {
                    generation: 0.into(),
                    list: FilePointer::null(),
                };

                root
            };

            let &data = transaction.read(root);

            assert_eq!(data.generation.get(), i);

            let n = {
                let mut next = data.list;
                let mut n = 0;
                while !next.is_null() {
                    next = transaction.read(next).next;
                    n += 1;
                }
                n
            };

            let next = if n >= 5 {
                let next = transaction.read(data.list).next;
                transaction.free(data.list);
                next
            } else {
                data.list
            };

            let (elem_ptr, element) = transaction.allocate::<DataList>();

            element.next = next;
            element.data = i.into();

            let (root, data) = transaction.modify(root);
            data.list = elem_ptr;
            data.generation = (i + 1).into();

            root
        });

        snapshots.push_back(db.create_reader().state.get());
        if snapshots.len() > 10 {
            drop(snapshots.pop_front());
            db.free_old_epochs()
        }

        validate_db(&db, |snapshot, coverage| {
            coverage.set_allocated(snapshot.root.range());
            let data = snapshot.read(snapshot.root);

            let mut next = data.list;
            while !next.is_null() {
                coverage.set_allocated(next.range());
                next = snapshot.read(next).next;
            }
        });
    }

    // TODO: allocate some variably sized strings

    for (i, snapshot) in snapshots.iter().enumerate() {
        let root = snapshot.read(snapshot.root);

        assert_eq!(root.generation.get(), 1 + 10 + i as u64);

        let mut items = Vec::new();

        let mut ptr = root.list;

        while !ptr.is_null() {
            let element = snapshot.read(ptr);

            items.push(element.data.get());
            ptr = element.next;
        }

        assert_eq!(items.len(), 5);
        assert_eq!(items[0], 10 + i as u64);

        for (expected, &is) in items.iter().skip(1).rev().enumerate() {
            assert_eq!(expected as u64, is);
        }
    }

    drop(snapshots);

    // hexdump(db.map.as_bytes());

    db.free_old_epochs();

    // hexdump(db.map.as_bytes());
}

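/// How a byte of the file is accounted for. The three bits of each discriminant
/// are stored across the three bit planes of `CoverageMap`; conflicting claims
/// for the same byte collapse to `Overlap`.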
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq)]
#[rustfmt::skip]
enum CoverageKind {
    Unaccounted  = 0b000,
    Allocated    = 0b001,
    Free         = 0b010,
    Retired      = 0b011,
    SlabMetadata = 0b100,
    FileMetadata = 0b101,
    Unusable     = 0b110,
    Overlap      = 0b111,
}

impl CoverageKind {
    fn color(self) -> &'static str {
        match self {
            CoverageKind::Unaccounted => "91",
            CoverageKind::Allocated => "32",
            CoverageKind::Free => "34",
            CoverageKind::Retired => "36",
            CoverageKind::SlabMetadata => "35",
            CoverageKind::FileMetadata => "93",
            CoverageKind::Unusable => "30",
            CoverageKind::Overlap => "31",
        }
    }
}

impl CoverageKind {
    #[rustfmt::skip]
    fn from_bits(a: bool, b: bool, c: bool) -> Self {
        let res = match (a, b, c) {
            (false, false, false) => Self::Unaccounted,
            (false, false, true) => Self::Allocated,
            (false, true, false) => Self::Free,
            (false, true, true) => Self::Retired,
            ( true, false, false) => Self::SlabMetadata,
            ( true, false, true) => Self::FileMetadata,
            ( true, true, false) => Self::Unusable,
            ( true, true, true) => Self::Overlap,
            _ => panic!(),
        };
        assert_eq!(res as u8, ((a as u8) << 2) + ((b as u8) << 1) + c as u8);
        res
    }

    fn to_bits(self) -> (bool, bool, bool) {
        (
            self as u8 & 0b100 != 0,
            self as u8 & 0b010 != 0,
            self as u8 & 0b001 != 0,
        )
    }
}

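/// Bit-per-byte coverage tracking for the database file: `data_0`, `data_1` and
/// `data_2` each hold one bit plane of the `CoverageKind` assigned to every
/// byte, and `pages_marked` remembers pages whose slab metadata and unusable
/// tail have already been accounted for.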
struct CoverageMap<'c, R> {
    data_0: Vec<u8>,
    data_1: Vec<u8>,
    data_2: Vec<u8>,
    empty_bits: u8,
    db: &'c Db<R>,
    pages_marked: BTreeSet<PagePointer>,
}

impl<'c, R> CoverageMap<'c, R> {
    fn new(db: &'c Db<R>, len: usize) -> Self {
        let bits = div_round_up(len as u64, 8) as usize;
        Self {
            data_0: vec![0; bits],
            data_1: vec![0; bits],
            data_2: vec![0; bits],
            empty_bits: (8 - len % 8) as u8,
            db,
            pages_marked: BTreeSet::new(),
        }
    }

    fn set(&mut self, i: usize, kind: CoverageKind) {
        let i_byte = i / 8;
        let i_bit = i % 8;
        let mask = 1 << i_bit;

        if i_byte >= self.data_0.len() {
            panic!("out of bounds access");
        }

        let byte = self.data_0[i_byte] | self.data_1[i_byte] | self.data_2[i_byte];

        let (set_0, set_1, set_2) = if byte & mask == 0 {
            // bit was previously unset
            kind.to_bits()
        } else {
            CoverageKind::Overlap.to_bits()
        };

        self.data_0[i_byte] |= mask * set_0 as u8;
        self.data_1[i_byte] |= mask * set_1 as u8;
        self.data_2[i_byte] |= mask * set_2 as u8;
    }

    fn set_range(&mut self, range: FileRange, kind: CoverageKind) {
        range.as_range().for_each(|i| self.set(i, kind))
    }

    fn set_allocated(&mut self, range: FileRange) {
        if range.start.is_null() {
            return;
        }

        let size = range.len();
        if let Some(slab) = self.db.get_slab(size) {
            self.set_range(range, CoverageKind::Allocated);

            match SlabKind::for_size(size as u32) {
                SlabKind::SingleBytes => todo!(),
                SlabKind::RelativeFreeList => {
                    let (mut page, offset) = range.start.page_offset();

                    let header = FilePointer::<RelativeFreeListHeader>::new(page.start());

                    let unusable_start = size_of::<RelativeFreeListHeader>() as u64
                        + RelativeFreeListHeader::capacity(size as u32) as u64 * size;

                    let unusable =
                        (page.start() + unusable_start).range(PAGE_SIZE - unusable_start);

                    if !self.pages_marked.contains(&page) {
                        self.set_range(header.range(), CoverageKind::SlabMetadata);

                        if unusable.len() > 0 {
                            self.set_range(unusable, CoverageKind::Unusable);
                            self.pages_marked.insert(page);
                        }
                    }
                }
                SlabKind::AbsoluteFreeList => {
                    let unusable = PAGE_SIZE - (PAGE_SIZE / size) * size;
                    let page = range.start.page();

                    if unusable > 0 && !self.pages_marked.contains(&page) {
                        self.set_range(
                            (page.start() + (PAGE_SIZE - unusable)).range(unusable),
                            CoverageKind::Unusable,
                        );
                        self.pages_marked.insert(page);
                    }
                }
            }
        } else {
            let size = range
                .len()
                .max(GeneralPurposeAllocator::MIN_ALLOCATION_SIZE);

            self.set_range(range.start.range(size), CoverageKind::Allocated);
        }
    }

    fn uncovered_at(&self) -> Option<usize> {
        let len = self.data_0.len();
        for (i, ((&byte_0, &byte_1), &byte_2)) in self
            .data_0
            .iter()
            .zip(self.data_1.iter())
            .zip(self.data_2.iter())
            .enumerate()
        {
            let byte = byte_0 | byte_1 | byte_2;
            if i == len - 1 {
                if byte != u8::MAX.overflowing_shl(self.empty_bits as u32).0 {
                    return Some(i * 8);
                }
            } else if byte != u8::MAX {
                return Some(i * 8);
            }
        }

        None
    }

    fn had_failures(&self) -> bool {
        for (i, ((&byte_0, &byte_1), &byte_2)) in self
            .data_0
            .iter()
            .zip(self.data_1.iter())
            .zip(self.data_2.iter())
            .enumerate()
        {
            let byte = byte_0 | byte_1 | byte_2;

            if Self::all_equal(byte_0) && Self::all_equal(byte_1) && Self::all_equal(byte_2) {
                let kind =
                    CoverageKind::from_bits(byte_0 & 1 == 1, byte_1 & 1 == 1, byte_2 & 1 == 1);

                if kind == CoverageKind::Overlap {
                    return true;
                }
            };
        }

        false
    }

    fn set_color(res: &mut String, color: &str) {
        res.push_str("\x1b[");
        res.push_str(color);
        res.push('m');
    }

    fn all_equal(bits: u8) -> bool {
        bits == 0 || bits == u8::MAX
    }

    fn print(&self) -> String {
        let mut res = String::new();

        let mut page = 0;
        let mut prev = "";

        let mut current_page = String::new();
        let mut any_not_allocated = false;

        for (i, ((&byte_0, &byte_1), &byte_2)) in self
            .data_0
            .iter()
            .zip(self.data_1.iter())
            .zip(self.data_2.iter())
            .enumerate()
        {
            let byte = byte_0 | byte_1 | byte_2;

            let kind =
                if Self::all_equal(byte_0) && Self::all_equal(byte_1) && Self::all_equal(byte_2) {
                    Some(CoverageKind::from_bits(
                        byte_0 & 1 == 1,
                        byte_1 & 1 == 1,
                        byte_2 & 1 == 1,
                    ))
                } else {
                    None
                };

            if i != 0 {
                if i as u64 % (PAGE_SIZE / 8 / 8) == 0 {
                    Self::set_color(&mut current_page, "");
                    current_page.push('\n');
                    Self::set_color(&mut current_page, prev);
                }
                if i as u64 % (PAGE_SIZE / 8) == 0 {
                    Self::set_color(&mut current_page, "");

                    page += 1;

                    if any_not_allocated {
                        const LINE_LEN: usize = (PAGE_SIZE / 8 / 8) as usize;

                        let mut line = format!("[{page}] ");

                        while line.len() < LINE_LEN {
                            line.push('-');
                        }

                        res.push_str(&current_page);
                        res.push_str(&line);
                        res.push('\n');

                        any_not_allocated = false;
                    }

                    current_page.clear();

                    Self::set_color(&mut current_page, prev);
                }
            }

            let color = kind.map(CoverageKind::color).unwrap_or("33");
            if color != prev {
                Self::set_color(&mut current_page, color);
                if kind != Some(CoverageKind::Allocated) {
                    any_not_allocated = true;
                }
            }
            prev = color;

            current_page.push(char::from_u32(0x2800 + byte as u32).unwrap());
        }

        res.push_str(&current_page);

        Self::set_color(&mut res, "");

        res
    }

    #[track_caller]
    fn assert_covered(&self) {
        if self.uncovered_at().is_some() || self.had_failures() {
            println!("{}", self.print());

            if let Some(uncovered) = self.uncovered_at() {
                panic!(
                    "Space in the file was lost at 0x{uncovered:x?} (page {})",
                    (RawFilePointer::null() + uncovered as u64).page().0.get()
                );
            }

            if self.had_failures() {
                panic!("Space was corrupted");
            }
        }
    }
}

#[test]
fn coverage_map_works() {
    let db = Db::<()>::create(tempfile::tempfile().unwrap(), &[]);

    let mut coverage = CoverageMap::new(&db, 40);
    assert!(coverage.uncovered_at().is_some());
    coverage.set_range(RawFilePointer::null().range(20), CoverageKind::FileMetadata);
    assert!(coverage.uncovered_at().is_some());
    coverage.set_range((RawFilePointer::null() + 20).range(20), CoverageKind::Free);
    assert_eq!(coverage.uncovered_at(), None);
    assert!(!coverage.had_failures());

    coverage.set_range((RawFilePointer::null()).range(8), CoverageKind::Allocated);
    coverage.set_range(
        (RawFilePointer::null() + 10).range(10),
        CoverageKind::Allocated,
    );
    assert_eq!(coverage.uncovered_at(), None);
    assert!(coverage.had_failures());
}

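/// Rebuilds a coverage map from the database's own metadata (file header,
/// general-purpose free list, slab free lists, and retired ranges), lets the
/// caller mark its application data as allocated, and then asserts that every
/// byte of the file is accounted for exactly once.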
#[track_caller]
fn validate_db<R>(db: &Db<R>, f: impl FnOnce(&Snapshot<R>, &mut CoverageMap<'_, R>)) {
    let mut coverage = CoverageMap::new(db, db.map.len());

    let snapshot = Snapshot {
        root: db.root(),
        map: db.create_readonly_map(),
    };

    coverage.set_range(Db::<R>::header_ptr().range(), CoverageKind::FileMetadata);

    // general purpose
    {
        let head = Db::<R>::header_ptr().allocator_state_ptr().general_ptr();
        let mut next = *snapshot.read(head);
        while !next.is_null() {
            let size = GeneralPurposeAllocator::size(db, next);
            coverage.set_range(next.into_raw().range(size), CoverageKind::Free);
            next = *snapshot.read(next.next_ptr());
        }
    }

    // slabs
    {
        let slabs = *snapshot.read(Db::<R>::header_ptr().allocator_state_ptr().slabs_ptr());

        let mut next = Some(slabs);
        while let Some(slabs) = next {
            coverage.set_range(
                slabs
                    .0
                    .into_raw()
                    .range(slabs.read_header(db).size() as u64),
                CoverageKind::SlabMetadata,
            );
            next = slabs.next(db);

            for slab in slabs.iter(db) {
                let size = slab.size(db);
                let head = slab.head(db);

                match SlabKind::for_size(size) {
                    SlabKind::SingleBytes => todo!(),
                    SlabKind::RelativeFreeList => {
                        let (mut page, offset) = head.page_offset();

                        while !page.is_null() {
                            let header_ptr =
                                FilePointer::<RelativeFreeListHeader>::new(page.start());

                            let header = snapshot.read(header_ptr);

                            let mut next = header.first.get();
                            while next != 0 {
                                let next_ptr = FilePointer::<U16>::new(
                                    RawFilePointer::from_page_and_offset(page, next),
                                );
                                coverage.set_range(
                                    next_ptr.into_raw().range(size as u64),
                                    CoverageKind::Free,
                                );
                                next = snapshot.read(next_ptr).get();
                            }

                            {
                                let unusable_start = size_of::<RelativeFreeListHeader>() as u64
                                    + RelativeFreeListHeader::capacity(size) as u64 * size as u64;

                                let unusable = (page.start() + unusable_start)
                                    .range(PAGE_SIZE - unusable_start);

                                if !coverage.pages_marked.contains(&page) {
                                    coverage
                                        .set_range(header_ptr.range(), CoverageKind::SlabMetadata);

                                    if unusable.len() > 0 {
                                        coverage.set_range(unusable, CoverageKind::Unusable);
                                        coverage.pages_marked.insert(page);
                                    }
                                }
                            }

                            page = header.next_page;
                        }
                    }
                    SlabKind::AbsoluteFreeList => {
                        let mut next = head;
                        while !next.is_null() {
                            let next_ptr = FilePointer::<RawFilePointer>::new(next);
                            coverage.set_range(
                                next_ptr.into_raw().range(size as u64),
                                CoverageKind::Free,
                            );

                            let unusable = PAGE_SIZE - (PAGE_SIZE / size as u64) * size as u64;
                            let page = next.page();

                            if unusable > 0 && !coverage.pages_marked.contains(&page) {
                                coverage.set_range(
                                    (page.start() + (PAGE_SIZE - unusable)).range(unusable),
                                    CoverageKind::Unusable,
                                );
                                coverage.pages_marked.insert(page);
                            }

                            next = *snapshot.read(next_ptr);
                        }
                    }
                }
            }
        }
    }

    // retired objects
    {
        for SnapshotAndFreeList { to_free, .. } in &db.snapshots {
            for &range in to_free {
                coverage.set_range(range, CoverageKind::Retired);
            }
        }
    }

    f(&snapshot, &mut coverage);

    if coverage.uncovered_at().is_none() {
        // println!("{}", coverage.print());
    }

    coverage.assert_covered();
}

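/// Pipes `bytes` through the external `hexdump -C` command for manual
/// inspection (see the commented-out calls in the tests above).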
fn hexdump(bytes: &[u8]) {
    let mut child = std::process::Command::new("hexdump")
        .arg("-C")
        .stdin(Stdio::piped())
        .stdout(Stdio::inherit())
        .spawn()
        .unwrap();

    let mut stdin = child.stdin.take().expect("failed to get stdin");

    stdin.write_all(bytes).unwrap();

    std::mem::drop(stdin);

    child.wait().unwrap();
}

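/// Exercises the persistent queue: a few hand-written enqueue/dequeue
/// transactions, then 100 randomized transactions that interleave
/// `enqueue_many`/`dequeue_many` batches and check FIFO order, `get_range` and
/// `last`, validating file coverage along the way.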
#[test]
fn queue() {
    fn validate(db: &Db<Queue<U64>>) {
        validate_db(db, |snapshot, coverage| {
            let queue = snapshot.root;
            coverage.set_allocated(queue.range());

            let n = queue.length(snapshot);

            let mut next = *snapshot.read(field_ptr!(queue, Queue<U64>, head));

            for i in 0..n {
                coverage.set_allocated(next.range());

                next = *snapshot.read(field_ptr!(next, QueueElement<U64>, next))
            }
        })
    }

    let mut db = Db::<Queue<U64>>::create(
        tempfile::tempfile().unwrap(),
        &[size_of::<QueueElement<U64>>() as u32],
    );

    db.transaction(|transaction| Queue::new(transaction));
    db.transaction(|transaction| transaction.root().enqueue(transaction, 1.into()));
    db.transaction(|transaction| transaction.root().enqueue(transaction, 2.into()));
    db.transaction(|transaction| {
        let (queue, res) = transaction.root().dequeue(transaction).unwrap();
        assert_eq!(res.get(), 1);
        queue
    });
    db.transaction(|transaction| {
        let (queue, res) = transaction.root().dequeue(transaction).unwrap();
        assert_eq!(res.get(), 2);
        queue
    });

    db.transaction(|transaction| {
        assert!(transaction.root().dequeue(transaction).is_none());
        transaction.root()
    });

    db.free_old_epochs();

    validate(&db);

    let mut rng = rand::thread_rng();

    let mut j = 0;
    let mut i = 0;
    for _ in 0..100 {
        db.transaction(|transaction| {
            let mut root = transaction.root();

            let n = rng.gen_range(1..20);
            for _ in 0..n {
                let how_many = rng.gen_range(1..20);
                if rng.gen_bool(0.6) || root.length(transaction) < how_many {
                    let elements = (i..i + how_many).map(U64::from).collect::<Vec<U64>>();
                    root = root.enqueue_many(transaction, &elements);
                    i += how_many;
                } else {
                    let res;
                    (root, res) = root.dequeue_many(transaction, how_many).unwrap();
                    for x in res {
                        assert_eq!(x.get(), j);
                        j += 1;
                    }
                }
            }

            if !root.is_empty(transaction) {
                let s = rng.gen_range(0..root.length(transaction));
                let e = rng.gen_range(s..=root.length(transaction));

                let elements = root.get_range(transaction, s..e).unwrap();

                dbg!(&elements);
                for (i, element) in elements.into_iter().enumerate() {
                    assert_eq!(element.get(), j + s + i as u64);
                }
            }

            if !root.is_empty(transaction) {
                assert_eq!(root.last(transaction), Some(U64::from(i - 1)));
            }

            root
        });

        if rng.gen_bool(0.05) {
            db.free_old_epochs();
        }
    }

    validate(&db);

    db.transaction(|transaction| {
        let mut root = transaction.root();

        let n = root.length(transaction);

        if n != 0 {
            let mut res;
            (root, res) = root.dequeue_many(transaction, n).unwrap();

            for x in res {
                assert_eq!(x.get(), j);
                j += 1;
            }
        }

        root
    });

    db.free_old_epochs();

    validate(&db);
}