diff --git a/memregion_merge/Cargo.lock b/memregion_merge/Cargo.lock
new file mode 100644
index 0000000..52819d7
--- /dev/null
+++ b/memregion_merge/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "memregion_merge"
+version = "0.1.0"
diff --git a/memregion_merge/Cargo.toml b/memregion_merge/Cargo.toml
new file mode 100644
index 0000000..c476269
--- /dev/null
+++ b/memregion_merge/Cargo.toml
@@ -0,0 +1,6 @@
+[package]
+name = "memregion_merge"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
diff --git a/memregion_merge/src/main.rs b/memregion_merge/src/main.rs
new file mode 100644
index 0000000..11fab82
--- /dev/null
+++ b/memregion_merge/src/main.rs
@@ -0,0 +1,282 @@
+// Notes:
+// - Once we've got a list of usable memory, we can use some of it to store the original e820 list
+//   for future use. We therefore don't have to worry about storing the unavailable regions for
+//   now, we can do that later.
+
+use std::array;
+
+struct MemRegions<const N: usize> {
+  buf: [MemRegion; N],
+  len: usize,
+}
+
+impl<const N: usize> MemRegions<N> {
+  fn new() -> Self {
+    Self { buf: array::from_fn(|_| Default::default()), len: 0 }
+  }
+
+  fn regions(&self) -> &[MemRegion] {
+    &self.buf[..self.len]
+  }
+
+  fn push_usable(&mut self, new: MemRegion) -> bool {
+    let mut merged = false;
+    for old in &mut self.buf[..self.len] {
+      if old.overlaps(&new) {
+        old.merge(&new);
+        merged = true;
+        break;
+      }
+    }
+
+    // If we merged the new region with an existing one, we may have filled in a gap that allows
+    // other regions to be merged, so check if any adjacent regions can be merged.
+    if merged {
+      let mut i = 1;
+      while i < self.len {
+        if self.buf[i - 1].overlaps(&self.buf[i]) {
+          let (left, right) = self.buf.split_at_mut(i);
+          left[i - 1].merge(&right[0]);
+          right.rotate_left(1);
+          self.len -= 1;
+        }
+        else {
+          i += 1;
+        }
+      }
+
+      return true;
+    }
+
+    // If we couldn't merge the new region with any others, then place it into its sorted position
+    // in the regions buffer.
+    else {
+      self.push_without_merge_at(new, 0)
+    }
+  }
+
+  fn push_without_merge_at(&mut self, new: MemRegion, start_idx: usize) -> bool {
+    // Fail if we don't have space for the new region.
+    if self.len >= N {
+      return false;
+    }
+
+    let mut i = start_idx;
+    while i < self.len {
+      if new.start < self.buf[i].start {
+        break;
+      }
+      i += 1;
+    }
+
+    self.buf[self.len] = new;
+    self.len += 1;
+    self.buf[i..self.len].rotate_right(1);
+
+    true
+  }
+
+  // n.b. expects to be run only after all `push_usable`s are done
+  fn push_unusable(&mut self, unusable: MemRegion) {
+    let mut i = 0;
+    while i < self.len {
+      if !unusable.overlaps(&self.buf[i]) {
+        i += 1;
+      }
+      else {
+        let left_region = MemRegion::new(self.buf[i].start, unusable.start);
+        let right_region = MemRegion::new(unusable.end, self.buf[i].end);
+
+        // If the unusable region completely contains the usable region, remove the usable region
+        // entirely.
+        if left_region.empty() && right_region.empty() {
+          self.buf[i..self.len].rotate_left(1);
+          self.len -= 1;
+        }
+        // If the unusable region is completely contained within the usable region, then split the
+        // usable region in two.
+        else if !left_region.empty() && !right_region.empty() {
+          self.buf[i] = left_region;
+          self.push_without_merge_at(right_region, i + 1);
+          i += 1;
+        }
+        // Otherwise, trim the usable region.
+        else {
+          self.buf[i] = if !left_region.empty() { left_region } else { right_region };
+          i += 1;
+        }
+      }
+    }
+  }
+}
+
+#[derive(Clone, Default, PartialEq, Eq, Debug)]
+struct MemRegion {
+  start: u64,
+  end: u64,
+}
+
+impl MemRegion {
+  fn new(start: u64, end: u64) -> Self {
+    Self { start, end }
+  }
+
+  fn len(&self) -> u64 {
+    if self.end < self.start {
+      0
+    }
+    else {
+      self.end - self.start
+    }
+  }
+
+  fn empty(&self) -> bool {
+    self.len() == 0
+  }
+
+  fn overlaps(&self, other: &MemRegion) -> bool {
+    self.start <= other.end && other.start <= self.end
+  }
+
+  fn merge(&mut self, other: &MemRegion) {
+    debug_assert!(self.overlaps(other));
+
+    self.start = self.start.min(other.start);
+    self.end = self.end.max(other.end);
+  }
+}
+
+fn main() {
+
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+
+  #[test]
+  fn test_push_usable() {
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 200),
+        MemRegion::new(250, 400),
+        MemRegion::new(500, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_usable(MemRegion::new(150, 300));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 400),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_usable(MemRegion::new(150, 300));
+      rs.push_usable(MemRegion::new(250, 400));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 400),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(150, 300));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 400),
+        MemRegion::new(500, 600),
+      ]);
+    }
+  }
+
+  #[test]
+  fn test_push_unusable() {
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_unusable(MemRegion::new(250, 400));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 200),
+        MemRegion::new(500, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_unusable(MemRegion::new(300, 400));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 200),
+        MemRegion::new(250, 300),
+        MemRegion::new(500, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_unusable(MemRegion::new(250, 300));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 200),
+        MemRegion::new(300, 400),
+        MemRegion::new(500, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<5>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_unusable(MemRegion::new(300, 350));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 200),
+        MemRegion::new(250, 300),
+        MemRegion::new(350, 400),
+        MemRegion::new(500, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+
+    {
+      let mut rs = MemRegions::<4>::new();
+      rs.push_usable(MemRegion::new(800, 900));
+      rs.push_usable(MemRegion::new(500, 600));
+      rs.push_usable(MemRegion::new(250, 400));
+      rs.push_usable(MemRegion::new(100, 200));
+      rs.push_unusable(MemRegion::new(150, 550));
+      assert_eq!(rs.regions(), &[
+        MemRegion::new(100, 150),
+        MemRegion::new(550, 600),
+        MemRegion::new(800, 900),
+      ]);
+    }
+  }
+}
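
For reference, a minimal sketch of how the new type might be driven from an e820-style map once it is wired into the boot path (the fixed-size backing array presumably exists because no allocator is available that early). Only the MemRegions/MemRegion API comes from this patch; the build_memory_map helper, the capacity of 16, and the example entries are hypothetical.

// Hypothetical usage sketch; entries below are made up for illustration, in the real boot
// path they would come from the firmware-provided e820 map.
fn build_memory_map() -> MemRegions<16> {
  // (start, end, usable?) triples standing in for raw e820 entries.
  let e820 = [
    (0x0000_0000u64, 0x0009_fc00, true),  // conventional memory
    (0x0009_fc00, 0x000a_0000, false),    // EBDA, reserved
    (0x0010_0000, 0x7fee_0000, true),     // main RAM
    (0x7fee_0000, 0x8000_0000, false),    // ACPI/firmware reserved
  ];

  let mut regions = MemRegions::<16>::new();

  // First pass: collect and coalesce the usable ranges.
  for &(start, end, usable) in &e820 {
    if usable {
      regions.push_usable(MemRegion::new(start, end));
    }
  }

  // Second pass: carve the reserved ranges back out of any usable regions they overlap.
  // Per the note in the patch, push_unusable runs only after all push_usable calls are done.
  for &(start, end, usable) in &e820 {
    if !usable {
      regions.push_unusable(MemRegion::new(start, end));
    }
  }

  regions
}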