diff --git a/boot1/src/main.rs b/boot1/src/main.rs index 57b8b2c..55744a3 100644 --- a/boot1/src/main.rs +++ b/boot1/src/main.rs @@ -1,12 +1,11 @@ #![no_std] #![no_main] +mod spin; mod vga; use core::{arch::asm, panic::PanicInfo, fmt::Write}; -use vga::VgaBuf; - #[repr(C)] #[derive(Clone, Debug)] struct BiosIntr { @@ -32,9 +31,9 @@ fn panic(_info: &PanicInfo) -> ! { #[no_mangle] pub extern "C" fn _start() -> ! { - let mut vga_buf = unsafe { VgaBuf::new(0xb8000 as *mut u16) }; + vga::vga_init(); - writeln!(&mut vga_buf, "hello from rust").ok(); + vga_println!("hello from rust"); let mut args = BiosIntr { eax: 0xe820, @@ -52,7 +51,7 @@ pub extern "C" fn _start() -> ! { _bios_call(0x15, &raw mut args); } - writeln!(&mut vga_buf, "eax = {:x}", args.eax).ok(); + vga_println!("eax = {:x}", args.eax); hlt() } diff --git a/boot1/src/spin.rs b/boot1/src/spin.rs new file mode 100644 index 0000000..4374fc9 --- /dev/null +++ b/boot1/src/spin.rs @@ -0,0 +1,101 @@ +use core::{cell::UnsafeCell, hint, ops::{Deref, DerefMut}, sync::atomic::{AtomicBool, Ordering}}; + +pub struct Spinlock<T> { + data: UnsafeCell<T>, + locked: AtomicBool, +} + +impl<T> Spinlock<T> { + pub const fn new(data: T) -> Self { + Self { + data: UnsafeCell::new(data), + locked: AtomicBool::new(false), + } + } + + pub fn lock(&self) -> SpinlockGuard<T> { + // If we observe `locked` was `false`, then: + // - Use acquire ordering, so nothing inside the critical section gets reordered before we + // observed the `false` value. + // - Store `true`, so nothing else can enter the critical section until we exit it. + // Otherwise, spin until we observe a `false` value. + while self.locked + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + hint::spin_loop(); + } + + SpinlockGuard { lock: self } + } + + /// # Safety + /// There must be no "active" `SpinlockGuards` for this lock, i.e. a `SpinlockGuard` which can be + /// used to obtain a reference to the spinlock-protected data. 
+ unsafe fn unlock(&self) { + // Unset `locked` with release ordering so that nothing inside the critical section gets + // reordered to after we stored `false`. + self.locked.store(false, Ordering::Release); + } + + /// # Safety + /// No mutable references to the spinlock-protected data may exist. + unsafe fn get<'s, 'a>(&'s self) -> &'a T + where + 's: 'a, + { + unsafe { &*self.data.get() } + } + + /// # Safety + /// No references to the spinlock-protected data may exist. + unsafe fn get_mut<'s, 'a>(&'s self) -> &'a mut T + where + 's: 'a, + { + unsafe { &mut *self.data.get() } + } +} + +unsafe impl<T> Sync for Spinlock<T> +where + T: Send, +{} + +pub struct SpinlockGuard<'a, T> { + lock: &'a Spinlock<T>, +} + +impl<'a, T> Deref for SpinlockGuard<'a, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + // SAFETY: + // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have + // exclusive access to `data`, so no mutable references to `data` can exist. + unsafe { self.lock.get() } + } +} + +impl<'a, T> DerefMut for SpinlockGuard<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: + // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have + // exclusive access to `data`, so no other references to `data` can exist. + unsafe { self.lock.get_mut() } + } +} + +impl<'a, T> Drop for SpinlockGuard<'a, T> { + fn drop(&mut self) { + // SAFETY: + // Only one `SpinlockGuard` can exist at a time for a particular lock, since we set `locked` + // to true before creating a guard and refuse to create any new ones until it is `false` again. + // Therefore, we are the only `SpinlockGuard` for the lock. Since this is the destructor, and + // we don't access the spinlock-protected data here, there are therefore no "active" + // `SpinlockGuard`s remaining for the lock. 
+ unsafe { self.lock.unlock() } } } diff --git a/boot1/src/vga.rs b/boot1/src/vga.rs index 76fd0bb..5f5db70 100644 --- a/boot1/src/vga.rs +++ b/boot1/src/vga.rs @@ -1,10 +1,47 @@ use core::{fmt, ptr}; +use crate::spin::Spinlock; + const VGA_WIDTH: usize = 80; const VGA_HEIGHT: usize = 25; const COLOUR_MASK: u16 = 0x0a00; +pub static VGA: Spinlock<Option<VgaBuf>> = Spinlock::new(None); + +#[macro_export] +macro_rules! vga_print { + ($($args:tt)*) => ({ + crate::vga::with_vga_buf(|vga_buf| { + ::core::write!(vga_buf, $($args)*).ok(); + }); + }) +} + +#[macro_export] +macro_rules! vga_println { + ($($args:tt)*) => ({ + crate::vga::with_vga_buf(|vga_buf| { + ::core::writeln!(vga_buf, $($args)*).ok(); + }); + }) +} + +pub fn vga_init() { + let vga_buf = unsafe { VgaBuf::new(0xb8000 as *mut u16) }; + let mut guard = VGA.lock(); + *guard = Some(vga_buf); +} + +pub fn with_vga_buf<F>(f: F) +where + F: FnOnce(&mut VgaBuf), +{ + let mut guard = crate::vga::VGA.lock(); + let vga = guard.as_mut().expect("vga not initialised"); + f(vga); +} + pub struct VgaBuf { buf: *mut u16, col: usize, @@ -87,3 +124,5 @@ impl fmt::Write for VgaBuf { Ok(()) } } + +unsafe impl Send for VgaBuf {}