serial print macro

refactor
pantonshire 5 months ago
parent 988d3d601d
commit e2478d0f3a

@@ -1,6 +1,6 @@
 fn main() {
     const LINKER_SCRIPT_PATH: &str = "link.ld";
 
     println!("cargo::rerun-if-changed={}", LINKER_SCRIPT_PATH);
     println!("cargo::rustc-link-arg=-T{}", LINKER_SCRIPT_PATH);
 }

@@ -1,6 +1,28 @@
 use core::{fmt, hint};
-use crate::ioport;
+use crate::{ioport, spin::Spinlock};
+
+const COM1_PORT: u16 = 0x3f8;
+
+pub static COM1: Spinlock<Option<Com>> = Spinlock::new(None);
+
+#[macro_export]
+macro_rules! com1_print {
+    ($($args:tt)*) => ({
+        crate::com::with_com1_if_available(|com1| {
+            ::core::write!(com1, $($args)*).ok();
+        });
+    })
+}
+
+#[macro_export]
+macro_rules! com1_println {
+    ($($args:tt)*) => ({
+        crate::com::with_com1_if_available(|com1| {
+            ::core::writeln!(com1, $($args)*).ok();
+        });
+    })
+}
 
 pub struct Com {
     port: u16,
@@ -8,10 +30,31 @@ pub struct Com {
 
 pub struct ComError;
 
+pub fn with_com1_if_available<F>(f: F)
+where
+    F: FnOnce(&mut Com),
+{
+    let mut guard = COM1.lock();
+    if let Some(com1) = guard.as_mut() {
+        f(com1);
+    }
+}
+
+pub unsafe fn try_com1_init() -> bool {
+    match unsafe { Com::init(COM1_PORT) } {
+        Ok(com1) => {
+            let mut guard = COM1.lock();
+            *guard = Some(com1);
+            true
+        }
+        _ => false,
+    }
+}
+
 impl Com {
     pub unsafe fn init(port: u16) -> Result<Self, ComError> {
         const ECHO_BYTE: u8 = 0x5a;
 
         let echo = unsafe {
             // Unset DLAB
             ioport::outb(port + 3, 0x00);
@@ -43,9 +86,7 @@ impl Com {
             ioport::outb(port + 4, 0x0f);
         }
 
-        Ok(Self {
-            port,
-        })
+        Ok(Self { port })
     }
 
     pub fn poll_has_data(&self) -> bool {
@@ -74,19 +115,10 @@ impl Com {
 }
 
 impl fmt::Write for Com {
     fn write_str(&mut self, s: &str) -> fmt::Result {
         for b in s.bytes() {
             self.write_poll(b);
         }
         Ok(())
     }
-
-    fn write_char(&mut self, c: char) -> fmt::Result {
-        let mut buf = [0u8; 4];
-        let s = c.encode_utf8(&mut buf);
-        for b in s.bytes() {
-            self.write_poll(b);
-        }
-        Ok(())
-    }
 }
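
For reference, a minimal usage sketch of the new serial API (the function serial_demo below is hypothetical and not part of this commit; main.rs further down does the real wiring):

// Sketch only: a hypothetical call site for the items introduced above.
use core::fmt::Write;

fn serial_demo() {
    // Safety: performs port I/O on COM1, same contract as the call in _start.
    let com1_ok = unsafe { com::try_com1_init() };

    // The macro expands to with_com1_if_available, so it is a silent no-op
    // whenever COM1 was never initialised.
    com1_println!("serial available: {}", com1_ok);

    // The closure form gives direct access to the locked Com handle.
    com::with_com1_if_available(|com1| {
        writeln!(com1, "direct write").ok();
    });
}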

@@ -1,23 +1,23 @@
 use core::arch::asm;
 
 pub unsafe fn inb(port: u16) -> u8 {
     let x: u8;
     unsafe {
         asm!(
             "in al, dx",
             in("dx") port,
             lateout("al") x
         );
     }
     x
 }
 
 pub unsafe fn outb(port: u16, x: u8) {
     unsafe {
         asm!(
             "out dx, al",
             in("al") x,
             in("dx") port,
         );
     }
 }
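
A rough sketch of how the two wrappers compose (the echo pattern mirrors the loopback test in Com::init above, but the register usage here is illustrative only):

// Sketch only: the port offsets are illustrative, not taken from Com::init.
unsafe fn loopback_echo(port: u16, byte: u8) -> u8 {
    unsafe {
        // Write a byte to the device, then read back whatever it returns.
        ioport::outb(port, byte);
        ioport::inb(port)
    }
}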

@@ -3,54 +3,57 @@
 #![feature(custom_test_frameworks)]
 #![test_runner(crate::test_runner)]
 
-mod spin;
+mod com;
 mod ioport;
+mod spin;
 mod vga;
-mod com;
 
-use core::{arch::{asm, global_asm}, fmt::Write, panic::PanicInfo, ptr, slice};
+use core::{
+    arch::{asm, global_asm},
+    fmt::{self, Write},
+    panic::PanicInfo,
+    ptr, slice,
+};
 
 global_asm!(include_str!("asm/trampoline.s"));
 
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
     vga_println!("panic!");
     if let Some(location) = info.location() {
         vga_println!("{}", location);
     }
     vga_println!("{}", info.message());
     hlt()
 }
 
-const COM1_PORT: u16 = 0x3f8;
-
 #[unsafe(no_mangle)]
 pub extern "C" fn _start() -> ! {
     vga::vga_init();
 
+    unsafe {
+        com::try_com1_init();
+    }
+
     let gdt_ptr = ptr::with_exposed_provenance::<u64>(0xc000);
     let gdt_slice = unsafe { slice::from_raw_parts(gdt_ptr, 4) };
 
     vga_println!("hello from rust :)");
     vga_println!("{:016x}", gdt_slice[0]);
     vga_println!("{:016x}", gdt_slice[1]);
     vga_println!("{:016x}", gdt_slice[2]);
     vga_println!("{:016x}", gdt_slice[3]);
 
-    let mut com = unsafe { com::Com::init(COM1_PORT) };
-    if let Ok(com) = &mut com {
-        writeln!(com, "hello serial").ok();
-    }
+    com1_println!("hello serial!");
 
     hlt()
 }
 
 #[inline]
 fn hlt() -> ! {
     loop {
         unsafe {
             asm!("hlt");
         }
     }
 }

@@ -1,101 +1,104 @@
-use core::{cell::UnsafeCell, hint, ops::{Deref, DerefMut}, sync::atomic::{AtomicBool, Ordering}};
+use core::{
+    cell::UnsafeCell,
+    hint,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicBool, Ordering},
+};
 
 pub struct Spinlock<T> {
     data: UnsafeCell<T>,
     locked: AtomicBool,
 }
 
 impl<T> Spinlock<T> {
     pub const fn new(data: T) -> Self {
         Self {
             data: UnsafeCell::new(data),
             locked: AtomicBool::new(false),
         }
     }
 
     pub fn lock(&self) -> SpinlockGuard<'_, T> {
         // If we observe `locked` was `false`, then:
         // - Use acquire ordering, so nothing inside the critical section gets reordered before we
         //   observed the `false` value.
         // - Store `true`, so nothing else can enter the critical section until we exit it.
         // Otherwise, spin until we observe a `false` value.
-        while self.locked
-            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
-            .is_err()
+        while self
+            .locked
+            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
         {
             hint::spin_loop();
         }
 
         SpinlockGuard { lock: self }
     }
 
     /// # Safety
     /// There must be no "active" `SpinlockGuards` for this lock, i.e. a `SpinlockGuard` which can be
     /// used to obtain a reference to the spinlock-protected data.
     unsafe fn unlock(&self) {
         // Unset `locked` with release ordering so that nothing inside the critical section gets
         // reordered to after we stored `false`.
        self.locked.store(false, Ordering::Release);
     }
 
     /// # Safety
     /// No mutable references to the spinlock-protected data may exist.
     unsafe fn get<'s, 'a>(&'s self) -> &'a T
     where
         's: 'a,
     {
         unsafe { &*self.data.get() }
     }
 
     /// # Safety
     /// No references to the spinlock-protected data may exist.
     unsafe fn get_mut<'s, 'a>(&'s self) -> &'a mut T
     where
         's: 'a,
     {
         unsafe { &mut *self.data.get() }
     }
 }
 
-unsafe impl<T> Sync for Spinlock<T>
-where
-    T: Send,
-{}
+unsafe impl<T> Sync for Spinlock<T> where T: Send {}
 
 pub struct SpinlockGuard<'a, T> {
     lock: &'a Spinlock<T>,
 }
 
 impl<'a, T> Deref for SpinlockGuard<'a, T> {
     type Target = T;
 
     #[inline]
     fn deref(&self) -> &Self::Target {
         // SAFETY:
         // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
         // exclusive access to `data`, so no mutable references to `data` can exist.
         unsafe { self.lock.get() }
     }
 }
 
 impl<'a, T> DerefMut for SpinlockGuard<'a, T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut Self::Target {
         // SAFETY:
         // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
         // exclusive access to `data`, so no other references to `data` can exist.
         unsafe { self.lock.get_mut() }
     }
 }
 
 impl<'a, T> Drop for SpinlockGuard<'a, T> {
     fn drop(&mut self) {
         // SAFETY:
         // Only one `SpinlockGuard` can exist at a time for a particular lock, since we set `locked`
         // to true before creating a guard and refuse to create any new ones until it is `false` again.
         // Therefore, we are the only `SpinlockGuard` for the lock. Since this is the destructor, and
         // we don't access the spinlock-protected data here, there are therefore no "active"
         // `SpinlockGuard`s remaining for the lock.
         unsafe { self.lock.unlock() }
     }
 }
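
As a usage sketch (not part of the commit), this is the intended pattern; the COM1 and VGA globals elsewhere in this commit follow the same Spinlock<Option<T>> shape:

// Sketch only: BOOT_COUNTER is a hypothetical example, not in the tree.
static BOOT_COUNTER: Spinlock<u64> = Spinlock::new(0);

fn bump() -> u64 {
    // lock() spins until the flag is acquired and returns a guard;
    // DerefMut gives &mut u64, and Drop releases the lock at end of scope.
    let mut guard = BOOT_COUNTER.lock();
    *guard += 1;
    *guard
}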

@@ -29,101 +29,104 @@ macro_rules! vga_println {
 }
 
 pub fn vga_init() {
     let vga_buf = unsafe { VgaBuf::new(ptr::with_exposed_provenance_mut::<u16>(VGA_ADDR)) };
     let mut guard = VGA.lock();
     *guard = Some(vga_buf);
 }
 
 pub fn with_vga_buf<F>(f: F)
 where
     F: FnOnce(&mut VgaBuf),
 {
-    let mut guard = crate::vga::VGA.lock();
+    let mut guard = VGA.lock();
     let vga = guard.as_mut().expect("vga not initialised");
     f(vga);
 }
 
 pub struct VgaBuf {
     buf: *mut u16,
     col: usize,
     row: usize,
 }
 
 impl VgaBuf {
     pub unsafe fn new(buf: *mut u16) -> Self {
-        Self { buf, col: 0, row: 0 }
+        Self {
+            buf,
+            col: 0,
+            row: 0,
+        }
     }
 
     pub fn vga_write_str(&mut self, s: &str) {
         for c in s.chars() {
             self.vga_write_char(c);
         }
     }
 
     pub fn vga_write_char(&mut self, c: char) {
         let newline = c == '\n';
 
         if newline || self.col >= VGA_WIDTH {
             self.col = 0;
             self.row += 1;
         }
 
         if self.row >= VGA_HEIGHT {
             self.scroll();
             self.row -= 1;
         }
 
         if !newline {
             self.vga_write_char_at(c, self.col, self.row);
             self.col += 1;
         }
     }
 
     pub fn vga_write_ascii_char_at(&mut self, c: u8, col: usize, row: usize) {
         let vga_val = COLOUR_MASK | u16::from(c);
 
         if col < VGA_WIDTH && row < VGA_HEIGHT {
             unsafe {
-                self.coord_ptr(col, row)
-                    .write_volatile(vga_val);
+                self.coord_ptr(col, row).write_volatile(vga_val);
             }
         }
     }
 
     pub fn vga_write_char_at(&mut self, c: char, col: usize, row: usize) {
         let c = u8::try_from(c).unwrap_or(0xfe);
         self.vga_write_ascii_char_at(c, col, row);
     }
 
     fn scroll(&mut self) {
         unsafe {
             ptr::copy(
                 self.coord_ptr(0, 1),
                 self.coord_ptr(0, 0),
-                VGA_WIDTH * (VGA_HEIGHT - 1)
+                VGA_WIDTH * (VGA_HEIGHT - 1),
             );
 
             for col in 0..VGA_WIDTH {
                 self.vga_write_ascii_char_at(0, col, VGA_HEIGHT - 1);
             }
         }
     }
 
     unsafe fn coord_ptr(&self, col: usize, row: usize) -> *mut u16 {
         unsafe { self.buf.add((row * VGA_WIDTH) + col) }
     }
 }
 
 impl fmt::Write for VgaBuf {
     fn write_str(&mut self, s: &str) -> fmt::Result {
         self.vga_write_str(s);
         Ok(())
     }
 
     fn write_char(&mut self, c: char) -> fmt::Result {
         self.vga_write_char(c);
         Ok(())
     }
 }
 
 unsafe impl Send for VgaBuf {}
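
For completeness, a minimal sketch (hypothetical, not in the tree) of driving the buffer through with_vga_buf directly rather than via vga_println!:

// Sketch only: a hypothetical caller writing straight into the VGA buffer.
fn draw_marker() {
    vga::with_vga_buf(|vga| {
        // with_vga_buf panics via expect() if vga_init() has not run yet.
        vga.vga_write_char_at('*', 0, 0);
    });
}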
