serial print macro

refactor
pantonshire 5 months ago
parent 988d3d601d
commit e2478d0f3a

@ -1,6 +1,6 @@
/// Build script: registers the custom linker script with Cargo so the kernel
/// is linked with it, and re-runs whenever the script changes.
fn main() {
    const LINKER_SCRIPT_PATH: &str = "link.ld";
    // Rebuild when the linker script itself changes.
    println!("cargo::rerun-if-changed={}", LINKER_SCRIPT_PATH);
    // Pass `-T link.ld` to the linker.
    println!("cargo::rustc-link-arg=-T{}", LINKER_SCRIPT_PATH);
}

@ -1,6 +1,28 @@
use core::{fmt, hint};
use crate::ioport;
use crate::{ioport, spin::Spinlock};
const COM1_PORT: u16 = 0x3f8;
pub static COM1: Spinlock<Option<Com>> = Spinlock::new(None);
/// Writes formatted text to the COM1 serial port, doing nothing if COM1 has
/// not been initialised (see `com::try_com1_init`). Accepts the same
/// arguments as `core::write!`.
#[macro_export]
macro_rules! com1_print {
    ($($args:tt)*) => ({
        crate::com::with_com1_if_available(|com1| {
            // Serial output is best-effort: write errors are discarded.
            ::core::write!(com1, $($args)*).ok();
        });
    })
}
/// Like `com1_print!`, but appends a newline (uses `core::writeln!`).
/// Does nothing if COM1 has not been initialised.
#[macro_export]
macro_rules! com1_println {
    ($($args:tt)*) => ({
        crate::com::with_com1_if_available(|com1| {
            // Serial output is best-effort: write errors are discarded.
            ::core::writeln!(com1, $($args)*).ok();
        });
    })
}
pub struct Com {
port: u16,
@ -8,10 +30,31 @@ pub struct Com {
/// Error returned when serial port initialisation fails (see `Com::init`).
pub struct ComError;
/// Runs `f` with exclusive access to the COM1 serial port.
///
/// If COM1 was never initialised (the global is still `None`), `f` is
/// silently skipped — callers treat serial output as best-effort.
pub fn with_com1_if_available<F>(f: F)
where
    F: FnOnce(&mut Com),
{
    // The lock guard lives for the whole `if let`, keeping access exclusive.
    if let Some(com1) = COM1.lock().as_mut() {
        f(com1);
    }
}
/// Attempts to initialise the COM1 serial port and, on success, stores the
/// handle in the global `COM1` spinlock. Returns `true` on success.
///
/// # Safety
/// The caller must uphold the contract of `Com::init` for `COM1_PORT`.
pub unsafe fn try_com1_init() -> bool {
    // SAFETY: forwarded directly from this function's own safety contract.
    let Ok(com1) = (unsafe { Com::init(COM1_PORT) }) else {
        return false;
    };
    *COM1.lock() = Some(com1);
    true
}
impl Com {
pub unsafe fn init(port: u16) -> Result<Self, ComError> {
const ECHO_BYTE: u8 = 0x5a;
let echo = unsafe {
// Unset DLAB
ioport::outb(port + 3, 0x00);
@ -43,9 +86,7 @@ impl Com {
ioport::outb(port + 4, 0x0f);
}
Ok(Self {
port,
})
Ok(Self { port })
}
pub fn poll_has_data(&self) -> bool {
@ -74,19 +115,10 @@ impl Com {
}
impl fmt::Write for Com {
fn write_str(&mut self, s: &str) -> fmt::Result {
for b in s.bytes() {
self.write_poll(b);
}
Ok(())
}
fn write_char(&mut self, c: char) -> fmt::Result {
let mut buf = [0u8; 4];
let s = c.encode_utf8(&mut buf);
for b in s.bytes() {
self.write_poll(b);
fn write_str(&mut self, s: &str) -> fmt::Result {
for b in s.bytes() {
self.write_poll(b);
}
Ok(())
}
Ok(())
}
}

@ -1,23 +1,23 @@
use core::arch::asm;
/// Reads one byte from the given x86 I/O port (`in al, dx`).
///
/// # Safety
/// The caller must ensure that reading from `port` is valid in the current
/// context and has no memory-unsafe side effects.
pub unsafe fn inb(port: u16) -> u8 {
    let x: u8;
    unsafe {
        asm!(
            "in al, dx",
            in("dx") port,
            lateout("al") x
        );
    }
    x
}
/// Writes one byte to the given x86 I/O port (`out dx, al`).
///
/// # Safety
/// The caller must ensure that writing `x` to `port` is valid in the current
/// context and has no memory-unsafe side effects.
pub unsafe fn outb(port: u16, x: u8) {
    unsafe {
        asm!(
            "out dx, al",
            in("al") x,
            in("dx") port,
        );
    }
}

@ -3,54 +3,57 @@
#![feature(custom_test_frameworks)]
#![test_runner(crate::test_runner)]
mod spin;
mod com;
mod ioport;
mod spin;
mod vga;
mod com;
use core::{arch::{asm, global_asm}, fmt::Write, panic::PanicInfo, ptr, slice};
use core::{
arch::{asm, global_asm},
fmt::{self, Write},
panic::PanicInfo,
ptr, slice,
};
global_asm!(include_str!("asm/trampoline.s"));
/// Panic handler: prints the panic location (when available) and message to
/// the VGA console, then halts the CPU forever.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    vga_println!("panic!");
    if let Some(location) = info.location() {
        vga_println!("{}", location);
    }
    vga_println!("{}", info.message());
    hlt()
}
const COM1_PORT: u16 = 0x3f8;
/// Kernel entry point, jumped to from the assembly trampoline.
/// Initialises the VGA console and COM1 serial port, then prints the GDT
/// entries and greeting messages before halting.
#[unsafe(no_mangle)]
pub extern "C" fn _start() -> ! {
    vga::vga_init();

    unsafe {
        // Best-effort: serial output simply stays disabled if this fails.
        com::try_com1_init();
    }

    let gdt_ptr = ptr::with_exposed_provenance::<u64>(0xc000);
    // SAFETY: assumes the bootstrap code left a 4-entry GDT at 0xc000 —
    // TODO(review): confirm against the trampoline/boot code.
    let gdt_slice = unsafe { slice::from_raw_parts(gdt_ptr, 4) };

    vga_println!("hello from rust :)");
    vga_println!("{:016x}", gdt_slice[0]);
    vga_println!("{:016x}", gdt_slice[1]);
    vga_println!("{:016x}", gdt_slice[2]);
    vga_println!("{:016x}", gdt_slice[3]);

    com1_println!("hello serial!");

    hlt()
}
#[inline]
fn hlt() -> ! {
loop {
unsafe {
asm!("hlt");
loop {
unsafe {
asm!("hlt");
}
}
}
}

@ -1,101 +1,104 @@
use core::{
    cell::UnsafeCell,
    hint,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicBool, Ordering},
};

/// A test-and-set mutual-exclusion lock that busy-waits instead of blocking,
/// suitable for an environment with no OS scheduler.
pub struct Spinlock<T> {
    data: UnsafeCell<T>,
    locked: AtomicBool,
}

impl<T> Spinlock<T> {
    /// Creates a new, unlocked spinlock wrapping `data`.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
            locked: AtomicBool::new(false),
        }
    }

    /// Acquires the lock, spinning until it is available, and returns a guard
    /// that releases the lock when dropped.
    pub fn lock(&self) -> SpinlockGuard<'_, T> {
        // If we observe `locked` was `false`, then:
        // - Use acquire ordering, so nothing inside the critical section gets reordered before we
        //   observed the `false` value.
        // - Store `true`, so nothing else can enter the critical section until we exit it.
        // Otherwise, spin until we observe a `false` value.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            hint::spin_loop();
        }
        SpinlockGuard { lock: self }
    }

    /// # Safety
    /// There must be no "active" `SpinlockGuard`s for this lock, i.e. a `SpinlockGuard` which can
    /// be used to obtain a reference to the spinlock-protected data.
    unsafe fn unlock(&self) {
        // Unset `locked` with release ordering so that nothing inside the critical section gets
        // reordered to after we stored `false`.
        self.locked.store(false, Ordering::Release);
    }

    /// # Safety
    /// No mutable references to the spinlock-protected data may exist.
    unsafe fn get<'s, 'a>(&'s self) -> &'a T
    where
        's: 'a,
    {
        unsafe { &*self.data.get() }
    }

    /// # Safety
    /// No references to the spinlock-protected data may exist.
    unsafe fn get_mut<'s, 'a>(&'s self) -> &'a mut T
    where
        's: 'a,
    {
        unsafe { &mut *self.data.get() }
    }
}

// SAFETY: the lock hands out access to the protected data to at most one guard at a time, so
// sharing the `Spinlock` across threads is sound whenever `T` itself may be sent between threads.
unsafe impl<T> Sync for Spinlock<T> where T: Send {}

/// RAII guard giving access to the data protected by a [`Spinlock`];
/// releases the lock on drop.
pub struct SpinlockGuard<'a, T> {
    lock: &'a Spinlock<T>,
}

impl<'a, T> Deref for SpinlockGuard<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY:
        // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
        // exclusive access to `data`, so no mutable references to `data` can exist.
        unsafe { self.lock.get() }
    }
}

impl<'a, T> DerefMut for SpinlockGuard<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY:
        // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
        // exclusive access to `data`, so no other references to `data` can exist.
        unsafe { self.lock.get_mut() }
    }
}

impl<'a, T> Drop for SpinlockGuard<'a, T> {
    fn drop(&mut self) {
        // SAFETY:
        // Only one `SpinlockGuard` can exist at a time for a particular lock, since we set `locked`
        // to true before creating a guard and refuse to create any new ones until it is `false`
        // again. Therefore, we are the only `SpinlockGuard` for the lock. Since this is the
        // destructor, and we don't access the spinlock-protected data here, there are therefore no
        // "active" `SpinlockGuard`s remaining for the lock.
        unsafe { self.lock.unlock() }
    }
}

@ -29,101 +29,104 @@ macro_rules! vga_println {
}
/// Initialises the global VGA writer, pointing it at the memory-mapped text
/// buffer at `VGA_ADDR`. Must be called before `with_vga_buf`.
pub fn vga_init() {
    // SAFETY: VGA_ADDR is presumed to be the valid, mapped VGA text buffer in
    // this environment — NOTE(review): confirm against the boot/paging setup.
    let vga_buf = unsafe { VgaBuf::new(ptr::with_exposed_provenance_mut::<u16>(VGA_ADDR)) };
    let mut guard = VGA.lock();
    *guard = Some(vga_buf);
}
/// Runs `f` with exclusive access to the global VGA writer.
///
/// # Panics
/// Panics if `vga_init` has not been called yet.
pub fn with_vga_buf<F>(f: F)
where
    F: FnOnce(&mut VgaBuf),
{
    let mut guard = VGA.lock();
    let vga = guard.as_mut().expect("vga not initialised");
    f(vga);
}
/// Cursor-tracking writer over the memory-mapped VGA text buffer.
pub struct VgaBuf {
    buf: *mut u16, // base pointer of the VGA text buffer
    col: usize,    // current cursor column
    row: usize,    // current cursor row
}
impl VgaBuf {
pub unsafe fn new(buf: *mut u16) -> Self {
Self { buf, col: 0, row: 0 }
}
pub unsafe fn new(buf: *mut u16) -> Self {
Self {
buf,
col: 0,
row: 0,
}
}
pub fn vga_write_str(&mut self, s: &str) {
for c in s.chars() {
self.vga_write_char(c);
pub fn vga_write_str(&mut self, s: &str) {
for c in s.chars() {
self.vga_write_char(c);
}
}
}
pub fn vga_write_char(&mut self, c: char) {
let newline = c == '\n';
pub fn vga_write_char(&mut self, c: char) {
let newline = c == '\n';
if newline || self.col >= VGA_WIDTH {
self.col = 0;
self.row += 1;
}
if newline || self.col >= VGA_WIDTH {
self.col = 0;
self.row += 1;
}
if self.row >= VGA_HEIGHT {
self.scroll();
self.row -= 1;
}
if self.row >= VGA_HEIGHT {
self.scroll();
self.row -= 1;
}
if !newline {
self.vga_write_char_at(c, self.col, self.row);
self.col += 1;
if !newline {
self.vga_write_char_at(c, self.col, self.row);
self.col += 1;
}
}
}
pub fn vga_write_ascii_char_at(&mut self, c: u8, col: usize, row: usize) {
let vga_val = COLOUR_MASK | u16::from(c);
pub fn vga_write_ascii_char_at(&mut self, c: u8, col: usize, row: usize) {
let vga_val = COLOUR_MASK | u16::from(c);
if col < VGA_WIDTH && row < VGA_HEIGHT {
unsafe {
self.coord_ptr(col, row).write_volatile(vga_val);
}
}
}
if col < VGA_WIDTH && row < VGA_HEIGHT {
unsafe {
self.coord_ptr(col, row)
.write_volatile(vga_val);
}
pub fn vga_write_char_at(&mut self, c: char, col: usize, row: usize) {
let c = u8::try_from(c).unwrap_or(0xfe);
self.vga_write_ascii_char_at(c, col, row);
}
}
pub fn vga_write_char_at(&mut self, c: char, col: usize, row: usize) {
let c = u8::try_from(c).unwrap_or(0xfe);
self.vga_write_ascii_char_at(c, col, row);
}
fn scroll(&mut self) {
unsafe {
ptr::copy(
self.coord_ptr(0, 1),
self.coord_ptr(0, 0),
VGA_WIDTH * (VGA_HEIGHT - 1)
);
for col in 0..VGA_WIDTH {
self.vga_write_ascii_char_at(0, col, VGA_HEIGHT - 1);
}
fn scroll(&mut self) {
unsafe {
ptr::copy(
self.coord_ptr(0, 1),
self.coord_ptr(0, 0),
VGA_WIDTH * (VGA_HEIGHT - 1),
);
for col in 0..VGA_WIDTH {
self.vga_write_ascii_char_at(0, col, VGA_HEIGHT - 1);
}
}
}
}
unsafe fn coord_ptr(&self, col: usize, row: usize) -> *mut u16 {
unsafe { self.buf.add((row * VGA_WIDTH) + col) }
}
unsafe fn coord_ptr(&self, col: usize, row: usize) -> *mut u16 {
unsafe { self.buf.add((row * VGA_WIDTH) + col) }
}
}
/// Lets the `vga_print!`-style macros format directly into the VGA buffer.
impl fmt::Write for VgaBuf {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.vga_write_str(s);
        Ok(())
    }

    fn write_char(&mut self, c: char) -> fmt::Result {
        self.vga_write_char(c);
        Ok(())
    }
}
// SAFETY: `VgaBuf` is not auto-Send because it holds a raw pointer; moving it between threads
// moves its cursor state along with it. NOTE(review): presumably sound because only one `VgaBuf`
// ever exists (inside the VGA spinlock) — confirm no aliasing writer is created elsewhere.
unsafe impl Send for VgaBuf {}

Loading…
Cancel
Save