rust stage 4

refactor
pantonshire 9 months ago
parent dae57bb1a1
commit b2bb36e9f4

@ -11,12 +11,15 @@ run:
build:
nasm -f bin {{common_flags}} -o s1.bin stages/s1/s1.s
nasm -f elf -werror -Iinclude -o stages/s2/s2.o stages/s2/s2.s
nasm -f elf -werror -Iinclude -o stages/s3/s3.o stages/s3/s3.s
nasm -f elf -werror -Iinclude -o stages/s3/a20.o stages/s3/a20.s
ld -T s2.ld -o s234.bin stages/s2/*.o stages/s3/*.o
# TODO: try with gnu ld
nasm -f elf {{common_flags}} -o stages/s2/s2.o stages/s2/s2.s
nasm -f elf {{common_flags}} -o stages/s3/s3.o stages/s3/s3.s
nasm -f elf {{common_flags}} -o stages/s3/a20.o stages/s3/a20.s
ld.lld -T s2.ld -o s234.bin stages/s2/*.o stages/s3/*.o stages/s4/target/protected32/release/libs4.a
mkimg:
dd if=/dev/zero of=disk.bin bs=440 count=1 conv=notrunc
dd if=s1.bin of=disk.bin conv=notrunc
dd if=s234.bin of=disk.bin bs=512 seek=70 conv=notrunc
# build:
# nasm -f bin -Iinclude -o boot0.bin boot0.s

25
s2.ld

@ -13,11 +13,34 @@ SECTIONS {
KEEP(*(.s3_text))
*(.s3_text)
}
.s3_data : {
KEEP(*(.s3_data))
*(.s3_data)
}
/* TODO: set current address for s4 loadpoint */
/* TODO: move magic & length */
.text : {
*(.text)
*(.text.*)
}
.data : {
*(.data)
*(.data.*)
}
.bss : {
*(.bss)
*(.bss.*)
}
.rodata : {
*(.rodata)
*(.rodata.*)
}
.magic : {
/* Magic bytes stage 2 uses to make sure it's loaded the subsequent sectors correctly. */

@ -12,13 +12,15 @@ extern s234_bin_len
extern s234_bin_sectors
extern s234_magic
section .s2_text
%macro copy_stack_var_to_globals 2
mov %1, [bp - %2]
mov [REAL_GLOBALS + %2], %1
%endmacro
section .s2_text
; Load stages 3 and 4 into memory.
load_s234:
; Now that we're not doing instruction byte golf like we were in stage 1, we can afford to move

@ -6,6 +6,7 @@
extern test_a20
extern enable_a20_intel_8042
extern _start
section .s3_text
@ -65,9 +66,8 @@ s3_main:
mov ebp, REAL_STACK_BASE
mov esp, ebp
; TODO
; jmp _start
jmp _start
.halt:
hlt
jmp .halt

@ -0,0 +1,7 @@
[build]
target = "protected32.json"
[unstable]
build-std = [ "core", "compiler_builtins" ]
build-std-features = [ "compiler-builtins-mem" ]

@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "s4"
version = "0.1.0"

@ -0,0 +1,14 @@
[package]
name = "s4"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["staticlib"]
[profile.release]
opt-level = "s"
debug = 0
[dependencies]

@ -0,0 +1,16 @@
{
"arch": "x86",
"os": "none",
"llvm-target": "i686-unknown-none",
"data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
"target-endian": "little",
"target-pointer-width": "32",
"target-c-int-width": "32",
"executables": true,
"linker-flavor": "ld.lld",
"linker": "rust-lld",
"panic-strategy": "abort",
"disable-redzone": true,
"features": "-mmx,-sse,+soft-float"
}

@ -0,0 +1,39 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
// NOTE(review): `crate::test_runner` is not defined in any of the visible
// modules (spin, vga, x86) — confirm it exists elsewhere, otherwise test
// builds of this crate will fail to resolve this path.
#![test_runner(crate::test_runner)]

mod spin;
mod vga;
mod x86;

// `fmt::Write` must be in scope here because the `vga_println!` macro expands
// to `::core::writeln!`, whose `write_fmt` call needs the trait at the call site.
use core::{arch::asm, panic::PanicInfo, fmt::Write};

/// Panic handler: prints the panic location (when available) and message to
/// the VGA console, then halts the CPU forever.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    vga_println!("panic!");
    if let Some(location) = info.location() {
        vga_println!("{}", location);
    }
    vga_println!("{}", info.message());
    hlt()
}

/// Entry point of the stage-4 payload; stage 3 appears to transfer control
/// here (see the `jmp _start` in the stage-3 assembly). Initialises the
/// global VGA writer, prints a greeting, and halts.
#[no_mangle]
pub extern "C" fn _start() -> ! {
    vga::vga_init();
    vga_println!("hello from rust :)");
    hlt()
}

/// Halts forever. `hlt` only pauses the CPU until the next interrupt, so it
/// is wrapped in a loop to guarantee this function never returns.
#[inline]
fn hlt() -> ! {
    loop {
        unsafe {
            asm!("hlt");
        }
    }
}

@ -0,0 +1,101 @@
use core::{cell::UnsafeCell, hint, ops::{Deref, DerefMut}, sync::atomic::{AtomicBool, Ordering}};

/// A simple test-and-test-and-set spinlock providing mutual exclusion without
/// any OS support — suitable for this no_std, pre-scheduler environment.
pub struct Spinlock<T> {
    /// The protected value; `UnsafeCell` permits mutation through `&self`.
    data: UnsafeCell<T>,
    /// `true` while a `SpinlockGuard` for this lock is live.
    locked: AtomicBool,
}

impl<T> Spinlock<T> {
    /// Creates a new, unlocked spinlock wrapping `data`.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
            locked: AtomicBool::new(false),
        }
    }

    /// Acquires the lock, spinning until it becomes available, and returns a
    /// guard that releases the lock when dropped.
    pub fn lock(&self) -> SpinlockGuard<T> {
        // If we observe `locked` was `false`, then:
        // - Use acquire ordering, so nothing inside the critical section gets reordered before we
        //   observed the `false` value.
        // - Store `true`, so nothing else can enter the critical section until we exit it.
        // Otherwise, spin until we observe a `false` value.
        loop {
            if self.locked
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            // Test-and-test-and-set: wait on a read-only (relaxed) load until the
            // lock looks free before retrying the CAS. A failed CAS still takes the
            // cache line exclusively, so hammering it bounces the line between
            // cores; spinning on a plain load keeps the line in the shared state.
            while self.locked.load(Ordering::Relaxed) {
                hint::spin_loop();
            }
        }
        SpinlockGuard { lock: self }
    }

    /// Releases the lock.
    ///
    /// # Safety
    /// There must be no "active" `SpinlockGuards` for this lock, i.e. a `SpinlockGuard` which can be
    /// used to obtain a reference to the spinlock-protected data.
    unsafe fn unlock(&self) {
        // Unset `locked` with release ordering so that nothing inside the critical section gets
        // reordered to after we stored `false`.
        self.locked.store(false, Ordering::Release);
    }

    /// Returns a shared reference to the protected data without taking the lock.
    ///
    /// # Safety
    /// No mutable references to the spinlock-protected data may exist.
    unsafe fn get<'s, 'a>(&'s self) -> &'a T
    where
        's: 'a,
    {
        unsafe { &*self.data.get() }
    }

    /// Returns a mutable reference to the protected data without taking the lock.
    ///
    /// # Safety
    /// No references to the spinlock-protected data may exist.
    unsafe fn get_mut<'s, 'a>(&'s self) -> &'a mut T
    where
        's: 'a,
    {
        unsafe { &mut *self.data.get() }
    }
}

// SAFETY: the lock serialises all access to `data`, so sharing the lock across
// threads is sound whenever the data itself may be sent between threads.
unsafe impl<T> Sync for Spinlock<T>
where
    T: Send,
{}

/// RAII guard granting access to a locked `Spinlock`'s data; releases the
/// lock on drop.
pub struct SpinlockGuard<'a, T> {
    lock: &'a Spinlock<T>,
}

impl<'a, T> Deref for SpinlockGuard<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY:
        // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
        // exclusive access to `data`, so no mutable references to `data` can exist.
        unsafe { self.lock.get() }
    }
}

impl<'a, T> DerefMut for SpinlockGuard<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY:
        // For the entire lifetime of the `SpinlockGuard`, `locked` remains `true`, so we have
        // exclusive access to `data`, so no other references to `data` can exist.
        unsafe { self.lock.get_mut() }
    }
}

impl<'a, T> Drop for SpinlockGuard<'a, T> {
    fn drop(&mut self) {
        // SAFETY:
        // Only one `SpinlockGuard` can exist at a time for a particular lock, since we set `locked`
        // to true before creating a guard and refuse to create any new ones until it is `false` again.
        // Therefore, we are the only `SpinlockGuard` for the lock. Since this is the destructor, and
        // we don't access the spinlock-protected data here, there are therefore no "active"
        // `SpinlockGuard`s remaining for the lock.
        unsafe { self.lock.unlock() }
    }
}

@ -0,0 +1,128 @@
use core::{fmt, ptr};
use crate::spin::Spinlock;
/// Width of the VGA text-mode buffer, in character cells.
const VGA_WIDTH: usize = 80;
/// Height of the VGA text-mode buffer, in rows.
const VGA_HEIGHT: usize = 25;
/// Attribute word OR-ed into every cell written (attribute byte in the high
/// 8 bits). NOTE(review): 0x0a is conventionally light-green-on-black in VGA
/// text mode — confirm that is the intended colour.
const COLOUR_MASK: u16 = 0x0a00;

/// Global VGA writer; `None` until `vga_init` has run.
pub static VGA: Spinlock<Option<VgaBuf>> = Spinlock::new(None);

/// Formats its arguments to the global VGA writer (no trailing newline).
/// Panics (via `with_vga_buf`) if `vga_init` has not been called.
#[macro_export]
macro_rules! vga_print {
    ($($args:tt)*) => ({
        crate::vga::with_vga_buf(|vga_buf| {
            // Formatting errors are deliberately ignored: `VgaBuf`'s Write impl
            // never returns Err, so there is nothing useful to do with one here.
            ::core::write!(vga_buf, $($args)*).ok();
        });
    })
}

/// Like `vga_print!`, but appends a newline.
#[macro_export]
macro_rules! vga_println {
    ($($args:tt)*) => ({
        crate::vga::with_vga_buf(|vga_buf| {
            ::core::writeln!(vga_buf, $($args)*).ok();
        });
    })
}

/// Initialises the global VGA writer over the text buffer at physical address
/// 0xb8000 (the conventional VGA text-mode framebuffer location).
pub fn vga_init() {
    let vga_buf = unsafe { VgaBuf::new(0xb8000 as *mut u16) };
    let mut guard = VGA.lock();
    *guard = Some(vga_buf);
}

/// Locks the global VGA writer and passes it to `f`.
///
/// Panics if `vga_init` has not been called yet.
pub fn with_vga_buf<F>(f: F)
where
    F: FnOnce(&mut VgaBuf),
{
    let mut guard = crate::vga::VGA.lock();
    let vga = guard.as_mut().expect("vga not initialised");
    f(vga);
}
/// A cursor-tracking writer over a VGA text-mode character buffer.
pub struct VgaBuf {
    /// Base pointer of the buffer: one `u16` cell per on-screen character.
    buf: *mut u16,
    /// Column where the next character will be written.
    col: usize,
    /// Row where the next character will be written.
    row: usize,
}

impl VgaBuf {
    /// Wraps a raw character-buffer pointer, with the cursor at the top-left.
    ///
    /// # Safety
    /// `buf` must point to a writable region of at least
    /// `VGA_WIDTH * VGA_HEIGHT` consecutive `u16` cells, valid for the whole
    /// lifetime of the returned value.
    pub unsafe fn new(buf: *mut u16) -> Self {
        Self { buf, col: 0, row: 0 }
    }

    /// Writes every character of `s` at the cursor, advancing it.
    pub fn vga_write_str(&mut self, s: &str) {
        s.chars().for_each(|ch| self.vga_write_char(ch));
    }

    /// Writes one character at the cursor, handling newlines, end-of-row wrap
    /// and scrolling.
    pub fn vga_write_char(&mut self, c: char) {
        let is_newline = c == '\n';
        // Move to the start of the next row on '\n' or when this row is full.
        if is_newline || self.col >= VGA_WIDTH {
            self.col = 0;
            self.row += 1;
        }
        // Ran off the bottom: shift everything up and stay on the last row.
        if self.row >= VGA_HEIGHT {
            self.scroll();
            self.row -= 1;
        }
        // A newline produces no glyph of its own.
        if !is_newline {
            self.vga_write_char_at(c, self.col, self.row);
            self.col += 1;
        }
    }

    /// Writes a single ASCII byte at `(col, row)` with the standard colour
    /// attribute; out-of-range coordinates are silently ignored.
    pub fn vga_write_ascii_char_at(&mut self, c: u8, col: usize, row: usize) {
        if col >= VGA_WIDTH || row >= VGA_HEIGHT {
            return;
        }
        let cell = COLOUR_MASK | u16::from(c);
        // SAFETY: the bounds check above keeps `(col, row)` inside the buffer.
        unsafe {
            self.coord_ptr(col, row).write_volatile(cell);
        }
    }

    /// Writes `c` at `(col, row)`, substituting byte 0xfe for any character
    /// that does not fit in a single byte.
    pub fn vga_write_char_at(&mut self, c: char, col: usize, row: usize) {
        let byte = u8::try_from(c).unwrap_or(0xfe);
        self.vga_write_ascii_char_at(byte, col, row);
    }

    /// Shifts every row up by one and clears the bottom row.
    fn scroll(&mut self) {
        // SAFETY: both ranges lie inside the buffer; `ptr::copy` allows the
        // source and destination to overlap.
        unsafe {
            ptr::copy(
                self.coord_ptr(0, 1),
                self.coord_ptr(0, 0),
                VGA_WIDTH * (VGA_HEIGHT - 1),
            );
        }
        // Blank the freshly exposed bottom row.
        for col in 0..VGA_WIDTH {
            self.vga_write_ascii_char_at(0, col, VGA_HEIGHT - 1);
        }
    }

    /// Pointer to the cell at `(col, row)`.
    ///
    /// # Safety
    /// `(col, row)` must lie within the `VGA_WIDTH` x `VGA_HEIGHT` grid.
    unsafe fn coord_ptr(&self, col: usize, row: usize) -> *mut u16 {
        unsafe { self.buf.add((row * VGA_WIDTH) + col) }
    }
}

impl fmt::Write for VgaBuf {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.vga_write_str(s);
        Ok(())
    }

    fn write_char(&mut self, c: char) -> fmt::Result {
        self.vga_write_char(c);
        Ok(())
    }
}

// SAFETY: `VgaBuf` is a raw pointer plus cursor state with no affinity to the
// creating thread, so moving it between threads is sound.
unsafe impl Send for VgaBuf {}

@ -0,0 +1,11 @@
/// Bit masks for the x86 (E)FLAGS register.
pub mod flags {
    /// Carry flag (bit 0).
    pub const CF: u32 = 1 << 0;
    /// Parity flag (bit 2).
    pub const PF: u32 = 1 << 2;
    /// Auxiliary carry (adjust) flag (bit 4).
    pub const AF: u32 = 1 << 4;
    /// Zero flag (bit 6).
    pub const ZF: u32 = 1 << 6;
    /// Sign flag (bit 7).
    pub const SF: u32 = 1 << 7;
    /// Trap flag — single-step debugging (bit 8).
    pub const TF: u32 = 1 << 8;
    /// Interrupt-enable flag (bit 9).
    pub const IF: u32 = 1 << 9;
    /// Direction flag for string instructions (bit 10).
    pub const DF: u32 = 1 << 10;
    /// Overflow flag (bit 11).
    pub const OF: u32 = 1 << 11;
}
Loading…
Cancel
Save