Compare commits

...

27 Commits

Author SHA1 Message Date
pantonshire 20e857b453 explain big code segment 1 month ago
pantonshire 535749afc9 expand protected mode code segment more 1 month ago
pantonshire a90c727a9a expand protected mode code segment 1 month ago
pantonshire da6f981a6a move gdt, use fixed addr 5 months ago
pantonshire f0289cd753 use nographic for run_serial 5 months ago
pantonshire e2478d0f3a serial print macro 5 months ago
pantonshire 988d3d601d fixed addr gdt, serial 5 months ago
pantonshire b0dffe7bc8 use strict provenance api 5 months ago
pantonshire 868844f710 add calling convention notes 5 months ago
pantonshire 88d7fb32d4 fixes for latest nightly 5 months ago
pantonshire 1a8fadf226 wip reset docs 8 months ago
pantonshire bb60e1c8ed load and jump to stage 3 separately from stage 2 8 months ago
pantonshire da529e8426 fix off-by-one error in stage 2 prelude 8 months ago
pantonshire 9e5cbaf07b add build to gitignore 8 months ago
pantonshire 359d980a1c build single blob for stages 2 and 3 8 months ago
pantonshire b61aa778d0 xtask to build stages 1-3 8 months ago
pantonshire 1974163db6 use hlt for padding of stage 2 data section 8 months ago
pantonshire 96e98cd534 default to nightly compiler 8 months ago
pantonshire 4f79fbae5b cargo xtask alias 8 months ago
pantonshire 63977011cf update layout macros 8 months ago
pantonshire b41ba2de55 workspace for build tools 8 months ago
pantonshire 2219fac4e5 more refactoring 8 months ago
pantonshire 909875ec5b update resouces 8 months ago
pantonshire f5b28bd24d fix protected mode target specification 8 months ago
pantonshire b2bb36e9f4 rust stage 4 9 months ago
pantonshire dae57bb1a1 fix s3 a20 9 months ago
pantonshire 2b1a9f9412 wip refactor 9 months ago

@ -0,0 +1,3 @@
[alias]
xtask = "run --package xtask --"

2
.gitignore vendored

@ -1,3 +1,5 @@
build/
*.o
*.bin
.pc
target/

@ -0,0 +1,5 @@
[workspace]
resolver = "2"
members = ["xtask"]
exclude = ["stage_3"]

1101
boot1.s

File diff suppressed because it is too large Load Diff

@ -0,0 +1,10 @@
# Calling convention
## boot0
I'm not sticking to any particular calling convention for boot0, since we have to golf to fit in
440 bytes. Whatever's most convenient.
## boot1
- Arguments: ax, cx, dx, stack
- Return value: ax, cf for booleans
- ax may be clobbered. All other registers are preserved
- Callee cleanup (i.e. the function pops stack arguments when returning)

@ -0,0 +1,37 @@
# Physical memory layout
## Low memory
| Type | Range | |
|----------|---------------------|-------------------------------------------------|
| Reserved | 0x000000 - 0x000400 | real-mode interrupt vector table |
| Reserved | 0x000400 - 0x000500 | bios data area |
| Usable | 0x000500 - 0x004000 | main stack |
| Usable | 0x004000 - 0x006a00 | globals |
| Usable | 0x006a00 - 0x007c00 | memory map |
| Usable | 0x007c00 - 0x007e00 | boot sector |
| Usable | 0x007e00 - 0x080000 | conventional usable memory |
| Reserved | 0x080000 - 0x0a0000 | extended bios data area (maximum possible size) |
| Reserved | 0x0a0000 - 0x0c0000 | video memory |
| Reserved | 0x0c0000 - 0x0c8000 | video bios |
| Reserved | 0x0c8000 - 0x0f0000 | bios expansions |
| Reserved | 0x0f0000 - 0x100000 | motherboard bios |
### Allocations
- 0x00500 - 0x04000: stage 1/2 stack
- 0x04000 - 0x06a00: globals
- 0x07c00 - 0x07e00: stage 1 text
- 0x07e00 - 0x08000: gpt partition header
- 0x08000 - 0x08200: stage 2 partition table entry
- 0x08200 - 0x0c000: stage 2 text, stage 3 stack
- 0x0e000 : gdt
- 0x10000 : stage 3 text
TODO: once we're in protected mode, repurpose the stage 2 region as a stack
TODO: load s3 into a separate memory region
TODO: use some fixed known memory address for the GDT, rather than putting it in stage 2.
That way, we can safely overwrite stage 2 once we're done with it.

@ -0,0 +1,41 @@
QEMU emulated hardware:
- i440fx northbridge
- National Semiconductor 16550a UART
```asm
[bits 16]
[org 0xff000]
times (0xff0 - ($ - $$)) db 0x00
mov eax, 0xcafeface
hlt
times (0x2000 - ($ - $$)) db 0xf4
```
```sh
qemu-system-x86_64 \
-monitor stdio \
-no-reboot \
-m 512M \
-drive if=pflash,file=reset.bin,format=raw
```
- The BIOS flash is mapped in its entirety to a board-specific address. Here it looks like the
right edge is always 00000000ffffffff, and it expands to the left as the size of the flash
increases.
- The last 4096 bytes of flash are mapped to ff000 as well.
```
x/4096xb 0x00000000fffff000
```
Left edge moves left as we add more to the ROM image.
Last 4096 bytes of the ROM image are also mapped to 0xff000.
```
info mtree
00000000ffffe000-00000000ffffffff (prio 0, romd): system.flash0
```

@ -62,13 +62,3 @@
%define VGA_COL 0x1c
%define TEXTBUF_LINE 0x1e
%define MEMMAP_ENTRIES 0x20
%macro fnstart 0
push bp
mov bp, sp
%endmacro
%macro fnret 0
pop bp
ret
%endmacro

@ -0,0 +1,14 @@
%ifndef BOOT_FN_H
%define BOOT_FN_H
; Standard function prologue: save the caller's frame pointer and open a new stack frame, so
; locals and stack arguments can be addressed relative to bp.
%macro fnstart 0
    push bp
    mov bp, sp
%endmacro
; Standard function epilogue, matching fnstart: restore the caller's frame pointer and return.
; Assumes sp has been returned to the value it had just after fnstart (all pushes popped).
%macro fnret 0
    pop bp
    ret
%endmacro
%endif

@ -0,0 +1,38 @@
%ifndef BOOT_GLOBALS_H
%define BOOT_GLOBALS_H
; Stage 1 base stack frame variable offsets / globals
; (we use the same offsets once we copy the variables to the globals section)
; -------------------------------------------------------------------------------------------------
; The boot drive number given to us by the BIOS.
%define BOOT_DRIVE 0x02
; Boot drive geometry
%define SECTORS_PER_TRACK 0x04
%define N_HEADS 0x06
; Starting LBA of the GPT partition entries array.
%define GPT_ENTRIES_START_LBA 0x08
; Number of GPT entries, saturated to 16 bits.
%define GPT_N_ENTRIES_16 0x0a
; Number of sectors to advance by once we've read every GPT entry in the current sector.
%define GPT_SECTOR_STRIDE 0x0c
; Number of bytes to advance by in the current sector once we've read a GPT entry.
%define GPT_BYTE_STRIDE 0x0e
; Number of GPT entries which can fit in a single sector.
%define GPT_ENTRIES_PER_SECTOR 0x10
%define GPT_CURRENT_ENTRY_IDX 0x12
%define GPT_SECTOR_ENTRY_IDX 0x14
%define GPT_SECTORS_LOADED 0x16
%define GPT_CURRENT_LBA 0x18
; 2-byte address of the partition GPT entry loaded into memory.
%define STAGE_2_GPT_ENTRY_ADDR 0x1a
; Stage 2 globals
; -------------------------------------------------------------------------------------------------
; 4-byte partition end LBA.
%define LOADER_PART_END_LBA 0x1c
; 2-byte stage 2 start LBA.
%define STAGE_2_START_LBA 0x20
; 2-byte stage 2 end LBA (inclusive).
%define STAGE_2_END_LBA 0x22
%endif

@ -0,0 +1,26 @@
%ifndef BOOT_LAYOUT_H
%define BOOT_LAYOUT_H
; Load address of the stage 1 boot sector (fixed: the BIOS loads the boot sector at 0x7c00).
%define S1_ADDR 0x7c00
; Length of the stage 2 prelude data area, which sits at the very start of the stage 2 blob.
%define S2_DATA_LEN 32
; Offset of the stage 2 entry point within the blob: the text starts right after the data area.
%define S2_TEXT_OFFSET S2_DATA_LEN
; Address stage 1 loads the stage 2 blob to.
%define S2_LOAD_ADDR 0x8200
%define S2_DATA S2_LOAD_ADDR
; Fixed: previously expanded to (S2_DATA_ADDR + S2_DATA_LEN), but S2_DATA_ADDR is not defined
; anywhere in this header -- any use of S2_ENTRYPOINT would fail to assemble. The intended base
; is S2_DATA (the load address).
%define S2_ENTRYPOINT (S2_DATA + S2_DATA_LEN)
; Fixed address the GDT is copied to before entering protected mode.
%define GDT_FLAT_ADDR 0xe000
; Address stage 2 loads the stage 3 blob to.
%define S3_LOAD_ADDR 0x10000
; %define MEMMAP 0x6a00
; %define MEMMAP_END S2_ADDR
; %define MEMMAP_ENT_SIZE 32
; %define MEMMAP_CAP ((MEMMAP_END - MEMMAP) / MEMMAP_ENT_SIZE)
%define REAL_GLOBALS 0x4000
%define REAL_GLOBALS_END 0x6a00
; The stack grows down from the start of the globals area.
%define REAL_STACK_BASE REAL_GLOBALS
; "PANT" magic dword placed at the end of the stage 2 blob by the linker script.
%define S2_MAGIC 0x544e4150
%endif

@ -0,0 +1,8 @@
%ifndef BOOT_S2_FNS_H
%define BOOT_S2_FNS_H
; Functions exported by the stage 2 prelude, shared between stage 2 translation units.
extern addr32_to_addr16
extern read_sector
extern panic_simple
%endif

@ -1,16 +1,40 @@
include_flags := "-Iinclude"
common_flags := "-werror " + include_flags
ld32 := "ld -m elf_i386"
run:
qemu-system-x86_64 \
-monitor stdio \
-no-reboot \
-bios seabios/out/bios.bin \
-m 512M \
-drive format=raw,file=disk.bin
run_serial:
qemu-system-x86_64 \
-nographic \
-no-reboot \
-m 512M \
-drive format=raw,file=disk.bin
run_reset:
qemu-system-x86_64 \
-monitor stdio \
-no-reboot \
-m 512M \
-drive if=pflash,file=reset.bin,format=raw
build_reset:
nasm -f bin -werror -o reset.bin reset.s
#-bios seabios/out/bios.bin
build:
nasm -f bin -Iinclude -o boot0.bin boot0.s
cd boot1; cargo build --release
# nasm -f bin -Iinclude -o boot1.bin boot1.s
cargo xtask build
mkimg:
dd if=/dev/zero of=disk.bin bs=440 count=1 conv=notrunc
dd if=build/stage_1/s1.bin of=disk.bin conv=notrunc
dd if=build/s23.bin of=disk.bin bs=512 seek=70 conv=notrunc
zero_disk:
dd if=/dev/zero of=disk.bin bs=512 count=1000
@ -31,10 +55,3 @@ partition_disk:
parted --script disk.bin mkpart stage2 70s 900s
parted --script disk.bin type 6 fdffea69-3651-442f-a11d-88a09bf372dd
write_stage1:
dd if=/dev/zero of=disk.bin bs=440 count=1 conv=notrunc
dd if=boot0.bin of=disk.bin conv=notrunc
write_stage2:
# dd if=boot1.bin of=disk.bin bs=512 seek=70 conv=notrunc
dd if=boot1/target/target_protected/release/boot1 of=disk.bin bs=512 seek=70 conv=notrunc

@ -0,0 +1,14 @@
; flash seems to get mapped to ff000
; reset vector is f000:fff0 = ffff0
; so there's ff0 = 4080 bytes of something before the reset vector. what is it?
[bits 16]
[org 0xff000]
times (0xff0 - ($ - $$)) db 0x00
mov eax, 0xcafeface
hlt
times (0x1000 - ($ - $$)) db 0xf4

@ -47,6 +47,7 @@
- <https://wiki.osdev.org/I/O_Ports>
## Partitioning
- <https://uefi.org/specs/UEFI/2.10/05_GUID_Partition_Table_Format.html>
- <https://wiki.osdev.org/GPT>
## Memory protection

@ -0,0 +1,3 @@
[toolchain]
channel = "nightly"

@ -1,28 +1,10 @@
; MEMORY LAYOUT
; R = reserved, U = usable
; --------------------------------------------------------------------
; R | 0x000000 - 0x000400: real-mode interrupt vector table
; R | 0x000400 - 0x000500: bios data area
; U | 0x000500 - 0x004000: main stack
; U | 0x004000 - 0x006a00: globals
; U | 0x006a00 - 0x007c00: memory map
; U | 0x007c00 - 0x007e00: boot sector
; U | 0x007e00 - 0x080000: conventional usable memory
; R | 0x080000 - 0x0a0000: extended bios data area (maximum possible size)
; R | 0x0a0000 - 0x0c0000: video memory
; R | 0x0c0000 - 0x0c8000: video bios
; R | 0x0c8000 - 0x0f0000: bios expansions
; R | 0x0f0000 - 0x100000: motherboard bios
%include "layout.s"
%include "globals.s"
%include "defines.s"
; BIOS puts our boot sector at 0000:7c00
[org BOOT0_LOADPOINT]
; We're (probably) in real mode
[org S1_ADDR]
[bits 16]
main:
; Disable interrupts
cli
xor ax, ax
@ -33,7 +15,7 @@ main:
; Put the stack base at 0x4000.
; Stack grows high->low, so we'll grow away from our globals and program text.
mov ss, ax
mov bp, STACK_BASE
mov bp, REAL_STACK_BASE
mov sp, bp
; Segment for VGA (0xb800 * 16 = 0xb8000)
@ -217,8 +199,9 @@ main:
ja panic
.stage2_end_lba_ok:
mov bx, BOOT1_LOADPOINT
mov bx, S2_LOAD_ADDR
call read_lba
add bx, S2_TEXT_OFFSET
jmp bx
; Load a single boot disk sector. Panic on failure.

@ -0,0 +1,112 @@
[bits 16]
%include "fn.s"
%include "ps2.s"
; mov_out reg, port, value
; Loads %3 (value) into register %1 and writes it to I/O port %2. Note %1 is clobbered, so
; callers must not keep a live value in it across this macro.
%macro mov_out 3
    mov %1, %3
    out %2, %1
%endmacro
; Check whether the A20 line is enabled. Writes to the boot sector identifier.
; Arguments: none
; Return:
; - ax: 0 if A20 disabled, nonzero if A20 enabled
; Clobber: none
test_a20:
    push bp
    mov bp, sp
    push gs
    ; Restore the boot sector identifier in case it was overwritten by anything.
    mov word [0x7dfe], 0xaa55
    ; gs = 0xffff so gs:[0x7e0e] = 0xffff0 + 0x7e0e = 0x107dfe, exactly 1 MiB above the boot
    ; sector identifier at 0x7dfe. With A20 disabled, that access wraps back to 0x7dfe.
    mov ax, 0xffff
    mov gs, ax
    xor ax, ax
    ; If the word at 0x107dfe (1 MiB after the boot sector identifier) is different to the boot
    ; sector identifier, then A20 must be enabled.
    cmp word gs:[0x7e0e], 0xaa55
    setne al
    jne .return
    ; Even if A20 was enabled, the two words may have been equal by chance, so we temporarily swap
    ; the boot sector identifier bytes and test again.
    ror word [0x7dfe], 8
    cmp word gs:[0x7e0e], 0x55aa
    setne al
    ; Restore the identifier's original byte order before returning.
    ror word [0x7dfe], 8
    jmp .return
.return:
    pop gs
    pop bp
    ret
global test_a20
; Try to enable A20 using the Intel 8042 PS/2 keyboard controller.
; Note this does not verify that A20 actually became enabled; callers re-run test_a20 afterwards.
; Arguments: none
; Return: none
; Clobber: ax, cx, dx
enable_a20_intel_8042:
    ; Temporarily disable the keyboard so it can't interfere while we reprogram the controller.
    call intel_8042_wait_write
    mov_out al, INTEL_8042_OUT_CMD, INTEL_8042_CMD_PS2_1_DISABLE
    ; Read the controller output port.
    call intel_8042_wait_write
    mov_out al, INTEL_8042_OUT_CMD, INTEL_8042_CMD_CONTROLLER_OUT_PORT_READ
    call intel_8042_wait_read
    in al, INTEL_8042_IO_DATA
    ; The second bit is "A20 enabled", so set it.
    ; Keep the modified value in cl: al is clobbered by the mov_out below.
    mov cl, al
    or cl, 2
    ; Write the modified byte back to the controller output port.
    call intel_8042_wait_write
    mov_out al, INTEL_8042_OUT_CMD, INTEL_8042_CMD_CONTROLLER_OUT_PORT_WRITE
    call intel_8042_wait_write
    mov_out al, INTEL_8042_IO_DATA, cl
    ; Re-enable the keyboard.
    call intel_8042_wait_write
    mov_out al, INTEL_8042_OUT_CMD, INTEL_8042_CMD_PS2_1_ENABLE
    ; Wait for writes to finish.
    call intel_8042_wait_write
    ret
global enable_a20_intel_8042
; Wait for the Intel 8042 input buffer to become empty, so we can write.
; Busy-waits with no timeout; assumes a responsive controller.
; Arguments: none
; Return: none
; Clobber: al
intel_8042_wait_write:
.loop:
    ; Read the 8042 status register.
    in al, INTEL_8042_IN_STATUS
    ; Input buffer status flag set means the input buffer is full, so loop in this case.
    test al, INTEL_8042_STATUS_MASK_IBUF
    jnz .loop
    ret
; Wait for the Intel 8042 output buffer to become filled, so we can read.
; Busy-waits with no timeout; assumes a responsive controller.
; Arguments: none
; Return: none
; Clobber: al
intel_8042_wait_read:
.loop:
    ; Read the 8042 status register.
    in al, INTEL_8042_IN_STATUS
    ; Output buffer status flag unset means output buffer is empty, so loop in this case.
    test al, INTEL_8042_STATUS_MASK_OBUF
    jz .loop
    ret

@ -0,0 +1,43 @@
OUTPUT_FORMAT("binary")
SECTIONS {
    /* The stage 2 blob is linked to run at 0x8200, where stage 1 loads it. The location counter
     * assignment is inside SECTIONS because GNU ld only permits assignments to '.' within a
     * SECTIONS command.
     */
    . = 0x8200;
    /* Prelude must come first so it's in the single sector loaded by stage 1. */
    .prelude : {
        *(.prelude)
    }
    .text : {
        *(.text)
        *(.text.*)
    }
    .data : {
        *(.data)
        *(.data.*)
    }
    .bss : {
        *(.bss)
        *(.bss.*)
    }
    .rodata : {
        *(.rodata)
        *(.rodata.*)
    }
    .magic : {
        /* Magic bytes stage 2 uses to make sure it's loaded the subsequent sectors correctly. */
        LONG(0x544e4150)
    }
    s2_magic = ADDR(.magic);
    /* Define a symbol for the total length of the binary, so the prelude knows how many blocks to
     * load from disk.
     */
    s2_bin_len = . - 0x8200;
    s2_bin_sectors = (s2_bin_len + 511) / 512;
}

@ -0,0 +1,69 @@
[bits 16]
%include "fn.s"
%include "layout.s"
%include "globals.s"
%include "s2_fns.s"
extern s2_data.s3_bin_offset_sectors
extern s2_data.s3_bin_len_sectors
; Load the stage 3 binary from disk to S3_LOAD_ADDR, one sector at a time.
; The stage 3 extent on disk is computed from the stage 2 start LBA (globals) plus the
; offset/length words the build tool patches into s2_data.
; Arguments: none (reads stage 2 / partition extents from the globals section)
; Return:
; - cf: unset on success, set on failure
; Clobber: ax, cx, dx (via read_sector)
load_s3:
    fnstart
    push ebx
    ; Calculate the stage 3 start LBA.
    mov ax, [REAL_GLOBALS + STAGE_2_START_LBA]
    add ax, [s2_data.s3_bin_offset_sectors]
    jc .fail
    ; Stage 3 should not overlap with stage 2.
    cmp word [REAL_GLOBALS + STAGE_2_END_LBA], ax
    jae .fail
    xor ebx, ebx
    ; There must be at least one sector to load.
    mov bx, [s2_data.s3_bin_len_sectors]
    or bx, bx
    jz .fail
    ; Calculate the end LBA (inclusive).
    dec bx
    add bx, ax
    jc .fail
    ; Check stage 3 is entirely inside the partition.
    ; (ebx's upper 16 bits are still zero, so the 32-bit compare is safe.)
    cmp dword [REAL_GLOBALS + LOADER_PART_END_LBA], ebx
    jb .fail
    push ax ; Current LBA, at [bp - 0x06]
    push bx ; s3 end LBA, at [bp - 0x08]
    mov ebx, S3_LOAD_ADDR ; Current load address
.load_loop:
    mov ax, [bp - 0x06] ; Load current LBA
    cmp word [bp - 0x08], ax ; Compare to s3 end LBA
    jb .load_done
    mov ecx, ebx
    call read_sector
    jc .fail
    add ebx, 512
    inc word [bp - 0x06]
    jmp .load_loop
.load_done:
    add sp, 4
    pop ebx
    clc
    fnret
.fail:
    ; Fixed: .fail can be reached from inside the load loop (read_sector failure) with the two
    ; loop words still pushed; a bare `pop ebx` there restored garbage into ebx and then fnret
    ; popped half of the saved ebx as bp / the return address. Rewind sp to just above the saved
    ; ebx so the cleanup is correct from any stack depth.
    lea sp, [bp - 4]
    pop ebx
    stc
    fnret
global load_s3

@ -0,0 +1,176 @@
[bits 16]
%include "fn.s"
%include "layout.s"
%include "s2_fns.s"
extern test_a20
extern enable_a20_intel_8042
extern load_s3
; Stage 2 entry proper: ensure A20, load stage 3, then switch to 32-bit protected mode and jump
; to stage 3.
s2_main:
    ; Check whether A20 is already enabled (many BIOSes/emulators enable it for us).
    call test_a20
    test al, al
    jnz .a20_enabled
    ; Try to enable A20 using the Intel 8042 PS/2 keyboard controller.
    call enable_a20_intel_8042
    call test_a20
    test al, al
    jnz .a20_enabled
    ; TODO: try other methods first before we panic:
    ; - [ ] BIOS interrupt
    ; - [ ] Fast A20 enable
    jmp panic_simple
.a20_enabled:
    call load_s3
    jc panic_simple
    ; Set video mode 3 (80x25 16-colour text), which also clears the screen.
    mov ax, 0x0003
    int 0x10
    ; Disable cursor
    mov ax, 0x0100
    mov cx, 0x3f00
    int 0x10
    ; Copy the GDT to its fixed address, so it survives once the stage 2 region is reused.
    ; NOTE(review): rep movsb copies ds:si -> es:di and assumes the direction flag is clear and
    ; es covers the destination -- confirm both hold on every path into s2_main.
    mov cx, GDT_FLAT_LEN
    mov si, gdt_flat
    mov di, GDT_FLAT_ADDR
    rep movsb
    ; Ensure interrupts are definitely disabled.
    cli
    ; Load our flat-address-space GDT.
    lgdt [gdt_flat_slice]
    ; Set the protected-mode bit in cr0.
    mov eax, cr0
    or al, 0x01
    mov cr0, eax
    ; Long jump to set the code segment to gdt_flat.segment_code, and to clear the instruction
    ; pipeline.
    jmp GDT_FLAT_IDX_CODE_32:.protected_mode_32
[bits 32]
.protected_mode_32:
    ; Set the data segments to gdt_flat.segment_data.
    mov eax, GDT_FLAT_IDX_DATA
    mov ds, eax
    mov es, eax
    mov fs, eax
    mov gs, eax
    mov ss, eax
    ; Reset the stack.
    ; TODO: put the 32-bit stack somewhere else.
    mov ebp, REAL_STACK_BASE
    mov esp, ebp
    ; Hand control to the stage 3 binary loaded by load_s3 (its trampoline is at the start).
    jmp S3_LOAD_ADDR
.halt:
    hlt
    jmp .halt
global s2_main
section .s3_data
; GDT pseudo-descriptor for lgdt: 16-bit limit followed by 32-bit base.
; Fixed: the limit field is the offset of the LAST VALID BYTE of the table, i.e. size - 1.
; Using the full size made the CPU accept a selector one entry past the end of the table.
gdt_flat_slice:
    dw GDT_FLAT_LEN - 1
    dd GDT_FLAT_ADDR
; Segment descriptor layout
; | Range (bits) | Field         |
; |--------------|---------------|
; | 0-16         | limit         |
; | 16-32        | base          |
; | 32-40        | base cont.    |
; | 40-48        | access        |
; | 48-52        | limit cont.   |
; | 52-56        | flags         |
; | 56-64        | base cont.    |
;
; Flags
; - 0: reserved
; - 1: long-mode code segment
; - 2: size
;   - unset: 16-bit
;   - set: 32-bit
; - 3: granularity
;   - unset: limit is measured in bytes
;   - set: limit is measured in 4KiB pages
;
; Access
; - 0: accessed
;   - unset: CPU will set it when the segment is accessed
; - 1: readable / writable
;   - data segments: is segment writable (data segments are always readable)
;   - code segments: is segment readable (code segments are never writable)
; - 2: direction / conforming
;   - data segments: whether segment grows down
;   - code segments: whether this can be executed from a lower-privilege ring
; - 3: executable
;   - unset: this is a data segment
;   - set: this is a code segment
; - 4: descriptor type
;   - unset: this is a task state segment
;   - set: this is a data or code segment
; - 5-6: privilege level (ring number)
; - 7: present (must be set)
;
align 8
gdt_flat:
    ; First GDT entry must be 0.
    dq 0
; 32-bit code segment.
; Pages 0x00000 - 0xfffff (limit 0xfffff with 4 KiB granularity, i.e. the full 4 GiB address
; space). Needs to contain 0x10000, where the stage 3 text is loaded.
.segment_code_32:
    db 0xff, 0xff, \
       0x00, 0x00, \
       0x00, \
       10011011b, \
       11001111b, \
       0x00
; 16-bit code segment, to use if we want to switch back to real mode.
; Bytes 0x0000 - 0xffff.
.segment_code_16:
    db 0xff, 0xff, \
       0x00, 0x00, \
       0x00, \
       10011011b, \
       00000000b, \
       0x00
; Data segment.
; Pages 0x000000 - 0x0fffff, which covers the entire 32-bit address space (start of 0xfffff-th page
; is 0xfffff * 4096 = 0xfffff000, end of page exclusive is 0xfffff000 + 4096 = 0x100000000).
.segment_data:
    db 0xff, 0xff, \
       0x00, 0x00, \
       0x00, \
       10010011b, \
       11001111b, \
       0x00
GDT_FLAT_LEN equ ($ - gdt_flat)
GDT_FLAT_IDX_CODE_32 equ (gdt_flat.segment_code_32 - gdt_flat)
global GDT_FLAT_IDX_CODE_32
GDT_FLAT_IDX_CODE_16 equ (gdt_flat.segment_code_16 - gdt_flat)
global GDT_FLAT_IDX_CODE_16
GDT_FLAT_IDX_DATA equ (gdt_flat.segment_data - gdt_flat)
global GDT_FLAT_IDX_DATA

@ -0,0 +1,241 @@
[bits 16]
%include "fn.s"
%include "layout.s"
%include "globals.s"
extern s2_bin_len
extern s2_bin_sectors
extern s2_magic
extern s2_main
; copy_stack_var_to_globals reg, offset
; Copies a stage 1 stack variable at [bp - offset] to the globals section at the same offset,
; using reg as scratch (reg is clobbered).
%macro copy_stack_var_to_globals 2
    mov %1, [bp - %2]
    mov [REAL_GLOBALS + %2], %1
%endmacro
section .prelude
; Patchable data words at the very start of the stage 2 blob; the build tool fills these in.
s2_data:
.s3_bin_offset_sectors:
    dw 0
.s3_bin_len_sectors:
    dw 0
.padding:
    times (S2_DATA_LEN - 4) db 0xf4
global s2_data
global s2_data.s3_bin_offset_sectors
global s2_data.s3_bin_len_sectors
%if ($ - $$) != S2_DATA_LEN
%error "incorrect prelude data size"
%endif
; Load the rest of stage 2 into memory.
prelude:
    ; Now that we're not doing instruction byte golf like we were in stage 1, we can afford to move
    ; the various stage 1 stack variables to the globals section.
    copy_stack_var_to_globals ax, BOOT_DRIVE
    copy_stack_var_to_globals ax, SECTORS_PER_TRACK
    copy_stack_var_to_globals ax, N_HEADS
    copy_stack_var_to_globals ax, GPT_ENTRIES_START_LBA
    copy_stack_var_to_globals ax, GPT_N_ENTRIES_16
    copy_stack_var_to_globals ax, GPT_SECTOR_STRIDE
    copy_stack_var_to_globals ax, GPT_BYTE_STRIDE
    copy_stack_var_to_globals ax, GPT_ENTRIES_PER_SECTOR
    copy_stack_var_to_globals ax, GPT_CURRENT_ENTRY_IDX
    copy_stack_var_to_globals ax, GPT_SECTOR_ENTRY_IDX
    copy_stack_var_to_globals ax, GPT_SECTORS_LOADED
    copy_stack_var_to_globals ax, GPT_CURRENT_LBA
    copy_stack_var_to_globals ax, STAGE_2_GPT_ENTRY_ADDR
    ; Reset the stack, now we've got everything we need from it.
    mov sp, bp
    mov si, [REAL_GLOBALS + STAGE_2_GPT_ENTRY_ADDR]
    mov eax, [si + 0x20] ; Partition / s2 start LBA lower
    mov ebx, [si + 0x24] ; Partition / s2 start LBA upper
    mov ecx, [si + 0x28] ; Partition end LBA lower
    ; Fixed: was [si + 0x32], which read from the middle of the GPT entry's attributes field.
    ; The upper dword of the 8-byte ending LBA (offset 0x28) lives at 0x2c.
    mov edx, [si + 0x2c] ; Partition end LBA upper
    ; Panic if the partition / boot1 starting LBA overflows 16 bits.
    or ebx, ebx
    jnz panic_simple
    ror eax, 16
    or ax, ax
    jnz panic_simple
    ror eax, 16
    ; There must be at least one sector to load.
    mov bx, s2_bin_sectors
    or bx, bx
    jz panic_simple
    ; Calculate the s2 end LBA (inclusive) and panic if it overflows 16 bits.
    ; n.b. ebx is zero before this so both bx and ebx can be used as the s2 end LBA.
    dec bx
    add bx, ax
    jc panic_simple
    ; Panic if the s2 end LBA is after the partition end LBA.
    or edx, edx
    jz .part_end_32
    ; The upper 32 bits of the partition end LBA are nonzero, so it is certainly greater than our
    ; 16-bit s2 end LBA; saturate the stored 32-bit value so later bounds checks (load_s3) stay
    ; conservative.
    mov ecx, 0xffffffff
    jmp .end_lba_ok
.part_end_32:
    ; Compare the s2 end LBA to the lower 32 bits of the partition end LBA.
    cmp ebx, ecx
    ja panic_simple
.end_lba_ok:
    ; Save partition / s2 extents for later.
    ; Fixed: these stores used to sit before the .end_lba_ok label, so they were skipped entirely
    ; whenever the partition end LBA overflowed 32 bits, leaving the globals uninitialised.
    mov [REAL_GLOBALS + LOADER_PART_END_LBA], ecx
    mov [REAL_GLOBALS + STAGE_2_START_LBA], ax
    mov [REAL_GLOBALS + STAGE_2_END_LBA], bx
    ; The first sector has already been loaded (we're running it right now!) so increment the
    ; current LBA.
    inc ax
    push ax ; Current LBA, at [bp - 0x02]
    push bx ; s2 end LBA, at [bp - 0x04]
    mov ebx, S2_LOAD_ADDR + 512 ; Current sector load address
.load_loop:
    mov ax, [bp - 0x02] ; Load current LBA
    cmp word [bp - 0x04], ax ; Compare to s2 end LBA
    jb .load_done
    mov ecx, ebx
    call read_sector
    jc panic_simple
    add ebx, 512
    inc word [bp - 0x02]
    jmp .load_loop
.load_done:
    ; Check the magic bytes at the end of s2.
    push es
    mov ebx, s2_magic
    call addr32_to_addr16
    ; Fixed: the conversion's failure flag was previously ignored, so a bogus magic address would
    ; have been dereferenced with a stale es.
    jc panic_simple
    cmp dword es:[bx], S2_MAGIC
    pop es
    jne panic_simple
    jmp s2_main
; Converts a 32-bit address to a 16-bit segment and offset (real-mode segment:offset pair).
; Arguments:
; - ebx: 32-bit address
; Return:
; - es: 16-bit address segment (unchanged on failure)
; - ebx: 16-bit address offset
; - cf: unset on success, set on failure
; Clobber: none
addr32_to_addr16:
    fnstart
    push es
    push eax
    mov eax, ebx
    ; Divide addr by 16 and saturate to 16 bits to get the segment.
    shr eax, 4
    ; Rotate the upper half into ax to test whether addr >> 4 overflows 16 bits.
    ror eax, 16
    or ax, ax
    jz .segment_ok
    ; Saturate: after the ror below this becomes segment 0xffff.
    mov eax, 0xffff0000
.segment_ok:
    ror eax, 16
    mov es, ax
    ; Calculate offset = addr - (16 * segment), failing if the offset doesn't fit in 16 bits.
    shl eax, 4
    sub ebx, eax
    ror ebx, 16
    or bx, bx
    jnz .fail
    ror ebx, 16
    pop eax
    ; Hand-rolled epilogue on the success path: the saved es is discarded rather than popped,
    ; because es now carries the return value.
    add sp, 2 ; Discard the original es from the stack
    pop bp
    clc
    ret
.fail:
    pop eax
    pop es
    stc
    fnret
global addr32_to_addr16
; Reads a single sector at the given LBA into memory, via BIOS int 0x13 AH=02h (CHS read).
; Arguments:
; - ax: start LBA
; - ecx: address to read sector to
; Return:
; - cf: unset on success, set on failure
; Clobber: eax, ecx, edx
read_sector:
    ; CHS conversion:
    ; sector - 1 = LBA % sectors_per_track
    ; temp = LBA / sectors_per_track
    ; head = temp % n_heads
    ; cylinder = temp / n_heads
    fnstart
    push es
    push ebx
    ; Convert the 32-bit buffer address into es:bx for the BIOS call.
    mov ebx, ecx
    call addr32_to_addr16
    jc .return
    ; Calculate sector and temp
    xor dx, dx
    ; Divide by sectors per track. dx = mod (sector - 1), ax = div (temp)
    div word [REAL_GLOBALS + SECTORS_PER_TRACK]
    ; Put the sector into cx (the bios call will use cl)
    mov cx, dx
    inc cx
    ; Calculate head and cylinder
    xor dx, dx
    ; Divide by number of heads. dx = mod (head), ax = div (cylinder)
    div word [REAL_GLOBALS + N_HEADS]
    mov dh, dl
    mov ch, al
    ; Fixed: int 0x13 AH=02h takes cylinder bits 8-9 in cl bits 6-7; previously they were never
    ; packed in, silently truncating any cylinder above 255.
    shr ax, 2
    and al, 0xc0
    or cl, al
    mov dl, byte [REAL_GLOBALS + BOOT_DRIVE]
    mov ah, 0x02
    mov al, 1
    ; Read sector; the BIOS reports failure via cf, which we return unchanged.
    int 0x13
.return:
    pop ebx
    pop es
    fnret
global read_sector
; Minimal panic: clear the screen, print an indicator, and halt forever. Never returns.
panic_simple:
    ; Set video mode 3 (80x25 text), which clears the screen.
    mov ax, 0x0003
    int 0x10
    ; Write '!' (0x21) with attribute 0x4f to the top-left character cell.
    ; NOTE(review): assumes fs still addresses the VGA text buffer segment -- confirm fs is set on
    ; every path that can reach panic_simple.
    mov word fs:[0x0000], 0x4f21
.halt:
    hlt
    jmp .halt
global panic_simple
%if ($ - $$) > 512
%error "stage 2 exceeded sector size"
%endif

@ -0,0 +1,7 @@
[build]
target = "protected32.json"
[unstable]
build-std = [ "core", "compiler_builtins" ]
build-std-features = [ "compiler-builtins-mem" ]

@ -0,0 +1 @@
/target

7
stage_3/Cargo.lock generated

@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "stage_3"
version = "0.1.0"

@ -0,0 +1,6 @@
[package]
name = "stage_3"
version = "0.1.0"
edition = "2024"
[dependencies]

@ -0,0 +1,6 @@
/// Cargo build script for stage 3: re-link whenever the linker script changes, and pass the
/// script to the linker so the binary is laid out for its fixed load address.
fn main() {
    let linker_script = "link.ld";
    println!("cargo::rerun-if-changed={}", linker_script);
    println!("cargo::rustc-link-arg=-T{}", linker_script);
}

@ -0,0 +1,30 @@
OUTPUT_FORMAT("binary")
SECTIONS {
    /* Stage 3 is linked to run at 0x10000, where stage 2 loads it. The location counter
     * assignment is inside SECTIONS because assignments to '.' are only valid within a SECTIONS
     * command.
     */
    . = 0x10000;
    /* The trampoline must come first: stage 2 jumps to the very start of the binary. */
    .trampoline : {
        KEEP(*(.trampoline))
        KEEP(*(.trampoline.*))
    }
    .text : {
        *(.text)
        *(.text.*)
    }
    .data : {
        *(.data)
        *(.data.*)
    }
    .bss : {
        *(.bss)
        *(.bss.*)
    }
    .rodata : {
        *(.rodata)
        *(.rodata.*)
    }
}

@ -0,0 +1,17 @@
{
"arch": "x86",
"os": "none",
"llvm-target": "i686-unknown-none",
"data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
"target-endian": "little",
"target-pointer-width": "32",
"target-c-int-width": 32,
"executables": true,
"linker-flavor": "ld.lld",
"linker": "rust-lld",
"panic-strategy": "abort",
"disable-redzone": true,
"features": "-mmx,-sse,+soft-float",
"rustc-abi": "x86-softfloat"
}

@ -0,0 +1,8 @@
section .text
[bits 32]
; TODO: reference sysv abi spec to see what we can mangle and what we must preserve
; Placeholder for a 32-bit disk-read routine; currently a do-nothing stub.
load_sector:
    ret

@ -0,0 +1,7 @@
.code32
/* Entry shim kept in its own section so the linker script can pin it at the very start of the
 * stage 3 binary: stage 2 jumps to the load address, which lands here, and we tail-jump into the
 * Rust entry point. KEEP() in the linker script stops it being garbage-collected.
 */
.section ".trampoline", "ax", @progbits
trampoline:
    jmp _start
.globl trampoline

@ -0,0 +1,124 @@
use core::{fmt, hint};
use crate::{ioport, spin::Spinlock};
const COM1_PORT: u16 = 0x3f8;
pub static COM1: Spinlock<Option<Com>> = Spinlock::new(None);
/// Print formatted text to the COM1 serial port, doing nothing if COM1 was never initialised.
/// Write errors are deliberately discarded (`.ok()`): serial output is best-effort.
#[macro_export]
macro_rules! com1_print {
    ($($args:tt)*) => ({
        crate::com::with_com1_if_available(|com1| {
            ::core::write!(com1, $($args)*).ok();
        });
    })
}
/// Like [`com1_print`], but appends a newline.
#[macro_export]
macro_rules! com1_println {
    ($($args:tt)*) => ({
        crate::com::with_com1_if_available(|com1| {
            ::core::writeln!(com1, $($args)*).ok();
        });
    })
}
pub struct Com {
port: u16,
}
pub struct ComError;
/// Run `f` against the COM1 writer, or do nothing if the port was never (successfully)
/// initialised. Takes the COM1 spinlock for the duration of the call.
pub fn with_com1_if_available<F>(f: F)
where
    F: FnOnce(&mut Com),
{
    match COM1.lock().as_mut() {
        Some(com) => f(com),
        None => {}
    }
}
/// Initialise COM1 and install it into the global slot, returning whether it succeeded.
///
/// # Safety
/// Performs raw port I/O against the COM1 UART; only sound when real (or emulated) UART hardware
/// is present at the standard COM1 ports.
pub unsafe fn try_com1_init() -> bool {
    let com1 = match unsafe { Com::init(COM1_PORT) } {
        Ok(com1) => com1,
        Err(_) => return false,
    };
    *COM1.lock() = Some(com1);
    true
}
impl Com {
    /// Program the UART at `port` and verify it is present with a loopback echo test.
    /// Returns `Err(ComError)` when the echoed byte doesn't come back, indicating the UART is
    /// absent or faulty.
    ///
    /// # Safety
    /// `port` must be the base I/O port of a UART; the raw `in`/`out` accesses here are only
    /// sound against actual (or emulated) UART hardware.
    pub unsafe fn init(port: u16) -> Result<Self, ComError> {
        // Arbitrary test byte for the loopback check.
        const ECHO_BYTE: u8 = 0x5a;
        let echo = unsafe {
            // Unset DLAB
            ioport::outb(port + 3, 0x00);
            // Disable interrupts
            ioport::outb(port + 1, 0x00);
            // Set DLAB
            ioport::outb(port + 3, 0x80);
            // Set baud rate divisor to 00 01 (115200)
            ioport::outb(port + 0, 0x01);
            ioport::outb(port + 1, 0x00);
            // 8 bits, no parity, one stop bit
            ioport::outb(port + 3, 0x03);
            // Enable FIFO, clear them, with 14-byte threshold
            ioport::outb(port + 2, 0xc7);
            // IRQs enabled, RTS/DSR set
            ioport::outb(port + 4, 0x0b);
            // Set loopback mode
            ioport::outb(port + 4, 0x1e);
            // In loopback mode a working UART echoes a written byte straight back.
            ioport::outb(port + 0, ECHO_BYTE);
            ioport::inb(port + 0)
        };
        if echo != ECHO_BYTE {
            return Err(ComError);
        }
        unsafe {
            // Leave loopback mode, returning to normal operation.
            ioport::outb(port + 4, 0x0f);
        }
        Ok(Self { port })
    }
    /// Whether at least one received byte is waiting (line status register bit 0).
    pub fn poll_has_data(&self) -> bool {
        unsafe { ioport::inb(self.port + 5) & 0x01 != 0 }
    }
    /// Blocking read of a single byte, spinning until one arrives.
    pub fn read_poll(&mut self) -> u8 {
        while !self.poll_has_data() {
            hint::spin_loop();
        }
        unsafe { ioport::inb(self.port) }
    }
    /// Whether the transmit holding register is empty (line status register bit 5).
    pub fn poll_has_space(&self) -> bool {
        unsafe { ioport::inb(self.port + 5) & 0x20 != 0 }
    }
    /// Blocking write of a single byte, spinning until the transmitter has space.
    pub fn write_poll(&mut self, x: u8) {
        while !self.poll_has_space() {
            hint::spin_loop();
        }
        unsafe {
            ioport::outb(self.port, x);
        }
    }
}
impl fmt::Write for Com {
    /// Blocking write of a string over the serial line, one byte at a time.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        s.bytes().for_each(|b| self.write_poll(b));
        Ok(())
    }
}

@ -0,0 +1,23 @@
use core::arch::asm;
/// Read a byte from the given x86 I/O port (`in al, dx`).
///
/// # Safety
/// Port reads can have arbitrary hardware side effects; the caller must ensure reading `port` is
/// valid for the device mapped there. Requires I/O privilege (we run in ring 0).
pub unsafe fn inb(port: u16) -> u8 {
    let x: u8;
    unsafe {
        asm!(
            "in al, dx",
            in("dx") port,
            lateout("al") x
        );
    }
    x
}
/// Write a byte to the given x86 I/O port (`out dx, al`).
///
/// # Safety
/// Port writes can have arbitrary hardware side effects; the caller must ensure writing `x` to
/// `port` is valid for the device mapped there. Requires I/O privilege (we run in ring 0).
pub unsafe fn outb(port: u16, x: u8) {
    unsafe {
        asm!(
            "out dx, al",
            in("al") x,
            in("dx") port,
        );
    }
}

@ -0,0 +1,59 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(crate::test_runner)]
mod com;
mod ioport;
mod spin;
mod vga;
use core::{
arch::{asm, global_asm},
fmt::{self, Write},
panic::PanicInfo,
ptr, slice,
};
global_asm!(include_str!("asm/trampoline.s"));
/// Panic handler: report the panic location and message on the VGA console, then halt forever.
/// There is nothing to unwind to in this environment.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    vga_println!("panic!");
    if let Some(location) = info.location() {
        vga_println!("{}", location);
    }
    vga_println!("{}", info.message());
    hlt()
}
/// Rust entry point, reached via the assembly trampoline at the start of the binary.
/// Brings up the VGA and (best-effort) serial consoles, then dumps the four GDT entries.
#[unsafe(no_mangle)]
pub extern "C" fn _start() -> ! {
    vga::vga_init();
    unsafe {
        // Best-effort: a failed serial probe just leaves the COM1 slot empty.
        com::try_com1_init();
    }
    // 0xe000 is where the earlier boot stage placed the GDT -- NOTE(review): keep in sync with
    // stage 2's GDT_FLAT_ADDR.
    let gdt_ptr = ptr::with_exposed_provenance::<u64>(0xe000);
    // SAFETY-relevant: assumes 4 valid 8-byte descriptors are resident at 0xe000.
    let gdt_slice = unsafe { slice::from_raw_parts(gdt_ptr, 4) };
    vga_println!("hello from rust :)");
    vga_println!("{:016x}", gdt_slice[0]);
    vga_println!("{:016x}", gdt_slice[1]);
    vga_println!("{:016x}", gdt_slice[2]);
    vga_println!("{:016x}", gdt_slice[3]);
    com1_println!("hello serial!");
    hlt()
}
/// Halt the CPU forever. The loop is needed because `hlt` resumes if execution is woken (e.g. by
/// an NMI or SMI).
#[inline]
fn hlt() -> ! {
    loop {
        unsafe {
            asm!("hlt");
        }
    }
}

@ -0,0 +1,104 @@
use core::{
cell::UnsafeCell,
hint,
ops::{Deref, DerefMut},
sync::atomic::{AtomicBool, Ordering},
};
/// A busy-waiting mutual-exclusion lock guarding a value of type `T`, built on a single atomic
/// flag. Suitable for this no-std environment where blocking primitives don't exist.
pub struct Spinlock<T> {
    data: UnsafeCell<T>,
    locked: AtomicBool,
}

impl<T> Spinlock<T> {
    /// Create a new, unlocked spinlock wrapping `data`.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
            locked: AtomicBool::new(false),
        }
    }

    /// Acquire the lock, spinning until it becomes free, and return a guard that releases it on
    /// drop.
    ///
    /// The successful flag flip uses acquire ordering so nothing from the critical section can be
    /// reordered before the point we observed the lock free; the failure path only spins, so
    /// relaxed suffices there. `compare_exchange_weak` may fail spuriously, which is fine inside
    /// a retry loop.
    pub fn lock(&self) -> SpinlockGuard<'_, T> {
        loop {
            let claimed = self
                .locked
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed);
            if claimed.is_ok() {
                break SpinlockGuard { lock: self };
            }
            hint::spin_loop();
        }
    }

    /// # Safety
    /// There must be no "active" `SpinlockGuard`s for this lock, i.e. no guard which can still be
    /// used to obtain a reference to the spinlock-protected data.
    unsafe fn unlock(&self) {
        // Release ordering: everything done inside the critical section becomes visible before
        // the `false` store does.
        self.locked.store(false, Ordering::Release);
    }

    /// # Safety
    /// No mutable references to the spinlock-protected data may exist.
    unsafe fn get<'s, 'a>(&'s self) -> &'a T
    where
        's: 'a,
    {
        unsafe { &*self.data.get() }
    }

    /// # Safety
    /// No references to the spinlock-protected data may exist.
    unsafe fn get_mut<'s, 'a>(&'s self) -> &'a mut T
    where
        's: 'a,
    {
        unsafe { &mut *self.data.get() }
    }
}

// SAFETY: the lock serialises all access to the inner `T`, so sharing the lock across threads is
// sound whenever moving the `T` between threads would be (`T: Send`).
unsafe impl<T> Sync for Spinlock<T> where T: Send {}

/// RAII guard providing access to the locked data; releases the lock when dropped.
pub struct SpinlockGuard<'a, T> {
    lock: &'a Spinlock<T>,
}

impl<T> Deref for SpinlockGuard<'_, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: while this guard exists, `locked` stays `true`, so we hold exclusive access to
        // `data` and no mutable references to it can exist elsewhere.
        unsafe { self.lock.get() }
    }
}

impl<T> DerefMut for SpinlockGuard<'_, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: while this guard exists, `locked` stays `true`, so we hold exclusive access to
        // `data` and no other references to it can exist elsewhere.
        unsafe { self.lock.get_mut() }
    }
}

impl<T> Drop for SpinlockGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: only one guard can exist per lock at a time (the flag is set before a guard is
        // created and not cleared until here), we are that guard, and we touch no protected data
        // in this destructor -- so no "active" guards remain.
        unsafe { self.lock.unlock() }
    }
}

@ -0,0 +1,132 @@
use core::{fmt, ptr};
use crate::spin::Spinlock;
const VGA_ADDR: usize = 0xb8000;
const VGA_WIDTH: usize = 80;
const VGA_HEIGHT: usize = 25;
const COLOUR_MASK: u16 = 0x0a00;
pub static VGA: Spinlock<Option<VgaBuf>> = Spinlock::new(None);
#[macro_export]
macro_rules! vga_print {
($($args:tt)*) => ({
crate::vga::with_vga_buf(|vga_buf| {
::core::write!(vga_buf, $($args)*).ok();
});
})
}
#[macro_export]
macro_rules! vga_println {
($($args:tt)*) => ({
crate::vga::with_vga_buf(|vga_buf| {
::core::writeln!(vga_buf, $($args)*).ok();
});
})
}
/// Install the global VGA writer over the standard text-mode buffer at `VGA_ADDR` (0xb8000).
/// Must run before any `vga_print!`/`vga_println!` use, since `with_vga_buf` panics otherwise.
pub fn vga_init() {
    let vga_buf = unsafe { VgaBuf::new(ptr::with_exposed_provenance_mut::<u16>(VGA_ADDR)) };
    let mut guard = VGA.lock();
    *guard = Some(vga_buf);
}
/// Run `f` against the global VGA writer, holding its spinlock for the duration.
///
/// # Panics
/// Panics if `vga_init` has not been called yet.
pub fn with_vga_buf<F>(f: F)
where
    F: FnOnce(&mut VgaBuf),
{
    f(VGA.lock().as_mut().expect("vga not initialised"))
}
pub struct VgaBuf {
buf: *mut u16,
col: usize,
row: usize,
}
impl VgaBuf {
    /// Creates a VGA writer over `buf`.
    ///
    /// # Safety
    /// `buf` must be valid for volatile reads and writes of
    /// `VGA_WIDTH * VGA_HEIGHT` `u16` values for the lifetime of the
    /// returned `VgaBuf`.
    pub unsafe fn new(buf: *mut u16) -> Self {
        Self {
            buf,
            col: 0,
            row: 0,
        }
    }

    /// Writes each character of `s` at the cursor, advancing it.
    pub fn vga_write_str(&mut self, s: &str) {
        for c in s.chars() {
            self.vga_write_char(c);
        }
    }

    /// Writes one character at the cursor, handling newlines, end-of-line
    /// wrapping and scrolling.
    pub fn vga_write_char(&mut self, c: char) {
        let newline = c == '\n';
        // Move to the start of the next line on '\n', or when the previous
        // write filled the current line.
        if newline || self.col >= VGA_WIDTH {
            self.col = 0;
            self.row += 1;
        }
        // If the cursor has moved past the bottom row, scroll everything up
        // one line and keep writing on the (now blank) last row.
        if self.row >= VGA_HEIGHT {
            self.scroll();
            self.row -= 1;
        }
        if !newline {
            self.vga_write_char_at(c, self.col, self.row);
            self.col += 1;
        }
    }

    /// Writes an ASCII byte at (col, row) without moving the cursor.
    /// Out-of-bounds coordinates are silently ignored.
    pub fn vga_write_ascii_char_at(&mut self, c: u8, col: usize, row: usize) {
        let vga_val = COLOUR_MASK | u16::from(c);
        if col < VGA_WIDTH && row < VGA_HEIGHT {
            // SAFETY:
            // The bounds check above guarantees (col, row) lies inside the
            // buffer, which `new`'s caller promised is valid for writes.
            unsafe {
                self.coord_ptr(col, row).write_volatile(vga_val);
            }
        }
    }

    /// Writes `c` at (col, row); characters outside the u8 range are shown
    /// as the 0xfe substitute glyph.
    pub fn vga_write_char_at(&mut self, c: char, col: usize, row: usize) {
        let c = u8::try_from(c).unwrap_or(0xfe);
        self.vga_write_ascii_char_at(c, col, row);
    }

    /// Scrolls the whole buffer up by one row and blanks the bottom row.
    fn scroll(&mut self) {
        // SAFETY:
        // Source (rows 1..VGA_HEIGHT) and destination (rows 0..VGA_HEIGHT-1)
        // both lie inside the buffer, and `ptr::copy` permits the overlap
        // between them.
        unsafe {
            ptr::copy(
                self.coord_ptr(0, 1),
                self.coord_ptr(0, 0),
                VGA_WIDTH * (VGA_HEIGHT - 1),
            );
        }
        // Blank the newly exposed bottom row. This is a safe call, so it is
        // deliberately kept outside the unsafe block above.
        for col in 0..VGA_WIDTH {
            self.vga_write_ascii_char_at(0, col, VGA_HEIGHT - 1);
        }
    }

    /// Returns a pointer to the cell at (col, row).
    ///
    /// # Safety
    /// (col, row) must lie within the buffer bounds for the resulting
    /// pointer to be valid for access.
    unsafe fn coord_ptr(&self, col: usize, row: usize) -> *mut u16 {
        unsafe { self.buf.add((row * VGA_WIDTH) + col) }
    }
}
impl fmt::Write for VgaBuf {
    /// Writes every character of `s` to the screen; never fails.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for c in s.chars() {
            self.vga_write_char(c);
        }
        Ok(())
    }

    /// Writes a single character to the screen; never fails.
    fn write_char(&mut self, c: char) -> fmt::Result {
        self.vga_write_char(c);
        Ok(())
    }
}
// SAFETY:
// NOTE(review): `VgaBuf` holds a raw pointer into the memory-mapped VGA
// buffer. Sending it across threads looks sound only because all access is
// serialised through the `VGA` spinlock — confirm there is no other aliasing
// path to the buffer.
unsafe impl Send for VgaBuf {}

1
xtask/.gitignore vendored

@ -0,0 +1 @@
/target

@ -0,0 +1,8 @@
[package]
name = "xtask"
version = "0.1.0"
edition = "2024"
[dependencies]
# Error reporting with context chaining (used throughout the build tasks).
eyre = "0.6.12"
# KDL document parsing.
# NOTE(review): not referenced by the sources shown — presumably intended for
# the `mkimg` task; confirm before removing.
kdl = "6.3.4"

@ -0,0 +1,305 @@
use std::{
ffi::OsStr,
fs::{self, File, OpenOptions},
io::{self, Seek, Write},
path::{Path, PathBuf},
process::{Command, Output, Stdio},
};
use eyre::{WrapErr, eyre};
use crate::Context;
/// Disk sector size in bytes; stage images are padded to whole sectors.
const SECTOR_SIZE: usize = 512;
/// Flags passed to every nasm invocation.
const NASM_COMMON_FLAGS: &[&str] = &["-werror"];
/// Builds bootloader stages 1–3 and assembles the combined stage 2/3 blob
/// under `<workspace>/build`.
pub fn build(ctx: &Context) -> Result<(), eyre::Error> {
    let build_dir = ctx.workspace.join("build");
    let s1_build_dir = build_dir.join("stage_1");
    let s2_build_dir = build_dir.join("stage_2");
    for dir in [&build_dir, &s1_build_dir, &s2_build_dir] {
        mkdir_if_missing(dir)?;
    }

    println!("building stage 1");
    let _s1_bin = build_stage_1(ctx, &s1_build_dir, &ctx.workspace.join("stage_1"))?;

    println!("building stage 2");
    let s2_bin = build_stage_2(ctx, &s2_build_dir, &ctx.workspace.join("stage_2"))?;

    println!("building stage 3");
    let s3_bin = build_stage_3(ctx, &ctx.workspace.join("stage_3"))?;

    println!("creating stage 2/3 blob");
    make_s23_blob(&build_dir, &s2_bin, &s3_bin)
}
/// Assembles the single stage 1 source file straight to a flat binary with
/// nasm, returning the path of the produced `s1.bin`.
fn build_stage_1(ctx: &Context, build_dir: &Path, src_dir: &Path) -> Result<PathBuf, eyre::Error> {
    let mut src_paths = ls_with_extension(src_dir, "s")?;
    if src_paths.len() != 1 {
        return Err(eyre!(
            "expected exactly one stage 1 source file, found {}",
            src_paths.len()
        ));
    }
    let src_path = src_paths.remove(0);
    let bin_path = build_dir.join("s1.bin");
    let include_dir = ctx.workspace.join("include");
    Command::new("nasm")
        .args(["-f", "bin"])
        .args(NASM_COMMON_FLAGS)
        .arg("-I")
        .arg(&include_dir)
        .arg("-o")
        .arg(&bin_path)
        .arg(&src_path)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .run_ok()?;
    Ok(bin_path)
}
/// Assembles every stage 2 source file to an ELF object, then links the
/// objects with the stage 2 linker script into `s2.bin`.
fn build_stage_2(ctx: &Context, build_dir: &Path, src_dir: &Path) -> Result<PathBuf, eyre::Error> {
    let include_dir = ctx.workspace.join("include");
    let mut obj_paths = Vec::new();
    for src_path in ls_with_extension(src_dir, "s")? {
        // Paths without a file stem cannot be named <stem>.o, so skip them.
        let file_stem = match src_path.file_stem() {
            Some(stem) => stem,
            None => continue,
        };
        let obj_path = build_dir.join(file_stem).with_extension("o");
        Command::new("nasm")
            .args(["-f", "elf"])
            .args(NASM_COMMON_FLAGS)
            .arg("-I")
            .arg(&include_dir)
            .arg("-o")
            .arg(&obj_path)
            .arg(&src_path)
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit())
            .run_ok()?;
        obj_paths.push(obj_path);
    }
    let bin_path = build_dir.join("s2.bin");
    Command::new("ld")
        .args(["-m", "elf_i386"])
        .arg("-T")
        .arg(src_dir.join("link.ld"))
        .arg("-o")
        .arg(&bin_path)
        .args(&obj_paths)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .run_ok()?;
    Ok(bin_path)
}
/// Builds the stage 3 Cargo project in release mode and returns the path of
/// the artifact Cargo produces under its custom target directory.
fn build_stage_3(ctx: &Context, stage_3_src_dir: &Path) -> Result<PathBuf, eyre::Error> {
    Command::new(ctx.cargo)
        .args(["build", "--release"])
        .current_dir(stage_3_src_dir)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .run_ok()?;
    // NOTE(review): "protected32" must match the target name stage_3's Cargo
    // config uses — confirm if the target specification is ever renamed.
    let mut bin_path = stage_3_src_dir.to_owned();
    bin_path.extend(["target", "protected32", "release", "stage_3"]);
    Ok(bin_path)
}
/// Concatenates the stage 2 and stage 3 binaries (each padded to whole
/// 512-byte sectors) into `build/s23.bin`, then patches stage 3's sector
/// offset and length, as little-endian u16s, into the first four bytes of
/// the blob.
///
/// NOTE(review): this assumes stage 2's image reserves two u16 fields at
/// offset 0 for exactly these values — confirm against the stage 2 sources.
fn make_s23_blob(
    build_dir: &Path, s2_bin_path: &Path, s3_bin_path: &Path,
) -> Result<(), eyre::Error> {
    let out_path = build_dir.join("s23.bin");
    // Truncate any previous blob so stale bytes never survive a rebuild.
    let mut out_file = OpenOptions::new()
        .write(true)
        .truncate(true)
        .create(true)
        .open(&out_path)
        .wrap_io_err(IoOp::Open, &out_path)?;
    // Scratch buffer shared by both copy passes.
    let mut buf = [0u8; SECTOR_SIZE];
    // Stage 2 goes first; its sector count is also stage 3's sector offset
    // within the blob.
    let s2_len_sectors = read_sectors_from_to(s2_bin_path, &mut out_file, &out_path, &mut buf)?;
    if s2_len_sectors == 0 {
        return Err(eyre!("empty stage 2"));
    }
    let s3_offset_sectors_16 = u16::try_from(s2_len_sectors).wrap_err("stage 3 offset overflow")?;
    let s3_len_sectors = read_sectors_from_to(s3_bin_path, &mut out_file, &out_path, &mut buf)?;
    if s3_len_sectors == 0 {
        return Err(eyre!("empty stage 3"));
    }
    let s3_len_sectors_16 = u16::try_from(s3_len_sectors).wrap_err("stage 3 length overflow")?;
    // Seek back to the start of the blob to overwrite stage 2's reserved
    // header fields with the values computed above.
    out_file
        .seek(io::SeekFrom::Start(0))
        .wrap_io_err(IoOp::Seek, &out_path)?;
    // Write the stage 2 data section fields
    out_file
        .write_all(&s3_offset_sectors_16.to_le_bytes())
        .wrap_io_err(IoOp::Write, &out_path)?;
    out_file
        .write_all(&s3_len_sectors_16.to_le_bytes())
        .wrap_io_err(IoOp::Write, &out_path)?;
    out_file.flush().wrap_io_err(IoOp::Flush, &out_path)?;
    Ok(())
}
fn mkdir_if_missing(path: &Path) -> Result<(), eyre::Error> {
if !fs::exists(path).wrap_io_err(IoOp::Stat, path)? {
fs::create_dir(path).wrap_io_err(IoOp::Open, path)?;
}
Ok(())
}
/// Returns the paths of all entries directly inside `path` whose extension
/// is `ext`, sorted lexicographically.
///
/// `read_dir` yields entries in a platform-dependent order; sorting makes
/// the result deterministic, which in turn makes the stage 2 object link
/// order (and therefore the build output) reproducible.
fn ls_with_extension(path: &Path, ext: &str) -> Result<Vec<PathBuf>, eyre::Error> {
    let read_dir = fs::read_dir(path).wrap_io_err(IoOp::Read, path)?;
    let mut paths = Vec::new();
    for entry in read_dir {
        let entry = entry.wrap_io_err(IoOp::Read, path)?;
        let entry_path = entry.path();
        if entry_path.extension() == Some(OsStr::new(ext)) {
            paths.push(entry_path);
        }
    }
    paths.sort();
    Ok(paths)
}
/// Reads from `r` into `buf`, transparently retrying reads that were
/// interrupted by a signal (`EINTR`). Returns the number of bytes read, or
/// the first non-`Interrupted` error.
fn read_retry_eintr<T>(r: &mut T, buf: &mut [u8]) -> Result<usize, io::Error>
where
    T: io::Read,
{
    loop {
        match r.read(buf) {
            // A signal interrupted the read before any data arrived; retry.
            Err(err) if err.kind() == io::ErrorKind::Interrupted => continue,
            result => return result,
        }
    }
}
/// Copies the whole of `src_path` into `dst_file`, padding the output up to
/// the next 512-byte sector boundary, and returns the number of whole
/// sectors written.
///
/// `buf` is a caller-supplied scratch buffer (callers pass `SECTOR_SIZE`
/// bytes, which the padding step relies on); `dst_path` is used only for
/// error messages.
fn read_sectors_from_to(
    src_path: &Path, dst_file: &mut File, dst_path: &Path, buf: &mut [u8],
) -> Result<u64, eyre::Error> {
    let mut total_read = 0u64;
    let mut src_file = File::open(src_path).wrap_io_err(IoOp::Open, src_path)?;
    loop {
        match read_retry_eintr(&mut src_file, buf).wrap_io_err(IoOp::Read, src_path)? {
            // A zero-byte read signals end of file.
            0 => break,
            n => {
                total_read += n as u64;
                dst_file
                    .write_all(&buf[..n])
                    .wrap_io_err(IoOp::Write, dst_path)?;
            }
        }
    }
    // Pad the final partial sector (if any) so the image is sector-aligned.
    let remainder = (total_read % (SECTOR_SIZE as u64)) as usize;
    let padding = (SECTOR_SIZE - remainder) % SECTOR_SIZE;
    if padding != 0 {
        // Pad with 0xf4 since it encodes HLT, in case we accidentally jump to it.
        buf[..padding].fill(0xf4);
        dst_file
            .write_all(&buf[..padding])
            .wrap_io_err(IoOp::Write, dst_path)?;
    }
    let total_bytes = total_read + padding as u64;
    assert!(total_bytes % SECTOR_SIZE as u64 == 0);
    Ok(total_bytes / SECTOR_SIZE as u64)
}
/// Extension trait for running a `Command` and treating a non-zero exit
/// status as an error.
trait CommandExt {
    // Runs the command to completion, returning its captured output, or an
    // error if it could not be spawned or exited unsuccessfully.
    fn run_ok(&mut self) -> Result<Output, eyre::Error>;
}
impl CommandExt for Command {
    /// Runs the command to completion and returns its output, or an error if
    /// it could not be spawned or exited with a non-zero status.
    fn run_ok(&mut self) -> Result<Output, eyre::Error> {
        let output = self
            .output()
            .wrap_err_with(|| format!("failed to run {:?}", self))?;
        if !output.status.success() {
            // Include the exit status in the message: callers here inherit
            // stdio, so `output.stdout`/`stderr` are empty and the status is
            // the only machine-readable failure detail available.
            return Err(eyre!("command failed with {}: {:?}", output.status, self));
        }
        Ok(output)
    }
}
/// Extension trait for attaching "failed to <op> <path>" context to results
/// of filesystem and I/O operations.
trait WrapIoError<T> {
    fn wrap_io_err(self, op: IoOp, path: &Path) -> Result<T, eyre::Report>;
}
// Blanket implementation over any result eyre can already attach context to.
impl<T, E> WrapIoError<T> for Result<T, E>
where
    Self: WrapErr<T, E>,
{
    fn wrap_io_err(self, op: IoOp, path: &Path) -> Result<T, eyre::Report> {
        // Lazily built so the happy path pays no formatting cost.
        self.wrap_err_with(|| format!("failed to {} {}", op.name(), path.to_string_lossy()))
    }
}
/// The kind of I/O operation that failed; used only to build error messages
/// via `WrapIoError`.
#[derive(Clone, Copy)]
enum IoOp {
    Read,
    Write,
    Stat,
    Open,
    Seek,
    Flush,
}
impl IoOp {
    /// Returns the lowercase verb used in "failed to <verb> <path>" messages.
    fn name(self) -> &'static str {
        match self {
            IoOp::Read => "read",
            IoOp::Write => "write",
            IoOp::Stat => "stat",
            IoOp::Open => "open",
            IoOp::Seek => "seek",
            IoOp::Flush => "flush",
        }
    }
}

@ -0,0 +1,44 @@
use std::{
env,
path::{Path, PathBuf},
};
use eyre::{ContextCompat, eyre};
mod build;
mod mkimg;
/// Entry point: parses the task name from the command line, builds the
/// shared `Context` from Cargo's environment variables, and dispatches.
fn main() -> Result<(), eyre::Error> {
    let mut args = env::args_os().skip(1);
    let task_os = args.next().context("no task provided")?;
    let task = task_os
        .to_str()
        .with_context(|| format!("invalid utf-8 \"{}\"", task_os.to_string_lossy()))?;

    // The workspace root is the parent of the xtask crate's manifest dir.
    let manifest_dir =
        PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").context("CARGO_MANIFEST_DIR not set")?);
    let workspace_dir = manifest_dir
        .parent()
        .context("invalid CARGO_MANIFEST_DIR")?;
    let cargo_path = PathBuf::from(env::var_os("CARGO").context("CARGO not set")?);

    let ctx = Context {
        workspace: workspace_dir,
        cargo: &cargo_path,
    };
    match task {
        "build" => build::build(&ctx),
        "mkimg" => mkimg::mkimg_bios_gpt(&ctx),
        other => Err(eyre!("unknown task \"{}\"", other)),
    }
}
/// Shared state passed to every xtask subcommand.
struct Context<'a> {
    // Root directory of the workspace (parent of the xtask crate).
    workspace: &'a Path,
    // Path to the `cargo` executable, taken from the `CARGO` env var.
    cargo: &'a Path,
}

@ -0,0 +1,23 @@
use crate::Context;
// On-disk GPT layout (LBA -k is the k-th block from the end of the disk):
// LBA 0: protective MBR
// LBA 1: partition table header
// LBA 2..=33: partition table entries (32 blocks)
// LBA 34..: usable blocks
// LBA -33..=-2: partition table entries (backup copy)
// LBA -1: partition table header (backup copy)
/// A single GPT partition table entry.
///
/// NOTE(review): currently unused — `mkimg_bios_gpt` is still a stub. Field
/// meanings appear to follow the on-disk GPT entry layout; confirm against
/// the UEFI spec when serialisation is implemented.
struct Partition {
    // Partition type GUID.
    type_guid: [u8; 16],
    // Unique GUID for this particular partition.
    part_guid: [u8; 16],
    // First LBA occupied by the partition.
    lba_start: u64,
    // Last LBA occupied by the partition.
    lba_end: u64,
    // Attribute flags.
    attr: u64,
    // Human-readable partition name.
    name: String,
}
/// Builds a BIOS-bootable GPT disk image.
///
/// TODO: currently a stub that only prints the workspace directory.
pub fn mkimg_bios_gpt(ctx: &Context) -> Result<(), eyre::Error> {
    println!("dir={}", ctx.workspace.to_string_lossy());
    Ok(())
}
Loading…
Cancel
Save