Compare commits

...

8 Commits

Author SHA1 Message Date
Valentin Anger ea32228161 Several fixes, changes and improvements
"Improved" physical memory management, now using a linked list
Split initialization (start) and main kernel function apart
Make the logging target changeable at runtime
Add a lock to the log to prevent parallel access
Remove the distinction of Virtual and Physical pointers, it is too
cumbersome to use
Make kernel debug information accessible
Add panic backtraces
Allow the display of an image on panic
2020-01-29 20:16:31 +01:00
Valentin Anger c2158d071f Link all sections into one
The Bootboot specification requires that only one section is loaded
2020-01-29 19:45:46 +01:00
Valentin Anger f982c85ea5 Update enum value to upstreamed version 2020-01-29 19:34:14 +01:00
Valentin Anger 498f969360 Add support for Intel microcode updates 2020-01-13 12:33:09 +01:00
Valentin Anger fada83f0b6 Paging use packed instead of extern structs 2020-01-13 12:21:13 +01:00
Valentin Anger f995f0831f Framebuffer support for more pixel layouts 2020-01-13 12:20:01 +01:00
Valentin Anger 2583eb4561 Logger automatically prints newlines 2020-01-13 12:16:31 +01:00
Valentin Anger cb5a8a4af3 Only partition and format the disk image when it didn't already exist 2020-01-10 20:05:25 +01:00
30 changed files with 1530 additions and 389 deletions

View File

@@ -8,7 +8,7 @@ pub fn build(b: *Builder) void {
exe.setBuildMode(mode);
exe.setTarget(.x86_64, .freestanding, .elfv2);
exe.code_model = .Kernel;
exe.code_model = .kernel;
// exe.force_pic = true;
exe.setLinkerScriptPath("linker.ld");

View File

@@ -1,18 +1,35 @@
mmio = 0xfffffffff8000000;
fb = 0xfffffffffc000000;
PHDRS {
kern PT_LOAD;
}
SECTIONS
{
. = 0xffffffffffe00000;
bootboot = .; . += 4096;
environment = .; . += 4096;
.text : {
KEEP(*(.text.boot)) *(.text .text.*) /* code */
*(.rodata .rodata.*) /* data */
bootboot = .; . += 4K;
environment = .; . += 4K;
.text ALIGN(4K) : {
*(.text .text.*)
} :kern
.rodata ALIGN(4K) : {
*(.rodata .rodata.*)
} :kern
.data ALIGN(4K) : {
*(.data .data.*)
}
.bss (NOLOAD) : { /* bss */
. = ALIGN(16);
} :kern
.bss ALIGN(16) : {
*(.bss .bss.*)
*(COMMON)
}
. = ALIGN(4K);
} :kern
/DISCARD/ : { *(.comment) }
}

View File

@@ -8,18 +8,25 @@ fi
./mkinitrd.sh
truncate -s 64M out/disk.img
NEW_DISK=0
if [ ! -f out/disk.img ]; then
NEW_DISK=1
sfdisk out/disk.img <<END
label: gpt
2048 - U *
truncate -s 64M out/disk.img
sfdisk out/disk.img <<END
label: gpt
2048 - U *
END
fi
LOOPBACK=$(losetup -f)
sudo losetup -P $LOOPBACK $(readlink -f out/disk.img)
sudo mkfs.fat -F 32 ${LOOPBACK}p1
if [ "$NEW_DISK" -eq 1 ]; then
sudo mkfs.fat -F 32 ${LOOPBACK}p1
fi
mkdir -p out/mnt
sudo mount ${LOOPBACK}p1 out/mnt

View File

@@ -2,6 +2,8 @@
set -eu -o pipefail
mkdir -p out/
mkdir -p /tmp/xenon_initrd/debug
mkdir -p /tmp/xenon_initrd/sys
cp zig-cache/bin/xenon /tmp/xenon_initrd/sys/core
@@ -10,10 +12,14 @@ if [ -d third_party/microcode/ ]; then
cp -r third_party/microcode/ /tmp/xenon_initrd/sys/
fi
if [ -f third_party/panic.ppm ]; then
cp third_party/panic.ppm /tmp/xenon_initrd/debug/
fi
TARGET=$(realpath out/initrd)
cd /tmp/xenon_initrd
find -type f | cpio -o --reproducible -H newc | gzip > $TARGET
find -type f | cpio -o --reproducible -H newc > $TARGET
# tar -c -z -f $TARGET *
rm -r /tmp/xenon_initrd

14
run.sh
View File

@@ -8,16 +8,22 @@ fi
TARGET_ARGS=""
case $1 in
'')
;;
bios)
;;
uefi)
TARGET_ARGS="-pflash third_party/bios.bin"
;;
;;
*)
TARGET_ARGS=""
;;
echo "Unknown target $1"
exit 1
;;
esac
qemu-system-x86_64 -hda out/disk.img \
-net none -m 4G -smp 3 \
-net none -m 4G -smp 4 \
-no-reboot -no-shutdown \
-d in_asm,int,guest_errors \
-cpu qemu64,+pdpe1gb -s \
$TARGET_ARGS

View File

@@ -2,9 +2,9 @@ const cpu = @import("cpu.zig");
const msr = @import("msr.zig");
const types = @import("types.zig");
fn baseAddress() types.PhysicalAddress([*]u32) {
fn baseAddress() [*]u32 {
const reg = cpu.rdmsr(msr.apic_base_address);
return types.Physical(@intToPtr([*]u32, reg >> 12 & 0xffffffffff));
return @intToPtr([*]u32, reg >> 12 & 0xffffffffff);
}
pub fn bootstrapCpuCore() bool {
@@ -13,5 +13,5 @@ pub fn bootstrapCpuCore() bool {
}
pub fn cpuId() u8 {
return @intCast(u8, baseAddress().ptr[5] >> 24);
return @intCast(u8, baseAddress()[5] >> 24);
}

View File

@@ -1,6 +1,8 @@
const std = @import("std");
const types = @import("types.zig");
const PixelType = @import("framebuffer.zig").PixelType;
pub const c = @cImport({
@cDefine("uint8_t", "unsigned char");
@cDefine("uint16_t", "unsigned short");
@@ -18,9 +20,9 @@ pub extern const environment: [4096]u8;
pub extern var fb: u32;
pub extern var mmio: u8;
pub fn memory_map() types.VirtualAddress([]align(1) const MemoryMapEntry) {
pub fn memory_map() []align(1) const MemoryMapEntry {
const entry_count = (bootboot.size - 128) / @sizeOf(c.MMapEnt);
return types.Virtual(@ptrCast([*]align(1) const MemoryMapEntry, &bootboot.mmap)[0..entry_count]);
return @ptrCast([*]align(1) const MemoryMapEntry, &bootboot.mmap)[0..entry_count];
}
pub const MemoryMapEntry = extern struct {
@@ -32,8 +34,8 @@ pub const MemoryMapEntry = extern struct {
_ptr: [*]u8,
_size: u64,
pub fn ptr(self: Self) types.PhysicalAddress([*]u8) {
return types.PhysicalAddress([*]u8).init(self._ptr);
pub fn ptr(self: Self) [*]u8 {
return self._ptr;
}
pub fn size(self: Self) u64 {
@@ -59,3 +61,13 @@ pub const MemoryMapEntry = extern struct {
});
}
};
pub fn framebuffer_pixel_type() !PixelType {
return switch (bootboot.fb_type) {
c.FB_ARGB => PixelType.ARGB,
c.FB_RGBA => PixelType.RGBA,
c.FB_ABGR => PixelType.ABGR,
c.FB_BGRA => PixelType.BGRA,
else => return error.Unsupported,
};
}
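A short usage sketch of the new helper (the call site and panic message are illustrative):

// During early framebuffer setup, bail out on layouts the blitter cannot handle:
const pixel_type = framebuffer_pixel_type() catch @panic("Unsupported framebuffer pixel layout");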

View File

@@ -1,5 +1,7 @@
const std = @import("std");
const msr = @import("msr.zig");
inline fn bit(value: var, comptime pos: @IntType(false, std.math.log2_int(u16, @typeInfo(@TypeOf(value)).Int.bits))) bool {
return (value & (1 << pos)) != 0;
}
@@ -157,6 +159,10 @@ pub const CpuId = struct {
const base = @intCast(u8, self.baseFamily());
return base + if (base == 0xf) self.extFamily() else 0;
}
pub fn format(self: *const @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, context: var, comptime Errors: type, output: fn (@TypeOf(context), []const u8) Errors!void) Errors!void {
try std.fmt.format(context, Errors, output, "Family {x}h model {} stepping {} - {x}", .{ self.family(), self.model(), self.stepping(), self.value });
}
};
};
@@ -181,10 +187,49 @@ pub fn cpuid(function: u32) CpuId {
};
}
pub const Capabilities = struct {
physical_address_width: u8 = 32,
virtual_address_width: u8 = 32,
gigabyte_pages: bool = false,
no_execute: bool = false,
pub fn initialize(self: *@This()) void {
const addr_width = cpuid(0x80000008);
self.physical_address_width = @intCast(u8, addr_width.a & 255);
self.virtual_address_width = @intCast(u8, addr_width.a >> 8 & 255);
const caps = cpuid(0x80000001);
if (bit(caps.d, 20) or bit(caps.d, 29)) { // EFER available
var extended_features_enables = rdmsr(msr.ia32_efer);
extended_features_enables |= 1 << 11; // Set no execute
wrmsr(msr.ia32_efer, extended_features_enables);
extended_features_enables = rdmsr(msr.ia32_efer);
if (bit(extended_features_enables, 11))
self.no_execute = true;
}
if (bit(caps.d, 26)) {
self.gigabyte_pages = true;
}
}
};
pub inline fn hlt() void {
asm volatile ("hlt");
}
pub inline fn lgdt(addr: u64) void {
asm volatile ("lgdt %[addr]"
: [addr] "r" (addr)
);
}
pub inline fn lidt(addr: u64) void {
asm volatile ("lidt %[addr]"
: [addr] "r" (addr)
);
}
pub inline fn outb(port: u16, data: u8) void {
asm volatile ("out %[data], %[port]"
:
@@ -222,16 +267,17 @@ pub const register = struct {
);
}
pub inline fn cr3Set(addr: u64) void {
return asm volatile ("movq %[addr], %%cr3"
:
: [addr] "r" (addr)
: "memory"
);
}
pub inline fn sp() u64 {
return asm volatile (""
: [ret] "={rsp}" (-> u64)
);
}
pub inline fn spSet(addr: u64) void {
asm volatile ("movq %[addr], %%rsp"
:
: [addr] "r" (addr)
);
}
};
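For reference, a minimal sketch of the mask-and-shift idea behind the bit helper, with fixed types for brevity (the sample EDX value is illustrative; bits 20, 26 and 29 are the ones consulted in Capabilities.initialize):

const std = @import("std");

fn bitU64(value: u64, pos: u6) bool {
    return (value & (@as(u64, 1) << pos)) != 0;
}

test "CPUID 0x80000001 EDX feature bits" {
    const edx: u64 = (1 << 20) | (1 << 26);
    std.debug.assert(bitU64(edx, 20)); // EFER indicator checked in initialize()
    std.debug.assert(bitU64(edx, 26)); // 1 GiB pages
    std.debug.assert(!bitU64(edx, 29));
}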

View File

@@ -1,15 +0,0 @@
const format = @import("std").fmt.format;
const bootboot = @import("bootboot.zig");
const Framebuffer = @import("framebuffer.zig").Framebuffer;
const Serial = @import("serial.zig").Serial;
const serial = Serial.init(0x3f8);
const EmptyError = error{};
fn format_helper(_: void, string: []const u8) EmptyError!void {
serial.write(string);
}
pub fn print(comptime fmt: []const u8, args: var) void {
format({}, EmptyError, format_helper, fmt, args) catch unreachable;
}

View File

@@ -14,6 +14,19 @@ pub const Cpio = struct {
};
}
pub fn findFile(self: *const Self, name: []const u8) ?(CpioError!CpioEntry_) {
var copy = self.*;
while (copy.nextFile()) |err_entry| {
const entry = err_entry catch break;
if (std.mem.eql(u8, entry.name() catch continue, name))
return entry;
}
return null;
}
pub fn nextFile(self: *Self) ?(CpioError!CpioEntry_) { // *const CpioEntry) {
if (self.remaining_files.len == 0)
return null;
@@ -21,7 +34,7 @@ pub const Cpio = struct {
if (self.remaining_files.len < 110) // @sizeOf(CpioEntry))
return CpioError.EndOfData;
// const entry = @ptrCast(*const CpioEntry, &self.remaining_files);
// const entry = @ptrCast(*const CpioEntry, &self.remaining_files[0]);
const entry = CpioEntry_{
.header = self.remaining_files[0..110],
};
@@ -158,8 +171,8 @@ const CpioEntry = packed struct {
return end_of_file + 3 & ~@as(usize, 0b11); // Align to four bytes
}
fn read_hex(digits: [8]u8) CpioError!u32 {
return parseInt(u32, &digits, 16) catch return CpioError.Invalid;
fn read_hex(digits: []u8) CpioError!u32 {
return parseInt(u32, digits, 16) catch return CpioError.Invalid;
}
fn total_size(self: *const Self) CpioError!usize {

100
src/formats/ppm.zig Normal file
View File

@@ -0,0 +1,100 @@
const std = @import("std");
pub const Ppm = struct {
const Self = @This();
slice: []const u8,
width: usize,
height: usize,
max_val: usize,
pub fn init(slice: []const u8) !Self {
var self = Self{
.slice = slice,
.width = 0,
.height = 0,
.max_val = 0,
};
const magic = self.consumeText();
if (!std.mem.eql(u8, magic, "P6"))
return Error.UnknownFormat;
self.width = try self.consumeNumber();
self.height = try self.consumeNumber();
self.max_val = try self.consumeNumber();
if (self.max_val != 255)
return Error.UnsupportedFormat;
if (self.slice.len == 0)
return Error.InvalidFile;
// Consume a single whitespace
self.slice = self.slice[1..];
if (self.slice.len != 3 * self.width * self.height)
return Error.InvalidFile;
return self;
}
pub fn nextPixel(self: *Self) ?RGBPixel {
if (self.slice.len == 0)
return null;
const ret = RGBPixel{
.r = self.slice[0],
.g = self.slice[1],
.b = self.slice[2],
};
self.slice = self.slice[3..];
return ret;
}
fn consumeNumber(self: *Self) !usize {
const num = self.consumeText();
return std.fmt.parseInt(usize, num, 10);
}
fn consumeText(self: *Self) []const u8 {
var start: usize = 0;
while (start < self.slice.len and
std.ascii.isSpace(self.slice[start]))
start += 1;
var end = start;
while (end < self.slice.len and
!std.ascii.isSpace(self.slice[end]))
end += 1;
const ret = self.slice[start..end];
self.slice = self.slice[end..];
return ret;
}
pub const RGBPixel = packed struct {
r: u8,
g: u8,
b: u8,
};
pub const Error = error{
InvalidFile,
UnknownFormat,
UnsupportedFormat,
};
};
test "Empty file" {
const assert = std.debug.assert;
var ppm = try Ppm.init("P6 0 0 255 ");
assert(ppm.width == 0);
assert(ppm.height == 0);
assert(ppm.max_val == 255);
assert(ppm.nextPixel() == null);
}
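A companion test walking actual pixel data through the same API (the inline byte string is illustrative):

test "Single pixel" {
    const assert = std.debug.assert;
    // Header fields, one whitespace separator, then raw RGB bytes.
    var ppm = try Ppm.init("P6 1 1 255 \xff\x00\x7f");
    assert(ppm.width == 1 and ppm.height == 1);
    const pix = ppm.nextPixel().?;
    assert(pix.r == 0xff and pix.g == 0x00 and pix.b == 0x7f);
    assert(ppm.nextPixel() == null);
}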

View File

@@ -1,7 +1,7 @@
const std = @import("std");
const eql = std.mem.eql;
const bootboot = @import("bootboot.zig");
const ppm = @import("formats/ppm.zig");
const types = @import("types.zig");
extern const _binary_font_psf_start: comptime if (use_psf2) Psf2 else Psf1;
@@ -9,10 +9,10 @@ extern const _binary_font_psf_start: comptime if (use_psf2) Psf2 else Psf1;
pub const use_psf2 = false;
pub const Pixel = packed struct {
r: u8,
g: u8,
b: u8,
x: u8 = 0,
g: u8,
r: u8,
a: u8 = 0,
};
pub fn color(r: u8, g: u8, b: u8) Pixel {
@@ -23,11 +23,19 @@ pub fn color(r: u8, g: u8, b: u8) Pixel {
};
}
pub const PixelType = enum {
ARGB,
RGBA,
ABGR,
BGRA,
};
pub const Framebuffer = struct {
const Self = @This();
width: u32,
height: u32,
pixel_type: PixelType,
x: u32 = 0,
y: u32 = 0,
@@ -35,29 +43,61 @@ pub const Framebuffer = struct {
foreground: Pixel = color(0xa0, 0xa0, 0xa0),
background: Pixel = color(0x20, 0, 0x50),
buffer: []volatile Pixel,
buffer: []volatile u32,
pub fn init(fb: []volatile u8, width: u32, height: u32) Self {
pub fn init(fb: []align(4) volatile u8, width: u32, height: u32, pixel_type: PixelType) Self {
if (!eql(u8, &_binary_font_psf_start.magic, &(comptime if (use_psf2) Psf2.magic_const else Psf1.magic_const)))
@panic("Invalid kernel font");
std.debug.assert(width * height * @sizeOf(Pixel) <= fb.len);
std.debug.assert(width * height * @sizeOf(u32) <= fb.len);
return .{
.width = width,
.height = height,
.pixel_type = pixel_type,
.buffer = @ptrCast([*]volatile Pixel, &fb[0])[0 .. width * height],
.buffer = @ptrCast([*]volatile u32, &fb[0])[0 .. width * height],
};
}
pub fn clear(self: Self) void {
@setRuntimeSafety(false);
self.rectangle(0, 0, self.width - 1, self.height - 1, self.background);
}
pub inline fn pixel(self: Self, x: u32, y: u32, pix: Pixel) void {
@setRuntimeSafety(false);
@fence(.SeqCst);
if (x >= self.width or y >= self.height)
@panic("Framebuffer out of bounds access");
self.buffer[y * self.width + x] = pix;
self.buffer[y * self.width + x] = switch (self.pixel_type) {
.ARGB => @bitCast(u32, pix),
.RGBA => @bitCast(u32, pix) << 8,
.ABGR => @byteSwap(u32, @bitCast(u32, pix)) >> 8,
.BGRA => @byteSwap(u32, @bitCast(u32, pix)),
};
}
// TODO Abstract this to an Image interface
pub fn image(self: *Self, x: u32, y: u32, image_stream: *ppm.Ppm) void {
const max_x = std.math.min(self.width, x + image_stream.width);
const max_y = std.math.min(self.height, y + image_stream.height);
const cut_width = image_stream.width + x - max_x;
var cy = y;
while (cy < max_y) : (cy += 1) {
var cx = x;
while (cx < max_x) : (cx += 1) {
const in_pixel = image_stream.nextPixel() orelse return;
const pix = color(in_pixel.r, in_pixel.g, in_pixel.b);
self.pixel(cx, cy, pix);
}
var consume: usize = 0;
while (consume < cut_width) : (consume += 1)
_ = image_stream.nextPixel(); // TODO Skip method
}
}
pub fn outStream(self: *Self) OutStream {
@@ -67,16 +107,7 @@ pub const Framebuffer = struct {
};
}
pub fn rectangle(self: Self, x1: u32, y1: u32, x2: u32, y2: u32, pix: Pixel) void {
var cy = y1;
while (cy <= y2) : (cy += 1) {
var cx = x1;
while (cx <= x2) : (cx += 1)
self.pixel(cx, cy, pix);
}
}
fn put(self: *Self, c: u8) void {
pub fn put(self: *Self, c: u8) void {
const width = comptime if (use_psf2) _binary_font_psf_start.width else 8;
const height = _binary_font_psf_start.height;
@@ -86,6 +117,7 @@ pub const Framebuffer = struct {
},
'\n' => {
self.x = self.width;
self.rectangle(0, (self.y + height) % self.height, self.width - 1, (self.y + 2 * height) % self.height, self.background);
},
else => {
const glyph_start: u32 = comptime if (use_psf2)
@@ -121,19 +153,28 @@ pub const Framebuffer = struct {
}
}
fn write(self: *Self, text: []const u8) void {
pub fn rectangle(self: Self, x1: u32, y1: u32, x2: u32, y2: u32, pix: Pixel) void {
var cy = y1;
while (cy <= y2) : (cy += 1) {
var cx = x1;
while (cx <= x2) : (cx += 1)
self.pixel(cx, cy, pix);
}
}
pub fn write(self: *Self, text: []const u8) void {
for (text) |c, i| {
self.put(c);
}
}
const OutStream = struct {
pub const OutStream = struct {
pub const Stream = std.io.OutStream(types.NoError);
fb: *Self,
stream: Stream,
fn writeFn(out_stream: *Stream, string: []const u8) error{}!void {
fn writeFn(out_stream: *Stream, string: []const u8) types.NoError!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
self.fb.write(string);
}
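The word-level conversions in pixel() reduce to shifts and byte swaps; a quick in-file check, assuming @bitCast(u32, pix) yields an ARGB word on this little-endian layout (the sample value is illustrative):

test "pixel word conversions" {
    const argb: u32 = 0x00a0b0c0; // a=00 r=a0 g=b0 b=c0
    std.debug.assert(argb << 8 == 0xa0b0c000); // RGBA
    std.debug.assert(@byteSwap(u32, argb) == 0xc0b0a000); // BGRA
    std.debug.assert(@byteSwap(u32, argb) >> 8 == 0x00c0b0a0); // ABGR
}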

View File

@@ -1,6 +1,23 @@
const std = @import("std");
const OutStream = std.io.OutStream;
const cpu = @import("cpu.zig");
const framebuffer_mod = @import("framebuffer.zig");
const logger = @import("logger.zig");
const memory = @import("memory.zig");
const types = @import("types.zig");
pub var logger: ?*OutStream(types.NoError) = null;
pub var cpu_capabilities: cpu.Capabilities = cpu.Capabilities{};
pub var initrd: ?[]const u8 = null;
pub var framebuffer: ?framebuffer_mod.Framebuffer = null;
pub var framebuffer_stream: ?framebuffer_mod.Framebuffer.OutStream = null;
pub var log_target: ?*OutStream(types.NoError) = null;
pub var log_stream: *OutStream(types.NoError) = &logger.log_stream;
pub var kernel_memory: ?memory.VirtualMemory = null;
pub var physical_memory: ?memory.PhysicalMemory = null;
pub var bp_stack: ?[]u8 = null;

View File

@@ -2,77 +2,42 @@ const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const apic = @import("apic.zig");
const bootboot = @import("bootboot.zig");
const cpio = @import("formats/cpio.zig");
const cpu = @import("cpu.zig");
const framebuffer = @import("framebuffer.zig");
const global = @import("global.zig");
const logger = @import("logger.zig");
const memory = @import("memory.zig");
const log = logger.log;
const microcode = @import("microcode.zig");
const msr = @import("msr.zig");
const paging = @import("paging.zig");
const types = @import("types.zig");
const log = logger.log;
pub fn main() noreturn {
if (!apic.bootstrapCpuCore()) {
while (true) {
cpu.hlt();
}
}
// TODO Possible bootboot bug
const fb_size = if (bootboot.bootboot.fb_size != 0) bootboot.bootboot.fb_size else bootboot.bootboot.fb_width * bootboot.bootboot.fb_height * 4;
var fb = framebuffer.Framebuffer.init(
@ptrCast([*]volatile u8, &bootboot.fb)[0..fb_size],
bootboot.bootboot.fb_width,
bootboot.bootboot.fb_height,
);
fb.clear();
var fb_stream = fb.outStream();
global.logger = &fb_stream.stream;
log(.Info, "Entering Xenon ({} {} {})\r\n", .{ builtin.arch, builtin.mode, builtin.code_model });
log(.Info, "Entering Xenon ({} {} {})", .{ builtin.arch, builtin.mode, builtin.code_model });
{ // CPU information
log(.Info, "CPU vendor {}\r\n", .{cpu.CpuId.vendor()});
log(.Info, "CPU vendor {}", .{cpu.CpuId.vendor()});
const sign = cpu.CpuId.signature();
log(.Info, "CPU family {x}h model {} stepping {} - {x}\r\n", .{ sign.family(), sign.model(), sign.stepping(), sign.value });
log(.Info, "CPU microcode patchlevel {x}\r\n", .{cpu.rdmsr(msr.ucode_amd_patchlevel)});
log(.Info, "CPU {}", .{sign});
log(.Info, "CPU microcode patchlevel {x}", .{microcode.patchlevel()});
}
var initrd = cpio.Cpio.init(@intToPtr([*]const u8, bootboot.bootboot.initrd_ptr)[0..bootboot.bootboot.initrd_size]);
log(.Info, "Updated microcode patchlevel? {} -> {x}\r\n", .{ microcode.update(initrd), cpu.rdmsr(msr.ucode_amd_patchlevel) });
var initrd = cpio.Cpio.init(global.initrd.?);
if (microcode.update_cpio(initrd)) {
log(.Info, "Updated microcode to patchlevel {x}", .{microcode.patchlevel()});
const page_table = paging.PageMapLevel4.fromCr3(cpu.register.cr3());
log(.Info, "{*}\r\n", .{page_table});
log(.Info, "Kernel main at Virtual({x}) {}\r\n", .{
@ptrToInt(main),
page_table.toPhysical(@TypeOf(main), types.Virtual(main)),
});
log(.Info, "Bootboot located at Virtual({x}) Physical({*})\r\n", .{
@ptrToInt(&bootboot.bootboot),
(page_table.toPhysical(@TypeOf(&bootboot.bootboot), types.Virtual(&bootboot.bootboot)) orelse @panic("Kernel code is not mapped?!")).ptr,
});
assert(std.mem.eql(u8, &bootboot.bootboot.magic, "BOOT"));
const mmap = bootboot.memory_map();
log(.Info, "Memory map contains {} entries\r\n", .{mmap.ptr.len});
log(.Info, "Initrd files:\r\n", .{});
while (initrd.nextFile()) |file| {
log(.Info, "{}\r\n", .{(file catch continue).name()});
// Update capabilities after microcode update
global.cpu_capabilities.initialize();
}
// TODO Requires > 4KiB stack memory
// print("Instruction support:\r\n{}", .{cpu.CpuId.instructionSupport()});
log(.Info, "CPU capabilities: {}", .{global.cpu_capabilities});
@panic("Kernel is about to return");
}
pub fn main_ap() noreturn {
while (true) {
cpu.hlt();
}
@panic("AP is about to return");
}

26
src/lock.zig Normal file
View File

@@ -0,0 +1,26 @@
pub const SpinLock = struct {
const Self = @This();
lock: u8,
pub fn init() Self {
return .{
.lock = 0,
};
}
pub fn acquire(self: *Self) void {
while (@atomicRmw(u8, &self.lock, .Xchg, 1, .Acquire) != 0) {
asm volatile ("pause");
}
}
pub fn release(self: *Self) void {
@atomicStore(u8, &self.lock, 0, .Release);
}
pub fn wait(self: *const Self) void {
while (@atomicLoad(u8, &self.lock, .Acquire) != 0) {
asm volatile ("pause");
}
}
};
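A minimal usage sketch of the SpinLock above (the guarded counter and import path are illustrative):

const SpinLock = @import("lock.zig").SpinLock;

var counter_lock = SpinLock.init();
var counter: u64 = 0;

fn increment() void {
    counter_lock.acquire();
    defer counter_lock.release(); // released on every exit path
    counter += 1; // only one core mutates the counter at a time
}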

View File

@@ -3,18 +3,44 @@ const std = @import("std");
const fb = @import("framebuffer.zig");
const global = @import("global.zig");
const lock = @import("lock.zig");
const types = @import("types.zig");
const enable_verbose = false;
pub const LogLevel = enum {
Verbose,
Debug,
Info,
Warning,
Error,
Writethrough,
};
var log_lock: lock.SpinLock = lock.SpinLock.init();
pub fn log(comptime loglevel: LogLevel, comptime fmt: []const u8, args: var) void {
if (loglevel != .Debug or builtin.mode == .Debug) {
if (global.logger) |logger| {
logger.print("[{}] ", .{@tagName(loglevel)}) catch unreachable;
if ((loglevel != .Debug or builtin.mode == .Debug) and
(loglevel != .Verbose or enable_verbose))
{
if (global.log_target) |logger| {
log_lock.acquire();
defer log_lock.release();
if (loglevel != .Writethrough)
logger.print("{}|", .{@tagName(loglevel)[0..1]}) catch unreachable;
logger.print(fmt, args) catch unreachable;
if (loglevel != .Writethrough)
logger.write("\r\n") catch unreachable;
}
}
}
pub var log_stream = std.io.OutStream(types.NoError){
.writeFn = logStreamWrite,
};
fn logStreamWrite(out_stream: *std.io.OutStream(types.NoError), string: []const u8) types.NoError!void {
log(.Writethrough, "{s}", .{string});
}
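A usage sketch of the reworked log API (the message and value are illustrative):

const log = @import("logger.zig").log;

fn example() void {
    log(.Info, "Mapped {} pages", .{42}); // emitted as "I|Mapped 42 pages\r\n"
    log(.Writethrough, "raw fragment", .{}); // no prefix, no trailing newline
}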

View File

@@ -1,90 +1,478 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const global = @import("global.zig");
const log = @import("logger.zig").log;
const MemoryMapEntry = @import("bootboot.zig").MemoryMapEntry;
const paging = @import("paging.zig");
const types = @import("types.zig");
/// Manager of physical memory
/// Methods other than init
/// may only be called in identity mapped memory
pub const PhysicalMemory = struct {
const Self = @This();
const List = std.SinglyLinkedList(NodeState);
const Node = List.Node;
memory_map: []align(1) const MemoryMapEntry,
map_index: usize = 0,
byte_index: usize = 0,
const node_size: usize = @sizeOf(Node);
const node_align: u29 = @alignOf(Node);
static_memory_map: []align(1) const MemoryMapEntry,
static_index: usize = 0,
known_available: usize = 0,
known_used: usize = 0,
memory_map: List,
pub fn init(static_memory_map: []align(1) const MemoryMapEntry) Self {
var kernel_memory_map = List.init();
var last_node: ?*Node = null;
pub fn init(memory_map: types.VirtualAddress([]align(1) const MemoryMapEntry)) Self {
return .{
.memory_map = memory_map.ptr,
.memory_map = kernel_memory_map,
.static_memory_map = static_memory_map,
};
}
pub fn alloc(self: *Self, size: usize, comptime alignment: usize) MemoryError!types.PhysicalAddress([]allowzero align(alignment) u8) {
var segment = self.memory_map[self.map_index];
var segment_size = self.memory_map[self.map_index].size();
while (self.byte_index >= segment_size or segment_size - self.byte_index < size) {
try self.nextFreePage();
segment = self.memory_map[self.map_index];
segment_size = segment.size();
/// Allocates physical memory
/// Returned slice is not usable unless the region it points
/// to is identity mapped
/// Returned memory is zeroed out
pub fn alloc(self: *Self, size: usize, alignment: u29) Allocator.Error![]u8 {
log(.Verbose, "Allocating {} bytes of memory", .{size});
var previous_node: ?*Node = null; // For deletion
var current_node = self.memory_map.first;
var base_address: usize = 0;
var aligned_address: usize = 0;
var lost_size: usize = 0;
while (true) {
if (current_node) |node| {
base_address = @ptrToInt(&node.data.slice[0]) + node_size;
aligned_address = forwardAlign(base_address, alignment);
lost_size = aligned_address - base_address;
if (node.data.slice.len >= size + lost_size + node_size and !node.data.used)
break;
previous_node = current_node;
current_node = node.next;
} else {
// Attempt to claim a new region from the static memory map
if (self.nextFreeRegion()) |region| {
if (createNode(region)) |new_node| {
self.known_available += new_node.data.slice.len;
if (previous_node) |prev_node| {
prev_node.insertAfter(new_node);
} else {
self.memory_map.first = new_node;
}
current_node = new_node;
} else {
return Allocator.Error.OutOfMemory;
}
} else {
return Allocator.Error.OutOfMemory;
}
}
}
const base_address = @ptrToInt(&segment.ptr().ptr[0]);
const start = (base_address + self.byte_index + (alignment - 1) & ~(alignment - 1));
const ret = types.Physical(@intToPtr([*]allowzero align(alignment) u8, start)[0..size]);
const slice = @intToPtr([*]u8, aligned_address)[0..size];
self.byte_index = start - base_address + size;
self.updateNode(previous_node, current_node.?, lost_size + size);
return ret;
std.mem.secureZero(u8, slice);
return slice;
}
pub fn free(self: *Self, slice: types.PhysicalAddress([]u8)) void {
// TODO Proper memory management instead of discard
pub fn mark(self: *Self, slice: []allowzero const u8) void {
// TODO
}
fn nextFreePage(self: *Self) MemoryError!void {
self.byte_index = 0;
pub fn free(self: *Self, slice: []allowzero u8) void {
log(.Verbose, "Freeing {} bytes of memory", .{slice.len});
self.map_index += 1;
while (self.memory_map[self.map_index].usage() != .Free) {
self.map_index += 1;
if (self.map_index >= self.memory_map.len)
return MemoryError.OutOfMemory;
// Because we are freeing there must be at least one memory region
var previous_node: ?*Node = null;
var node = self.memory_map.first orelse @panic("Physical memory manager lost track of some memory");
const slice_start = @ptrToInt(&slice[0]);
var node_start = @ptrToInt(node);
while (!(node_start <= slice_start and
slice_start < node_start + node.data.slice.len))
{
previous_node = node;
node = node.next orelse @panic("Physical memory manager lost track of some memory");
node_start = @ptrToInt(node);
}
self.updateNode(previous_node, node, 0);
}
/// Creates a new node managing the given region
/// When the return value is null the region
/// has been rejected and is unmodified
fn createNode(region: []u8) ?*Node {
if (region.len < node_size)
return null;
const entry_start = @ptrToInt(&region[0]);
// Trade a few bytes for access speed
const region_start = forwardAlign(entry_start, node_align);
const new_node = @intToPtr(*Node, region_start);
const new_region = @intToPtr([*]u8, region_start)[0 .. region.len - (region_start - entry_start)];
new_node.* = Node.init(.{
.used = false,
.slice = new_region,
});
return new_node;
}
/// Merges, splits and updates the memory_map's Nodes
/// to represent the new memory usage of node.
/// previous_node_opt must manage the highest free region
/// smaller than that of node.
/// A usage value of 0 indicates freeing the node;
/// any other value marks usage bytes as used.
fn updateNode(
self: *Self,
previous_node_opt: ?*Node,
node: *Node,
usage: usize,
) void {
if (previous_node_opt) |previous_node| {
assert(previous_node.next == node);
} else {
assert(self.memory_map.first == node);
}
if (usage == 0) {
// This node has been free'd
assert(node.data.used);
node.data.used = false;
// TODO First check whether we can merge with the followup region
// Now try merging with previous region
if (previous_node_opt) |previous_node| {
const previous_start = @ptrToInt(previous_node);
if (!previous_node.data.used and
previous_start + previous_node.data.slice.len ==
@ptrToInt(node))
{
previous_node.data.slice = @intToPtr(
[*]u8,
previous_start,
)[0 .. previous_node.data.slice.len + node.data.slice.len];
_ = previous_node.removeNext();
}
}
self.known_available += node.data.slice.len;
self.known_used -= node.data.slice.len;
} else {
// This node is going to be used
assert(!node.data.used);
node.data.used = true;
const remaining_free_size = node.data.slice.len - usage - node_size;
if (remaining_free_size < node_size) {
// Remaining size is too small for a Node,
// just lose the space
// TODO Try merging the next node
} else {
const node_start = @ptrToInt(node);
// Create a new node managing the remaining free space
const new_region_start = forwardAlign(node_start + node_size + usage, node_align);
const new_region = @intToPtr([*]u8, new_region_start)[0..remaining_free_size];
if (createNode(new_region)) |new_node| {
node.insertAfter(new_node);
} else {
@panic("Memory node creation failed despite size check");
}
// Shrink current node to its new size
node.data.slice = node.data.slice[0 .. new_region_start - node_start];
}
self.known_available -= node.data.slice.len;
self.known_used += node.data.slice.len;
}
}
fn nextFreeRegion(self: *Self) ?[]u8 {
if (self.static_index >= self.static_memory_map.len)
return null;
var entry = self.static_memory_map[self.static_index];
while (entry.usage() != .Free or @ptrToInt(&entry.ptr()[0]) == 0) {
self.static_index += 1;
if (self.static_index >= self.static_memory_map.len)
return null;
entry = self.static_memory_map[self.static_index];
}
self.static_index += 1;
return entry.ptr()[0..entry.size()];
}
pub const NodeState = struct {
used: bool,
slice: []u8,
};
};
/// Manager of a virtual memory map
/// Methods other than init and activate
/// may only be called in identity mapped memory
pub const VirtualMemory = struct {
const Self = @This();
physical_memory: *PhysicalMemory,
allocator: Allocator,
page_table: *paging.PageMapLevel4,
base_address: usize,
offset: usize = 0,
next_page: usize,
read_slice: []const u8,
write_slice: []u8,
pub fn init() Self {}
pub fn init(page_table: *paging.PageMapLevel4, base_address: usize) Self {
return .{
.page_table = page_table,
.base_address = base_address,
.allocator = .{
.reallocFn = kernel_realloc,
.shrinkFn = kernel_shrink,
},
};
}
/// Destroy the memory map
pub fn deinit(self: *Self) void {}
/// Activate this virtual map
pub fn activate(self: *const Self) void {}
/// Activate this memory map
/// Make sure stack and code are mapped properly
/// before calling this
pub fn activate(self: *const Self) void {
cpu.register.cr3Set(@ptrToInt(self.page_table));
}
/// Allocate read only memory
/// Allocate memory with the provided attributes
/// Returned slice is only valid when memory map is activated
pub fn alloc_read(self: *Self, size: usize, alignment: usize) MemoryError!types.VirtualAddress([]u8) {}
pub fn alloc(self: *Self, comptime attributes: Attributes, size: usize, alignment: u29) Allocator.Error![]u8 {
if (size == 0)
return &[0]u8{};
/// Allocate writeable memory
/// Returned slice is only valid when memory map is activated
pub fn alloc_write(self: *Self, size: usize, alignment: usize) MemoryError!types.VirtualAddress([]u8) {}
// Mapped regions must be a multiple of the page size,
// so for now all sizes are extended
const used_size = forwardAlign(size, 4096);
/// Free memory received from a alloc_* call
pub fn free(self: *Self, slice: types.VirtualAddress([]u8)) void {
// Alignment must be at least page size,
// so for now extend it
const used_alignment = std.math.max(4096, alignment);
const physical_slice = try global.physical_memory.?.alloc(used_size, used_alignment);
errdefer global.physical_memory.?.free(physical_slice);
const virtual_base = forwardAlign(self.base_address + self.offset, used_alignment);
self.offset = virtual_base - self.base_address + used_size;
try self.map(
attributes,
@intToPtr([*]align(4096) u8, virtual_base)[0..used_size],
@intToPtr([*]align(4096) u8, @ptrToInt(&physical_slice[0]))[0..physical_slice.len],
);
return @intToPtr([*]u8, virtual_base)[0..size];
}
/// Map addresses of virtual slice to physical slice,
/// enforcing the given attributes
pub fn map(self: *Self, comptime attributes: Attributes, virtual_slice: []align(4096) const u8, physical_slice: []align(4096) u8) Allocator.Error!void {
if (physical_slice.len < virtual_slice.len)
@panic("Invalid memory mapping attempt: Tried to map virtual range to smaller physical one");
if (virtual_slice.len % 4096 != 0)
@panic("Invalid memory mapping attempt: Virtual slice is not a multiple of page size");
if (attributes.user_access and attributes.writeable and attributes.executable)
log(.Warning, "[memory] About to map memory as uwx", .{});
if (false and virtual_slice.len % (1024 * 1024 * 1024) == 0) {
// TODO 1 GiB pages
} else if (false and virtual_slice.len % (2 * 1024 * 1024) == 0) {
// TODO 2 MiB pages
} else {
// 4 KiB pages
var offset: usize = 0;
while (offset < virtual_slice.len) : (offset += 4096) {
const current_page_ptr = &virtual_slice[offset];
var pml4e = self.page_table.getEntry(current_page_ptr);
if (!pml4e.present()) {
log(.Verbose, "[memory] Creating new PDP", .{});
const pdp_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageDirectoryPointer), 4096);
errdefer global.physical_memory.?.free(pdp_memory);
std.mem.secureZero(u8, pdp_memory);
const pdp = @ptrCast(*paging.PageDirectoryPointer, &pdp_memory[0]);
pml4e.read_writeSet(false);
pml4e.user_supervisorSet(false);
if (global.cpu_capabilities.no_execute)
pml4e.no_executeSet(true);
pml4e.ptrSet(pdp);
}
const pdp = pml4e.ptr();
const pdpe = pdp.getEntry(current_page_ptr);
if (!pdpe.present()) {
log(.Verbose, "[memory] Creating new PD", .{});
const pd_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageDirectory), 4096);
errdefer global.physical_memory.?.free(pd_memory);
std.mem.secureZero(u8, pd_memory);
const pd = @ptrCast(*paging.PageDirectory, &pd_memory[0]);
pdpe.read_writeSet(false);
pdpe.user_supervisorSet(false);
if (global.cpu_capabilities.no_execute)
pdpe.no_executeSet(true);
pdpe.ptrSet(pd);
}
const pd = pdpe.ptr();
const pde = pd.getEntry(current_page_ptr);
if (!pde.present()) {
log(.Verbose, "[memory] Creating new PT", .{});
const pt_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageTable), 4096);
errdefer global.physical_memory.?.free(pt_memory);
std.mem.secureZero(u8, pt_memory);
const pt = @ptrCast(*paging.PageTable, &pt_memory[0]);
pde.read_writeSet(false);
pde.user_supervisorSet(false);
if (global.cpu_capabilities.no_execute)
pde.no_executeSet(true);
pde.ptrSet(pt);
}
const pt = pde.ptr();
const pte = pt.getEntry(current_page_ptr);
if (!pte.present()) {
const page = physical_slice[offset .. offset + 4096];
pte.read_writeSet(false);
pte.user_supervisorSet(false);
if (global.cpu_capabilities.no_execute)
pte.no_executeSet(true);
pte.ptrSet(page);
} else {
@panic("[memory] Mapping already mapped memory");
}
// Reduce enforcement of the tables
// The lowest entries will restore these as needed
if (attributes.writeable) {
pml4e.read_writeSet(true);
pdpe.read_writeSet(true);
pde.read_writeSet(true);
pte.read_writeSet(true);
}
if (attributes.user_access) {
pml4e.user_supervisorSet(true);
pdpe.user_supervisorSet(true);
pde.user_supervisorSet(true);
pte.user_supervisorSet(true);
}
if (attributes.executable) {
pml4e.no_executeSet(false);
pdpe.no_executeSet(false);
pde.no_executeSet(false);
pte.no_executeSet(false);
}
pml4e.presentSet(true);
pdpe.presentSet(true);
pde.presentSet(true);
pte.presentSet(true);
}
}
}
/// Free memory received from an alloc call
pub fn free(self: *Self, slice: []u8) void {
// TODO Proper memory management instead of discard
const physical_slice = @ptrCast([*]u8, self.page_table.toPhysical(&slice[0]))[0..slice.len];
global.physical_memory.?.free(physical_slice);
}
fn alloc_page(self: *Self, writeable: bool) MemoryError!types.PhysicalAddress([]u8) {
const page = self.physical_memory.alloc(4096, 4096);
fn kernel_realloc(
self: *Allocator,
old_mem: []u8,
old_alignment: u29,
new_byte_count: usize,
new_alignment: u29,
) Allocator.Error![]u8 {
const virtual = @fieldParentPtr(Self, "allocator", self);
if (old_mem.len == 0)
return virtual.alloc(.{ .writeable = true }, new_byte_count, new_alignment);
if (old_mem.len < new_byte_count) {
const new_memory = try virtual.alloc(.{ .writeable = true }, new_byte_count, new_alignment);
std.mem.copy(u8, new_memory, old_mem);
virtual.free(old_mem);
return new_memory;
}
return Allocator.Error.OutOfMemory;
}
fn kernel_shrink(
self: *Allocator,
old_mem: []u8,
old_alignment: u29,
new_byte_count: usize,
new_alignment: u29,
) []u8 {
const virtual = @fieldParentPtr(Self, "allocator", self);
if (new_byte_count == 0) {
virtual.free(old_mem);
return &[0]u8{};
}
return old_mem[0..new_byte_count];
}
const Attributes = struct {
writeable: bool = false,
user_access: bool = false,
executable: bool = false,
};
};
const MemoryError = error{OutOfMemory};
inline fn forwardAlign(base: usize, alignment: usize) usize {
return base + (alignment - 1) & ~(alignment - 1);
}
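forwardAlign assumes a power-of-two alignment: adding alignment - 1 and masking off the low bits rounds up. A quick in-file check (values illustrative):

test "forwardAlign rounds up to power-of-two boundaries" {
    std.debug.assert(forwardAlign(0x1001, 4096) == 0x2000);
    std.debug.assert(forwardAlign(0x2000, 4096) == 0x2000); // already aligned
    std.debug.assert(forwardAlign(13, 16) == 16);
}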

View File

@@ -3,39 +3,107 @@ const std = @import("std");
const amd = @import("microcode/amd.zig");
const cpu = @import("cpu.zig");
const cpio = @import("formats/cpio.zig");
const debug = @import("debug.zig");
const intel = @import("microcode/intel.zig");
const log = @import("logger.zig").log;
const msr = @import("msr.zig");
const amd_prefix = "sys/microcode/microcode_amd_fam";
pub fn update(initrd: cpio.Cpio) bool {
var initrd_w = initrd;
/// Base path to microcode updates
const microcode_path = "sys/microcode/";
const amd_vendor_string = "AuthenticAMD";
const intel_vendor_string = "GenuineIntel";
const log_update_found = "[microcode] Found microcode update file";
const panic_filename_overflow = "Microcode filename buffer overflow";
/// Returns the patchlevel of the current logical processor,
/// if known
pub fn patchlevel() ?u64 {
const vendor = cpu.CpuId.vendor();
if (std.mem.eql(u8, &vendor, "AuthenticAMD")) {
const sign = cpu.CpuId.signature();
while (initrd_w.nextFile()) |err_file| {
const file = err_file catch continue;
const filename = file.name() catch continue;
return if (std.mem.eql(u8, &vendor, amd_vendor_string))
cpu.rdmsr(msr.ucode_amd_patchlevel)
else
cpu.rdmsr(msr.ia32_bios_sign_id) >> 32;
}
if (filename.len < amd_prefix.len + 2)
continue;
/// Update the microcode of the current logical processor (manufacturer dependent)
/// Returns true if the update was successful
/// This function should be run sequentially on each logical core
/// to ensure the updates are fully applied
pub fn update(context: var, fileFn: fn (@TypeOf(context), []const u8) ?[]const u8) bool {
var filename_buffer = [_]u8{0} ** 64;
if (!std.mem.eql(u8, filename[0..amd_prefix.len], amd_prefix))
continue;
const sign = cpu.CpuId.signature();
const vendor = cpu.CpuId.vendor();
if (!(std.fmt.parseInt(u16, filename[amd_prefix.len .. amd_prefix.len + 2], 16) catch 0 == sign.family()))
continue;
if (std.mem.eql(u8, &vendor, amd_vendor_string)) {
const family = sign.family();
const filename = std.fmt.bufPrint(&filename_buffer, microcode_path ++ "amd/microcode_amd_fam{0x:0>2}h.bin", .{family}) catch @panic(panic_filename_overflow);
debug.print("[MICROCODE] Found compatible microcode data: {}\r\n", .{filename});
const microcode = if (fileFn(context, filename)) |mc|
mc
else if (fileFn(context, microcode_path ++ "amd/microcode_amd.bin")) |mc|
mc
else
return false;
if (amd.apply(file.data() catch return false) catch return false) {
return true;
}
log(.Debug, log_update_found, .{});
break;
if (amd.apply(microcode)) |applied| {
return applied;
} else |err| {
log(.Debug, "[microcode] Microcode update failed: {}", .{err});
return false;
}
} else if (std.mem.eql(u8, &vendor, intel_vendor_string)) {
const filename = std.fmt.bufPrint(&filename_buffer, microcode_path ++ "intel/{0x:0>2}-{1x:0>2}-{2x:0>2}", .{ sign.baseFamily(), sign.baseModel(), sign.stepping() }) catch @panic(panic_filename_overflow);
const microcode = if (fileFn(context, filename)) |mc|
mc
else
return false;
log(.Debug, log_update_found, .{});
const microcode_align = if (@ptrToInt(&microcode[0]) & 0b1111 == 0) bl: {
break :bl @intToPtr([*]align(16) u8, @ptrToInt(&microcode[0]))[0..microcode.len];
} else {
log(.Warning, "[microcode] Update failed due to misaligned data", .{});
return false;
};
if (intel.apply(microcode_align)) |applied| {
return applied;
} else |err| {
log(.Debug, "[microcode] Microcode update failed: {}", .{err});
return false;
}
}
return false;
}
pub fn update_cpio(disk: cpio.Cpio) bool {
const fileFn = struct {
pub fn fileFn(context: cpio.Cpio, filename: []const u8) ?[]const u8 {
var cpio_context = context;
if (cpio_context.findFile(filename)) |err_file| {
const file = err_file catch return null;
return file.data() catch return null;
}
return null;
}
}.fileFn;
return update(disk, fileFn);
}
pub const MicrocodeError = error{
EndOfData,
Invalid,
ChecksumMismatch,
Incompatible,
};
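Since update() is generic over the lookup callback, sources other than the cpio initrd can be plugged in; a sketch with a hypothetical in-memory file table:

const NamedFile = struct {
    name: []const u8,
    data: []const u8,
};

fn tableLookup(context: []const NamedFile, filename: []const u8) ?[]const u8 {
    for (context) |file| {
        if (std.mem.eql(u8, file.name, filename))
            return file.data;
    }
    return null;
}

// Applied like the cpio wrapper: const applied = update(table, tableLookup);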

View File

@@ -7,9 +7,11 @@ const assert = std.debug.assert;
const eql = std.mem.eql;
const cpu = @import("../cpu.zig");
const debug = @import("../debug.zig");
const log = @import("../logger.zig").log;
const msr = @import("../msr.zig");
const MicrocodeError = @import("../microcode.zig").MicrocodeError;
// TODO Test
pub fn apply(microcode: []const u8) MicrocodeError!bool {
var mc = try Microcode.init(microcode);
@@ -30,12 +32,8 @@ pub fn apply(microcode: []const u8) MicrocodeError!bool {
// Find applicable patch
while (try mc.nextPatch()) |patch| {
if (patch.header.processor_rev_id == rev_id) {
const patchlevel = cpu.rdmsr(msr.ucode_amd_patchlevel);
if (patch.header.patch_id > patchlevel) {
debug.print("[MICROCODE] Found compatible patch: {x}\r\n", .{patch.header.patch_id});
cpu.wrmsr(msr.ucode_amd_patchloader, @ptrToInt(&patch) + @sizeOf(PatchHeader));
}
log(.Debug, "[microcode/amd] Compatible microcode update found: {x}", .{patch.header.patch_id});
cpu.wrmsr(msr.ucode_amd_patchloader, @ptrToInt(&patch) + @sizeOf(PatchHeader));
return true;
}
@@ -122,12 +120,6 @@ const Microcode = struct {
}
};
const MicrocodeError = error{
EndOfData,
Invalid,
ChecksumMismatch,
};
const CpuEquivalenceTable = packed struct {
processor_signature: u32,
errata_mask: u32,

145
src/microcode/intel.zig Normal file
View File

@@ -0,0 +1,145 @@
const std = @import("std");
const assert = std.debug.assert;
const cpu = @import("../cpu.zig");
const log = @import("../logger.zig").log;
const MicrocodeError = @import("../microcode.zig").MicrocodeError;
const msr = @import("../msr.zig");
// TODO Test
pub fn apply(microcode: []align(16) const u8) MicrocodeError!bool {
var mc = try Microcode.init(microcode);
const sign = cpu.CpuId.signature();
const plat_id = cpu.rdmsr(msr.ia32_platform_id);
if (mc.compatible(sign, plat_id)) {
if (mc.header.loader_revision != 0x00000001)
return MicrocodeError.Incompatible;
log(.Debug, "[microcode/intel] Compatible microcode update found: {x}", .{mc.header.update_revision});
cpu.wrmsr(msr.ia32_bios_updt_trig, @ptrToInt(&mc.microcode[0]));
}
return false;
}
pub fn parse(microcode: []const u8) MicrocodeError!Microcode {
return try Microcode.init(microcode);
}
const Microcode = struct {
const Self = @This();
header: *align(16) const MicrocodeUpdateHeader,
microcode: []align(16) const u8,
processor_signatures: ?[]const ProcessorSignature,
pub fn init(microcode: []align(16) const u8) MicrocodeError!Self {
if (microcode.len < @sizeOf(MicrocodeUpdateHeader))
return MicrocodeError.EndOfData;
const header = @ptrCast(*align(16) const MicrocodeUpdateHeader, &microcode[0]);
if (header.header_version != 0x00000001)
return MicrocodeError.Incompatible;
if (header.total_size > microcode.len)
return MicrocodeError.EndOfData;
{
var checksum: u32 = 0;
for (@bytesToSlice(u32, microcode)) |v|
checksum +%= v;
if (checksum != 0)
return MicrocodeError.ChecksumMismatch;
}
const data_size = if (header.data_size == 0) 2000 else header.data_size;
const microcode_end = @sizeOf(MicrocodeUpdateHeader) + data_size;
var processor_signatures: ?[]const ProcessorSignature = null;
if (header.total_size > microcode_end) {
// An extended processor signature table is present
const signature_table_header = @ptrCast(*align(1) const ExtendedProcessorSignatureTableHeader, &microcode[microcode_end]);
var checksum: u32 = 0;
for (@bytesToSlice(u32, microcode[microcode_end..])) |v|
checksum +%= v;
if (checksum != 0)
return MicrocodeError.ChecksumMismatch;
const table_start = microcode_end + @sizeOf(ExtendedProcessorSignatureTableHeader);
const table_end = table_start + @sizeOf(ProcessorSignature) * signature_table_header.count;
processor_signatures = @bytesToSlice(ProcessorSignature, microcode[table_start..table_end]);
}
return Self{
.header = header,
.microcode = microcode[@sizeOf(MicrocodeUpdateHeader)..microcode_end],
.processor_signatures = processor_signatures,
};
}
pub fn compatible(self: *const Self, signature: cpu.CpuId.Signature, platform_id: u64) bool {
const flag = @intCast(u3, platform_id >> 50 & 0b111);
if (signature.value == self.header.processor_signature) {
return self.header.processor_flags & @as(u32, 1) << flag != 0;
}
if (self.processor_signatures) |signatures| {
for (signatures) |extra_sig| {
if (signature.value == extra_sig.processor_signature)
return extra_sig.processor_flags & @as(u32, 1) << flag != 0;
}
}
return false;
}
};
const MicrocodeUpdateHeader = packed struct {
header_version: u32,
update_revision: u32,
date: u32,
processor_signature: u32,
checksum: u32,
loader_revision: u32,
processor_flags: u8,
_reserved1: u8, // Not a u24 or [3]u8 due to compiler bug
_reserved2: u8,
_reserved3: u8,
data_size: u32,
total_size: u32,
_reserved4: u32, // Not a [3]u32 due to compiler bug
_reserved5: u32,
_reserved6: u32,
comptime {
assert(@sizeOf(@This()) == 48);
}
};
const ExtendedProcessorSignatureTableHeader = packed struct {
count: u32,
checksum: u32,
_reserved1: u32, // Not a [3]u32 due to compiler bug
_reserved2: u32,
_reserved3: u32,
comptime {
assert(@sizeOf(@This()) == 20);
}
};
const ProcessorSignature = packed struct {
processor_signature: u32,
processor_flags: u32,
checksum: u32,
comptime {
assert(@sizeOf(@This()) == 12);
}
};
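The checksum convention above is that all u32 words of an update, the checksum field included, wrap-add to zero. A sketch of deriving such a field, assuming the buffer's checksum word is still zero:

fn fitChecksum(words: []const u32) u32 {
    var sum: u32 = 0;
    for (words) |w|
        sum +%= w;
    return ~sum +% 1; // two's complement, so sum + result wraps to zero
}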

View File

@@ -1,3 +1,7 @@
pub const apic_base_address: u64 = 0x0000001b;
pub const ia32_platform_id: u32 = 0x00000017;
pub const apic_base_address: u32 = 0x0000001b;
pub const ia32_bios_updt_trig: u32 = 0x00000079;
pub const ia32_bios_sign_id: u32 = 0x0000008b;
pub const ucode_amd_patchlevel: u32 = 0x0000008b;
pub const ia32_efer: u32 = 0xc0000080;
pub const ucode_amd_patchloader: u32 = 0xc0010020;

View File

@@ -1,4 +1,92 @@
const std = @import("std");
pub usingnamespace @import("paging/pml4.zig");
pub usingnamespace @import("paging/pdp.zig");
pub usingnamespace @import("paging/pd.zig");
pub usingnamespace @import("paging/pt.zig");
const global = @import("global.zig");
const log = @import("logger.zig").log;
/// Allow access to up to pml4_entries * 0.5 TiB physical address space
/// using pml4_entries * 2 MiB memory
const pml4_entries: usize = 2;
/// Initializes paging on the processor
/// Currently requires the system to already use paging,
/// the existing memory map is merely reconfigured to the expected layout.
/// NOTE All allocations from global.physical_memory need to already be usable
pub fn initialize(page_table: *PageMapLevel4) void {
var existing_map_warning: bool = true;
// Identity map the lower half of virtual address space to physical memory
if (!global.cpu_capabilities.gigabyte_pages)
@panic("CPU not supported, 1 GiB page support required");
for (page_table.entries[0..pml4_entries]) |*pml4_entry, half_tbytes| {
if (!pml4_entry.present()) {
const pdp_memory = global.physical_memory.?.alloc(@sizeOf(PageDirectoryPointer), 4096) catch @panic("Failed to initialize kernel memory map (allocation failure)");
std.mem.secureZero(u8, pdp_memory);
const pdp = @ptrCast(*PageDirectoryPointer, &pdp_memory[0]);
pml4_entry.ptrSet(pdp);
}
@fence(.SeqCst);
const pdp = pml4_entry.ptr();
for (pdp.entries) |*pdp_entry, gbytes| {
// Create a copy of the original entry so it can be free'd later.
// This is done after remapping to ensure
// that there is no memory hole during cleanup.
const old_pdp_entry = pdp_entry.*;
// Also create a copy of the original entry for modification.
// This is done to ensure the entry is not in an
// inconsistent state during bit manipulation.
// Relevant for when the entry's memory is managed by itself.
var new_pdp_entry = pdp_entry.*;
const physical_address = (half_tbytes * 512 + gbytes) * 1024 * 1024 * 1024;
new_pdp_entry.ptrPsSet(@intToPtr(*allowzero u8, physical_address));
new_pdp_entry.psSet(true);
if (global.cpu_capabilities.no_execute)
new_pdp_entry.no_executeSet(true);
new_pdp_entry.read_writeSet(true);
new_pdp_entry.user_supervisorSet(false);
new_pdp_entry.presentSet(true);
pdp_entry.* = new_pdp_entry;
@fence(.SeqCst);
// TODO The old entries might not be allowed to be free'd
// for some reason
// if (old_pdp_entry.present() and !old_pdp_entry.ps()) {
// if (existing_map_warning) {
// log(.Warning, "[paging] Freeing existing memory mapping", .{});
// existing_map_warning = false;
// }
// const pd = old_pdp_entry.ptr();
// for (pd.entries) |*pd_entry| {
// if (pd_entry.present() and !pd_entry.ps()) {
// const pt = pd_entry.ptr();
// const pt_memory = @ptrCast([*]u8, pt)[0..@sizeOf(@TypeOf(pt.*))];
// std.mem.secureZero(u8, pt_memory);
// global.physical_memory.?.free(pt_memory);
// }
// }
// const pd_memory = @ptrCast([*]u8, pd)[0..@sizeOf(@TypeOf(pd.*))];
// std.mem.secureZero(u8, pd_memory);
// global.physical_memory.?.free(pd_memory);
// }
}
@fence(.SeqCst);
if (global.cpu_capabilities.no_execute)
pml4_entry.no_executeSet(true);
pml4_entry.read_writeSet(true);
pml4_entry.user_supervisorSet(false);
pml4_entry.presentSet(true);
}
}

View File

@@ -2,7 +2,21 @@ pub fn PageEntryMixin() type {
return struct {
const present_bit: u64 = 1 << 0;
const read_write_bit: u64 = 1 << 1;
const user_supervisor_bit: u64 = 1 << 2;
const ps_bit: u64 = 1 << 7;
const no_execute_bit: u64 = 1 << 63;
pub inline fn no_execute(self: var) bool {
return self.value & no_execute_bit != 0;
}
pub inline fn no_executeSet(self: var, is_no_execute: bool) void {
if (is_no_execute) {
self.value |= no_execute_bit;
} else {
self.value &= ~no_execute_bit;
}
}
pub inline fn present(self: var) bool {
return self.value & present_bit != 0;
@@ -28,7 +42,7 @@ pub fn PageEntryMixin() type {
}
}
pub inline fn read_write(self: var, allow: bool) void {
pub inline fn read_write(self: var) bool {
return self.value & read_write_bit != 0;
}
@@ -39,5 +53,17 @@ pub fn PageEntryMixin() type {
self.value &= ~read_write_bit;
}
}
pub inline fn user_supervisor(self: var) bool {
return self.value & user_supervisor_bit != 0;
}
pub inline fn user_supervisorSet(self: var, allow_user: bool) void {
if (allow_user) {
self.value |= user_supervisor_bit;
} else {
self.value &= ~user_supervisor_bit;
}
}
};
}

View File

@@ -1,33 +1,32 @@
const std = @import("std");
const assert = std.debug.assert;
const types = @import("../types.zig");
const PageTable = @import("pt.zig").PageTable;
pub const PageDirectory = extern struct {
pub const PageDirectory = packed struct {
const Self = @This();
entries: [512]Entry_,
pub fn getEntry(self: *Self, comptime T: type, ptr: types.VirtualAddress(T)) *Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntry(self: *Self, ptr: var) *Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 21 & 0x1ff];
}
pub fn getEntryConst(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) *const Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntryConst(self: *const Self, ptr: var) *const Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 21 & 0x1ff];
}
pub fn toPhysical(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) ?types.PhysicalAddress(T) {
const entry = self.getEntryConst(T, ptr);
pub fn toPhysical(self: *const Self, ptr: var) ?@TypeOf(ptr) {
const entry = self.getEntryConst(ptr);
if (!entry.present()) return null;
const address = @ptrToInt(ptr.ptr);
const address = @ptrToInt(ptr);
return if (entry.ps())
types.PhysicalAddress(T).init(@intToPtr(T, @ptrToInt(entry.ptr().ptr) + (address & 0x1fffff)))
@intToPtr(@TypeOf(ptr), entry.pageStart() + (address & 0x1fffff))
else
entry.ptr().ptr.toPhysical(T, ptr);
entry.ptr().toPhysical(ptr);
}
pub const Entry_ = packed struct {
@@ -39,9 +38,23 @@ pub const PageDirectory = extern struct {
usingnamespace @import("mixin.zig").PageEntryMixin();
pub inline fn ptr(self: @This()) types.PhysicalAddress(*PageTable) {
const address = self.value & 0xffffffffff000;
return types.PhysicalAddress(*PageTable).init(@intToPtr(*PageTable, address));
const ptr_mask: u64 = 0xffffffffff000;
pub inline fn pageStart(self: @This()) usize {
return self.value & ptr_mask;
}
pub inline fn ptr(self: @This()) *PageTable {
const address = self.value & ptr_mask;
return @intToPtr(*PageTable, address);
}
pub inline fn ptrSet(self: *@This(), new_ptr: *PageTable) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(new_ptr);
}
pub inline fn ptrPsSet(self: *@This(), new_ptr: *allowzero u8) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(new_ptr);
}
};
@@ -78,9 +91,9 @@ pub const PageDirectory = extern struct {
/// Forbid execution of lower levels
no_execute: bool,
pub inline fn ptr(self: Entry) types.PhysicalAddress(*PageTable) {
pub inline fn ptr(self: Entry) *PageTable {
const address = @bitCast(u64, self) & 0xffffffffff000;
return types.PhysicalAddress(*PageTable).init(@intToPtr(*PageTable, address));
return @intToPtr(*PageTable, address);
}
};
};

View File

@@ -1,33 +1,32 @@
const std = @import("std");
const assert = std.debug.assert;
const types = @import("../types.zig");
const PageDirectory = @import("pd.zig").PageDirectory;
pub const PageDirectoryPointer = extern struct {
pub const PageDirectoryPointer = packed struct {
const Self = @This();
entries: [512]Entry_,
pub fn getEntry(self: *Self, comptime T: type, ptr: types.VirtualAddress(T)) *Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntry(self: *Self, ptr: var) *Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 30 & 0x1ff];
}
pub fn getEntryConst(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) *const Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntryConst(self: *const Self, ptr: var) *const Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 30 & 0x1ff];
}
pub fn toPhysical(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) ?types.PhysicalAddress(T) {
const entry = self.getEntryConst(T, ptr);
pub fn toPhysical(self: *const Self, ptr: var) ?@TypeOf(ptr) {
const entry = self.getEntryConst(ptr);
if (!entry.present()) return null;
const address = @ptrToInt(ptr.ptr);
const address = @ptrToInt(ptr);
return if (entry.ps())
types.PhysicalAddress(T).init(@intToPtr(T, @ptrToInt(entry.ptr().ptr) + (address & 0x3fffffff)))
@intToPtr(@TypeOf(ptr), entry.pageStart() + (address & 0x3fffffff))
else
entry.ptr().ptr.toPhysical(T, ptr);
entry.ptr().toPhysical(ptr);
}
pub const Entry_ = packed struct {
@@ -39,9 +38,23 @@ pub const PageDirectoryPointer = extern struct {
usingnamespace @import("mixin.zig").PageEntryMixin();
pub inline fn ptr(self: @This()) types.PhysicalAddress(*PageDirectory) {
const address = self.value & 0xffffffffff000;
return types.PhysicalAddress(*PageDirectory).init(@intToPtr(*PageDirectory, address));
const ptr_mask: u64 = 0xffffffffff000;
pub inline fn pageStart(self: @This()) usize {
return self.value & ptr_mask;
}
pub inline fn ptr(self: @This()) *PageDirectory {
const address = self.value & ptr_mask;
return @intToPtr(*PageDirectory, address);
}
pub inline fn ptrSet(self: *@This(), new_ptr: *PageDirectory) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(new_ptr);
}
pub inline fn ptrPsSet(self: *@This(), new_ptr: *allowzero u8) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(new_ptr);
}
};
@@ -78,9 +91,9 @@ pub const PageDirectoryPointer = extern struct {
/// Forbid execution of lower levels
no_execute: bool,
pub inline fn ptr(self: @This()) types.PhysicalAddress(*PageDirectory) {
pub inline fn ptr(self: @This()) *PageDirectory {
const address = @bitCast(u64, self) & 0xffffffffff000;
return types.PhysicalAddress(*PageDirectory).init(@intToPtr(*PageDirectory, address));
return @intToPtr(*PageDirectory, address);
}
};
};

View File

@@ -1,10 +1,9 @@
const std = @import("std");
const assert = std.debug.assert;
const types = @import("../types.zig");
const PageDirectoryPointer = @import("pdp.zig").PageDirectoryPointer;
pub const PageMapLevel4 = extern struct {
pub const PageMapLevel4 = packed struct {
const Self = @This();
entries: [512]Entry_,
@@ -13,55 +12,20 @@ pub const PageMapLevel4 = extern struct {
return @intToPtr(*Self, cr3 & 0xffffffffff000);
}
pub fn getEntry(self: *Self, comptime T: type, ptr: types.VirtualAddress(T)) *Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntry(self: *Self, ptr: var) *Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 39 & 0x1ff];
}
pub fn getEntryConst(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) *const Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntryConst(self: *const Self, ptr: var) *const Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 39 & 0x1ff];
}
pub fn isWriteable(self: *Self, comptime T: type, ptr: types.VirtualAddress(T), writeable: bool) bool {
const pml4e = self.getEntry(T, ptr);
if (!pml4e.present())
return false;
if (writeable)
pml4e.read_writeSet(true);
const pdpe = pml4e.ptr().ptr.getEntry(T, ptr);
if (!pdpe.present())
return false;
if (pdpe.ps()) {
pdpe.read_writeSet(writeable);
return true;
}
if (writeable)
pdpe.read_writeSet(true);
const pde = pdpe.ptr().ptr.getEntry(T, ptr);
if (!pde.present())
return false;
if (pde.ps()) {
pde.read_writeSet(writeable);
return true;
}
if (writeable)
pde.read_writeSet(true);
const pte = pde.ptr().ptr.getEntry(T, ptr);
if (!pte.present())
return false;
pte.read_writeSet(writeable);
return true;
}
pub fn toPhysical(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) ?types.PhysicalAddress(T) {
const entry = self.getEntryConst(T, ptr);
pub fn toPhysical(self: *const Self, ptr: var) ?@TypeOf(ptr) {
const entry = self.getEntryConst(ptr);
if (!entry.present()) return null;
return entry.ptr().ptr.toPhysical(T, ptr);
return entry.ptr().toPhysical(ptr);
}
pub const Entry_ = packed struct {
@@ -73,9 +37,15 @@ pub const PageMapLevel4 = extern struct {
usingnamespace @import("mixin.zig").PageEntryMixin();
pub inline fn ptr(self: @This()) types.PhysicalAddress(*PageDirectoryPointer) {
const address = self.value & 0xffffffffff000;
return types.PhysicalAddress(*PageDirectoryPointer).init(@intToPtr(*PageDirectoryPointer, address));
const ptr_mask: u64 = 0xffffffffff000;
pub inline fn ptr(self: @This()) *PageDirectoryPointer {
const address = self.value & ptr_mask;
return @intToPtr(*PageDirectoryPointer, address);
}
pub inline fn ptrSet(self: *@This(), new_ptr: *PageDirectoryPointer) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(new_ptr);
}
};
@@ -109,9 +79,9 @@ pub const PageMapLevel4 = extern struct {
/// Forbid execution of lower levels
no_execute: bool,
pub inline fn ptr(self: Entry) types.PhysicalAddress(*PageDirectoryPointer) {
pub inline fn ptr(self: Entry) *PageDirectoryPointer {
const address = @bitCast(u64, self) & 0xffffffffff000;
return types.PhysicalAddress(*PageDirectoryPointer).init(@intToPtr(*PageDirectoryPointer, address));
return @intToPtr(*PageDirectoryPointer, address);
}
};
};

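Taken together, toPhysical now walks PML4 -> PDP -> page directory -> page table, returning null at the first non-present entry and short-circuiting on 1 GiB (ps) pages at the PDP level. A hedged usage sketch; the real call sites are not part of this diff, and it assumes (as entry.ptr() does throughout) that table physical addresses are directly dereferenceable:

// Sketch only: translate a mapped kernel address to its physical counterpart.
const pml4 = PageMapLevel4.fromCr3(cpu.register.cr3());
var value: u32 = 0;
if (pml4.toPhysical(&value)) |phys| {
    log(.Debug, "virt 0x{x} -> phys 0x{x}", .{ @ptrToInt(&value), @ptrToInt(phys) });
}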

@@ -2,27 +2,27 @@ const std = @import("std");
const assert = std.debug.assert;
const types = @import("../types.zig");
pub const PageTable = extern struct {
pub const PageTable = packed struct {
const Self = @This();
entries: [512]Entry_,
pub fn getEntry(self: *Self, comptime T: type, ptr: types.VirtualAddress(T)) *Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntry(self: *Self, ptr: var) *Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 12 & 0x1ff];
}
pub fn getEntryConst(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) *const Entry_ {
const address = @ptrToInt(ptr.ptr);
pub fn getEntryConst(self: *const Self, ptr: var) *const Entry_ {
const address = @ptrToInt(ptr);
return &self.entries[address >> 12 & 0x1ff];
}
pub fn toPhysical(self: *const Self, comptime T: type, ptr: types.VirtualAddress(T)) ?types.PhysicalAddress(T) {
const entry = self.getEntryConst(T, ptr);
pub fn toPhysical(self: *const Self, ptr: var) ?@TypeOf(ptr) {
const entry = self.getEntryConst(ptr);
if (!entry.present()) return null;
const address = @ptrToInt(ptr.ptr);
return types.PhysicalAddress(T).init(@intToPtr(T, @ptrToInt(&entry.ptr().ptr[address & 4095])));
const address = @ptrToInt(ptr);
return @intToPtr(@TypeOf(ptr), @ptrToInt(&entry.ptr()[address & 4095]));
}
pub const Entry_ = packed struct {
@@ -34,9 +34,15 @@ pub const PageTable = extern struct {
usingnamespace @import("mixin.zig").PageEntryMixin();
pub inline fn ptr(self: @This()) types.PhysicalAddress([]align(4096) u8) {
const address = self.value & 0xffffffffff000;
return types.PhysicalAddress([]align(4096) u8).init(@intToPtr([*]align(4096) u8, address)[0..4096]);
const ptr_mask: u64 = 0xffffffffff000;
pub inline fn ptr(self: @This()) []align(4096) u8 {
const address = self.value & ptr_mask;
return @intToPtr([*]align(4096) u8, address)[0..4096];
}
pub inline fn ptrSet(self: *@This(), new_ptr: []align(4096) u8) void {
self.value = (self.value & ~ptr_mask) | @ptrToInt(&new_ptr[0]);
}
};
@@ -76,9 +82,9 @@ pub const PageTable = extern struct {
/// Forbid execution of lower levels
no_execute: bool,
pub inline fn ptr(self: Entry) types.PhysicalAddress(*align(4096) []u8) {
pub inline fn ptr(self: Entry) []align(4096) u8 {
const address = @bitCast(u64, self) & 0xffffffffff000;
return types.Physical(@intToPtr([*]align(4096) u8, address)[0..4096]);
return @intToPtr([*]align(4096) u8, address)[0..4096];
}
};
};

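All three ptrSet variants share one pattern: clear the 40-bit frame field (bits 12 to 51, the 0xffffffffff000 mask) with the inverted mask, then OR in the new page-aligned address, leaving the flag bits untouched. An illustrative round trip, assuming Entry_ is the bare packed u64 value its field accesses suggest:

var frame: [4096]u8 align(4096) = undefined;
var entry: PageTable.Entry_ = undefined;
entry.value = 0x3; // assumed layout: present | writeable in the low bits
entry.ptrSet(frame[0..]);
assert(@ptrToInt(&entry.ptr()[0]) == @ptrToInt(&frame)); // frame recovered
assert((entry.value & 0x3) == 0x3); // flags survived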
src/panic.zig (new file)

@@ -0,0 +1,157 @@
const builtin = @import("builtin");
const std = @import("std");
const apic = @import("apic.zig");
const cpio = @import("formats/cpio.zig");
const cpu = @import("cpu.zig");
const global = @import("global.zig");
const log = @import("logger.zig").log;
var panicking: bool = false;
var panic_allocator: ?std.heap.FixedBufferAllocator = null;
var panic_memory: []u8 = undefined;
pub fn initialize() void {
panic_memory = global.kernel_memory.?.alloc(.{ .writeable = true }, 32 * 1024 * 1024, 4096) catch |err| {
log(.Warning, "Failed to allocate panic memory, extended debug features are unavailable: {}", .{err});
return;
};
panic_allocator = std.heap.FixedBufferAllocator.init(panic_memory);
}
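initialize() grabs the 32 MiB arena while the normal kernel allocator is still trustworthy; during a panic, openSelfDebugInfo below allocates out of that arena instead. A FixedBufferAllocator simply bump-allocates from the slice it is given; a minimal sketch with an illustrative buffer size, using the same &fba.allocator pattern as panic() below:

var buf: [1024]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(buf[0..]);
const bytes = fba.allocator.alloc(u8, 64) catch unreachable;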
pub fn panic(reason: []const u8, _: ?*builtin.StackTrace) noreturn {
@setCold(true);
if (!panicking) {
panicking = true;
displayErrorScreen();
log(.Error, "KERNEL PANIC: {}", .{reason});
// TODO This is only here because APs don't do anything atm
if (apic.bootstrapCpuCore() and global.bp_stack != null) {
var stack_iter = if (builtin.mode == .Debug)
StackTrace.init(@frameAddress(), @ptrToInt(&global.bp_stack.?[0]) + global.bp_stack.?.len)
else blk: {
log(.Warning, "Using a release build, stack trace information might be incorrect", .{});
break :blk struct {
// A fake iterator to get a one entry stack trace in optimised builds
value: ?usize = null,
pub fn next(self: *@This()) ?usize {
const ret = self.value;
self.value = null;
return ret;
}
}{ .value = @returnAddress() };
};
var debug_info: ?*std.debug.DebugInfo = null;
if (panic_allocator) |*alloc| {
if (std.debug.openSelfDebugInfo(&alloc.allocator)) |*di| {
debug_info = di;
} else |err| {
log(.Warning, "[panic] Reading debug info failed: {}", .{err});
}
}
log(.Error, "Stack trace:", .{});
while (stack_iter.next()) |address| {
if (debug_info) |di| {
di.printSourceAtAddress(global.log_stream, address, .no_color, printSourceLine) catch |_| {};
} else {
log(.Error, "0x{0x:0>16}", .{address});
}
}
}
} else {
log(.Error, "KERNEL PANIC PANIC, oof: {}", .{reason});
}
while (true) {
cpu.hlt();
}
}
fn displayErrorScreen() void {
const ppm = @import("formats/ppm.zig");
if (global.framebuffer) |*fb| {
const initrd = cpio.Cpio.init(global.initrd orelse return);
if (initrd.findFile("debug/panic.ppm")) |file_err| {
const file = file_err catch return;
var picture = ppm.Ppm.init(file.data() catch return) catch return;
var x = fb.width -% @intCast(u32, picture.width);
if (x > fb.width) // We wrapped around
x = 0;
var y = fb.height -% @intCast(u32, picture.height);
if (y > fb.height)
y = 0;
fb.image(x, y, &picture);
}
}
}
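The placement math above uses wrapping subtraction to right-align the image and detect an oversized picture in one step: when the picture is wider than the framebuffer, fb.width -% picture.width wraps to a value larger than fb.width, which the following comparison catches. With illustrative numbers:

var x: u32 = 800 -% 1024; // picture wider than the screen: wraps to 0xffffff20
if (x > 800) x = 0; // wrap detected, clamp to the left edge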
fn printSourceLine(out_stream: var, line_info: std.debug.LineInfo) PrintError!void {
return PrintError.FileNotFound;
}
const PrintError = error{
BadPathName,
FileNotFound,
EndOfFile,
};
const StackTrace = struct {
const Self = @This();
frame_pointer: usize,
stack_start: usize,
pub fn init(frame_pointer: usize, stack_start: usize) Self {
return .{
.frame_pointer = frame_pointer,
.stack_start = stack_start,
};
}
pub fn next(self: *Self) ?usize {
if (self.frame_pointer > self.stack_start - 2 * @sizeOf(usize)) {
return null;
}
const return_address = @intToPtr(*usize, self.frame_pointer + @sizeOf(usize)).*;
self.frame_pointer = @intToPtr(*usize, self.frame_pointer).*;
return return_address;
}
};
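StackTrace leans on the System V x86-64 frame layout: with frame pointers kept (hence the Debug-only use in panic() above), the saved rbp lives at [rbp] and the return address at [rbp + 8], so next() costs two loads per frame, and the stack_start bound stops the walk before it leaves the stack. Usage mirrors the loop in panic():

// Sketch: walk and print the current call chain (Debug builds only).
var it = StackTrace.init(@frameAddress(), @ptrToInt(&global.bp_stack.?[0]) + global.bp_stack.?.len);
while (it.next()) |return_address| {
    log(.Error, "0x{0x:0>16}", .{return_address});
}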
pub const os = struct {
pub const debug = struct {
pub fn openSelfDebugInfo(allocator: *std.mem.Allocator) !std.debug.DebugInfo {
var initrd = cpio.Cpio.init(global.initrd orelse return error.InitrdNotFound);
if (initrd.findFile("sys/core")) |err_file| {
const file = err_file catch return error.CouldNotReadKernel;
const kernel_file = file.data() catch return error.CouldNotReadKernel;
// Does not need to be freed: either the system is failing,
// or the debug data is reused on the next failure
const kernel_copy = try allocator.alloc(u8, kernel_file.len);
std.mem.copy(u8, kernel_copy, kernel_file);
var dwarfinfo = try std.debug.openElfDebugInfo(
allocator,
kernel_copy,
);
try std.debug.openDwarfDebugInfo(&dwarfinfo, allocator);
return dwarfinfo;
}
return error.KernelNotFound;
}
};
};


@@ -2,52 +2,105 @@ const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const debug = @import("debug.zig");
const kernel = @import("kernel.zig");
const log = @import("logger.zig").log;
usingnamespace @import("panic.zig");
const apic = @import("apic.zig");
const bootboot = @import("bootboot.zig");
const cpu = @import("cpu.zig");
const framebuffer = @import("framebuffer.zig");
const global = @import("global.zig");
const kernel = @import("kernel.zig");
const lock = @import("lock.zig");
const log = @import("logger.zig").log;
const memory = @import("memory.zig");
const panic_mod = @import("panic.zig");
const paging = @import("paging.zig");
const types = @import("types.zig");
const kernel_stack_size: usize = 8 * 4096;
var ap_lock: lock.SpinLock = lock.SpinLock.init();
var init_stack: [8192]u8 align(std.Target.stack_align) = undefined;
fn init() noreturn {
// Initialize framebuffer
// TODO Possible bootboot bug
const fb_size = if (bootboot.bootboot.fb_size != 0) bootboot.bootboot.fb_size else bootboot.bootboot.fb_width * bootboot.bootboot.fb_height * 4;
global.framebuffer = framebuffer.Framebuffer.init(
@ptrCast([*]volatile u8, &bootboot.fb)[0..fb_size],
bootboot.bootboot.fb_width,
bootboot.bootboot.fb_height,
bootboot.framebuffer_pixel_type() catch @panic("Unsupported framebuffer pixel type"),
);
global.framebuffer.?.clear();
global.framebuffer_stream = global.framebuffer.?.outStream();
global.log_target = &global.framebuffer_stream.?.stream;
log(.Debug, "Framebuffer ({}x{} {}) initialized", .{
bootboot.bootboot.fb_width,
bootboot.bootboot.fb_height,
@tagName(bootboot.framebuffer_pixel_type() catch unreachable),
});
log(.Debug, "Logging initialized", .{});
// Initrd
global.initrd = @intToPtr([*]const u8, bootboot.bootboot.initrd_ptr)[0..bootboot.bootboot.initrd_size];
// Establish CPU capabilities
global.cpu_capabilities.initialize();
// Initialize memory management
log(.Debug, "Initializing memory management", .{});
const mmap = bootboot.memory_map();
const page_table = paging.PageMapLevel4.fromCr3(cpu.register.cr3());
const kernel_dynamic_data_address = 0 -% std.math.pow(usize, 2, global.cpu_capabilities.virtual_address_width - 1);
global.physical_memory = memory.PhysicalMemory.init(mmap);
global.kernel_memory = memory.VirtualMemory.init(page_table, kernel_dynamic_data_address);
paging.initialize(page_table);
// Initialize panic memory
panic_mod.initialize();
// Setup complete, allow APs to continue
ap_lock.release();
// Call main kernel function
global.bp_stack = global.kernel_memory.?.alloc(.{ .writeable = true }, kernel_stack_size, std.Target.stack_align) catch @panic("Unable to allocate kernel stack");
const new_stack_address = @ptrToInt(&global.bp_stack.?[0]) + kernel_stack_size;
export fn _start() callconv(.Naked) noreturn {
// NO STACK FRAME AVAILABLE
// DO NOT INLINE CALL
asm volatile (
\\xor %%rax, %%rax
\\xor %%rbx, %%rbx
\\xor %%rcx, %%rcx
\\xor %%rdx, %%rdx
\\xor %%rsi, %%rsi
\\xor %%rdi, %%rdi
\\xor %%r8, %%r8
\\xor %%r9, %%r9
\\xor %%r10, %%r10
\\xor %%r11, %%r11
\\xor %%r12, %%r12
\\xor %%r13, %%r13
\\xor %%r14, %%r14
\\xor %%r15, %%r15
\\movq %[addr], %%rsp
\\movq %%rsp, %%rbp
:
: [addr] "r" (new_stack_address)
: "memory"
);
@call(.{ .modifier = .never_inline }, kernel.main, .{});
}
fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
debug.print("Stacktrace NYI\r\n", .{});
fn init_ap() noreturn {
ap_lock.wait();
@call(.{ .modifier = .never_inline }, kernel.main_ap, .{});
}
pub fn panic(reason: []const u8, stack_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
export fn _start() noreturn {
assert(std.mem.eql(u8, &bootboot.bootboot.magic, "BOOT"));
if (stack_trace) |st| {
dumpStackTrace(st.*);
} else {
debug.print("[Stacktrace unavailable]\r\n", .{});
@atomicStore(u8, &ap_lock.lock, 1, .SeqCst);
if (!apic.bootstrapCpuCore()) {
@call(.{ .modifier = .never_inline }, init_ap, .{});
}
debug.print("KERNEL PANIC: {}\r\n", .{reason});
log(.Warning, "KERNEL PANIC: {}\r\n", .{reason});
while (true) {
asm volatile ("hlt");
}
@call(.{ .stack = &init_stack, .modifier = .never_inline }, init, .{});
}
comptime {

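The reworked boot flow above: _start zeroes the general-purpose registers, verifies the BOOTBOOT magic, parks the application processors on ap_lock, and switches the bootstrap processor onto init_stack before calling init(); init() releases ap_lock only once memory management is up, so main_ap() never runs against a half-initialized kernel. The gate reduced to its essentials (a sketch, not the literal code; initBsp is a hypothetical stand-in for init() above):

if (apic.bootstrapCpuCore()) {
    initBsp(); // set up framebuffer, memory management, panic arena ...
    ap_lock.release(); // open the gate
    kernel.main();
} else {
    ap_lock.wait(); // spin until the BSP releases the lock
    kernel.main_ap();
}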

@@ -1,50 +1 @@
const std = @import("std");
const assert = std.debug.assert;
pub inline fn Physical(ptr: var) PhysicalAddress(@TypeOf(ptr)) {
return PhysicalAddress(@TypeOf(ptr)).init(ptr);
}
pub inline fn Virtual(ptr: var) VirtualAddress(@TypeOf(ptr)) {
return VirtualAddress(@TypeOf(ptr)).init(ptr);
}
pub fn PhysicalAddress(comptime T: type) type {
// TODO [*]type fail here
// comptime assert(@typeInfo(T) == .Pointer or @typeInfo(T) == .Fn);
return struct {
ptr: T,
pub fn init(ptr: T) @This() {
return .{
.ptr = ptr,
};
}
pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, context: var, comptime Errors: type, output: fn (@TypeOf(context), []const u8) Errors!void) Errors!void {
const typeinfo = @typeInfo(T);
try std.fmt.format(context, Errors, output, "Physical({" ++ fmt ++ "})", .{self.ptr});
}
};
}
pub fn VirtualAddress(comptime T: type) type {
// TODO [*]type fail here
// assert(@typeInfo(T) == .Pointer or @typeInfo(T) == .Fn);
return struct {
ptr: T,
pub fn init(ptr: T) @This() {
return .{
.ptr = ptr,
};
}
pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, context: var, comptime Errors: type, output: fn (@TypeOf(context), []const u8) Errors!void) Errors!void {
try std.fmt.format(context, Errors, output, "Physical({" ++ fmt ++ "})", .{self.ptr});
}
};
}
pub const NoError = error{};
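With the wrappers above deleted, the paging API takes and returns raw pointers. Reconstructed from the signatures in this diff, a call site changes roughly like this:

// before: a comptime type parameter plus an address-space wrapper
_ = pml4.toPhysical(*u32, types.Virtual(ptr));
// after: the pointer carries its own type and no wrapper is needed
_ = pml4.toPhysical(ptr);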