// Xenon/src/paging.zig

const std = @import("std");
pub usingnamespace @import("paging/pml4.zig");
pub usingnamespace @import("paging/pdp.zig");
pub usingnamespace @import("paging/pd.zig");
pub usingnamespace @import("paging/pt.zig");
const global = @import("global.zig");
const log = @import("logger.zig").log;
/// Allow access to up to pml4_entries * 0.5 TiB of physical address space
/// using only pml4_entries * 4 KiB of memory (one page-directory-pointer
/// table per PML4 entry; 1 GiB pages need no lower-level tables)
const pml4_entries: usize = 2;
/// Initializes paging on the processor.
/// Currently requires paging to already be enabled; this routine only
/// reconfigures the memory map into the expected layout.
/// NOTE: All allocations from global.physical_memory must already be usable.
pub fn initialize(page_table: *PageMapLevel4) void {
var existing_map_warning: bool = true;
// Only referenced by the commented-out cleanup below; discard so the
// compiler does not reject it as an unused local.
_ = existing_map_warning;
// Identity map the lower half of virtual address space to physical memory
if (!global.cpu_capabilities.gigabyte_pages)
@panic("CPU not supported, 1 GiB page support required");
for (page_table.entries[0..pml4_entries]) |*pml4_entry, half_tbytes| {
if (!pml4_entry.present()) {
const pdp_memory = global.physical_memory.?.alloc(@sizeOf(PageDirectoryPointer), 4096) catch @panic("Failed to initialize kernel memory map (allocation failure)");
std.mem.secureZero(u8, pdp_memory);
const pdp = @ptrCast(*PageDirectoryPointer, @alignCast(4096, &pdp_memory[0]));
pml4_entry.ptrSet(pdp);
}
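// Fence so the store linking the new PDP is complete before it is read back.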
@fence(.SeqCst);
const pdp = pml4_entry.ptr();
for (pdp.entries) |*pdp_entry, gbytes| {
// Create a copy of the original entry so it can be freed later.
// This is done after remapping to ensure
// that there is no memory hole during cleanup.
const old_pdp_entry = pdp_entry.*;
// Also create a copy of the original entry for modification.
// This ensures the live entry is never observed in an
// inconsistent state during bit manipulation, which matters
// when the entry maps the very memory it is itself stored in.
var new_pdp_entry = pdp_entry.*;
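// Identity mapping: the gbytes-th 1 GiB page under the half_tbytes-th
// PML4 slot starts at physical address (half_tbytes * 512 + gbytes) GiB.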
const physical_address = (half_tbytes * 512 + gbytes) * 1024 * 1024 * 1024;
new_pdp_entry.ptrPsSet(@intToPtr(*allowzero u8, physical_address));
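// PS in a PDP entry selects a 1 GiB page instead of a page directory.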
new_pdp_entry.psSet(true);
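// Bit 63 (NX) is reserved unless the CPU supports no-execute,
// so it is only set when available.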
if (global.cpu_capabilities.no_execute)
new_pdp_entry.no_executeSet(true);
new_pdp_entry.read_writeSet(true);
new_pdp_entry.user_supervisorSet(false);
new_pdp_entry.presentSet(true);
pdp_entry.* = new_pdp_entry;
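// Fence so the new entry is fully written before the old mapping
// would be cleaned up.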
@fence(.SeqCst);
// TODO The old entries might not be allowed to be freed
// for some reason
// if (old_pdp_entry.present() and !old_pdp_entry.ps()) {
// if (existing_map_warning) {
// log(.Warning, "[paging] Freeing existing memory mapping", .{});
// existing_map_warning = false;
// }
// const pd = old_pdp_entry.ptr();
// for (pd.entries) |*pd_entry| {
// if (pd_entry.present() and !pd_entry.ps()) {
// const pt = pd_entry.ptr();
// const pt_memory = @ptrCast([*]u8, pt)[0..@sizeOf(@TypeOf(pt.*))];
// std.mem.secureZero(u8, pt_memory);
// global.physical_memory.?.free(pt_memory);
// }
// }
// const pd_memory = @ptrCast([*]u8, pd)[0..@sizeOf(@TypeOf(pd.*))];
// std.mem.secureZero(u8, pd_memory);
// global.physical_memory.?.free(pd_memory);
// }
}
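// All PDP entries are populated; fence before the PML4 entry's own
// flags (including present) are updated.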
@fence(.SeqCst);
if (global.cpu_capabilities.no_execute)
pml4_entry.no_executeSet(true);
pml4_entry.read_writeSet(true);
pml4_entry.user_supervisorSet(false);
pml4_entry.presentSet(true);
}
}
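
// Example call site (a hypothetical sketch; `readCr3` is assumed to
// return the current CR3 value and is not part of this module):
//
//     const pml4 = @intToPtr(*PageMapLevel4, readCr3() & ~@as(usize, 0xfff));
//     paging.initialize(pml4);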