const std = @import("std");

const Allocator = std.mem.Allocator;
const assert = std.debug.assert;

// cpu is used by VirtualMemory.activate below; the module path is
// assumed to follow the same convention as the sibling imports.
const cpu = @import("cpu.zig");
const global = @import("global.zig");
const log = @import("logger.zig").log;
const MemoryMapEntry = @import("bootboot.zig").MemoryMapEntry;
const paging = @import("paging.zig");
const types = @import("types.zig");

/// Manager of physical memory.
/// Methods other than `init`
/// may only be called while memory is identity mapped.
pub const PhysicalMemory = struct {
    const Self = @This();
    const List = std.SinglyLinkedList(NodeState);
    const Node = List.Node;

    const node_size: usize = @sizeOf(Node);
    const node_align: u29 = @alignOf(Node);

    static_memory_map: []align(1) const MemoryMapEntry,
    static_index: usize = 0,

    known_available: usize = 0,
    known_used: usize = 0,

    memory_map: List,

    pub fn init(static_memory_map: []align(1) const MemoryMapEntry) Self {
        return .{
            .memory_map = List.init(),
            .static_memory_map = static_memory_map,
        };
    }
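
    // Usage sketch (hypothetical; obtaining the BOOTBOOT entries is
    // outside this file, and `entries` is a placeholder name):
    //
    //     const entries: []align(1) const MemoryMapEntry = ...;
    //     global.physical_memory = PhysicalMemory.init(entries);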

    /// Allocates physical memory.
    /// The returned slice is not usable unless the region it points
    /// to is identity mapped.
    /// Returned memory is zeroed out.
    pub fn alloc(self: *Self, size: usize, alignment: u29) Allocator.Error![]u8 {
        log(.Verbose, "Allocating {} bytes of memory", .{size});

        var previous_node: ?*Node = null; // Tracked for node removal
        var current_node = self.memory_map.first;

        var base_address: usize = 0;
        var aligned_address: usize = 0;
        var lost_size: usize = 0;

        while (true) {
            if (current_node) |node| {
                // Usable memory starts behind the node header
                base_address = @ptrToInt(&node.data.slice[0]) + node_size;
                aligned_address = forwardAlign(base_address, alignment);
                lost_size = aligned_address - base_address;

                if (node.data.slice.len >= size + lost_size + node_size and !node.data.used)
                    break;

                previous_node = current_node;
                current_node = node.next;
            } else {
                // Attempt to claim a new region from the static memory map
                if (self.nextFreeRegion()) |region| {
                    if (createNode(region)) |new_node| {
                        self.known_available += new_node.data.slice.len;

                        if (previous_node) |prev_node| {
                            prev_node.insertAfter(new_node);
                        } else {
                            self.memory_map.first = new_node;
                        }

                        current_node = new_node;
                    } else {
                        return Allocator.Error.OutOfMemory;
                    }
                } else {
                    return Allocator.Error.OutOfMemory;
                }
            }
        }

        const slice = @intToPtr([*]u8, aligned_address)[0..size];

        self.updateNode(previous_node, current_node.?, lost_size + size);

        std.mem.secureZero(u8, slice);
        return slice;
    }
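
    // Minimal allocate/free sketch, assuming memory is identity mapped
    // as the container doc comment requires; this mirrors the call
    // sites in VirtualMemory below:
    //
    //     const buf = try global.physical_memory.?.alloc(128, 16);
    //     defer global.physical_memory.?.free(buf);
    //     // buf is zeroed, 16-byte aligned, and 128 bytes long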

    pub fn mark(self: *Self, slice: []allowzero const u8) void {
        // TODO
    }

    pub fn free(self: *Self, slice: []allowzero u8) void {
        log(.Verbose, "Freeing {} bytes of memory", .{slice.len});

        // Because we are freeing, there must be at least one memory region
        var previous_node: ?*Node = null;
        var node = self.memory_map.first orelse @panic("Physical memory manager lost track of some memory");

        const slice_start = @ptrToInt(&slice[0]);
        var node_start = @ptrToInt(node);
        while (!(node_start <= slice_start and
            slice_start < node_start + node.data.slice.len))
        {
            previous_node = node;
            node = node.next orelse @panic("Physical memory manager lost track of some memory");

            node_start = @ptrToInt(node);
        }

        self.updateNode(previous_node, node, 0);
    }

    /// Creates a new node managing region.
    /// When the return value is null, the region
    /// has been rejected and is left unmodified.
    fn createNode(region: []u8) ?*Node {
        const entry_start = @ptrToInt(&region[0]);

        // Trade a few bytes for access speed
        const region_start = forwardAlign(entry_start, node_align);
        const align_loss = region_start - entry_start;

        // Reject regions too small to hold the node header after alignment
        if (region.len < node_size + align_loss)
            return null;

        const new_node = @intToPtr(*Node, region_start);
        const new_region = @intToPtr([*]u8, region_start)[0 .. region.len - align_loss];

        new_node.* = Node.init(.{
            .used = false,
            .slice = new_region,
        });

        return new_node;
    }
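
    // Resulting layout of a claimed region (illustration derived from
    // the code above; proportions not to scale):
    //
    //     region_start                                   region end
    //     v                                              v
    //     [ Node header (node_size bytes) | free bytes   ]
    //     ^ new_node points here; alloc() hands out memory starting at
    //       forwardAlign(region_start + node_size, alignment)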

    /// Merges, splits, and updates the memory_map's nodes
    /// to represent the new memory usage of node.
    /// previous_node_opt must be the node directly preceding
    /// node, or null when node is the head of the list.
    /// A usage value of 0 frees the node;
    /// any other value marks usage bytes as used.
    fn updateNode(
        self: *Self,
        previous_node_opt: ?*Node,
        node: *Node,
        usage: usize,
    ) void {
        if (previous_node_opt) |previous_node| {
            assert(previous_node.next == node);
        } else {
            assert(self.memory_map.first == node);
        }

        if (usage == 0) {
            // This node has been freed
            assert(node.data.used);
            node.data.used = false;

            // TODO First check whether we can merge with the followup region

            // Now try merging with the previous region
            if (previous_node_opt) |previous_node| {
                const previous_start = @ptrToInt(previous_node);
                if (!previous_node.data.used and
                    previous_start + previous_node.data.slice.len ==
                    @ptrToInt(node))
                {
                    previous_node.data.slice = @intToPtr(
                        [*]u8,
                        previous_start,
                    )[0 .. previous_node.data.slice.len + node.data.slice.len];

                    _ = previous_node.removeNext();
                }
            }

            self.known_available += node.data.slice.len;
            self.known_used -= node.data.slice.len;
        } else {
            // This node is going to be used
            assert(!node.data.used);
            node.data.used = true;

            const remaining_free_size = node.data.slice.len - usage - node_size;
            if (remaining_free_size < node_size + node_align) {
                // The remainder is too small to hold a node header even
                // in the worst alignment case; just lose the space
                // TODO Try merging with the next node
            } else {
                const node_start = @ptrToInt(node);

                // Create a new node managing the remaining free space;
                // createNode aligns the start itself and trims the length
                const new_region = @intToPtr([*]u8, node_start + node_size + usage)[0..remaining_free_size];

                if (createNode(new_region)) |new_node| {
                    node.insertAfter(new_node);

                    // Shrink the current node so it ends where the new node begins
                    node.data.slice = node.data.slice[0 .. @ptrToInt(new_node) - node_start];
                } else {
                    @panic("Memory node creation failed despite size check");
                }
            }

            self.known_available -= node.data.slice.len;
            self.known_used += node.data.slice.len;
        }
    }
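
    // Worked example of the split path, with made-up numbers: a free
    // node at 0x1000 manages slice.len == 0x1000 bytes, node_size is
    // 32 (0x20), and alloc() requests usage == 0x200 bytes. Then
    // remaining_free_size == 0x1000 - 0x200 - 0x20 == 0xDE0, a new
    // node is created at forwardAlign(0x1000 + 0x20 + 0x200, node_align),
    // and the original node's slice shrinks to end exactly there.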

    fn nextFreeRegion(self: *Self) ?[]u8 {
        if (self.static_index >= self.static_memory_map.len)
            return null;

        // Skip entries that are not free, as well as the region
        // starting at address zero
        var entry = self.static_memory_map[self.static_index];
        while (entry.usage() != .Free or @ptrToInt(&entry.ptr()[0]) == 0) {
            self.static_index += 1;

            if (self.static_index >= self.static_memory_map.len)
                return null;

            entry = self.static_memory_map[self.static_index];
        }

        self.static_index += 1;

        return entry.ptr()[0..entry.size()];
    }

    pub const NodeState = struct {
        used: bool,
        slice: []u8,
    };
};

/// Manager of a virtual memory map.
/// Methods other than `init` and `activate`
/// may only be called while memory is identity mapped.
pub const VirtualMemory = struct {
    const Self = @This();

    allocator: Allocator,
    page_table: *paging.PageMapLevel4,
    base_address: usize,
    offset: usize = 0,

    pub fn init(page_table: *paging.PageMapLevel4, base_address: usize) Self {
        return .{
            .page_table = page_table,
            .base_address = base_address,
            .allocator = .{
                .reallocFn = kernel_realloc,
                .shrinkFn = kernel_shrink,
            },
        };
    }

    /// Destroy the memory map
    pub fn deinit(self: *Self) void {}

    /// Activate this memory map.
    /// Make sure stack and code are mapped properly
    /// before calling this.
    pub fn activate(self: *const Self) void {
        cpu.register.cr3Set(@ptrToInt(self.page_table));
    }
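
    // Sketch of the intended call order; `kernel_pml4` and `kernel_base`
    // are placeholder names, not identifiers from this codebase:
    //
    //     var vm = VirtualMemory.init(kernel_pml4, kernel_base);
    //     // ... map the kernel image and stack via vm.map() ...
    //     vm.activate(); // loads CR3 and switches address spaces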

    /// Allocate memory with the provided attributes.
    /// The returned slice is only valid while this memory map is active.
    pub fn alloc(self: *Self, comptime attributes: Attributes, size: usize, alignment: u29) Allocator.Error![]u8 {
        if (size == 0)
            return &[0]u8{};

        // Mapped regions must be a multiple of the page size,
        // so for now extend all sizes
        const used_size = forwardAlign(size, 4096);

        // Alignment must be at least the page size,
        // so for now extend it
        const used_alignment = std.math.max(4096, alignment);

        const physical_slice = try global.physical_memory.?.alloc(used_size, used_alignment);
        errdefer global.physical_memory.?.free(physical_slice);

        const virtual_base = forwardAlign(self.base_address + self.offset, used_alignment);
        self.offset = virtual_base - self.base_address + used_size;

        try self.map(
            attributes,
            @intToPtr([*]align(4096) u8, virtual_base)[0..used_size],
            @intToPtr([*]align(4096) u8, @ptrToInt(&physical_slice[0]))[0..physical_slice.len],
        );

        return @intToPtr([*]u8, virtual_base)[0..size];
    }
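
    // Usage sketch, assuming `vm` is an initialized VirtualMemory:
    //
    //     const stack = try vm.alloc(.{ .writeable = true }, 16 * 4096, 4096);
    //     // `stack` may only be dereferenced once this map is activated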

    /// Map the addresses of virtual_slice to physical_slice,
    /// enforcing the given attributes
    pub fn map(self: *Self, comptime attributes: Attributes, virtual_slice: []align(4096) const u8, physical_slice: []align(4096) u8) Allocator.Error!void {
        if (physical_slice.len < virtual_slice.len)
            @panic("Invalid memory mapping attempt: Tried to map virtual range to smaller physical one");
        if (virtual_slice.len % 4096 != 0)
            @panic("Invalid memory mapping attempt: Virtual slice is not a multiple of page size");

        if (attributes.user_access and attributes.writeable and attributes.executable)
            log(.Warning, "[memory] About to map memory as uwx", .{});

        if (false and virtual_slice.len % (1024 * 1024 * 1024) == 0) {
            // TODO 1 GiB pages
        } else if (false and virtual_slice.len % (2 * 1024 * 1024) == 0) {
            // TODO 2 MiB pages
        } else {
            // 4 KiB pages
            var offset: usize = 0;
            while (offset < virtual_slice.len) : (offset += 4096) {
                const current_page_ptr = &virtual_slice[offset];

                // Walk the four paging levels, creating tables on demand
                var pml4e = self.page_table.getEntry(current_page_ptr);
                if (!pml4e.present()) {
                    log(.Verbose, "[memory] Creating new PDP", .{});

                    const pdp_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageDirectoryPointer), 4096);
                    errdefer global.physical_memory.?.free(pdp_memory);

                    std.mem.secureZero(u8, pdp_memory);

                    const pdp = @ptrCast(*paging.PageDirectoryPointer, &pdp_memory[0]);

                    pml4e.read_writeSet(false);
                    pml4e.user_supervisorSet(false);
                    if (global.cpu_capabilities.no_execute)
                        pml4e.no_executeSet(true);
                    pml4e.ptrSet(pdp);
                }

                const pdp = pml4e.ptr();
                const pdpe = pdp.getEntry(current_page_ptr);
                if (!pdpe.present()) {
                    log(.Verbose, "[memory] Creating new PD", .{});

                    const pd_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageDirectory), 4096);
                    errdefer global.physical_memory.?.free(pd_memory);

                    std.mem.secureZero(u8, pd_memory);

                    const pd = @ptrCast(*paging.PageDirectory, &pd_memory[0]);

                    pdpe.read_writeSet(false);
                    pdpe.user_supervisorSet(false);
                    if (global.cpu_capabilities.no_execute)
                        pdpe.no_executeSet(true);
                    pdpe.ptrSet(pd);
                }

                const pd = pdpe.ptr();
                const pde = pd.getEntry(current_page_ptr);
                if (!pde.present()) {
                    log(.Verbose, "[memory] Creating new PT", .{});

                    const pt_memory = try global.physical_memory.?.alloc(@sizeOf(paging.PageTable), 4096);
                    errdefer global.physical_memory.?.free(pt_memory);

                    std.mem.secureZero(u8, pt_memory);

                    const pt = @ptrCast(*paging.PageTable, &pt_memory[0]);

                    pde.read_writeSet(false);
                    pde.user_supervisorSet(false);
                    if (global.cpu_capabilities.no_execute)
                        pde.no_executeSet(true);
                    pde.ptrSet(pt);
                }

                const pt = pde.ptr();
                const pte = pt.getEntry(current_page_ptr);
                if (!pte.present()) {
                    const page = physical_slice[offset .. offset + 4096];

                    pte.read_writeSet(false);
                    pte.user_supervisorSet(false);
                    if (global.cpu_capabilities.no_execute)
                        pte.no_executeSet(true);
                    pte.ptrSet(page);
                } else {
                    @panic("[memory] Mapping already mapped memory");
                }

                // Reduce enforcement of the intermediate tables;
                // the lowest-level entries restore restrictions as needed
                if (attributes.writeable) {
                    pml4e.read_writeSet(true);
                    pdpe.read_writeSet(true);
                    pde.read_writeSet(true);
                    pte.read_writeSet(true);
                }
                if (attributes.user_access) {
                    pml4e.user_supervisorSet(true);
                    pdpe.user_supervisorSet(true);
                    pde.user_supervisorSet(true);
                    pte.user_supervisorSet(true);
                }
                if (attributes.executable) {
                    pml4e.no_executeSet(false);
                    pdpe.no_executeSet(false);
                    pde.no_executeSet(false);
                    pte.no_executeSet(false);
                }

                pml4e.presentSet(true);
                pdpe.presentSet(true);
                pde.presentSet(true);
                pte.presentSet(true);
            }
        }
    }
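
    // How the four entries above are selected by a virtual address
    // (illustration; the actual index extraction lives in paging.zig):
    //
    //     addr        = 0xFFFF_8000_0020_1000
    //     PML4 index  = (addr >> 39) & 0x1FF == 256
    //     PDP index   = (addr >> 30) & 0x1FF == 0
    //     PD index    = (addr >> 21) & 0x1FF == 1
    //     PT index    = (addr >> 12) & 0x1FF == 1
    //     page offset = addr & 0xFFF == 0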

    /// Free memory received from an alloc call
    pub fn free(self: *Self, slice: []u8) void {
        // TODO Proper memory management instead of discard
        const physical_slice = @ptrCast([*]u8, self.page_table.toPhysical(&slice[0]))[0..slice.len];
        global.physical_memory.?.free(physical_slice);
    }

    fn kernel_realloc(
        self: *Allocator,
        old_mem: []u8,
        old_alignment: u29,
        new_byte_count: usize,
        new_alignment: u29,
    ) Allocator.Error![]u8 {
        const virtual = @fieldParentPtr(Self, "allocator", self);

        if (old_mem.len == 0)
            return virtual.alloc(.{ .writeable = true }, new_byte_count, new_alignment);

        if (old_mem.len < new_byte_count) {
            const new_memory = try virtual.alloc(.{ .writeable = true }, new_byte_count, new_alignment);
            std.mem.copy(u8, new_memory, old_mem);
            virtual.free(old_mem);
            return new_memory;
        }

        // Shrinking (or same-size) requests can be served in place
        return old_mem[0..new_byte_count];
    }

    fn kernel_shrink(
        self: *Allocator,
        old_mem: []u8,
        old_alignment: u29,
        new_byte_count: usize,
        new_alignment: u29,
    ) []u8 {
        const virtual = @fieldParentPtr(Self, "allocator", self);

        if (new_byte_count == 0) {
            virtual.free(old_mem);
            return &[0]u8{};
        }

        return old_mem[0..new_byte_count];
    }

    const Attributes = struct {
        writeable: bool = false,
        user_access: bool = false,
        executable: bool = false,
    };
};

/// Aligns base upwards to the next multiple of alignment.
/// alignment must be a power of two.
inline fn forwardAlign(base: usize, alignment: usize) usize {
    return (base + (alignment - 1)) & ~(alignment - 1);
}
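
// Host-side sanity check for the pure helper above: a minimal sketch
// that only runs where a test runner is available (`zig test`), not in
// the freestanding kernel target.
test "forwardAlign rounds up to the next multiple of alignment" {
    assert(forwardAlign(0, 8) == 0);
    assert(forwardAlign(1, 8) == 8);
    assert(forwardAlign(8, 8) == 8);
    assert(forwardAlign(4097, 4096) == 8192);
    assert(forwardAlign(0x1003, 16) == 0x1010);
}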