Back in “initial_paging.rs”, let’s add the functions that create the page-table entries our hypervisor needs.

use core::{
    ptr::{self},
    usize,
};

use log::info;
use uefi::boot::{MemoryType, PAGE_SIZE, allocate_pages};
use x86::{
    bits64::paging::{
        PAddr, PD, PDEntry, PDFlags, PDPT, PDPTEntry, PDPTFlags, PML4, PML4Entry, PML4Flags, PT,
        PTEntry, PTFlags,
    },
    controlregs::cr3,
};
//..
// etc
//..
fn new_pml4_entry(entry: &mut PML4Entry, pdpte: usize) {
    let paddr = PAddr::from(pdpte);
    *entry = PML4Entry::new(paddr, PML4Flags::P | PML4Flags::RW);
}

fn new_pdpt_entry(entry: &mut PDPTEntry, pde: usize) {
    let paddr = PAddr::from(pde);

    *entry = PDPTEntry::new(paddr, PDPTFlags::P | PDPTFlags::RW);
}

fn new_pd_entry(entry: &mut PDEntry, pt: usize) {
    let paddr = PAddr::from(pt);

    *entry = PDEntry::new(paddr, PDFlags::P | PDFlags::RW);
}

fn map_4kb_page_entry(entry: &mut PTEntry, pa: u64) {
    let phys = PAddr::from(pa);

    *entry = PTEntry::new(phys, PTFlags::P | PTFlags::RW);
}

These are the entries we will create, with basic permissions. Let’s add the mapping function itself.

pub fn map_page(cr3: *mut PML4, va: u64, pa: u64) {
    let pml4_index = (va >> 39) & 0x1FF;
    let pdpt_index = (va >> 30) & 0x1FF;
    let pde_index = (va >> 21) & 0x1FF;
    let pt_index = (va >> 12) & 0x1FF;

    info!("Mapping VA {:x} to PA: {:x}", va, pa);

    let pml4_entry = unsafe { &mut (*cr3)[pml4_index as usize] };

    if !pml4_entry.is_present() {
        let pdpt_pages = uefi_allocator(size_of::<PDPT>());
        new_pml4_entry(pml4_entry, pdpt_pages.addr());
    }

    info!("PML4 {:?}", pml4_entry);

    let pdpt = pml4_entry.address().as_u64() as *mut PDPT;

    let pdpt_entry = unsafe { &mut (*pdpt)[pdpt_index as usize] };

    if !pdpt_entry.is_present() {
        let pde_pages = uefi_allocator(size_of::<PD>());
        new_pdpt_entry(pdpt_entry, pde_pages.addr());
    }

    info!("PDPT {:?}", pdpt_entry);

    let pdes = pdpt_entry.address().as_u64() as *mut PD;

    let pde = unsafe { &mut (*pdes)[pde_index as usize] };

    if !pde.is_present() {
        let pt_pages = uefi_allocator(size_of::<PT>());
        new_pd_entry(pde, pt_pages.addr());
    }

    info!("PD {:?}", pde);

    let ptes = pde.address().as_u64() as *mut PT;

    let pt = unsafe { &mut (*ptes)[pt_index as usize] };

    if !pt.is_present() {
        map_4kb_page_entry(pt, pa);
    }

    info!("PT: {:?}", pt);
}

That’s it. It’s pretty basic because we are specifying the VA.

Next, we are actually going to be parsing the ELF and loading it into memory, create a file called “loader.rs” and let’s get started.

To load an ELF into memory, we have to load every PT_LOAD header into memory at the VA specified in the p_vaddr field, reading from the file offset specified in p_offset. Also, if p_memsz is greater than p_filesz, we have to fill the extra bytes with 0x0. Those extra bytes are the .bss section of the ELF format: the .bss section holds statically allocated variables that have no initial value — for example, a global variable declared without an initializer.

Let’s get started.

use crate::initial_paging::{map_page, size_to_pages, uefi_allocator};
use elf::{ElfBytes, abi::PT_LOAD, endian::LittleEndian, segment::ProgramHeader};
use uefi::boot::{PAGE_SIZE};
use x86::{bits64::paging::PML4};
use log::info;

fn map_segment(va: u64, size: u64, cr3: *mut PML4, pa: usize) {
    let size_in_pages = size_to_pages(size.try_into().unwrap());

    let mut curr_va = va;
    let mut curr_pa = pa;

    info!("Size of segment: {}", size_in_pages);

    for _ in 0..size_in_pages {
        map_page(cr3, curr_va, curr_pa as u64);
        curr_pa += PAGE_SIZE;
        curr_va += PAGE_SIZE as u64;
    }
}

pub fn map_elf_load_segments(cr3: *mut PML4, buffer: &[u8]) -> u64 {
    let file: ElfBytes<'_, LittleEndian> = ElfBytes::<LittleEndian>::minimal_parse(buffer).unwrap();

    file.segments()
        .unwrap()
        .iter()
        .filter(|phdr| phdr.p_type == PT_LOAD)
        .for_each(|phdr| {
            let pages = uefi_allocator(phdr.p_memsz as usize);

            let pa = pages.addr();

            info!("Mapping segment {:x} to {:x}", phdr.p_vaddr, pa);

            map_segment(phdr.p_vaddr, phdr.p_memsz, cr3, pa.into());
        });

    file.ehdr.e_entry
}

So, as you can see, we filter each PT_LOAD segment, allocate the pages needed, and map the VA to the PA. Easy!

Next, is actually copying the segment into the virtual address, which is easy too. I’ll show you:

use core::{ptr, slice};

fn copy_segment(phdr: ProgramHeader, data: &[u8]) {
    let size = phdr.p_filesz as usize;

    let vaddr = phdr.p_vaddr as *mut u8;

    unsafe { slice::from_raw_parts_mut(vaddr, size).copy_from_slice(&data) };

    /* If the segment's memory size (p_memsz) is larger than the file size
    (p_filesz), the "extra" bytes are defined to hold the value 0 and to follow the
    segment's initialized area. The file size may not be larger than the memory size. */

    if phdr.p_memsz > size as u64 {
        info!("Copying segment and zeroing bss {:x}", phdr.p_vaddr);

        let difference = phdr.p_memsz - phdr.p_filesz;

        let pos = unsafe { vaddr.add(size) };

        unsafe { ptr::write_bytes(pos, 0x0, difference as usize) };
    };
}

pub fn load_offset_into_memory(buffer: &[u8]) {
    let file: ElfBytes<'_, LittleEndian> = ElfBytes::<LittleEndian>::minimal_parse(buffer).unwrap();

    file.segments()
        .unwrap()
        .iter()
        .filter(|phdr| phdr.p_type == PT_LOAD)
        .for_each(|phdr| {
            let curr_data = file.segment_data(&phdr).unwrap();
            copy_segment(phdr, curr_data);
        });

    info!("Done loading segments into memory..");
}

Next, we are going to want to pass some info to our kernel — most notably, the memory map. The firmware’s memory map describes which memory is available, which is very important (it also shows which regions are reserved for ACPI, etc.). So we need a struct shared between the kernel and the bootloader: let’s make a library in the root directory of our project called “lib” and give it these properties in cargo.toml:

[package]
name = "lib"
version = "0.1.0"
edition = "2024"

[dependencies]
uefi = "0.35.0"

And let’s make the struct itself! in lib.rs:

#![no_std]

use uefi::mem::memory_map::{MemoryMap, MemoryMapIter, MemoryMapOwned};

pub const BOOT_MAGIC: u64 = 0x4444441111;


pub struct BootInfo {
    pub memory: MemoryMapOwned,
    pub magic: u64,
    pub guest_cr3: u64
}


impl BootInfo {
    pub fn is_magic_same(&self, magic: u64) -> bool {
        let result = self.magic == magic;

        result
    }

    pub fn mem_entries(&self) -> MemoryMapIter<'_> {
        self.memory.entries()
    }

    pub fn new(cr3: u64, memory_map: MemoryMapOwned, magic: u64) -> BootInfo {
        BootInfo { magic:  magic, memory: memory_map, guest_cr3: cr3 }
    }
}

We can’t forget to load it as a dependency of our UEFI program, in cargo.toml make sure to add:

[dependencies]
...
lib = { path = "../lib"}

Now, we are going to write the code that actually jumps to the kernel.

use core::{arch::asm, ptr, slice};

use crate::initial_paging::{map_page, size_to_pages, uefi_allocator};
use elf::{ElfBytes, abi::PT_LOAD, endian::LittleEndian, segment::ProgramHeader};

use lib::{BootInfo, BOOT_MAGIC};
use log::info;
use uefi::boot::{PAGE_SIZE, exit_boot_services};
use x86::{bits64::paging::PML4, controlregs};

const SIZE_OF_STACK: usize = 4 * PAGE_SIZE;

// ...
// etc
// ...

fn jump_to_kernel(
    cr3: *mut PML4,
    kernel_stack: u64,
    entry_point: u64,
    buffer: &[u8],
    boot_info: u64,
) {
    unsafe { x86::controlregs::cr3_write(cr3.addr() as u64) };

    load_offset_into_memory(buffer);

    // lets go
    unsafe {
        asm!(
            "mov rsp, {0}",
            "jmp {1}", in(reg) kernel_stack, in(reg) entry_point, in("rdi") boot_info);
    }
}

pub fn load_boot_info_and_jump(cr3: *mut PML4, entry_point: u64, buffer: &[u8]) {
    let kernel_stack = uefi_allocator(SIZE_OF_STACK);

    let boot_pages = uefi_allocator(size_of::<BootInfo>());

    let boot_info = boot_pages as *mut BootInfo;

    let initial_cr3 = unsafe { controlregs::cr3() };

    info!("Jumping and exiting boot services");

    let memory_map = unsafe { exit_boot_services(None) };

    unsafe { *boot_info = BootInfo::new(initial_cr3, memory_map, BOOT_MAGIC) };

    jump_to_kernel(
        cr3,
        kernel_stack.addr() as u64,
        entry_point,
        buffer,
        boot_info.addr() as u64,
    );
}

Next blog entry we are going to be hooking it all together and finally finish writing our bootloader.