Compare commits

...

37 Commits

Author SHA1 Message Date
Asya 81d11685da New heap allocator! 2022-12-22 19:22:11 +03:00
Asya 0880391dde Update limine to v4.x 2022-12-17 10:30:34 +03:00
Asya 25afe9fec7 Stop assuming that udisks2 automatically mounted filesystem 2022-12-17 10:24:34 +03:00
ondra05 e92809c1da Moved ext2-rs to separate repo 2022-12-10 23:05:48 +01:00
ondra05 d047609f7f uh oh fixed a compilation error 2022-12-09 00:52:28 +01:00
ondra05 0f08c059a6 Added kernel cmdline support and retrieval of initrd 2022-12-08 22:21:21 +01:00
ondra05 7fbc348508 moved stuff around 2022-12-08 21:31:54 +01:00
ondra05 5e5d3057f1 they don't seem to be used? removing. 2022-12-08 21:25:49 +01:00
ondra05 933e5669c7 created empty initramfs 2022-12-08 21:12:33 +01:00
ondra05 646643838b Cleanup 2022-12-08 20:52:20 +01:00
ondra05 61e59e45f0 fixed segments, timer doesn't double-fault anymore 2022-12-08 20:35:42 +01:00
ondra05 8f6c91ab9a Moved away from PIC to APIC. 2022-12-08 20:15:10 +01:00
ondra05 91baa44f36 removed few package from the workspace manifest 2022-12-08 15:54:11 +01:00
ondra05 1be74243ac Added GDT, IDT, sloop and some logging 2022-12-08 01:26:43 +01:00
ondra05 39cafcaed4 Logging 2022-12-08 00:07:02 +01:00
ondra05 cd5b7a8e69 Added basic logging support 2022-12-07 01:43:26 +01:00
ondra05 0cea5e66d0 made kernel the system entrypoint. 2022-12-07 01:10:38 +01:00
ondra05 415756bc3e Skeleton for architecture-specific things 2022-12-06 23:04:28 +01:00
ondra05 a398498352 Threw stuff from the microkernel. Start of moving core parts there. 2022-12-06 23:04:26 +01:00
ondra05 9152dbb57f clippy 2022-12-06 23:04:11 +01:00
ondra05 1c41494cc9 removed module that I forgot to remove + 1.65 stuff 2022-12-06 23:04:09 +01:00
ondra05 60a38212c0 removed sus messaging module 2022-12-06 23:04:04 +01:00
ondra05 67a3b89234 »fixed« scratchpad 2022-12-06 22:39:52 +01:00
Able 8847bfa5c6 remove path 2022-12-06 13:24:04 -06:00
ondra05 76e2bd286b repbuild r uses udisks 2022-12-03 17:47:10 +01:00
Able edbdf9456f Consolidating limine graphics 2022-12-02 08:51:21 -06:00
Able 9b243410ab forgor smp is broked 2022-11-30 01:14:34 -06:00
Able 5cb1ef8d65 framebuffer + smp work 2022-11-24 07:20:16 -06:00
Able b2ab5219f5 Ready 2022-11-24 03:35:55 -06:00
ondra05 2ddbb320c4 UNIX fans hate him, he replaced process spawns with IPC! 2022-11-24 00:22:58 +01:00
Doesn't require root permissions now.
TODO: Please replicate BuildImage changes to different places.
Able 0917c0cb02 Revert "get ableOS running proper on limine sans graphics" 2022-11-23 06:00:38 -06:00
This reverts commit 5ea2794aa2.
Able 5ea2794aa2 get ableOS running proper on limine sans graphics 2022-11-23 05:06:38 -06:00
Able af2610c0b0 limine compiling booting and not panicking 2022-11-23 04:02:02 -06:00
TheOddGarlic 1e873874cf progress on porting to limine 2022-08-20 09:28:48 +03:00
TheOddGarlic 13c41340d0 interrupts: page fault handler 2022-08-20 09:26:59 +03:00
TheOddGarlic 6c2fea9dff repbuild: generate disk image with limine 2022-08-19 09:22:05 +03:00
TheOddGarlic f5088d0bd1 build: fix rust-toolchain configuration 2022-08-18 09:32:14 +03:00
102 changed files with 2288 additions and 4597 deletions


@ -1,22 +0,0 @@
name: Build
on:
- push
- pull_request
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
components: rust-src
- uses: actions-rs/cargo@v1
with:
command: repbuild
args: run

.gitignore (vendored, 4 lines changed)

@ -1,6 +1,8 @@
userland/root_fs/mnt/
target/
.gdb_history
!*/.gitkeep
__pycache__/
debug.log
/disk/
/limine/
/disk.img

Cargo.lock (generated, 1131 lines changed)

File diff suppressed because it is too large.


@ -1,9 +1,2 @@
[workspace]
members = [
"ableos",
"asl",
"ext2-rs",
"kernel",
"facepalm",
"repbuild"
]
members = ["kernel", "repbuild"]

TODO.md (46 lines changed)

@ -1,46 +0,0 @@
# AbleOS
## General
- [ ] Improve EXT2
- [ ] Remove x86 specific code and refine the boot process
## Capabilities
A new process should not have any capabilities at all until it is given them or requests them and is approved.
- [ ] Filesystem cap
- [ ] Create a new filesystem
- [ ] Unmount/Mount a filesystem
- [ ] read a file
- [ ] write a file
- [ ] delete a file
- [ ] Network cap
- [ ] open/close socket
- [ ] bind/unbind socket
- [ ] Manage Process cap
- [ ] spawn Process cap
- [ ] kill Process cap
## Riscv
## ARM
- [ ] Get arm-version booting on real hardware
## Drivers
- [ ] Slim down driver specific program code
- [ ] Remove entry/exit functions for drivers
## Filesystem
- [ ] Create a vfs that ties into the capability system
- [ ] Remote home directory
- [ ] local file caching
- [ ] remote file changes
- [ ] Update file if the remote file changes
# Tooling
## Repbuild
- [ ] make generation of the ext2 image possible
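
The capability checklist above describes the intended model: a new process starts with no capabilities at all and gains one only when it is granted or its request is approved. A minimal Rust sketch of that idea follows; the type and method names are illustrative only and are not taken from the ableOS source.

// Illustrative capability bitmask, not the ableOS implementation.
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(u8)]
enum Capability {
    FsRead = 1 << 0,
    FsWrite = 1 << 1,
    NetSocket = 1 << 2,
    SpawnProcess = 1 << 3,
}

struct Process {
    caps: u8, // bitmask of granted capabilities; starts empty
}

impl Process {
    fn new() -> Self {
        // "A new process should not have any capabilities at all..."
        Process { caps: 0 }
    }

    /// The process asks for a capability; some authority decides `approved`.
    fn request(&mut self, cap: Capability, approved: bool) {
        if approved {
            self.caps |= cap as u8;
        }
    }

    fn has(&self, cap: Capability) -> bool {
        self.caps & (cap as u8) != 0
    }
}

fn main() {
    let mut p = Process::new();
    assert!(!p.has(Capability::FsRead));
    p.request(Capability::FsRead, true);
    assert!(p.has(Capability::FsRead));
}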


@ -8,7 +8,6 @@ build-std-features = ["compiler-builtins-mem"]
[target.'cfg(target_arch = "x86_64")']
rustflags = ["-C", "target-feature=+rdrand"]
runner = "bootimage runner"
[target.riscv64gc-unknown-none-elf]
rustflags = "-C link-arg=-Tableos/src/arch/riscv/virt.lds"


@ -8,52 +8,6 @@ version = "0.1.1"
panic = "abort"
[package.metadata.bootimage]
run-command = [
"qemu-system-x86_64",
"-device",
"piix4-ide,id=ide",
"-drive",
"file={},format=raw,if=none,id=disk",
"-device",
"ide-hd,drive=disk,bus=ide.0"
]
run-args = [
# "--nodefaults",
"-cpu",
"Broadwell-v3",
"-m",
"4G",
"-serial",
"stdio",
"-smp",
"cores=2",
"-soundhw",
"pcspk",
# "-device",
# "VGA",
# "-device",
# "virtio-gpu-pci",
"-device",
"vmware-svga",
"-device",
"sb16",
# "-machine", "pcspk-audiodev=0",
# "-qmp",
# "unix:../qmp-sock,server,nowait",
]
test-args = [
"-device",
"isa-debug-exit,iobase=0xf4,iosize=0x04",
@ -154,7 +108,7 @@ git = "https://git.ablecorp.us:443/able/externc-libm.git"
riscv = "*"
[target.'cfg(target_arch = "x86_64")'.dependencies]
bootloader = { version = "0.9.8", features = ["map_physical_memory"] }
limine = "0.1"
cpuio = { git = "https://git.ablecorp.us/ondra05/cpuio.git" }
pic8259 = "0.10.1"
uart_16550 = "0.2.0"


@ -1,16 +1,6 @@
[boot]
system_processes = []
user_processes = ["shell"]
[logging]
enabled = true
level = "Trace"
log_to_serial = true
log_to_vterm = false
filter = ["ableos::ps2_mouse", "ableos::vterm"]
[tests]
run_tests = false
run_demos = false
run_shader_tests = false
filter = ["ableos::ps2_mouse", "ableos::vterm", "ableos::devices::pci"]


@ -11,5 +11,12 @@
"linker": "rust-lld",
"panic-strategy": "abort",
"disable-redzone": true,
"features": "-mmx,-sse,+soft-float"
"features": "-mmx,-sse,+soft-float",
"code-model": "kernel",
"pre-link-args": {
"ld.lld": [
"--gc-sections",
"--script=kernel/lds/x86_64.ld"
]
}
}


@ -1 +0,0 @@
nightly-2022-01-04


@ -39,6 +39,7 @@ pub fn init() {
use x86_64::instructions::segmentation::{Segment, CS};
use x86_64::instructions::tables::load_tss;
log::debug!("Initialising GDT");
GDT.0.load();
unsafe {
CS::set_reg(GDT.1.code_selector);


@ -1,6 +1,7 @@
// #![allow(clippy::print_literal)]
use super::{gdt, interrupts};
use crate::{logger, serial_println, TERM};
// use crate::{logger, serial_println, TERM};
use crate::{logger, serial_println};
/// x86_64 initialization
pub fn init() {
@ -17,15 +18,21 @@ pub fn init() {
Err(err) => serial_println!("{}", err),
}
let mut term = TERM.lock();
// term.initialize();
term.set_dirty(true);
term.draw_term();
drop(term);
trace!("gdt");
gdt::init();
trace!("idt");
interrupts::init_idt();
unsafe { interrupts::PICS.lock().initialize() };
// trace!("term");
// let term = &*TERM;
// trace!("term.lock()");
// let mut term = term.lock();
// term.initialize();
// term.set_dirty(true);
// term.draw_term();
// drop(term);
x86_64::instructions::interrupts::enable();
}


@ -12,7 +12,10 @@ use pic8259::ChainedPics;
use qrcode::QrCode;
use seq_macro::seq;
use spin::Lazy;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
use x86_64::{
registers::control::Cr2,
structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode},
};
use super::sloop;
@ -56,6 +59,7 @@ static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
});
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
idt.double_fault
.set_handler_fn(double_fault_handler)
@ -93,13 +97,23 @@ extern "x86-interrupt" fn breakpoint_handler(stack_frame: InterruptStackFrame) {
trace!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
error!("EXCEPTION: PAGE FAULT {error_code:?}\n{:#?}", stack_frame);
trace!("CR2: {:x}", Cr2::read_raw());
trace!("SCREE");
loop {}
}
extern "x86-interrupt" fn double_fault_handler(
stack_frame: InterruptStackFrame,
// NOTE(able): ignore this always is 0
_error_code: u64,
) -> ! {
bsod(BSODSource::DoubleFault(&stack_frame));
// panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
// bsod(BSODSource::DoubleFault(&stack_frame));
panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}
/* SAFETY


@ -1,7 +1,7 @@
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use limine::{LimineMemmapResponse, LimineMemoryMapEntryType};
use x86_64::{
structures::paging::{
FrameAllocator, FrameDeallocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame,
FrameAllocator, FrameDeallocator, OffsetPageTable, PageTable, PhysFrame,
Size4KiB,
},
PhysAddr, VirtAddr,
@ -25,69 +25,13 @@ unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut
&mut *page_table_ptr
}
fn translate_addr_inner(addr: VirtAddr, physical_memory_offset: VirtAddr) -> Option<PhysAddr> {
use x86_64::registers::control::Cr3;
use x86_64::structures::paging::page_table::FrameError;
let (level_4_table_frame, _) = Cr3::read();
let table_indexes = [
addr.p4_index(),
addr.p3_index(),
addr.p2_index(),
addr.p1_index(),
];
let mut frame = level_4_table_frame;
for &index in &table_indexes {
// convert the frame into a page table reference
let virt = physical_memory_offset + frame.start_address().as_u64();
let table_ptr: *const PageTable = virt.as_ptr();
let table = unsafe { &*table_ptr };
let entry = &table[index];
frame = match entry.frame() {
Ok(frame) => frame,
Err(FrameError::FrameNotPresent) => return None,
Err(FrameError::HugeFrame) => panic!["huge pages not supported"],
};
}
Some(frame.start_address() + u64::from(addr.page_offset()))
}
pub unsafe fn translate_addr(addr: VirtAddr, physical_memory_offset: VirtAddr) -> Option<PhysAddr> {
translate_addr_inner(addr, physical_memory_offset)
}
pub fn create_example_mapping(
page: Page,
mapper: &mut OffsetPageTable,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) {
use x86_64::structures::paging::PageTableFlags as Flags;
let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
let flags = Flags::PRESENT | Flags::WRITABLE;
let map_to_result = unsafe { mapper.map_to(page, frame, flags, frame_allocator) };
map_to_result.expect("map_to failed").flush();
}
pub struct EmptyFrameAllocator;
unsafe impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
None
}
}
pub struct BootInfoFrameAllocator {
memory_map: &'static MemoryMap,
memory_map: &'static LimineMemmapResponse,
next: usize,
}
impl BootInfoFrameAllocator {
pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
pub unsafe fn init(memory_map: &'static LimineMemmapResponse) -> Self {
Self {
memory_map,
next: 0,
@ -95,11 +39,11 @@ impl BootInfoFrameAllocator {
}
fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
let regions = self.memory_map.iter();
let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
let addr_range = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
let frame_address = addr_range.flat_map(|r| r.step_by(4096));
frame_address.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
self.memory_map.mmap().unwrap().iter()
.filter(|r| r.typ == LimineMemoryMapEntryType::Usable)
.map(|r| r.base..r.base + r.len)
.flat_map(|r| r.step_by(4096))
.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
}
}


@ -10,27 +10,24 @@ pub mod init;
pub mod interrupts;
pub mod memory;
use crate::arch::drivers::allocator;
use bootloader::{entry_point, BootInfo};
use limine::*;
use x86_64::{instructions::hlt, VirtAddr};
#[cfg(not(test))]
entry_point![start];
use crate::serial_println;
use self::drivers::allocator;
static HHDM: LimineHhdmRequest = LimineHhdmRequest::new(0);
static MMAP: LimineMmapRequest = LimineMmapRequest::new(0);
#[cfg(not(test))]
#[no_mangle]
pub fn start(boot_info: &'static BootInfo) -> ! {
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
pub fn x86_64_start() -> ! {
let hhdm = HHDM.get_response().get().unwrap();
let mmap = MMAP.get_response().get().unwrap();
let phys_mem_offset = VirtAddr::new(hhdm.offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator =
unsafe { memory::BootInfoFrameAllocator::init(&boot_info.memory_map) };
// let page = Page::containing_address(VirtAddr::new(0xdeadbeaf000));
// memory::create_example_mapping(page, &mut mapper, &mut frame_allocator);
//
// let page_ptr: *mut u64 = page.start_address().as_mut_ptr();
// unsafe { page_ptr.offset(400).write_volatile(0xf021_f077_f065_804e) };
let mut frame_allocator = unsafe { memory::BootInfoFrameAllocator::init(mmap) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");


@ -19,16 +19,22 @@ pub enum LogLevel {
#[derive(Serialize, Debug, Deserialize)]
pub struct KernelConfig {
pub boot: BootConfig,
pub logging: LoggingConfig,
pub tests: TestsConfig,
}
impl KernelConfig {
pub fn new() -> Self {
toml::from_str(include_str!("../assets/kernel.toml")).unwrap()
KernelConfig::default()
}
pub fn load_from_string(toml_string: &str) -> Self {
match toml::from_str(toml_string) {
Ok(kernel_conf) => kernel_conf,
Err(err) => {
error!("Error {}", err);
KernelConfig::new()
}
}
}
pub fn log_level(&self) -> LevelFilter {
use LevelFilter::*;
match self.logging.level {
@ -44,7 +50,15 @@ impl KernelConfig {
impl Default for KernelConfig {
fn default() -> Self {
Self::new()
Self {
logging: LoggingConfig {
enabled: true,
log_to_serial: true,
log_to_vterm: false,
level: LogLevel::Trace,
filter: Vec::new(),
},
}
}
}
@ -56,15 +70,3 @@ pub struct LoggingConfig {
pub level: LogLevel,
pub filter: Vec<String>,
}
#[derive(Serialize, Debug, Deserialize)]
pub struct TestsConfig {
pub run_tests: bool,
pub run_demos: bool,
pub run_shader_tests: bool,
}
#[derive(Serialize, Debug, Deserialize)]
pub struct BootConfig {
pub system_processes: Vec<String>,
pub user_processes: Vec<String>,
}


@ -1,4 +1,4 @@
use kernel::device_interface::CharacterDevice;
use crate::device_interface::CharacterDevice;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DevNull;


@ -1,4 +1,4 @@
use kernel::device_interface::CharacterDevice;
use crate::device_interface::CharacterDevice;
#[derive(Debug)]
pub struct DevUnicode {


@ -1,4 +1,4 @@
use kernel::device_interface::CharacterDevice;
use crate::device_interface::CharacterDevice;
#[derive(Debug)]
pub struct DevZero;


@ -2,4 +2,4 @@ pub mod dev_null;
pub mod dev_unicode;
pub mod dev_zero;
pub use kernel::device_interface::CharacterDevice;
pub use crate::device_interface::CharacterDevice;


@ -1,9 +1,9 @@
// ! A virtual terminal device.
use crate::device_interface::CharacterDevice;
use core::ops::Not;
use core::sync::atomic::AtomicU32;
use core::sync::atomic::Ordering;
use kernel::device_interface::CharacterDevice;
use crate::pixel_format::Rgba64;


@ -6,10 +6,10 @@ pub mod pci;
pub use self::Device::*;
use crate::device_interface::{BlockDevice, CharacterDevice};
use crate::devices::dev_vterm::VTerm;
use character_devs::{dev_null::DevNull, dev_unicode::DevUnicode, dev_zero::DevZero};
use hashbrown::HashMap;
use kernel::device_interface::{BlockDevice, CharacterDevice};
use spin::Lazy;
pub static DEVICE_TABLE: Lazy<spin::Mutex<DeviceTable>> =


@ -1,4 +1,4 @@
use kernel::device_interface::CharacterDevice;
use crate::device_interface::CharacterDevice;
pub struct Serial {
pub base: usize,


@ -0,0 +1,24 @@
// TODO: Reorganize and make part of this into a limine agnostic API for general purpose graphics
use crate::kmain::FRAMEBUFFER;
pub fn clear_screen() {
{
// TODO: setup a proper framebuffer handler
let fb_response = FRAMEBUFFER.get_response().get().unwrap();
for fb in fb_response.framebuffers().unwrap() {
trace!("Framebuffer {}x{}", fb.width, fb.height);
trace!("{}", fb.memory_model);
trace!("{}", fb.bpp);
let mut count = 0;
let total = fb.width * fb.height * 3;
while count != total {
unsafe {
let fb_ptr = fb.address.as_mut_ptr().unwrap();
*fb_ptr.offset((count).try_into().unwrap()) = 0x00;
*fb_ptr.offset((count + 1).try_into().unwrap()) = 0x00;
*fb_ptr.offset((count + 2).try_into().unwrap()) = 0x00;
}
count += 3;
}
}
}
}


@ -103,9 +103,9 @@ fn on_complete(mouse_state: MouseState) {
mouse.change_y(delta_y);
x86_64::instructions::interrupts::without_interrupts(|| {
use crate::TERM;
let mut term = TERM.lock();
term.set_dirty(true);
// use crate::TERM;
// let mut term = TERM.lock();
// term.set_dirty(true);
});
// draw_mouse((mouse.get_x() as usize, mouse.get_y() as usize));


@ -12,9 +12,13 @@ use crate::arch::memory::BootInfoFrameAllocator;
use crate::arch::{drivers::sysinfo::master, init, sloop};
use crate::devices::pci;
use crate::relib::network::socket::{SimpleSock, Socket};
use crate::{boot_conf::KernelConfig, scratchpad, systeminfo::RELEASE_TYPE, TERM};
use crate::{filesystem, hardware};
use kernel::KERNEL_VERSION;
use crate::{boot_conf::KernelConfig, scratchpad, systeminfo::RELEASE_TYPE};
// use crate::{boot_conf::KernelConfig, scratchpad, systeminfo::RELEASE_TYPE, TERM};
use crate::{filesystem, graphics_limine, hardware};
use kernel::VERSION;
use limine::LimineSmpInfo;
use limine::{LimineFramebufferRequest, LimineSmpRequest};
use spin::Lazy;
// FIXME: platform agnostic paging stuff
@ -22,6 +26,8 @@ use x86_64::structures::paging::{Mapper, Size4KiB};
// TODO: Change this structure to allow for multiple cores loaded
pub static KERNEL_CONF: Lazy<KernelConfig> = Lazy::new(KernelConfig::new);
pub static FRAMEBUFFER: LimineFramebufferRequest = LimineFramebufferRequest::new(0);
pub static SMP: LimineSmpRequest = LimineSmpRequest::new(0);
/// The main entry point of the kernel
pub fn kernel_main(
@ -30,14 +36,20 @@ pub fn kernel_main(
) -> ! {
init::init();
// /*
{
// TODO: Setup config loaded from disk
let mut kptr = KERNEL_CONF.as_mut_ptr();
unsafe {
*kptr = KernelConfig::new();
}
}
if KERNEL_CONF.logging.enabled {
log::set_max_level(KERNEL_CONF.log_level());
// println!("{:#?}", *KERNEL_CONF);
} else {
log::set_max_level(log::LevelFilter::Off);
}
// */
// let mut term = TERM.lock();
// term.initialize();
// term.set_dirty(true);
@ -82,46 +94,52 @@ pub fn kernel_main(
/*
log_version_data();
// */
{
// TODO: setup a proper framebuffer handler
let fb_response = FRAMEBUFFER.get_response().get().unwrap();
for fb in fb_response.framebuffers().unwrap() {
trace!("Framebuffer {}x{}", fb.width, fb.height);
trace!("{}", fb.memory_model);
trace!("{}", fb.bpp);
let mut count = 0;
let total = fb.width * fb.height * 3;
while count != total {
unsafe {
let fb_ptr = fb.address.as_mut_ptr().unwrap();
*fb_ptr.offset((count).try_into().unwrap()) = 0xff;
*fb_ptr.offset((count + 1).try_into().unwrap()) = 0xff;
*fb_ptr.offset((count + 2).try_into().unwrap()) = 0xff;
*fb_ptr.offset((count + 3).try_into().unwrap()) = 0x00;
*fb_ptr.offset((count + 4).try_into().unwrap()) = 0x00;
*fb_ptr.offset((count + 5).try_into().unwrap()) = 0x00;
// *fb_ptr.offset((count + 6).try_into().unwrap()) = 0x00;
}
count += 6;
}
}
}
// // SMP
// {
// let smp = SMP.get_response().get().unwrap();
// for cpu in smp.cpus().unwrap() {
// // unsafe {
// // cpu.goto_address = *(trace_hcf as *const u64);
// // }
// }
// }
graphics_limine::clear_screen();
scratchpad();
sloop()
}
pub fn traceloop() {
// TODO: Having an empty function double faults
// let mut last_time = 0.0;
/*
loop {
// FIXME: the following double faults
/*
let time = fetch_time();
if time > last_time {
last_time = time;
trace!("Timer");
}
*/
}
*/
/* TODO: This also double faults
let fs = &*crate::filesystem::FILE_SYSTEM.lock();
let path = format!("/home/able/bins/aos_test.wasm");
let home_exec_file = fs.open(&path.as_bytes(), OpenOptions::new().read(true));
drop(fs);
let mut binary_prog: Vec<u8> = vec![];
match home_exec_file {
Ok(file) => {
let ret = file.read_to_end(&mut binary_prog).unwrap();
}
_ => {}
}
wasm_jumploader::run_program(&binary_prog);
*/
extern "C" fn trace_hcf(info: *const LimineSmpInfo) -> ! {
trace!("CPU BOOT");
loop {}
}
pub fn cpu_socket_startup() {
@ -133,7 +151,7 @@ pub fn cpu_socket_startup() {
}
pub fn log_version_data() {
info!("{} v{}", RELEASE_TYPE, KERNEL_VERSION);
info!("{} v{}", RELEASE_TYPE, VERSION);
info!(
"Brand String: {}",
master().unwrap().brand_string().unwrap()
@ -144,9 +162,9 @@ pub static TICK: AtomicU64 = AtomicU64::new(0);
pub fn tick() {
x86_64::instructions::interrupts::without_interrupts(|| {
let mut term = TERM.lock();
// let mut term = TERM.lock();
term.draw_term();
// term.draw_term();
use core::sync::atomic::Ordering::Relaxed;
let mut data = TICK.load(Relaxed);


@ -57,6 +57,7 @@ pub mod print;
pub mod serial_print;
pub mod boot_conf;
pub mod device_interface;
pub mod devices;
pub mod driver_traits;
pub mod experiments;
@ -81,6 +82,7 @@ pub mod wasm_jumploader;
pub mod allocator;
// pub use allocator as aalloc;
pub mod graphics_limine;
pub mod handle;
pub mod hardware;
pub mod ipc;
@ -97,7 +99,6 @@ pub use driver_traits::*;
pub use experiments::*;
pub use graphics::*;
pub use kernel;
pub use kernel::messaging;
// pub use kernel::panic;
pub use kernel_state::*;
pub use keyboard::*;


@ -23,7 +23,7 @@ impl core::fmt::Write for Stdout {
fn write_str(&mut self, s: &str) -> Result<(), Error> {
// use mini_backtrace::Backtrace;
use crate::TERM;
// use crate::TERM;
// Capture up to 16 frames. This is returned using an ArrayVec that doesn't
// perform any dynamic memory allocation.
@ -38,14 +38,15 @@ impl core::fmt::Write for Stdout {
}
*/
// trace!("printing");
trace!("printing");
trace!("PRINT: {}", s);
// x86_64::instructions::interrupts::without_interrupts(|| {
let mut term = TERM.lock();
// let mut term = TERM.lock();
term.set_dirty(true);
term.print(s);
// term.set_dirty(true);
// term.print(s);
drop(term);
// drop(term);
// });
// trace!("Finished printing");
Ok(())


@ -43,26 +43,19 @@ impl acpi::AcpiHandler for AcpiStruct {
}
}
pub static TERM: Lazy<spin::Mutex<VTerm>> = Lazy::new(|| spin::Mutex::new(VTerm::new()));
// pub static TERM: Lazy<spin::Mutex<VTerm>> = Lazy::new(|| {
// trace!("mutex");
#[derive(Debug)]
pub struct Path {
pub path: Vec<String>,
}
impl Path {
pub fn new(path: String) -> Self {
let mut path_vec_string = vec![];
for part in path.split(&['\\', '/'][..]) {
path_vec_string.push(part.to_string());
}
Path {
path: path_vec_string,
}
}
}
// loop {}
// let mutex = spin::Mutex::new({
// trace!("vterm");
// let vterm = VTerm::new();
// trace!("vterm-done");
// vterm
// });
// trace!("mutex-done");
// mutex
// });
/// Experimental scratchpad for testing.
pub fn scratchpad() {
@ -133,7 +126,9 @@ pub fn scratchpad() {
let mut pci_ide_device = pci_ide_device.lock();
if let PciDevice::Ide(device) = &mut *pci_ide_device {
let mut first_sector = Vec::with_capacity(512);
device.read(Channel::Primary, Drive::Master, 0, 1, &mut first_sector).unwrap();
device
.read(Channel::Primary, Drive::Master, 0, 1, &mut first_sector)
.unwrap();
trace!("IDE Primary/Master sector 0: {first_sector:?}");
}
}
@ -212,88 +207,7 @@ pub fn real_shell() {
}
pub fn command_parser(user: String, command: String) {
let mut iter = command.split_whitespace();
let current_path = Path::new("/home/able".to_string());
trace!("Current path: {:?}", current_path);
let current_path = "/home/able/";
let bin_name = iter.next().unwrap();
let mut strin = String::new();
for stri in iter.clone() {
trace!("{}", stri);
strin.push_str(stri);
}
let conf_args;
match clparse::Arguments::parse_from_string(strin) {
Ok(ok) => conf_args = ok,
Err(err) => {
println!("ERROR: {}", err);
error!("{}", err);
return;
}
};
match bin_name {
// note: able asked for rhaish to stay in the repo but will be removed
// in the future so just comment it out for now
// "rhai" => {
// shell();
// }
"list" | "ls" => {
let mut vfs = VFS.lock();
let handle = vfs.resolve(current_path).unwrap();
let dir = vfs.fs_node(handle).unwrap();
drop(vfs);
for dir_entry in dir.directory().unwrap() {
println!("{}", dir_entry.name());
}
}
"echo" => match conf_args.1.arguments.get("p") {
Some(path) => echo_file(path.to_string()),
None => println!("No path provided"),
},
"test" => {}
"quit" => shutdown(),
"tree" => filesystem::tree("/").unwrap(),
_ => {
let file = {
let mut vfs = VFS.lock();
let path = format!("/home/{user}/bins/{bin_name}.wasm");
let handle = if let Ok(file) = vfs.resolve(path) {
file
} else {
let path = format!("/shared/bins/{bin_name}.wasm");
if let Ok(file) = vfs.resolve(path) {
file
} else {
let path = format!("/system/bins/{bin_name}.wasm");
match vfs.resolve(path) {
Ok(file) => file,
Err(error) => {
trace!("{:?}", error);
println!("No such binary: {}", bin_name);
error!("No such binary: {}", bin_name);
return;
}
}
}
};
vfs.fs_node(handle).unwrap()
};
let mut binary = vec![];
file.read(0, file.size(), &mut binary).unwrap();
let args = iter.collect::<Vec<&str>>();
println!("{:?}", args);
run_program(&binary);
}
}
unimplemented!()
}
pub fn sound(n_frequency: u32) {


@ -1,7 +1,7 @@
use {
crate::device_interface::CharacterDevice,
crate::devices::Device::{Block, Character, Vterm},
core::fmt::{Arguments, Error, Write},
kernel::device_interface::CharacterDevice,
};
#[derive(Debug, Clone)]


@ -66,5 +66,3 @@ pub fn test_kernel_main(boot_info: &'static BootInfo) -> ! {
loop {}
}
use bootloader::{entry_point, BootInfo};
use crate::test_main;


@ -27,6 +27,7 @@ pub struct VTerm {
impl VTerm {
/// Construct a new VTerm
pub fn new() -> Self {
trace!("Setting vga mode");
let mode = VGAE.lock();
mode.set_mode();
// let fb = mode.get_frame_buffer();


@ -1,12 +0,0 @@
file {
val=
name: "Hi"
extension: "txt"
size: 123
fn|
open: (None)->()
read: (Num)->(String)
write: (Num, String)->(Bool)
close: (None)->(Bool)
}


@ -1,12 +0,0 @@
[package]
name = "asl"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[dependencies.logos]
version = "0.12.1"
default-features = false


@ -1,10 +0,0 @@
DefinitionBlock ("test.aml", "DSDT", 1, "OEMID ", "TABLEID ", 0x00000000)
{
Scope (_SB)
{
Device (PCI0)
{
Name (_HID, EisaId ("PNP0A03"))
}
}
}


@ -1,35 +0,0 @@
use logos::{Lexer, Logos};
#[derive(Logos, Debug, Clone, Copy, PartialEq)]
enum Token {
#[regex(r"[ \t\n\f]+", logos::skip)]
#[error]
Error,
#[regex("[1-9]+", num_parser)]
Num(isize),
}
fn num_parser(lex: &mut Lexer<Token>) -> isize {
let slice = lex.slice();
let num_str: String = slice.into();
let num = num_str.parse::<isize>();
num.unwrap()
}
#[test]
pub fn num_test() {
let mut lex = Token::lexer("5 42 75");
assert_eq!(lex.next(), Some(Token::Num(5)));
assert_eq!(lex.next(), Some(Token::Num(42)));
assert_eq!(lex.next(), Some(Token::Num(75)));
}
#[test]
pub fn asl_simple_test() {
let lex = Token::lexer(include_str!("../assets/asl/asl_simple.asl"));
for token in lex {
// println!("{:?}", token);
assert_ne!(Token::Error, token);
}
}

base/README.md (new file, 4 lines)

@ -0,0 +1,4 @@
# Base Root Filesystem
This is the base root filesystem for ableOS. It's used by repbuild while
building the disk image.

base/boot/initrd.tar (new binary file, not shown)

base/boot/kernel.toml (new file, 7 lines)

@ -0,0 +1,7 @@
[logging]
enabled = true
level = "Trace"
log_to_serial = true
log_to_vterm = false
filter = ["ableos::ps2_mouse", "ableos::vterm", "ableos::devices::pci"]

base/boot/limine.cfg (new file, 22 lines)

@ -0,0 +1,22 @@
${ABLEOS_KERNEL}=boot:///boot/kernel
# TODO: Make a boot background image for ableOS
# ${WALLPAPER_PATH}=boot:///boot/bg.bmp
DEFAULT_ENTRY=1
TIMEOUT=3
VERBOSE=yes
INTERFACE_RESOLUTION=800x600
# Terminal related settings
# TERM_WALLPAPER=${WALLPAPER_PATH}
TERM_BACKDROP=008080
:ableOS
COMMENT=Default ableOS boot entry.
PROTOCOL=limine
KERNEL_PATH=${ABLEOS_KERNEL}
KERNEL_CMDLINE=
# Setting a default resolution for the framebuffer
RESOLUTION=800x600x24
MODULE_PATH=boot:///boot/initrd.tar
MODULE_CMDLINE=This is the first module.

base/home/able/bins/aos_test.wasm (new executable file, binary not shown)

Binary file not shown.


@ -0,0 +1,252 @@
# Able doesn't have a full keyboard
0-NONE
1-
2-
3-BACKSPACE
4-
5-
6-
7-
8-
9-TAB
10-
11-
12-
13-ENTER
14-
15-
16-SHIFT
17-CONTROL
18-ALT
19-PAUSE
20-CAPS_LOCK
21-
22-
23-
24-
25-
26-
27-
28-
29-
30-
31-
32-SPACE
33-PAGE_UP
34-PAGE_DOWN
35-END
36-HOME
37-ARROW_LEFT
38-ARROW_UP
39-ARROW_RIGHT
40-ARROW_DOWN
41-
42-
43-
44-
45-INSERT
46-DELETE
47-
48-0
49-1
50-2
51-3
52-4
53-5
54-6
55-7
56-8
57-9
58-
59-SEMICOLON
60-
61-EQUAL
62-
63-
64-
65-a
66-b
67-c
68-d
69-e
70-f
71-g
72-h
73-i
74-j
75-k
76-l
77-m
78-n
79-o
80-p
81-q
82-r
83-s
84-t
85-u
86-v
87-w
88-x
89-y
90-z
91-
92-
93-
94-
95-
96-
97-
98-
99-
100-
101-
102-
103-
106-
107-
108-
109-
110-
111-
112-FUNCTION_1
113-FUNCTION_2
114-FUNCTION_3
115-FUNCTION_4
116-FUNCTION_5
117-FUNCTION_6
118-FUNCTION_7
119-FUNCTION_8
120-FUNCTION_9
121-FUNCTION_10
122-FUNCTION_11
123-FUNCTION_12
124-
125-
126-
127-
128-
129-
130-
131-
132-
134-
135-
136-
137-
138-
139-
140-
141-
142-
143-
145-SCROLL_LOCK
146-
147-
148-
149-
150-
151-
152-
153-
154-
155-
156-
157-
158-
159-
160-
161-
162-
163-
164-
165-
166-
167-
168-
169-
170-
171-
172-
173-MINUS
174-
175-
176-
177-
178-
179-
180-
181-
182-
183-
184-
185-
186-
187-
188-COMMA
189-
190-PERIOD
191-FORWARD_SLASH
192-GRAVE
193-
194-
195-
196-
197-
198-
199-
200-
201-
202-
203-
204-
205-
206-
207-
208-
209-
210-
211-
212-
213-
214-
215-
216-
218-
219-BRACKET_LEFT
220-BACK_SLASH
221-BRACKET_RIGHT
222-QUOTE
223-
224-
225-
226-
227-
228-
229-
230-
231-
232-
233-
234-
235-
236-
237-
238-
239-
240-
241-
242-
243-
244-
245-
246-
247-
248-
249-
250-
251-
252-
253-
254-
255-
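
The keymap file above stores one `code-NAME` pair per line, with an empty name meaning the code is unmapped and `#` starting a comment. A minimal parsing sketch under those assumptions follows; this is not the actual ableOS keymap loader.

// Parse "code-NAME" lines into (code, name) pairs; illustrative only.
fn parse_keymap(src: &str) -> Vec<(u8, String)> {
    src.lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .filter_map(|line| {
            let (code, name) = line.split_once('-')?;
            let code: u8 = code.trim().parse().ok()?;
            // Lines such as "21-" have an empty name: that code is unmapped.
            (!name.is_empty()).then(|| (code, name.to_string()))
        })
        .collect()
}

fn main() {
    let sample = "# comment\n0-NONE\n3-BACKSPACE\n21-\n65-a\n";
    for (code, name) in parse_keymap(sample) {
        println!("{code} => {name}");
    }
}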

base/home/able/layout.txt (new file, 14 lines)

@ -0,0 +1,14 @@
boot/
├─ kernel.img
home/
├─ able/
│ ├─ bins/
│ ├─ config/
│ │ ├─ able_edit/
│ │ │ ├─ config.toml
│ ├─ irl_pic.png
│ ├─ password.txt
system/
├─ bins/
├─ configs/
│ ├─ kernel.toml


@ -1,16 +0,0 @@
[package]
name = "ext2"
version = "0.1.1"
authors = ["Szymon Walter <walter.szymon.98@gmail.com>",
"able <abl3theabove@gmail.com>"]
[dependencies]
bitflags = "1.0"
rlibc = { version = "1.0", optional = true }
spin = "0.9.2"
genfs = "^0.1.4"
[features]
default = ["no_std"]
no_std = ["rlibc"]


@ -1,22 +0,0 @@
# ext2-rs
## an ext2 implementation
Copyright © 2018, Szymon Walter
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from
the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not
be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
#### walter.szymon.98@gmail.com


@ -1,3 +0,0 @@
# ext2-rs
An OS and architecture independent implementation of ext2 in pure Rust.


@ -1,2 +0,0 @@
max_width = 80
wrap_comments = true


@ -1,124 +0,0 @@
//! Errors
use {
alloc::string::String,
core::fmt::{self, Display},
};
#[cfg(any(test, not(feature = "no_std")))]
use std::io;
/// The set of all possible errors
#[derive(Debug)]
pub enum Error {
/// Generic error
Other(String),
/// Bad magic number
BadMagic {
/// The magic number
magic: u16,
},
/// Out of bounds error
OutOfBounds {
/// index
index: usize,
},
/// Address out of bounds
AddressOutOfBounds {
///
sector: u32,
///
offset: u32,
///
size: usize,
},
/// Bad block group count
BadBlockGroupCount {
///
by_blocks: u32,
///
by_inodes: u32,
},
/// Inode Not Found
InodeNotFound {
/// inode number
inode: u32,
},
/// Inode is not a directory
NotADirectory {
/// inode number
inode: u32,
/// inode name
name: String,
},
/// Not Absolute Path
NotAbsolute {
/// path name
name: String,
},
/// Not Found
NotFound {
/// inode name
name: String,
},
// #[cfg(any(test, not(feature = "no_std")))]
// Io { inner: io::Error },
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Other(ref msg) => write!(f, "{}", msg),
Error::BadMagic {
magic,
} => write!(f, "invalid magic value: {}", magic),
Error::OutOfBounds {
index,
} => write!(f, "index ouf of bounds: {}", index),
Error::AddressOutOfBounds {
sector,
offset,
size,
} => write!(f, "address ouf of bounds: {}:{} with a block size of: {}",
sector, offset, size),
Error::BadBlockGroupCount {
by_blocks,
by_inodes,
} => write!(f, "conflicting block group count data; by blocks: {}, by inodes: {}", by_blocks, by_inodes),
Error::InodeNotFound {
inode,
} => write!(f, "couldn't find inode no. {}", &inode),
Error::NotADirectory {
inode,
ref name,
} => write!(f, "inode no. {} at: {} is not a directory", inode, &name),
Error::NotAbsolute {
ref name,
} => write!(f, "{} is not an absolute path", &name),
Error::NotFound {
ref name,
} => write!(f, "couldn't find {}", &name),
#[cfg(any(test, not(feature = "no_std")))]
Error::Io {
ref inner,
} => write!(f, "io error: {}", inner),
}
}
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Error {
unreachable!()
}
}
#[cfg(any(test, not(feature = "no_std")))]
impl From<io::Error> for Error {
fn from(inner: io::Error) -> Error {
Error::Io { inner }
}
}
/// Infalliable
pub enum Infallible {}


@ -1,177 +0,0 @@
//!
use {
alloc::vec::Vec,
core::mem,
error::Error,
sector::{Address, SectorSize},
sys::{
block_group::BlockGroupDescriptor, inode::Inode as RawInode,
superblock::Superblock,
},
volume::Volume,
};
pub mod sync;
#[allow(dead_code)]
pub(crate) struct Struct<T, S: SectorSize> {
pub inner: T,
pub offset: Address<S>,
}
impl<T, S: SectorSize> From<(T, Address<S>)> for Struct<T, S> {
#[inline]
fn from((inner, offset): (T, Address<S>)) -> Struct<T, S> {
Struct { inner, offset }
}
}
/// Safe wrapper for raw sys structs
pub struct Ext2<S: SectorSize, V: Volume<u8, S>> {
// TODO: should this have some different vis?
pub(crate) volume: V,
pub(crate) superblock: Struct<Superblock, S>,
pub(crate) block_groups: Struct<Vec<BlockGroupDescriptor>, S>,
}
impl<S: SectorSize, V: Volume<u8, S>> Ext2<S, V> {
///
pub fn new(volume: V) -> Result<Ext2<S, V>, Error> {
let superblock = unsafe { Struct::from(Superblock::find(&volume)?) };
let block_groups_offset = Address::with_block_size(
superblock.inner.first_data_block + 1,
0,
superblock.inner.log_block_size + 10,
);
let block_groups_count = superblock
.inner
.block_group_count()
.map(|count| count as usize)
.map_err(|(a, b)| Error::BadBlockGroupCount {
by_blocks: a,
by_inodes: b,
})?;
let block_groups = unsafe {
BlockGroupDescriptor::find_descriptor_table(
&volume,
block_groups_offset,
block_groups_count,
)?
};
let block_groups = Struct::from(block_groups);
Ok(Ext2 {
volume,
superblock,
block_groups,
})
}
/// Return the version of the filesystem
pub fn version(&self) -> (u32, u16) {
(
self.superblock.inner.rev_major,
self.superblock.inner.rev_minor,
)
}
/// Return inode size
pub fn inode_size(&self) -> usize {
if self.version().0 == 0 {
mem::size_of::<RawInode>()
} else {
// note: inodes bigger than 128 are not supported
self.superblock.inner.inode_size as usize
}
}
///
pub fn inodes_count(&self) -> usize {
self.superblock.inner.inodes_per_group as _
}
///
pub fn total_inodes_count(&self) -> usize {
self.superblock.inner.inodes_count as _
}
///
pub fn block_group_count(&self) -> Result<usize, Error> {
self.superblock
.inner
.block_group_count()
.map(|count| count as usize)
.map_err(|(a, b)| Error::BadBlockGroupCount {
by_blocks: a,
by_inodes: b,
})
}
///
pub fn total_block_count(&self) -> usize {
self.superblock.inner.blocks_count as _
}
///
pub fn free_block_count(&self) -> usize {
self.superblock.inner.free_blocks_count as _
}
///
pub fn block_size(&self) -> usize {
self.superblock.inner.block_size()
}
///
pub fn log_block_size(&self) -> u32 {
self.superblock.inner.log_block_size + 10
}
///
pub fn sector_size(&self) -> usize {
S::SIZE
}
///
pub fn log_sector_size(&self) -> u32 {
S::LOG_SIZE
}
}
#[cfg(test)]
mod tests {
use std::cell::RefCell;
use std::fs::File;
use sector::{Address, Size512};
use volume::Volume;
use super::Ext2;
#[test]
fn file_len() {
let file = RefCell::new(File::open("ext2.img").unwrap());
assert_eq!(
Address::<Size512>::from(2048_u64)
- Address::<Size512>::from(1024_u64),
Address::<Size512>::new(2, 0)
);
assert_eq!(
unsafe {
file.slice_unchecked(
Address::<Size512>::from(1024_u64)
..Address::<Size512>::from(2048_u64),
)
.len()
},
1024
);
}
#[test]
fn file() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Ext2::<Size512, _>::new(file);
assert!(
fs.is_ok(),
"Err({:?})",
fs.err().unwrap_or_else(|| unreachable!()),
);
let fs = fs.unwrap();
let vers = fs.version();
println!("version: {}.{}", vers.0, vers.1);
assert_eq!(128, fs.inode_size());
}
}


@ -1,905 +0,0 @@
//!
use crate::sys::inode::TypePerm;
use {
super::Ext2,
alloc::{
sync::Arc,
{string::String, vec::Vec},
},
core::{
fmt::{self, Debug},
iter::Iterator,
num::NonZeroU32,
},
error::Error,
genfs::*,
sector::{Address, SectorSize},
spin::{Mutex, MutexGuard},
sys::inode::Inode as RawInode,
volume::Volume,
};
/// DOCME: what is this?
pub struct Synced<T> {
inner: Arc<Mutex<T>>,
}
impl<T> Synced<T> {
/// DOCME: what is this?
pub fn with_inner(inner: T) -> Synced<T> {
Synced {
inner: Arc::new(Mutex::new(inner)),
}
}
/// DOCME: what is this?
pub fn inner<'a>(&'a self) -> MutexGuard<'a, T> {
self.inner.lock()
}
}
impl<T> Clone for Synced<T> {
fn clone(&self) -> Self {
Synced {
inner: self.inner.clone(),
}
}
}
impl<S: SectorSize, V: Volume<u8, S>> Synced<Ext2<S, V>> {
/// DOCME: what is this?
pub fn new(volume: V) -> Result<Synced<Ext2<S, V>>, Error> {
Ext2::new(volume).map(Synced::with_inner)
}
/// Get the root inode.
pub fn root_inode(&self) -> Inode<S, V> {
self.inode_nth(2).unwrap()
}
/// Get the inode at the given index.
pub fn inode_nth(&self, index: usize) -> Option<Inode<S, V>> {
self.inodes_nth(index).next()
}
/// DOCME: what is this?
pub fn inodes(&self) -> Inodes<S, V> {
self.inodes_nth(1)
}
/// DOCME: what is this?
pub fn inodes_nth(&self, index: usize) -> Inodes<S, V> {
assert!(index > 0, "inodes are 1-indexed");
let inner = self.inner();
Inodes {
fs: self.clone(),
log_block_size: inner.log_block_size(),
inode_size: inner.inode_size(),
inodes_per_group: inner.inodes_count(),
inodes_count: inner.total_inodes_count(),
index,
}
}
/// DOCME: what is this?
pub fn sector_size(&self) -> usize {
S::SIZE
}
/// DOCME: what is this?
pub fn log_sector_size(&self) -> u32 {
S::LOG_SIZE
}
}
impl<S: SectorSize, V: Volume<u8, S>> Fs for Synced<Ext2<S, V>> {
type Path = [u8];
type PathOwned = Vec<u8>;
type File = Inode<S, V>;
type Dir = Directory<S, V>;
type DirEntry = DirectoryEntry;
type Metadata = (); // TODO
type Permissions = (); // TODO
type Error = Error;
fn open(
&self,
abs_path: &Self::Path,
_options: &OpenOptions<Self::Permissions>,
) -> Result<Self::File, Self::Error> {
fn inner<'a, S, V, I>(
fs: &Synced<Ext2<S, V>>,
inode: Inode<S, V>,
mut path: I,
abs_path: &[u8],
) -> Result<Inode<S, V>, Error>
where
S: SectorSize,
V: Volume<u8, S>,
I: Iterator<Item = &'a [u8]>,
{
let name = match path.next() {
Some(name) => name,
None => return Ok(inode),
};
let mut dir =
inode.directory().ok_or_else(|| Error::NotADirectory {
inode: inode.num,
name: String::from_utf8_lossy(abs_path).into_owned(),
})?;
let entry = dir
.find(|entry| {
entry.is_err() || entry.as_ref().unwrap().name == name
})
.ok_or_else(|| Error::NotFound {
name: String::from_utf8_lossy(abs_path).into_owned(),
})??;
let inode = fs
.inode_nth(entry.inode)
.ok_or(Error::InodeNotFound { inode: inode.num })?;
inner(fs, inode, path, abs_path)
}
if abs_path.is_empty() || abs_path[0] != b'/' {
return Err(Error::NotAbsolute {
name: String::from_utf8_lossy(abs_path).into_owned(),
});
}
if abs_path == b"/" {
return Ok(self.root_inode());
}
let mut path = abs_path.split(|byte| *byte == b'/');
path.next();
let root = self.root_inode();
inner(self, root, path, abs_path)
}
fn remove_file(&mut self, _path: &Self::Path) -> Result<(), Self::Error> {
unimplemented!()
}
fn metadata(
&self,
_path: &Self::Path,
) -> Result<Self::Metadata, Self::Error> {
unimplemented!()
}
fn symlink_metadata(
&self,
_path: &Self::Path,
) -> Result<Self::Metadata, Self::Error> {
unimplemented!()
}
fn rename(
&mut self,
_from: &Self::Path,
_to: &Self::Path,
) -> Result<(), Self::Error> {
unimplemented!()
}
fn copy(
&mut self,
_from: &Self::Path,
_to: &Self::Path,
) -> Result<u64, Self::Error> {
unimplemented!()
}
fn hard_link(
&mut self,
_src: &Self::Path,
_dst: &Self::Path,
) -> Result<(), Self::Error> {
unimplemented!()
}
fn symlink(
&mut self,
_src: &Self::Path,
_dst: &Self::Path,
) -> Result<(), Self::Error> {
unimplemented!()
}
fn read_link(
&self,
_path: &Self::Path,
) -> Result<Self::PathOwned, Self::Error> {
unimplemented!()
}
fn canonicalize(
&self,
_path: &Self::Path,
) -> Result<Self::PathOwned, Self::Error> {
unimplemented!()
}
fn create_dir(
&mut self,
_path: &Self::Path,
_options: &DirOptions<Self::Permissions>,
) -> Result<(), Self::Error> {
unimplemented!()
}
fn remove_dir(&mut self, _path: &Self::Path) -> Result<(), Self::Error> {
unimplemented!()
}
fn remove_dir_all(
&mut self,
_path: &Self::Path,
) -> Result<(), Self::Error> {
unimplemented!()
}
fn read_dir(&self, path: &Self::Path) -> Result<Self::Dir, Self::Error> {
let inode = self.open(path, OpenOptions::new().read(true))?;
inode.directory().ok_or(Error::NotADirectory {
inode: inode.num,
name: String::from_utf8_lossy(path).into_owned(),
})
}
fn set_permissions(
&mut self,
_path: &Self::Path,
_perm: Self::Permissions,
) -> Result<(), Self::Error> {
unimplemented!()
}
}
impl<S: SectorSize, V: Volume<u8, S>> Debug for Synced<Ext2<S, V>> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Synced<Ext2<{}>>", S::SIZE)
}
}
#[derive(Debug, Clone)]
/// A collection of inodes.
pub struct Inodes<S: SectorSize, V: Volume<u8, S>> {
fs: Synced<Ext2<S, V>>,
log_block_size: u32,
inode_size: usize,
inodes_per_group: usize,
inodes_count: usize,
index: usize,
}
impl<S: SectorSize, V: Volume<u8, S>> Iterator for Inodes<S, V> {
type Item = Inode<S, V>;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.inodes_count {
let block_group = (self.index - 1) / self.inodes_per_group;
let index = (self.index - 1) % self.inodes_per_group;
self.index += 1;
let fs = self.fs.inner();
let inodes_block =
fs.block_groups.inner[block_group].inode_table_block;
let offset = Address::with_block_size(
inodes_block,
(index * self.inode_size) as i32,
self.log_block_size,
);
let raw = unsafe {
RawInode::find_inode(&fs.volume, offset, self.inode_size).ok()
};
raw.map(|(raw, offset)| {
Inode::new(
self.fs.clone(),
raw,
offset,
(self.index - 1) as u32,
)
})
} else {
None
}
}
}
#[derive(Debug)]
/// A single inode in an ext2 filesystem.
pub struct Inode<S: SectorSize, V: Volume<u8, S>> {
fs: Synced<Ext2<S, V>>,
inner: RawInode,
addr: Address<S>,
num: u32,
}
impl<S: SectorSize, V: Volume<u8, S>> Clone for Inode<S, V> {
fn clone(&self) -> Self {
Inode {
fs: self.fs.clone(),
inner: self.inner,
addr: self.addr,
num: self.num,
}
}
}
impl<S: SectorSize, V: Volume<u8, S>> Inode<S, V> {
///
pub fn new(
fs: Synced<Ext2<S, V>>,
inner: RawInode,
addr: Address<S>,
num: u32,
) -> Inode<S, V> {
Inode {
fs,
inner,
addr,
num,
}
}
/// Read to the end of a buffer.
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> Result<usize, Error> {
let total_size = self.size();
let capacity = buf.capacity();
if capacity < total_size {
buf.reserve_exact(total_size - capacity);
}
unsafe {
buf.set_len(total_size);
}
let size = self.read(&mut buf[..]);
size.map(|size| {
unsafe {
buf.set_len(size);
}
size
})
.map_err(|err| {
unsafe {
buf.set_len(0);
}
err
})
}
/// Return blocks on a sector
pub fn blocks(&self) -> InodeBlocks<S, V> {
InodeBlocks {
inode: self.clone(),
index: 0,
}
}
/// return a directory iterator
pub fn directory(&self) -> Option<Directory<S, V>> {
if self.is_dir() {
Some(Directory {
blocks: self.blocks(),
offset: 0,
buffer: None,
block_size: {
let fs = self.fs.inner();
fs.block_size()
},
})
} else {
None
}
}
/// Determine if an inode is a directory
pub fn is_dir(&self) -> bool {
{ self.inner.type_perm }.contains(TypePerm::DIRECTORY)
// self.inner.type_perm.contains(TypePerm::DIRECTORY)
}
///
pub fn block(&self, index: usize) -> Option<NonZeroU32> {
self.try_block(index).ok().and_then(|block| block)
}
/// Try to get a block
pub fn try_block(
&self,
mut index: usize,
) -> Result<Option<NonZeroU32>, Error> {
// number of blocks in direct table: 12
// number of blocks in indirect table: block_size/4
// why?
// - a block is n bytes long
// - a block address occupies 32 bits, or 4 bytes
// - thus, n/4
// number of blocks in doubly table: (block_size/4)^2
// why?
// - every entry in the doubly table points to another block
// - that's n/4 blocks, where n is the block size
// - every block contains n/4 block pointers
// - that's n/4 blocks with n/4 pointers each = (n/4)^2
// number of blocks in triply table: (block_size/4)^3
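// Worked example (assuming a 1 KiB block, so block_size/4 = 256 pointers
// per block; the numbers are illustrative and not from this source file):
//   direct pointers          -> file blocks      0 ..= 11
//   singly indirect (256)    -> file blocks     12 ..= 267
//   doubly indirect (256^2)  -> file blocks    268 ..= 65_803
//   triply indirect (256^3)  -> file blocks 65_804 ..= 16_843_019
// which is what the successive `index -= ...` adjustments below implement.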
fn block_index<S: SectorSize, V: Volume<u8, S>>(
volume: &V,
block: u32,
index: usize,
log_block_size: u32,
) -> Result<Option<NonZeroU32>, Error> {
let offset = (index * 4) as i32;
let end = offset + 4;
let addr = Address::with_block_size(block, offset, log_block_size);
let end = Address::with_block_size(block, end, log_block_size);
let block = volume.slice(addr..end);
match block {
Ok(block) => unsafe {
Ok(NonZeroU32::new(block.dynamic_cast::<u32>().0))
},
Err(err) => Err(err.into()),
}
}
let fs = self.fs.inner();
let bs4 = fs.block_size() / 4;
let log_block_size = fs.log_block_size();
if index < 12 {
return Ok(NonZeroU32::new(self.inner.direct_pointer[index]));
}
index -= 12;
if index < bs4 {
let block = self.inner.indirect_pointer;
return block_index(&fs.volume, block, index, log_block_size);
}
index -= bs4;
if index < bs4 * bs4 {
let indirect_index = index >> (log_block_size + 2);
let block = match block_index(
&fs.volume,
self.inner.doubly_indirect,
indirect_index,
log_block_size,
) {
Ok(Some(block)) => block.get(),
Ok(None) => return Ok(None),
Err(err) => return Err(err),
};
return block_index(
&fs.volume,
block,
index & (bs4 - 1),
log_block_size,
);
}
index -= bs4 * bs4;
if index < bs4 * bs4 * bs4 {
let doubly_index = index >> (2 * log_block_size + 4);
let indirect = match block_index(
&fs.volume,
self.inner.triply_indirect,
doubly_index,
log_block_size,
) {
Ok(Some(block)) => block.get(),
Ok(None) => return Ok(None),
Err(err) => return Err(err),
};
let indirect_index = (index >> (log_block_size + 2)) & (bs4 - 1);
let block = match block_index(
&fs.volume,
indirect as u32,
indirect_index,
log_block_size,
) {
Ok(Some(block)) => block.get(),
Ok(None) => return Ok(None),
Err(err) => return Err(err),
};
return block_index(
&fs.volume,
block,
index & (bs4 - 1),
log_block_size,
);
}
Ok(None)
}
///
pub fn in_use(&self) -> bool {
self.inner.hard_links > 0
}
/// return the uid
pub fn uid(&self) -> u16 {
self.inner.uid
}
///
pub fn sectors(&self) -> usize {
self.inner.sectors_count as usize
}
///
pub fn size32(&self) -> u32 {
self.inner.size_low
}
///
pub fn size64(&self) -> u64 {
self.inner.size_low as u64 | (self.inner.size_high as u64) << 32
}
///
#[cfg(target_pointer_width = "64")]
#[inline]
pub fn size(&self) -> usize {
self.size64() as usize
}
#[cfg(target_pointer_width = "32")]
#[inline]
pub fn size(&self) -> usize {
self.size32() as usize
}
/// ableOS: expose type_perm
pub fn type_perm(&self) -> TypePerm {
self.inner.type_perm
}
}
impl<S: SectorSize, V: Volume<u8, S>> File for Inode<S, V> {
type Error = Error;
fn read(&self, buf: &mut [u8]) -> Result<usize, Error> {
let total_size = self.size();
let block_size = {
let fs = self.fs.inner();
fs.block_size()
};
let mut offset = 0;
for block in self.blocks() {
match block {
Ok((data, _)) => {
let data_size = block_size
.min(total_size - offset)
.min(buf.len() - offset);
let end = offset + data_size;
buf[offset..end].copy_from_slice(&data[..data_size]);
offset += data_size;
}
Err(err) => return Err(err),
}
}
Ok(offset)
}
fn write(&mut self, _buf: &[u8]) -> Result<usize, Self::Error> {
unimplemented!()
}
fn flush(&mut self) -> Result<(), Self::Error> {
unimplemented!()
}
fn seek(&mut self, _pos: SeekFrom) -> Result<u64, Self::Error> {
unimplemented!()
}
}
///
#[derive(Debug, Clone)]
pub struct InodeBlocks<S: SectorSize, V: Volume<u8, S>> {
inode: Inode<S, V>,
index: usize,
}
impl<S: SectorSize, V: Volume<u8, S>> Iterator for InodeBlocks<S, V> {
type Item = Result<(Vec<u8>, Address<S>), Error>;
fn next(&mut self) -> Option<Self::Item> {
let block = self.inode.try_block(self.index);
let block = match block {
Ok(Some(ok)) => ok,
Ok(None) => return None,
Err(err) => return Some(Err(err)),
};
self.index += 1;
let fs = self.inode.fs.inner();
let block = block.get();
let log_block_size = fs.log_block_size();
let offset = Address::with_block_size(block, 0, log_block_size);
let end = Address::with_block_size(block + 1, 0, log_block_size);
let slice = fs
.volume
.slice(offset..end)
.map(|slice| (slice.to_vec(), offset))
.map_err(|err| err.into());
Some(slice)
}
}
#[derive(Debug, Clone)]
/// A directory structure
pub struct Directory<S: SectorSize, V: Volume<u8, S>> {
blocks: InodeBlocks<S, V>,
offset: usize,
buffer: Option<Vec<u8>>,
block_size: usize,
}
impl<S: SectorSize, V: Volume<u8, S>> Dir<DirectoryEntry, Error>
for Directory<S, V>
{
}
impl<S: SectorSize, V: Volume<u8, S>> Iterator for Directory<S, V> {
type Item = Result<DirectoryEntry, Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.buffer.is_none() || self.offset >= self.block_size {
self.buffer = match self.blocks.next() {
None => return None,
Some(Ok((block, _))) => Some(block),
Some(Err(err)) => return Some(Err(err)),
};
self.offset = 0;
}
let buffer = &self.buffer.as_ref().unwrap()[self.offset..];
let inode = buffer[0] as u32
| (buffer[1] as u32) << 8
| (buffer[2] as u32) << 16
| (buffer[3] as u32) << 24;
if inode == 0 {
return None;
}
let size = buffer[4] as u16 | (buffer[5] as u16) << 8;
let len = buffer[6];
let ty = buffer[7];
let name = buffer[8..8 + len as usize].to_vec();
self.offset += size as usize;
Some(Ok(DirectoryEntry {
name,
inode: inode as usize,
ty,
}))
}
}
#[derive(Clone)]
/// A directory entry
pub struct DirectoryEntry {
/// The name of the entry
pub name: Vec<u8>,
/// The inode of the entry
pub inode: usize,
///
pub ty: u8,
}
impl DirEntry for DirectoryEntry {
type Path = [u8];
type PathOwned = Vec<u8>;
type Metadata = (); // TODO
type FileType = u8; // TODO: enum FileType
type Error = Error;
fn path(&self) -> Self::PathOwned {
unimplemented!()
}
fn metadata(&self) -> Result<Self::Metadata, Self::Error> {
unimplemented!()
}
fn file_type(&self) -> Result<Self::FileType, Self::Error> {
Ok(self.ty)
}
fn file_name(&self) -> &Self::Path {
&self.name
}
}
impl DirectoryEntry {
/// Turns a filename into a string for display
pub fn file_name_string(&self) -> String {
let mut filename = String::new();
for ch in &self.name {
filename.push(*ch as char);
}
filename
}
}
#[cfg(test)]
mod tests {
use std::cell::RefCell;
use std::fs::File;
use genfs::{File as GenFile, Fs, OpenOptions};
use sector::{SectorSize, Size512};
use volume::Volume;
use super::{Ext2, Inode, Synced};
#[test]
fn file() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file);
assert!(
fs.is_ok(),
"Err({:?})",
fs.err().unwrap_or_else(|| unreachable!()),
);
let fs = fs.unwrap();
let inner = fs.inner();
let vers = inner.version();
println!("version: {}.{}", vers.0, vers.1);
assert_eq!(128, inner.inode_size());
}
#[test]
fn inodes() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file);
assert!(
fs.is_ok(),
"Err({:?})",
fs.err().unwrap_or_else(|| unreachable!()),
);
let fs = fs.unwrap();
let inodes = fs.inodes().filter(|inode| inode.in_use());
for inode in inodes {
println!("{:?}", inode);
}
}
#[test]
fn inode_blocks() {
use std::str;
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file).unwrap();
let inodes = fs.inodes().filter(|inode| {
inode.in_use() && inode.uid() == 1000 && inode.size() < 1024
});
for inode in inodes {
println!("{:?}", inode);
let size = inode.size();
for block in inode.blocks() {
let (data, _) = block.unwrap();
assert_eq!(data.len(), {
let fs = fs.inner();
fs.block_size()
});
println!("{:?}", &data[..size]);
let _ = str::from_utf8(&data[..size])
.map(|string| println!("{}", string));
}
}
}
#[test]
fn read_inode() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file).unwrap();
let inodes = fs.inodes().filter(|inode| {
inode.in_use() && inode.uid() == 1000 && inode.size() < 1024
});
for inode in inodes {
let mut buf = Vec::with_capacity(inode.size());
unsafe {
buf.set_len(inode.size());
}
let size = inode.read(&mut buf[..]);
assert!(size.is_ok());
let size = size.unwrap();
assert_eq!(size, inode.size());
unsafe {
buf.set_len(size);
}
}
}
#[test]
fn read_big() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file).unwrap();
let inodes = fs.inodes().filter(|inode| {
inode.in_use() && inode.uid() == 1000 && inode.size() == 537600
});
for inode in inodes {
let mut buf = Vec::with_capacity(inode.size());
unsafe {
buf.set_len(inode.size());
}
let size = inode.read(&mut buf[..]);
assert!(size.is_ok());
let size = size.unwrap();
assert_eq!(size, inode.size());
unsafe {
buf.set_len(size);
}
for (i, &x) in buf.iter().enumerate() {
if i & 1 == 0 {
assert_eq!(x, b'u', "{}", i);
} else {
assert_eq!(x, b'\n', "{}", i);
}
}
}
}
#[test]
fn walkdir() {
use std::str;
fn walk<'vol, S: SectorSize, V: Volume<u8, S>>(
fs: &'vol Synced<Ext2<S, V>>,
inode: Inode<S, V>,
name: String,
) {
inode.directory().map(|dir| {
for entry in dir {
assert!(entry.is_ok());
let entry = entry.unwrap();
let entry_name = str::from_utf8(&entry.name).unwrap_or("?");
println!("{}/{} => {}", name, entry_name, entry.inode,);
if entry_name != "." && entry_name != ".." {
walk(
fs,
fs.inode_nth(entry.inode).unwrap(),
format!("{}/{}", name, entry_name),
);
}
}
});
}
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file).unwrap();
let root = fs.root_inode();
walk(&fs, root, String::new());
}
#[test]
fn find() {
use std::str;
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Synced::<Ext2<Size512, _>>::new(file).unwrap();
let found = fs.open(b"/home/funky/README.md", &OpenOptions::new());
assert!(found.is_ok());
let inode = found.unwrap();
let mut vec = Vec::new();
assert!(inode.read_to_end(&mut vec).is_ok());
println!("{}", str::from_utf8(&vec).unwrap());
}
}


@ -1,36 +0,0 @@
//! Ext2 crate for ableOS
#![deny(missing_docs)]
#![feature(min_specialization, step_trait, associated_type_defaults)]
#![cfg_attr(all(not(test), feature = "no_std"), no_std)]
extern crate alloc;
#[macro_use]
extern crate bitflags;
extern crate genfs;
extern crate spin;
#[cfg(any(test, not(feature = "no_std")))]
extern crate core;
pub mod error;
pub mod fs;
pub mod sector;
pub mod sys;
pub mod volume;
#[cfg(test)]
mod tests {
use sys::block_group::*;
use sys::inode::*;
use sys::superblock::*;
#[test]
fn sizes() {
use std::mem::size_of;
assert_eq!(size_of::<Superblock>(), 1024);
assert_eq!(size_of::<BlockGroupDescriptor>(), 32);
assert_eq!(size_of::<Inode>(), 128);
}
}


@ -1,242 +0,0 @@
//! Sector data.
use core::{
fmt::{self, Debug, Display, LowerHex},
iter::Step,
marker::PhantomData,
ops::{Add, Sub},
};
/// Size of a sector in bytes
pub trait SectorSize: Clone + Copy + PartialEq + PartialOrd + 'static {
/// Base-2 logarithm of the sector size in bytes
const LOG_SIZE: u32;
/// Sector size in bytes
const SIZE: usize = 1 << Self::LOG_SIZE;
/// Bitmask selecting the byte offset within a sector
const OFFSET_MASK: u32 = (Self::SIZE - 1) as u32;
}
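// Illustrative relationship between the three constants, using `Size512` below:
// LOG_SIZE = 9, so SIZE = 1 << 9 = 512 and OFFSET_MASK = 511 (0x1FF), i.e. the
// low 9 bits of a byte index select the offset inside the sector.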
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
/// 512-byte sector size
pub struct Size512;
impl SectorSize for Size512 {
const LOG_SIZE: u32 = 9;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
/// 1024-byte sector size
pub struct Size1024;
impl SectorSize for Size1024 {
const LOG_SIZE: u32 = 10;
}
/// 2048-byte sector size
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Size2048;
impl SectorSize for Size2048 {
const LOG_SIZE: u32 = 11;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
/// 4096-byte sector size
pub struct Size4096;
impl SectorSize for Size4096 {
const LOG_SIZE: u32 = 12;
}
/// Address in a physical sector
#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Address<S: SectorSize> {
sector: u32,
offset: u32,
_phantom: PhantomData<S>,
}
impl<S: SectorSize> Address<S> {
/// Create an address from a sector number and an in-sector byte offset without normalising the offset
pub unsafe fn new_unchecked(sector: u32, offset: u32) -> Address<S> {
assert!((offset as usize) < S::SIZE, "offset out of sector bounds");
let _phantom = PhantomData;
Address {
sector,
offset,
_phantom,
}
}
/// Create an address, folding the signed byte offset into the sector number
pub fn new(sector: u32, offset: i32) -> Address<S> {
let sector = (sector as i32 + (offset >> S::LOG_SIZE)) as u32;
let offset = offset.unsigned_abs() & S::OFFSET_MASK;
unsafe { Address::new_unchecked(sector, offset) }
}
/// Create an address from a filesystem block number and a byte offset within that block
pub fn with_block_size(
block: u32,
offset: i32,
log_block_size: u32,
) -> Address<S> {
let block = (block as i32 + (offset >> log_block_size)) as u32;
let offset = offset.unsigned_abs() & ((1 << log_block_size) - 1);
let log_diff = log_block_size as i32 - S::LOG_SIZE as i32;
let top_offset = offset >> S::LOG_SIZE;
let offset = offset & ((1 << S::LOG_SIZE) - 1);
let sector = block << log_diff | top_offset;
unsafe { Address::new_unchecked(sector, offset) }
}
/// Convert the address into an absolute byte index
pub fn into_index(&self) -> u64 {
((self.sector as u64) << S::LOG_SIZE) + self.offset as u64
}
/// Get the size of the sector
pub const fn sector_size(&self) -> usize {
S::SIZE
}
/// Return the base-2 logarithm of the sector size
pub const fn log_sector_size(&self) -> u32 {
S::LOG_SIZE
}
/// Return the sector number
pub fn sector(&self) -> u32 {
self.sector
}
/// Return the offset in the sector
pub fn offset(&self) -> u32 {
self.offset
}
}
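// Example (mirrors the `conv` test at the bottom of this file): with 1 KiB
// filesystem blocks on 512-byte sectors,
//     Address::<Size512>::with_block_size(1, 256, 10).into_index() == 1024 + 256
// because block 1 starts at sector 2 and the 256-byte offset stays inside one sector.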
impl<S: SectorSize> Step for Address<S> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if end.sector >= start.sector {
Some(end.sector as usize - start.sector as usize)
} else {
None
}
}
/*
fn replace_one(&mut self) -> Self {
mem::replace(self, Address::new(1, 0))
}
fn replace_zero(&mut self) -> Self {
mem::replace(self, Address::new(0, 0))
}
fn add_one(&self) -> Self {
Address::new(self.sector + 1, 0)
}
fn sub_one(&self) -> Self {
Address::new(self.sector - 1, 0)
}
fn add_usize(&self, n: usize) -> Option<Self> {
self.sector
.checked_add(n as u32)
.map(|sector| Address::new(sector, 0))
}
*/
fn forward_checked(_start: Self, count: usize) -> Option<Self> {
todo!("forward_checked: count: {}", count);
}
fn backward_checked(_start: Self, count: usize) -> Option<Self> {
todo!("backward_checked count: {}", count);
}
}
impl<S: SectorSize> Display for Address<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.sector, self.offset)
}
}
impl<S: SectorSize> LowerHex for Address<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:x}:{:x}", self.sector, self.offset)
}
}
impl<S: SectorSize> From<u64> for Address<S> {
fn from(idx: u64) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK as u64;
Address::new(sector as u32, offset as i32)
}
}
impl<S: SectorSize> From<usize> for Address<S> {
fn from(idx: usize) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK as usize;
Address::new(sector as u32, offset as i32)
}
}
impl<S: SectorSize> Add for Address<S> {
type Output = Address<S>;
fn add(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector + rhs.sector,
(self.offset + rhs.offset) as i32,
)
}
}
impl<S: SectorSize> Sub for Address<S> {
type Output = Address<S>;
fn sub(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector - rhs.sector,
self.offset as i32 - rhs.offset as i32,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn conv() {
assert_eq!(Address::<Size512>::new(0, 1024).into_index(), 1024);
assert_eq!(Address::<Size512>::from(1024_u64).into_index(), 1024);
assert_eq!(
Address::<Size512>::with_block_size(1, 256, 10).into_index(),
1024 + 256
);
assert_eq!(
Address::<Size512>::with_block_size(2, 0, 10).into_index(),
2048
);
assert_eq!(
Address::<Size512>::with_block_size(0, 1792, 10).into_index(),
1792
);
}
#[test]
fn arithmetic() {
assert_eq!(
Address::<Size512>::new(0, 512),
Address::<Size512>::new(1, 0),
);
assert_eq!(
Address::<Size512>::new(2, -256),
Address::<Size512>::new(1, 256),
);
let a = Address::<Size2048>::new(0, 1024);
let b = Address::<Size2048>::new(0, 1024);
assert_eq!(a + b, Address::<Size2048>::new(1, 0));
assert_eq!((a + b).into_index(), 2048);
let a = Address::<Size512>::new(0, 2048);
let b = Address::<Size512>::new(0, 256);
assert_eq!(a - b, Address::<Size512>::new(3, 256));
assert_eq!((a - b).into_index(), 1792);
}
}

View File

@ -1,116 +0,0 @@
//! Block group descriptors.
use {
alloc::vec::Vec,
core::{fmt::Debug, mem},
error::Error,
sector::{Address, SectorSize},
volume::Volume,
};
/// The Block Group Descriptor Table contains a descriptor for each block group
/// within the file system. The number of block groups within the file system,
/// and correspondingly, the number of entries in the Block Group Descriptor
/// Table, is described above. Each descriptor contains information regarding
/// where important data structures for that group are located.
///
/// The `BlockGroupDescriptor` table is located in the block immediately
/// following the Superblock. So if the block size (determined from a field in
/// the superblock) is 1024 bytes per block, the Block Group Descriptor Table
/// will begin at block 2. For any other block size, it will begin at block 1.
/// Remember that blocks are numbered starting at 0, and that block numbers
/// don't usually correspond to physical block addresses.
#[repr(C, packed)]
#[derive(Clone, Debug, Copy)]
pub struct BlockGroupDescriptor {
/// Block address of block usage bitmap
pub block_usage_addr: u32,
/// Block address of inode usage bitmap
pub inode_usage_addr: u32,
/// Starting block address of inode table
pub inode_table_block: u32,
/// Number of unallocated blocks in group
pub free_blocks_count: u16,
/// Number of unallocated inodes in group
pub free_inodes_count: u16,
/// Number of directories in group
pub dirs_count: u16,
#[doc(hidden)]
_reserved: [u8; 14],
}
impl BlockGroupDescriptor {
/// Find a descriptor in a descriptor table
pub unsafe fn find_descriptor<S: SectorSize, V: Volume<u8, S>>(
haystack: &V,
offset: Address<S>,
) -> Result<(BlockGroupDescriptor, Address<S>), Error> {
let end =
offset + Address::from(mem::size_of::<BlockGroupDescriptor>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds {
sector: end.sector(),
offset: end.offset(),
size: end.sector_size(),
});
}
let descr = haystack
.slice_unchecked(offset..end)
.dynamic_cast::<BlockGroupDescriptor>();
Ok(descr)
}
/// Find a descriptor table in the volume
pub unsafe fn find_descriptor_table<S: SectorSize, V: Volume<u8, S>>(
haystack: &V,
offset: Address<S>,
count: usize,
) -> Result<(Vec<BlockGroupDescriptor>, Address<S>), Error> {
let end = offset
+ Address::from(count * mem::size_of::<BlockGroupDescriptor>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds {
sector: end.sector(),
offset: end.offset(),
size: end.sector_size(),
});
}
let mut vec = Vec::with_capacity(count);
for i in 0..count {
let offset = offset
+ Address::from(i * mem::size_of::<BlockGroupDescriptor>());
vec.push({
BlockGroupDescriptor::find_descriptor(haystack, offset)?.0
});
}
Ok((vec, offset))
}
}
#[cfg(test)]
mod tests {
use super::*;
use sector::{Address, Size512};
#[test]
fn find() {
let volume = vec![0_u8; 4096];
let table = unsafe {
BlockGroupDescriptor::find_descriptor_table(
&volume,
Address::<Size512>::new(4, 0),
8,
)
};
assert!(
table.is_ok(),
"Err({:?})",
table.err().unwrap_or_else(|| unreachable!()),
);
let table = table.unwrap_or_else(|_| unreachable!());
assert_eq!(table.0.len(), 8);
}
}

View File

@ -1,188 +0,0 @@
//! Inodes.
use {
core::{fmt::Debug, mem},
error::Error,
sector::{Address, SectorSize},
volume::Volume,
};
/// An inode is a structure on the disk that represents a file, directory,
/// symbolic link, etc. Inodes do not contain the data of the file / directory /
/// etc. that they represent. Instead, they link to the blocks that actually
/// contain the data. This lets the inodes themselves have a well-defined size
/// which lets them be placed in easily indexed arrays. Each block group has an
/// array of inodes it is responsible for, and conversely every inode within a
/// file system belongs to one of such tables (and one of such block groups).
#[repr(C, packed)]
#[derive(Clone, Debug, Copy)]
pub struct Inode {
/// Type and Permissions (see below)
pub type_perm: TypePerm,
/// User ID
pub uid: u16,
/// Lower 32 bits of size in bytes
pub size_low: u32,
/// Last Access Time (in POSIX time)
pub atime: u32,
/// Creation Time (in POSIX time)
pub ctime: u32,
/// Last Modification time (in POSIX time)
pub mtime: u32,
/// Deletion time (in POSIX time)
pub dtime: u32,
/// Group ID
pub gid: u16,
/// Count of hard links (directory entries) to this inode. When this
/// reaches 0, the data blocks are marked as unallocated.
pub hard_links: u16,
/// Count of disk sectors (not Ext2 blocks) in use by this inode, not
/// counting the actual inode structure nor directory entries linking
/// to the inode.
pub sectors_count: u32,
/// Flags
pub flags: Flags,
/// Operating System Specific value #1
pub _os_specific_1: [u8; 4],
/// Direct block pointers
pub direct_pointer: [u32; 12],
/// Singly Indirect Block Pointer (Points to a block that is a list of
/// block pointers to data)
pub indirect_pointer: u32,
/// Doubly Indirect Block Pointer (Points to a block that is a list of
/// block pointers to Singly Indirect Blocks)
pub doubly_indirect: u32,
/// Triply Indirect Block Pointer (Points to a block that is a list of
/// block pointers to Doubly Indirect Blocks)
pub triply_indirect: u32,
/// Generation number (Primarily used for NFS)
pub gen_number: u32,
/// In Ext2 version 0, this field is reserved. In version >= 1,
/// Extended attribute block (File ACL).
pub ext_attribute_block: u32,
/// In Ext2 version 0, this field is reserved. In version >= 1, Upper
/// 32 bits of file size (if feature bit set) if it's a file,
/// Directory ACL if it's a directory
pub size_high: u32,
/// Block address of fragment
pub frag_block_addr: u32,
/// Operating System Specific Value #2
pub _os_specific_2: [u8; 12],
}
impl Inode {
/// Discover the inode location on the disk.
pub unsafe fn find_inode<S: SectorSize, V: Volume<u8, S>>(
haystack: &V,
offset: Address<S>,
size: usize,
) -> Result<(Inode, Address<S>), Error> {
if size != mem::size_of::<Inode>() {
unimplemented!("inodes with a size != 128");
}
let end = offset + Address::from(size);
if haystack.size() < end {
return Err(Error::AddressOutOfBounds {
sector: end.sector(),
offset: end.offset(),
size: end.sector_size(),
});
}
let inode = haystack
.slice_unchecked(offset..end)
.dynamic_cast::<Inode>();
Ok(inode)
}
}
bitflags! {
/// Inode type and permission bits
// #[derive(Copy)]
pub struct TypePerm: u16 {
/// FIFO
const FIFO = 0x1000;
/// Character device
const CHAR_DEVICE = 0x2000;
/// Directory
const DIRECTORY = 0x4000;
/// Block device
const BLOCK_DEVICE = 0x6000;
/// Regular file
const FILE = 0x8000;
/// Symbolic link
const SYMLINK = 0xA000;
/// Unix socket
const SOCKET = 0xC000;
/// Other—execute permission
const O_EXEC = 0x001;
/// Other—write permission
const O_WRITE = 0x002;
/// Other—read permission
const O_READ = 0x004;
/// Group—execute permission
const G_EXEC = 0x008;
/// Group—write permission
const G_WRITE = 0x010;
/// Group—read permission
const G_READ = 0x020;
/// User—execute permission
const U_EXEC = 0x040;
/// User—write permission
const U_WRITE = 0x080;
/// User—read permission
const U_READ = 0x100;
/// Sticky Bit
const STICKY = 0x200;
/// Set group ID
const SET_GID = 0x400;
/// Set user ID
const SET_UID = 0x800;
}
}
bitflags! {
/// Flags
pub struct Flags: u32 {
/// Secure deletion (not used)
const SECURE_DEL = 0x00000001;
/// Keep a copy of data when deleted (not used)
const KEEP_COPY = 0x00000002;
/// File compression (not used)
const COMPRESSION = 0x00000004;
/// Synchronous updates—new data is written immediately to disk
const SYNC_UPDATE = 0x00000008;
/// Immutable file (content cannot be changed)
const IMMUTABLE = 0x00000010;
/// Append only
const APPEND_ONLY = 0x00000020;
/// File is not included in 'dump' command
const NODUMP = 0x00000040;
/// Last accessed time should not be updated
const DONT_ATIME = 0x00000080;
/// Hash indexed directory
const HASH_DIR = 0x00010000;
/// AFS directory
const AFS_DIR = 0x00020000;
/// Journal file data
const JOURNAL_DATA = 0x00040000;
}
}
/// Unknown entry type
pub const UNKNOWN: u8 = 0;
/// FIFO entry type
pub const FIFO: u8 = 1;
/// Character device entry type
pub const CHAR_DEVICE: u8 = 2;
/// Directory entry type
pub const DIRECTORY: u8 = 3;
/// Block device entry type
pub const BLOCK_DEVICE: u8 = 4;
/// Regular file entry type
pub const FILE: u8 = 5;
/// Symbolic link entry type
pub const SYMLINK: u8 = 6;
/// Unix socket entry type
pub const SOCKET: u8 = 7;

View File

@ -1,5 +0,0 @@
//! On-disk ext2 structures.
pub mod block_group;
pub mod inode;
pub mod superblock;

View File

@ -1,284 +0,0 @@
//! Superblock information
use {
core::{fmt::Debug, mem},
error::Error,
sector::{Address, SectorSize},
volume::Volume,
};
/// Ext2 signature (0xef53), used to help confirm the presence of Ext2 on a
/// volume
pub const EXT2_MAGIC: u16 = 0xef53;
/// Filesystem is free of errors
pub const FS_CLEAN: u16 = 1;
/// Filesystem has errors
pub const FS_ERR: u16 = 2;
/// Ignore errors
pub const ERR_IGNORE: u16 = 1;
/// Remount as read-only on error
pub const ERR_RONLY: u16 = 2;
/// Panic on error
pub const ERR_PANIC: u16 = 3;
/// Creator OS is Linux
pub const OS_LINUX: u32 = 0;
/// Creator OS is Hurd
pub const OS_HURD: u32 = 1;
/// Creator OS is Masix
pub const OS_MASIX: u32 = 2;
/// Creator OS is FreeBSD
pub const OS_FREEBSD: u32 = 3;
/// Creator OS is a BSD4.4-Lite derivative
pub const OS_LITE: u32 = 4;
/// The Superblock contains all information about the layout of the file system
/// and possibly contains other important information like what optional
/// features were used to create the file system.
///
/// The Superblock is always located at byte 1024 from the beginning of the
/// volume and is exactly 1024 bytes in length. For example, if the disk uses
/// 512 byte sectors, the Superblock will begin at LBA 2 and will occupy all of
/// sector 2 and 3.
#[repr(C, packed)]
#[derive(Clone, Debug, Copy)]
pub struct Superblock {
// taken from https://wiki.osdev.org/Ext2
/// Total number of inodes in file system
pub inodes_count: u32,
/// Total number of blocks in file system
pub blocks_count: u32,
/// Number of blocks reserved for superuser (see offset 80)
pub r_blocks_count: u32,
/// Total number of unallocated blocks
pub free_blocks_count: u32,
/// Total number of unallocated inodes
pub free_inodes_count: u32,
/// Block number of the block containing the superblock
pub first_data_block: u32,
/// log2 (block size) - 10. (In other words, the number to shift 1,024
/// to the left by to obtain the block size)
pub log_block_size: u32,
/// log2 (fragment size) - 10. (In other words, the number to shift
/// 1,024 to the left by to obtain the fragment size)
pub log_frag_size: i32,
/// Number of blocks in each block group
pub blocks_per_group: u32,
/// Number of fragments in each block group
pub frags_per_group: u32,
/// Number of inodes in each block group
pub inodes_per_group: u32,
/// Last mount time (in POSIX time)
pub mtime: u32,
/// Last written time (in POSIX time)
pub wtime: u32,
/// Number of times the volume has been mounted since its last
/// consistency check (fsck)
pub mnt_count: u16,
/// Number of mounts allowed before a consistency check (fsck) must be
/// done
pub max_mnt_count: i16,
/// Ext2 signature (0xef53), used to help confirm the presence of Ext2
/// on a volume
pub magic: u16,
/// File system state (see `FS_CLEAN` and `FS_ERR`)
pub state: u16,
/// What to do when an error is detected (see `ERR_IGNORE`, `ERR_RONLY` and
/// `ERR_PANIC`)
pub errors: u16,
/// Minor portion of version (combine with Major portion below to
/// construct full version field)
pub rev_minor: u16,
/// POSIX time of last consistency check (fsck)
pub lastcheck: u32,
/// Interval (in POSIX time) between forced consistency checks (fsck)
pub checkinterval: u32,
/// Operating system ID from which the filesystem on this volume was
/// created
pub creator_os: u32,
/// Major portion of version (combine with Minor portion above to
/// construct full version field)
pub rev_major: u32,
/// User ID that can use reserved blocks
pub block_uid: u16,
/// Group ID that can use reserved blocks
pub block_gid: u16,
/// First non-reserved inode in file system.
pub first_inode: u32,
/// Size of each inode structure in bytes.
pub inode_size: u16,
/// Block group that this superblock is part of (if backup copy)
pub block_group: u16,
/// Optional features present (features that are not required to read
/// or write, but usually result in a performance increase)
pub features_opt: FeaturesOptional,
/// Required features present (features that are required to be
/// supported to read or write)
pub features_req: FeaturesRequired,
/// Features that, if not supported, require the volume to be mounted
/// read-only
pub features_ronly: FeaturesROnly,
/// File system ID (what is output by blkid)
pub fs_id: [u8; 16],
/// Volume name (C-style string: characters terminated by a 0 byte)
pub volume_name: [u8; 16],
/// Path volume was last mounted to (C-style string: characters
/// terminated by a 0 byte)
pub last_mnt_path: [u8; 64],
/// Compression algorithms used (see Required features above)
pub compression: u32,
/// Number of blocks to preallocate for files
pub prealloc_blocks_files: u8,
/// Number of blocks to preallocate for directories
pub prealloc_blocks_dirs: u8,
#[doc(hidden)]
_unused: [u8; 2],
/// Journal ID (same style as the File system ID above)
pub journal_id: [u8; 16],
/// Journal inode
pub journal_inode: u32,
/// Journal device
pub journal_dev: u32,
/// Head of orphan inode list
pub journal_orphan_head: u32,
#[doc(hidden)]
_reserved: [u8; 788],
}
impl Superblock {
/// Discover the location of the superblock in the given block device.
pub unsafe fn find<S: SectorSize, V: Volume<u8, S>>(
haystack: &V,
) -> Result<(Superblock, Address<S>), Error> {
let offset = Address::from(1024_usize);
let end = offset + Address::from(mem::size_of::<Superblock>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds {
sector: end.sector(),
offset: end.offset(),
size: end.sector_size(),
});
}
let superblock = {
haystack
.slice_unchecked(offset..end)
.dynamic_cast::<Superblock>()
};
if superblock.0.magic != EXT2_MAGIC {
Err(Error::BadMagic {
magic: superblock.0.magic,
})
} else {
Ok(superblock)
}
}
#[inline]
/// Return the block size
pub fn block_size(&self) -> usize {
1024 << self.log_block_size
}
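// e.g. log_block_size == 0 gives 1 KiB blocks, == 2 gives 4 KiB blocks.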
#[inline]
/// Return the fragment size
pub fn frag_size(&self) -> usize {
1024 << self.log_frag_size
}
/// Return the number of block groups
pub fn block_group_count(&self) -> Result<u32, (u32, u32)> {
let blocks_mod = self.blocks_count % self.blocks_per_group;
let inodes_mod = self.inodes_count % self.inodes_per_group;
let blocks_inc = if blocks_mod == 0 { 0 } else { 1 };
let inodes_inc = if inodes_mod == 0 { 0 } else { 1 };
let by_blocks = self.blocks_count / self.blocks_per_group + blocks_inc;
let by_inodes = self.inodes_count / self.inodes_per_group + inodes_inc;
if by_blocks == by_inodes {
Ok(by_blocks)
} else {
Err((by_blocks, by_inodes))
}
}
}
bitflags! {
/// Optional features
pub struct FeaturesOptional: u32 {
/// Preallocate some number of (contiguous?) blocks (see
/// `Superblock::prealloc_blocks_dirs`) to a directory when creating a new one
const PREALLOCATE = 0x0001;
/// AFS server inodes exist
const AFS = 0x0002;
/// File system has a journal (Ext3)
const JOURNAL = 0x0004;
/// Inodes have extended attributes
const EXTENDED_INODE = 0x0008;
/// File system can resize itself for larger partitions
const SELF_RESIZE = 0x0010;
/// Directories use hash index
const HASH_INDEX = 0x0020;
}
}
bitflags! {
/// Required features. If any of these are unsupported, the volume cannot be mounted
pub struct FeaturesRequired: u32 {
/// Compression is used
const REQ_COMPRESSION = 0x0001;
/// Directory entries contain a type field
const REQ_DIRECTORY_TYPE = 0x0002;
/// File system needs to replay its journal
const REQ_REPLAY_JOURNAL = 0x0004;
/// File system uses a journal device
const REQ_JOURNAL_DEVICE = 0x0008;
}
}
bitflags! {
/// Read-only features. If any of these are unsupported, the volume must be remounted as read-only
pub struct FeaturesROnly: u32 {
/// Sparse superblocks and group descriptor tables
const RONLY_SPARSE = 0x0001;
/// File system uses a 64-bit file size
const RONLY_FILE_SIZE_64 = 0x0002;
/// Directory contents are stored in the form of a Binary Tree
const RONLY_BTREE_DIRECTORY = 0x0004;
}
}
#[cfg(test)]
mod tests {
use super::*;
use sector::Size512;
#[test]
fn find() {
let mut volume = vec![0_u8; 4096];
// magic
volume[1024 + 56] = EXT2_MAGIC as u8;
volume[1024 + 57] = (EXT2_MAGIC >> 8) as u8;
let superblock = unsafe { Superblock::find::<Size512, _>(&volume) };
assert!(
superblock.is_ok(),
"Err({:?})",
superblock.err().unwrap_or_else(|| unreachable!()),
);
}
#[test]
fn superblock() {
use std::cell::RefCell;
use std::fs::File;
let file = RefCell::new(File::open("ext2.img").unwrap());
let superblock = unsafe { Superblock::find::<Size512, _>(&file) };
assert!(
superblock.is_ok(),
"Err({:?})",
superblock.err().unwrap_or_else(|| unreachable!()),
);
}
}

View File

@ -1,371 +0,0 @@
#![allow(missing_docs)]
use {
alloc::{
borrow::{Cow, ToOwned},
boxed::Box,
vec::Vec,
},
core::{
mem,
ops::{Deref, DerefMut, Range},
slice,
},
error::Error,
sector::{Address, SectorSize},
};
pub mod size;
use self::size::Size;
pub trait Volume<T: Clone, S: SectorSize> {
type Error: Into<Error>;
fn size(&self) -> Size<S>;
fn commit(
&mut self,
slice: Option<VolumeCommit<T, S>>,
) -> Result<(), Self::Error>;
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Address<S>>,
) -> VolumeSlice<'a, T, S>;
fn slice<'a>(
&'a self,
range: Range<Address<S>>,
) -> Result<VolumeSlice<'a, T, S>, Self::Error>;
}
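// Any `&mut [u8]`, `Vec<u8>`, `Box<[u8]>` or (with std) `RefCell<File>` implements
// this trait via the impls further down, so a disk image can be sliced by sector
// address, e.g. (illustrative):
//     let first_kib = volume.slice(Address::<Size512>::new(0, 0)..Address::new(2, 0))?;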
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct VolumeSlice<'a, T: 'a + Clone, S: SectorSize> {
inner: Cow<'a, [T]>,
index: Address<S>,
}
impl<T: Clone, S: SectorSize> VolumeSlice<'static, T, S> {
pub fn with_static(inner: &'static [T]) -> VolumeSlice<'static, T, S> {
VolumeSlice {
inner: Cow::Borrowed(inner),
index: Address::new(0, 0),
}
}
pub fn new_owned(
inner: <[T] as ToOwned>::Owned,
index: Address<S>,
) -> VolumeSlice<'static, T, S> {
VolumeSlice {
inner: Cow::Owned(inner),
index,
}
}
}
impl<'a, T: Clone, S: SectorSize> VolumeSlice<'a, T, S> {
pub fn new(inner: &'a [T], index: Address<S>) -> VolumeSlice<'a, T, S> {
VolumeSlice {
inner: Cow::Borrowed(inner),
index,
}
}
pub fn is_mutated(&self) -> bool {
match self.inner {
Cow::Borrowed(_) => false,
Cow::Owned(_) => true,
}
}
pub fn address(&self) -> Address<S> {
self.index
}
}
impl<'a, S: SectorSize> VolumeSlice<'a, u8, S> {
pub unsafe fn dynamic_cast<T: Copy>(&self) -> (T, Address<S>) {
assert!(self.inner.len() >= mem::size_of::<T>());
let index = self.index;
let cast = self.inner.as_ptr().cast::<T>().read_unaligned();
// mem::transmute_copy(self.inner.as_ptr().as_ref().unwrap());
(cast, index)
}
pub fn from_cast<T: Copy>(
cast: &'a T,
index: Address<S>,
) -> VolumeSlice<'a, u8, S> {
let len = mem::size_of::<T>();
let ptr = cast as *const T as *const u8;
let slice = unsafe { slice::from_raw_parts(ptr, len) };
VolumeSlice::new(slice, index)
}
}
impl<'a, T: Clone, S: SectorSize> VolumeSlice<'a, T, S> {
pub fn commit(self) -> Option<VolumeCommit<T, S>> {
if self.is_mutated() {
Some(VolumeCommit::new(self.inner.into_owned(), self.index))
} else {
None
}
}
}
impl<'a, T: Clone, S: SectorSize> AsRef<[T]> for VolumeSlice<'a, T, S> {
fn as_ref(&self) -> &[T] {
self.inner.as_ref()
}
}
impl<'a, T: Clone, S: SectorSize> AsMut<[T]> for VolumeSlice<'a, T, S> {
fn as_mut(&mut self) -> &mut [T] {
self.inner.to_mut().as_mut()
}
}
impl<'a, T: Clone, S: SectorSize> Deref for VolumeSlice<'a, T, S> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<'a, T: Clone, S: SectorSize> DerefMut for VolumeSlice<'a, T, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut()
}
}
pub struct VolumeCommit<T, S: SectorSize> {
inner: Vec<T>,
index: Address<S>,
}
impl<T: Clone, S: SectorSize> VolumeCommit<T, S> {
pub fn with_vec(inner: Vec<T>) -> VolumeCommit<T, S> {
VolumeCommit {
inner,
index: Address::new(0, 0),
}
}
}
impl<T: Clone, S: SectorSize> VolumeCommit<T, S> {
pub fn new(inner: Vec<T>, index: Address<S>) -> VolumeCommit<T, S> {
VolumeCommit { inner, index }
}
pub fn into_inner(self) -> Vec<T> {
self.inner
}
pub fn address(&self) -> Address<S> {
self.index
}
}
impl<T: Clone, S: SectorSize> AsRef<[T]> for VolumeCommit<T, S> {
fn as_ref(&self) -> &[T] {
self.inner.as_ref()
}
}
impl<T: Clone, S: SectorSize> AsMut<[T]> for VolumeCommit<T, S> {
fn as_mut(&mut self) -> &mut [T] {
self.inner.as_mut()
}
}
impl<T: Clone, S: SectorSize> Deref for VolumeCommit<T, S> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<T: Clone, S: SectorSize> DerefMut for VolumeCommit<T, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut()
}
}
macro_rules! impl_slice {
(@inner $volume:ty $( , $lt:lifetime )* ) => {
impl<$( $lt, )* T: Clone, S: SectorSize> Volume<T, S>
for $volume
{
type Error = Error;
fn size(&self) -> Size<S> {
Size::Bounded(
Address::from(<Self as AsRef<[T]>>::as_ref(self).len())
)
}
fn commit(
&mut self,
slice: Option<VolumeCommit<T, S>>,
) -> Result<(), Self::Error> {
slice.map(|slice| {
let index = slice.address().into_index() as usize;
let end = index + slice.as_ref().len();
// XXX: it would be much better to drop the contents of dst
// and move the contents of slice instead of cloning
let dst =
&mut <Self as AsMut<[T]>>::as_mut(self)[index..end];
dst.clone_from_slice(slice.as_ref());
});
Ok(())
}
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Address<S>>,
) -> VolumeSlice<'a, T, S> {
let index = range.start;
let range = range.start.into_index() as usize
..range.end.into_index() as usize;
VolumeSlice::new(
<Self as AsRef<[T]>>::as_ref(self).get_unchecked(range),
index,
)
}
fn slice<'a>(
&'a self,
range: Range<Address<S>>,
) -> Result<VolumeSlice<'a, T, S>, Self::Error> {
if self.size() >= range.end {
unsafe { Ok(self.slice_unchecked(range)) }
} else {
Err(Error::AddressOutOfBounds {
sector: range.end.sector(),
offset: range.end.offset(),
size: range.end.sector_size()
})
}
}
}
};
($volume:ty) => {
impl_slice!(@inner $volume);
};
($volume:ty $( , $lt:lifetime )* ) => {
impl_slice!(@inner $volume $( , $lt )* );
};
}
impl_slice!(&'b mut [T], 'b);
impl_slice!(Vec<T>);
impl_slice!(Box<[T]>);
#[cfg(any(test, not(feature = "no_std")))]
mod file {
use std::cell::RefCell;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::ops::Range;
use sector::{Address, SectorSize};
use super::size::Size;
use super::{Volume, VolumeCommit, VolumeSlice};
impl<S: SectorSize> Volume<u8, S> for RefCell<File> {
type Error = io::Error;
fn size(&self) -> Size<S> {
Size::Bounded(
self.borrow()
.metadata()
.map(|data| Address::from(data.len()))
.unwrap_or(Address::new(0, 0)),
)
}
fn commit(
&mut self,
slice: Option<VolumeCommit<u8, S>>,
) -> Result<(), Self::Error> {
slice
.map(|slice| {
let index = slice.address();
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.write(slice.as_ref()))
.map(|_| ())
})
.unwrap_or(Ok(()))
}
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Address<S>>,
) -> VolumeSlice<'a, u8, S> {
let index = range.start;
let len = range.end - range.start;
let mut vec = Vec::with_capacity(len.into_index() as usize);
vec.set_len(len.into_index() as usize);
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.unwrap_or_else(|err| {
panic!("could't read from File Volume: {:?}", err)
});
VolumeSlice::new_owned(vec, index)
}
fn slice<'a>(
&'a self,
range: Range<Address<S>>,
) -> Result<VolumeSlice<'a, u8, S>, Self::Error> {
let index = range.start;
let mut vec = Vec::with_capacity(
(range.end - range.start).into_index() as usize,
);
unsafe {
vec.set_len((range.end - range.start).into_index() as usize);
}
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.map(move |_| VolumeSlice::new_owned(vec, index))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use sector::{Address, Size512};
#[test]
fn volume() {
let mut volume = vec![0; 1024];
let commit = {
let mut slice = volume
.slice(
Address::<Size512>::from(256_u64)
..Address::<Size512>::from(512_u64),
)
.unwrap();
slice.iter_mut().for_each(|x| *x = 1);
slice.commit()
};
assert!(volume.commit(commit).is_ok());
for (i, &x) in volume.iter().enumerate() {
if i < 256 || i >= 512 {
assert_eq!(x, 0);
} else {
assert_eq!(x, 1);
}
}
}
}

View File

@ -1,115 +0,0 @@
//! Volume sizes.
use {
core::{
cmp::Ordering,
fmt::{self, Display},
},
sector::{Address, SectorSize},
};
#[derive(Clone, Copy, Debug, Hash)]
/// A size
pub enum Size<S: SectorSize> {
/// An unbounded size
Unbounded,
/// A bounded size
Bounded(Address<S>),
}
impl<S: SectorSize> Size<S> {
/// Try to get the length, returning `None` if unbounded
pub fn try_len(&self) -> Option<Address<S>> {
match *self {
Size::Unbounded => None,
Size::Bounded(n) => Some(n),
}
}
/// Get the length, panicking if the size is unbounded
///
/// # Safety
///
/// This function is unsafe because it does not check that the size is
/// bounded.
pub unsafe fn len(&self) -> Address<S> {
match *self {
Size::Unbounded => panic!(
"attempt to convert `Size::Unbounded` to a concrete length"
),
Size::Bounded(n) => n,
}
}
}
impl<S: SectorSize> Size<S> {
/// Check if the size is bounded
pub fn is_bounded(&self) -> bool {
match *self {
Size::Unbounded => false,
Size::Bounded(_) => true,
}
}
}
impl<S: SectorSize> Display for Size<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Size::Unbounded => write!(f, "Unbounded"),
Size::Bounded(n) => write!(f, "Bounded({})", n),
}
}
}
impl<S: SectorSize> PartialEq for Size<S> {
fn eq(&self, rhs: &Self) -> bool {
match (self, rhs) {
(&Size::Unbounded, _) => false,
(_, &Size::Unbounded) => false,
(&Size::Bounded(ref a), &Size::Bounded(ref b)) => a.eq(b),
}
}
fn ne(&self, rhs: &Self) -> bool {
match (self, rhs) {
(&Size::Unbounded, _) => false,
(_, &Size::Unbounded) => false,
(&Size::Bounded(ref a), &Size::Bounded(ref b)) => a.ne(b),
}
}
}
impl<S: SectorSize> PartialEq<Address<S>> for Size<S> {
fn eq(&self, rhs: &Address<S>) -> bool {
match *self {
Size::Unbounded => false,
Size::Bounded(ref n) => n.eq(rhs),
}
}
fn ne(&self, rhs: &Address<S>) -> bool {
match *self {
Size::Unbounded => false,
Size::Bounded(ref n) => n.ne(rhs),
}
}
}
impl<S: SectorSize> PartialOrd for Size<S> {
fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
match (self, rhs) {
(&Size::Unbounded, &Size::Unbounded) => None,
(&Size::Unbounded, _) => Some(Ordering::Greater),
(_, &Size::Unbounded) => Some(Ordering::Less),
(&Size::Bounded(ref a), &Size::Bounded(ref b)) => a.partial_cmp(b),
}
}
}
impl<S: SectorSize> PartialOrd<Address<S>> for Size<S> {
fn partial_cmp(&self, rhs: &Address<S>) -> Option<Ordering> {
match *self {
Size::Unbounded => Some(Ordering::Greater),
Size::Bounded(ref n) => n.partial_cmp(rhs),
}
}
}

View File

@ -1,9 +0,0 @@
[package]
name = "facepalm"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
log = "0.4.0"

View File

@ -1 +0,0 @@
Facepalm is the general purpose debugger bundled with ableOS

View File

@ -1,19 +0,0 @@
#![no_std]
use log::*;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
#[cfg(debug_assertions)]
pub const RELEASE_TYPE: &str = "debug";
#[cfg(not(debug_assertions))]
pub const RELEASE_TYPE: &str = "release";
pub fn start_facepalm() {
info!("facepalm 🤦 launched!");
info!("facepalm 🤦 version: {}", VERSION);
info!("facepalm 🤦 {} mode", RELEASE_TYPE);
// Drop into a debug shell
}

View File

@ -1,2 +1,9 @@
[unstable]
build-std = ["core", "compiler_builtins", "alloc"]
build-std-features = ["compiler-builtins-mem"]
[build]
target = "./json_targets/x86_64-ableos.json"
target = "./targets/x86_64-ableos.json"
[target.'cfg(target_arch = "x86_64")']
rustflags = ["-C", "target-feature=+rdrand"]

View File

@ -1,18 +1,21 @@
[package]
edition = "2021"
name = "kernel"
version = "0.1.2"
version = "0.2.0"
[dependencies]
linked_list_allocator = "0.9"
log = "0.4.14"
slab = { version = "0.4", default-features = false }
spin = "0.9"
versioning = { git = "https://git.ablecorp.us/able/aos_userland" }
log = "0.4"
[dependencies.crossbeam-queue]
version = "0.3"
default-features = false
features = ["alloc"]
[dependencies.versioning]
git = "https://git.ablecorp.us/able/aos_userland"
[target.'cfg(target_arch = "x86_64")'.dependencies]
limine = "0.1"
uart_16550 = "0.2"
x86_64 = "0.14"
x2apic = "0.4"

53
kernel/lds/x86_64.ld Normal file
View File

@ -0,0 +1,53 @@
/* Tell the linker that we want an x86_64 ELF64 output file */
OUTPUT_FORMAT(elf64-x86-64)
OUTPUT_ARCH(i386:x86-64)
ENTRY(_kernel_start)
/* Define the program headers we want so the bootloader gives us the right */
/* MMU permissions */
PHDRS
{
null PT_NULL FLAGS(0) ; /* Null segment */
text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */
data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
}
SECTIONS
{
/* We want to be placed in the topmost 2GiB of the address space, for optimisations */
/* and because that is what the Limine spec mandates. */
/* Any address in this region will do, but often 0xffffffff80000000 is chosen as */
/* that is the beginning of the region. */
. = 0xffffffff80000000;
.text : {
*(.text .text.*)
} :text
/* Align .rodata to page boundary */
. = ALIGN(4K);
.rodata : {
*(.rodata .rodata.*)
} :rodata
/* Align .data to page boundary */
. = ALIGN(4K);
.data : {
*(.data .data.*)
} :data
.bss : {
*(COMMON)
*(.bss .bss.*)
/* Align initial kernel heap to page boundary */
. = ALIGN(4K);
PROVIDE(_initial_kernel_heap_start = .);
PROVIDE(_initial_kernel_heap_size = 1024 * 1024);
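/* These symbols are read by the kernel's heap allocator (`allocator::init`)
   to seed the initial 1 MiB heap inside .bss. */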
. += _initial_kernel_heap_size;
} :data
}

View File

@ -1,122 +0,0 @@
//! The allocator to be implemented by ableOS
//!
//! NOTE: All memory regions are taken from https://wiki.osdev.org/Memory_Map_(x86)
#![allow(missing_docs)]
use alloc::alloc::{GlobalAlloc, Layout};
use core::{fmt::Display, ptr::null_mut};
use log::{debug, info};
// const HEAP_START: usize = 600_000_000;
const HEAP_START: usize = 0x00100000;
const BLOCK_SIZE: usize = 1024;
const BLOCK_COUNT: usize = 512;
#[derive(Debug, Clone, Copy)]
pub struct MemoryRegion {
start: usize,
end: usize,
}
impl Display for MemoryRegion {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
writeln!(
f,
"MemoryRegion {{ start: {}, end: {}, size: {} bytes}}",
self.start,
self.end,
self.end - self.start
)
}
}
impl MemoryRegion {
pub fn new(start: usize, end: usize) -> MemoryRegion {
MemoryRegion { start, end }
}
pub fn test_region(&self) -> bool {
unsafe {
let mutptr = self.start as *mut u8;
core::ptr::write(mutptr, 0xFF);
// trace!("{}", core::ptr::read(mutptr));
}
true
}
}
#[derive(Debug, Clone, Copy)]
pub struct AAlloc {
current_region: usize,
memory_regions: [Option<MemoryRegion>; 512],
}
impl AAlloc {
fn test_regions(&self) {
for x in 0..self.current_region {
if let Some(region) = self.memory_regions[x] {
debug!("Region {}: {:?}", x, region);
}
}
}
pub fn add_region(&mut self, mem: MemoryRegion) {
self.memory_regions[self.current_region] = Some(mem);
self.current_region += 1;
}
pub fn intialize() {
info!("Heap Start: {}", HEAP_START);
info!("Heap Size: {}", BLOCK_SIZE * BLOCK_COUNT);
info!("Heap End: {}", HEAP_START + BLOCK_SIZE * BLOCK_COUNT);
let mut aalloc = AAlloc {
current_region: 0,
memory_regions: [None; 512],
};
// BS MEMORY REGION
aalloc.add_region(MemoryRegion::new(HEAP_START, HEAP_START + 10));
aalloc.add_region(MemoryRegion::new(0x00007E00, 0x0007FFFF));
aalloc.add_region(MemoryRegion::new(0x00100000, 0x00EFFFFF));
// ISA Memory Hole
aalloc.add_region(MemoryRegion::new(0x00F00000, 0x00FFFFFF));
aalloc.add_region(MemoryRegion::new(0x0000000100000000, 0x0000000100000000));
aalloc.memory_regions[0].unwrap().test_region();
debug!("{}", aalloc);
}
}
impl Display for AAlloc {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "AAlloc {{\n\tcurrent_region: {},\n", self.current_region)?;
for x in 0..self.current_region {
if let Some(region) = self.memory_regions[x] {
write!(f, "\tRegion {}: {}", x, region)?;
}
}
write!(f, "}}")?;
Ok(())
}
}
unsafe impl GlobalAlloc for AAlloc {
unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
info!("Allocating memory");
info!("{}", _layout.size());
info!("{}", _layout.align());
null_mut()
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
panic!("dealloc should be never called")
}
}

View File

@ -1,12 +0,0 @@
#![allow(missing_docs)]
pub mod aalloc;
pub use aalloc::*;
/*
#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
panic!("allocation error: {:?}", layout)
}
*/

View File

@ -1,24 +1,344 @@
//! Memory allocator
/*
* This file incorporates work covered by the following license notice:
*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use linked_list_allocator::LockedHeap;
use core::{
alloc::{GlobalAlloc, Layout},
mem,
ptr::{self, NonNull},
};
///
pub const HEAP_START: usize = 0x_4444_4444_0000;
use spin::Mutex;
///
pub const HEAP_MULTIPLIER: usize = 100000;
extern "C" {
fn _initial_kernel_heap_start();
fn _initial_kernel_heap_size();
}
///
pub const HEAP_BASE: usize = 100;
const INITIAL_KERNEL_HEAP_START: *mut u8 = _initial_kernel_heap_start as _;
const INITIAL_KERNEL_HEAP_SIZE: *const () = _initial_kernel_heap_size as _;
///
pub const HEAP_SIZE: usize = HEAP_BASE * HEAP_MULTIPLIER;
struct Allocator(Mutex<Option<Heap>>);
unsafe impl GlobalAlloc for Allocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut lock = self.0.lock();
let allocator = lock.as_mut().expect("heap allocator should be initialized");
match allocator.allocate(layout.size(), layout.align()) {
Some(ptr) => ptr.as_ptr(),
None => ptr::null_mut(),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
let mut lock = self.0.lock();
let allocator = lock.as_mut().expect("heap allocator should be initialized");
allocator.deallocate(ptr);
}
}
/// Global allocator
#[global_allocator]
pub static ALLOCATOR: LockedHeap = LockedHeap::empty();
static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
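/// Initialise the global kernel heap over the region reserved by the linker script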
pub fn init() {
log::info!("Initialising kernel heap allocator");
let memory_size = unsafe { mem::transmute(INITIAL_KERNEL_HEAP_SIZE) };
*ALLOCATOR.0.lock() = Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, memory_size) });
}
// FIXME: these are arch-specific
const CHUNK_SIZE: usize = 16;
const MINIMUM_ALIGNMENT: usize = 8;
struct Header {
size_in_chunks: usize,
}
// compile-time assertions to make sure that Header's size is a power of two
// and that CHUNK_SIZE is at least as large as Header
const _: () = {
assert!(mem::size_of::<Header>().is_power_of_two());
assert!(CHUNK_SIZE >= mem::size_of::<Header>());
};
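// Worked example, assuming usize is 8 bytes: an allocation request of 24 bytes
// needs 24 + 8 (header) = 32 bytes, i.e. (32 + 15) / 16 = 2 chunks.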
/// A first-fit heap allocator, with CHUNK_SIZE chunks and a set size
/// In the future these will become subheaps and the actual heap allocator will create more
/// subheaps as needed
struct Heap {
total_chunks: usize,
allocated_chunks: usize,
chunks: *mut u8,
bitmap: *mut u8,
}
impl Heap {
/// # Safety
/// This function assumes that the pointer given points at a valid memory address
unsafe fn new(memory: *mut u8, memory_size: usize) -> Self {
let total_chunks = Self::calculate_chunks(memory_size);
assert!(total_chunks * CHUNK_SIZE + (total_chunks + 7) / 8 <= memory_size);
Self {
total_chunks,
allocated_chunks: 0,
chunks: memory,
bitmap: unsafe { memory.add(total_chunks * CHUNK_SIZE) },
}
}
fn allocate(&mut self, size: usize, alignment: usize) -> Option<NonNull<u8>> {
assert!(alignment.is_power_of_two());
let alignment = if alignment < MINIMUM_ALIGNMENT {
MINIMUM_ALIGNMENT
} else {
alignment
};
// We need space for the header as well
let size = size + mem::size_of::<Header>();
let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
log::info!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
if chunks_needed + chunk_alignment > self.free_chunks() {
return None;
}
// FIXME: should utilize the alignment directly instead of trying to allocate `size + alignment`
let first_chunk = self.find_first_fit(chunks_needed + chunk_alignment)?;
let chunks_addr = self.chunks as usize;
let addr_unaligned = chunks_addr + first_chunk * CHUNK_SIZE;
// Align the starting address and verify that we haven't gone outside the calculated free area
let addr =
addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
log::info!(
"Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
addr_unaligned - chunks_addr
);
log::info!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
assert!(first_chunk <= aligned_first_chunk);
assert!(
aligned_first_chunk + chunks_needed <= first_chunk + chunks_needed + chunk_alignment
);
let header: *mut Header = unsafe { mem::transmute(addr) };
unsafe {
(*header).size_in_chunks = chunks_needed;
}
self.bitmap_set_range(aligned_first_chunk, chunks_needed, true);
self.allocated_chunks += chunks_needed;
let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
log::info!("{ptr:p}");
// FIXME: zero or scrub memory?
assert!(ptr.is_aligned_to(alignment));
NonNull::new(ptr)
}
fn deallocate(&mut self, ptr: *mut u8) {
let header = Self::allocation_header(ptr);
let start = (header as usize - self.chunks as usize) / CHUNK_SIZE;
assert!(self.bitmap_get(start));
let size = unsafe { (*header).size_in_chunks };
self.bitmap_set_range(start, size, false);
self.allocated_chunks -= size;
// FIXME: zero or scrub memory?
}
/// Finds first hole that can fit an allocation of `size` chunks, returns the start of the
/// found free chunks
fn find_first_fit(&self, size: usize) -> Option<usize> {
let mut start_of_free_chunks = 0;
let mut free_chunks = 0;
for i in 0..self.total_chunks / usize::BITS as usize {
if free_chunks >= size {
return Some(start_of_free_chunks);
}
let mut bucket = unsafe { *self.bitmap.cast::<usize>().add(i) };
if bucket == usize::MAX {
// Skip over completely full bucket
free_chunks = 0;
continue;
}
if bucket == 0 {
// Skip over completely empty bucket
if free_chunks == 0 {
start_of_free_chunks = i * usize::BITS as usize;
}
free_chunks += usize::BITS as usize;
continue;
}
let mut viewed_bits = 0;
while viewed_bits < usize::BITS as usize {
if bucket == 0 {
if free_chunks == 0 {
start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
}
free_chunks += usize::BITS as usize - viewed_bits;
viewed_bits = usize::BITS as usize;
} else {
let trailing_zeros = bucket.trailing_zeros() as usize;
bucket >>= trailing_zeros;
if free_chunks == 0 {
start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
}
free_chunks += trailing_zeros;
viewed_bits += trailing_zeros;
if free_chunks >= size {
return Some(start_of_free_chunks);
}
let trailing_ones = bucket.trailing_ones() as usize;
bucket >>= trailing_ones;
viewed_bits += trailing_ones;
free_chunks = 0;
}
}
}
if free_chunks >= size {
return Some(start_of_free_chunks);
}
let first_trailing_bit = (self.total_chunks / usize::BITS as usize) * usize::BITS as usize;
let trailing_bits = self.total_chunks % usize::BITS as usize;
for i in 0..trailing_bits {
if self.bitmap_get(first_trailing_bit + i) {
free_chunks = 0;
continue;
}
if free_chunks == 0 {
start_of_free_chunks = first_trailing_bit + i;
}
free_chunks += 1;
if free_chunks >= size {
return Some(start_of_free_chunks);
}
}
None
}
fn bitmap_set_range(&mut self, start: usize, length: usize, value: bool) {
assert!(start + length <= self.total_chunks);
if length == 0 {
return;
}
const BITMASK_FIRST_BYTE: [u8; 8] = [0xFF, 0xFE, 0xFC, 0xF8, 0xF0, 0xE0, 0xC0, 0x80];
const BITMASK_LAST_BYTE: [u8; 8] = [0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F];
let first = unsafe { self.bitmap.add(start / 8) };
let last = unsafe { self.bitmap.add((start + length) / 8) };
let mut byte_mask = BITMASK_FIRST_BYTE[start % 8];
if first == last {
byte_mask &= BITMASK_LAST_BYTE[(start + length) % 8];
if value {
unsafe {
*first |= byte_mask;
}
} else {
unsafe {
*first &= !byte_mask;
}
}
} else {
if value {
unsafe {
*first |= byte_mask;
}
} else {
unsafe {
*first &= !byte_mask;
}
}
byte_mask = BITMASK_LAST_BYTE[(start + length) % 8];
if value {
unsafe {
*last |= byte_mask;
}
} else {
unsafe {
*last &= !byte_mask;
}
}
let first = unsafe { first.add(1) };
if first >= last {
return;
}
if value {
unsafe {
first.write_bytes(0xFF, last.sub_ptr(first));
}
} else {
unsafe {
first.write_bytes(0, last.sub_ptr(first));
}
}
}
}
fn bitmap_get(&self, index: usize) -> bool {
assert!(index < self.total_chunks);
(unsafe { *self.bitmap.add(index / 8) } & (1 << (index % 8))) != 0
}
const fn free_chunks(&self) -> usize {
self.total_chunks - self.allocated_chunks
}
fn allocation_header(ptr: *mut u8) -> *mut Header {
unsafe { mem::transmute::<_, *mut Header>(ptr).sub(1) }
}
const fn calculate_chunks(memory_size: usize) -> usize {
memory_size / (CHUNK_SIZE + 1)
}
}
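// Sizing sketch for the 1 MiB region provided by the linker script: 1048576 / 17
// gives 61680 chunks, i.e. roughly 964 KiB of allocatable chunk space plus a
// ~7.5 KiB bitmap, which satisfies the assertion in `Heap::new`.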
unsafe impl Send for Heap {}
#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
panic!("allocation error: {:?}", layout)
loop {}
}

View File

@ -0,0 +1 @@
//!

View File

@ -1,6 +1,16 @@
//! Architecture-specific code.
//! Architecture specific code
/// X86 specific code
#[cfg(target_arch = "x86_64")]
#[path = "x86_64/mod.rs"]
pub mod arch;
macro_rules! arch_cond {
($($arch:ident: $str:literal),* $(,)?) => {$(
#[cfg(target_arch = $str)]
pub mod $arch;
#[cfg(target_arch = $str)]
pub use self::$arch::*;
)*};
}
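// For example, the `x86_64: "x86_64"` arm below expands to:
//     #[cfg(target_arch = "x86_64")] pub mod x86_64;
//     #[cfg(target_arch = "x86_64")] pub use self::x86_64::*;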
arch_cond!(
aarch64: "aarch64",
riscv64: "riscv64",
x86_64: "x86_64",
);

View File

@ -0,0 +1 @@
//!

View File

@ -0,0 +1,59 @@
use spin::Lazy;
use x86_64::{
structures::{
gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector},
tss::TaskStateSegment,
},
VirtAddr,
};
pub const DOUBLE_FAULT_IX: u16 = 0;
pub unsafe fn init() {
use x86_64::instructions::segmentation::{Segment, CS, DS, ES, SS};
use x86_64::instructions::tables::load_tss;
log::info!("Initialising GDT");
GDT.0.load();
unsafe {
CS::set_reg(GDT.1.kcode);
DS::set_reg(GDT.1.kdata);
ES::set_reg(GDT.1.kdata);
SS::set_reg(GDT.1.kdata);
load_tss(GDT.1.tss);
}
}
struct Selectors {
kcode: SegmentSelector,
kdata: SegmentSelector,
tss: SegmentSelector,
udata: SegmentSelector,
ucode: SegmentSelector,
}
static TSS: Lazy<TaskStateSegment> = Lazy::new(|| {
let mut tss = TaskStateSegment::new();
tss.interrupt_stack_table[usize::from(DOUBLE_FAULT_IX)] = {
const SIZE: usize = 5 * 1024;
let stack = unsafe {
alloc::alloc::alloc_zeroed(
alloc::alloc::Layout::from_size_align(SIZE, 1).expect("stack pointer"),
)
};
VirtAddr::from_ptr(stack) + SIZE
};
tss
});
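// Note: x86-64 stacks grow downwards, so the IST slot above stores the top of the
// freshly allocated double-fault stack (`VirtAddr::from_ptr(stack) + SIZE`).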
static GDT: Lazy<(GlobalDescriptorTable, Selectors)> = Lazy::new(|| {
let mut gdt = GlobalDescriptorTable::new();
let sels = Selectors {
kcode: gdt.add_entry(Descriptor::kernel_code_segment()),
kdata: gdt.add_entry(Descriptor::kernel_data_segment()),
tss: gdt.add_entry(Descriptor::tss_segment(&TSS)),
udata: gdt.add_entry(Descriptor::user_data_segment()),
ucode: gdt.add_entry(Descriptor::user_code_segment()),
};
(gdt, sels)
});

View File

@ -0,0 +1,67 @@
use spin::{Lazy, Mutex};
use x2apic::lapic::{LocalApic, LocalApicBuilder};
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub unsafe fn init() {
log::info!("Initialising IDT");
IDT.load();
Lazy::force(&LAPIC);
x86_64::instructions::interrupts::enable();
}
#[repr(u8)]
enum Interrupt {
Timer = 32,
ApicErr = u8::MAX - 1,
Spurious = u8::MAX,
}
static LAPIC: Lazy<Mutex<LocalApic>> = Lazy::new(|| {
let mut lapic = LocalApicBuilder::new()
.timer_vector(Interrupt::Timer as usize)
.error_vector(Interrupt::ApicErr as usize)
.spurious_vector(Interrupt::Spurious as usize)
.set_xapic_base(
unsafe { x2apic::lapic::xapic_base() }
+ super::memory::HHDM_OFFSET.load(core::sync::atomic::Ordering::Relaxed),
)
.build()
.expect("failed to setup Local APIC");
unsafe { lapic.enable() };
Mutex::new(lapic)
});
static IDT: Lazy<InterruptDescriptorTable> = Lazy::new(|| {
let mut idt = InterruptDescriptorTable::new();
unsafe {
idt.double_fault
.set_handler_fn(double_fault)
.set_stack_index(super::gdt::DOUBLE_FAULT_IX);
}
idt.page_fault.set_handler_fn(page_fault);
idt[Interrupt::Timer as usize].set_handler_fn(timer);
idt
});
extern "x86-interrupt" fn double_fault(stack_frame: InterruptStackFrame, error_code: u64) -> ! {
panic!("Double fault: error code {error_code} \n{stack_frame:#?}")
}
extern "x86-interrupt" fn page_fault(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
panic!("Page fault ({error_code:?}): {stack_frame:?}")
}
extern "x86-interrupt" fn timer(_: InterruptStackFrame) {
unsafe { LAPIC.lock().end_of_interrupt() };
}
extern "x86-interrupt" fn apic_err(_: InterruptStackFrame) {
panic!("Internal APIC error");
}
extern "x86-interrupt" fn spurious(_: InterruptStackFrame) {
unsafe { LAPIC.lock().end_of_interrupt() };
}

View File

@ -0,0 +1,69 @@
use core::sync::atomic::AtomicU64;
use limine::{LimineMemmapEntry, LimineMemoryMapEntryType};
use spin::{Mutex, Once};
use x86_64::{
structures::paging::{FrameAllocator, FrameDeallocator, OffsetPageTable, PhysFrame, Size4KiB},
PhysAddr, VirtAddr,
};
pub static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
pub static FRAME_ALLOC: Once<Mutex<FrameAlloc>> = Once::new();
pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
/// Initialise page table
pub unsafe fn init_pt(phys_base: VirtAddr) {
log::info!("Retrieving page table");
HHDM_OFFSET.store(phys_base.as_u64(), core::sync::atomic::Ordering::Relaxed);
PAGE_TABLE.call_once(|| {
Mutex::new(OffsetPageTable::new(
&mut *((phys_base
+ x86_64::registers::control::Cr3::read()
.0
.start_address()
.as_u64())
.as_mut_ptr()),
phys_base,
))
});
}
/// Initialise page frame allocator
pub unsafe fn init_falloc(mmap: &'static [LimineMemmapEntry]) {
log::info!("Initialising frame allocator");
FRAME_ALLOC.call_once(|| Mutex::new(FrameAlloc::new(mmap)));
}
pub struct FrameAlloc {
mmap: &'static [LimineMemmapEntry],
next: usize,
}
impl FrameAlloc {
pub unsafe fn new(mmap: &'static [LimineMemmapEntry]) -> Self {
Self { mmap, next: 0 }
}
fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
self.mmap
.iter()
.filter(|e| e.typ == LimineMemoryMapEntryType::Usable)
.map(|e| e.base..e.base + e.len)
.flat_map(|r| r.step_by(4096))
.map(PhysAddr::new)
.map(PhysFrame::containing_address)
}
}
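// Note: `allocate_frame` below rebuilds this iterator and skips the first `next`
// frames on every call, so each allocation re-walks the Limine memory map from the
// start.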
unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
let f = self.usable_frames().nth(self.next);
self.next += 1;
f
}
}
impl FrameDeallocator<Size4KiB> for FrameAlloc {
unsafe fn deallocate_frame(&mut self, frame: PhysFrame<Size4KiB>) {
// TODO
}
}

View File

@ -1,9 +1,79 @@
///
use limine::{LimineHhdmRequest, LimineKernelFileRequest, LimineMmapRequest, LimineModuleRequest};
use spin::Mutex;
use uart_16550::SerialPort;
use x86_64::VirtAddr;
pub fn sloop() {
use crate::allocator;
mod gdt;
mod interrupts;
mod memory;
static SERIAL_CONSOLE: Mutex<SerialPort> = Mutex::new(unsafe { SerialPort::new(0x3f8) });
#[no_mangle]
unsafe extern "C" fn _kernel_start() -> ! {
static HDHM_REQ: LimineHhdmRequest = LimineHhdmRequest::new(0);
static MMAP_REQ: LimineMmapRequest = LimineMmapRequest::new(0);
static KFILE_REQ: LimineKernelFileRequest = LimineKernelFileRequest::new(0);
static MOD_REQ: LimineModuleRequest = LimineModuleRequest::new(0);
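// Boot sequence: bring up the serial console and logger first so later failures are
// visible, then use the Limine HHDM offset and memory map to set up paging and the
// frame allocator, initialise the heap, GDT and IDT, and finally hand the kernel
// command line plus the first module (the initrd) to `kmain`.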
SERIAL_CONSOLE.lock().init();
crate::logger::init().expect("failed to set logger");
log::info!("Initialising AKern {}", crate::VERSION);
memory::init_pt(VirtAddr::new(
HDHM_REQ
.get_response()
.get()
.expect("tried to get physical memory mapping offset from Limine")
.offset,
));
memory::init_falloc(
MMAP_REQ
.get_response()
.get()
.and_then(limine::LimineMemmapResponse::mmap)
.expect("tried to get memory map from Limine"),
);
allocator::init();
gdt::init();
interrupts::init();
crate::kmain::kmain(
KFILE_REQ
.get_response()
.get()
.and_then(|r| r.kernel_file.get())
.expect("failed to get kernel file from Limine")
.cmdline
.to_string()
.unwrap_or_default(),
MOD_REQ
.get_response()
.get()
.and_then(|r| r.modules())
.and_then(|m| m.get(0))
.map(|file| unsafe {
core::slice::from_raw_parts(
file.base.as_ptr().expect("invalid initrd"),
file.length as usize,
)
}),
)
}
/// Format args to serial console
pub fn serial_fmt(args: core::fmt::Arguments<'_>) -> core::fmt::Result {
use core::fmt::Write;
x86_64::instructions::interrupts::without_interrupts(|| SERIAL_CONSOLE.lock().write_fmt(args))
}
/// Spin loop
pub fn sloop() -> ! {
loop {
unsafe {
core::arch::asm!("hlt");
}
x86_64::instructions::hlt();
}
}

9
kernel/src/kmain.rs Normal file
View File

@ -0,0 +1,9 @@
//! AbleOS Kernel Entrypoint
pub fn kmain(cmdline: &str, initrd: Option<&'static [u8]>) -> ! {
log::debug!("Entered kmain");
log::info!("Cmdline: \"{cmdline}\"");
let initrd = initrd.expect("no initrd found");
crate::arch::sloop()
}

View File

@ -1,47 +1,51 @@
//! The ableOS kernel.
#![feature(alloc_error_handler)]
#![feature(arbitrary_enum_discriminant)]
#![feature(prelude_import)]
#![feature(
abi_x86_interrupt,
alloc_error_handler,
panic_info_message,
pointer_is_aligned,
prelude_import,
ptr_sub_ptr
)]
#![no_std]
#![deny(missing_docs)]
extern crate alloc;
pub mod aalloc;
pub mod allocator;
pub mod arch;
pub mod device_interface;
pub mod messaging;
// pub mod panic;
pub mod proccess;
pub mod syscalls;
pub mod task;
pub mod time;
mod allocator;
mod arch;
mod kmain;
mod logger;
mod task;
use core::arch::asm;
use versioning::Version;
/// The number of ticks since the first CPU was started
// pub static TICK: AtomicU64 = AtomicU64::new(0);
/// Kernel's version
pub const KERNEL_VERSION: Version = Version {
pub const VERSION: Version = Version {
major: 0,
minor: 1,
patch: 2,
minor: 2,
patch: 0,
};
/*
/// called by arch specific timers to tick up all kernel related functions
pub fn tick() {
let mut data = TICK.load(Relaxed);
data = data.wrapping_add(1);
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
// TODO: Better panic handler
let _ = crate::arch::serial_fmt(format_args!(
"\x1b[1m\x1b[4m\x1b[38;5;125mKernel Panic\x1b[0m\r\n",
));
TICK.store(data, Relaxed)
}
*/
/// Cause a software interrupt
pub fn software_int() {
unsafe { asm!("int 54") }
if let Some(loc) = info.location() {
let _ = crate::arch::serial_fmt(format_args!(
"Location: {}: {}, {}\r\n",
loc.file(),
loc.line(),
loc.column()
));
}
if let Some(msg) = info.message() {
let _ = crate::arch::serial_fmt(format_args!("{msg}\r\n"));
}
loop {}
}
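The handler above spins in loop {} after printing; crate::arch::sloop() already exists for exactly this, and a standalone sketch of parking the core with interrupts off (an assumed alternative, not what this commit does) would be:
fn park_forever() -> ! {
    loop {
        // hlt with interrupts disabled keeps the core parked instead of
        // burning cycles in an empty loop.
        x86_64::instructions::interrupts::disable();
        x86_64::instructions::hlt();
    }
}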

33
kernel/src/logger.rs Normal file
View File

@ -0,0 +1,33 @@
use log::{Level, SetLoggerError};
pub fn init() -> Result<(), SetLoggerError> {
log::set_logger(&crate::logger::Logger)?;
log::set_max_level(log::LevelFilter::Trace);
Ok(())
}
struct Logger;
impl log::Log for Logger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
true
}
fn log(&self, record: &log::Record) {
let lvl = record.level();
crate::arch::serial_fmt(format_args!(
"\x1b[38;5;{}m{lvl}\x1b[0m [{}]: {}\r\n",
match lvl {
Level::Error => "160",
Level::Warn => "172",
Level::Info => "47",
Level::Debug => "25",
Level::Trace => "103",
},
record.module_path().unwrap_or_default(),
record.args(),
))
.expect("write to serial console");
}
fn flush(&self) {}
}
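Once logger::init() has installed this Logger and raised the max level to Trace, the ordinary log macros anywhere in the kernel land on the serial console; a small usage sketch (demo_logging is just an illustrative wrapper):
fn demo_logging() {
    // Both of these go through Logger::log and serial_fmt with the colour
    // table above; Trace passes because the max level is Trace.
    log::info!("AKern logging online");
    log::trace!("verbose details");
}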

4
kernel/src/main.rs Normal file
View File

@ -0,0 +1,4 @@
#![no_std]
#![no_main]
extern crate kernel;

View File

@ -1,193 +0,0 @@
//! Interprocess communication.
use alloc::vec::Vec;
use crate::{proccess::PID, time::Time};
extern crate alloc;
/// 131070 * 128
pub const TINY_MESSAGE_COUNT: usize = 131070;
/// 16384 * 1024
pub const SMALL_MESSAGE_COUNT: usize = 16384;
/// 65536 * 256
pub const MEDIUM_MESSAGE_COUNT: usize = 256;
/// 1048576 * 16
pub const LARGE_MESSAGE_COUNT: usize = 16;
/// 16777216 * 4
pub const HUGE_MESSAGE_COUNT: usize = 4;
/// 128 Bytes
pub type Tiny = [u8; 128];
/// 1 KiB
pub type Small = [u8; 1024];
/// 64 KiB
pub type Medium = [u8; 65536];
/// 1MiB
pub type Large = [u8; 1048576];
/// 16MiB
pub type Huge = [u8; 16777216];
/// An internal message to be held in a process message
pub enum Message {
/// A Tiny message
///
/// The message is 128 bytes long
Tiny(Tiny),
/// A Small message
///
/// The message is 1 KiB long
Small(Small),
/// A Medium message
///
/// The message is 64 KiB long
Medium(Medium),
/// A Large message
///
/// The message is 1 MiB long
Large(Large),
/// A Huge message
///
/// The message is 16 MiB long
Huge(Huge),
}
/// A message that can be sent between processes
pub struct ProcessMessage {
/// The receiver of the message
pub to_pid: PID,
/// The sender of the message
pub from_pid: PID,
/// The message
pub message: Message,
/// The time the message was sent
pub sender_time: Time,
/// The time the message was received
pub receiver_time: Time,
}
impl ProcessMessage {
/// Return the size of the message
pub fn size(&self) -> usize {
match &self.message {
Message::Tiny(_) => 128,
Message::Small(_) => 1024,
Message::Medium(_) => 65536,
Message::Large(_) => 1048576,
Message::Huge(_) => 16777216,
}
}
}
#[derive(Debug)]
/// An enum of all possible errors that can occur when sending a message
pub enum MessagingError {
/// The message is too large to be sent
MessageTooLarge,
/// The receiver of the message is not valid
ProcessNonExistant,
/// The message Queue is full
TooManyMessages,
}
/// A mailbox that holds messages and a per-size count of them
pub struct Mailbox {
/// The messages in the mailbox
pub messages: Vec<ProcessMessage>,
/// The count of messages in the mailbox
pub message_count: MessageCount,
}
impl Mailbox {
/// append a message to the mailbox
pub fn append(&mut self, message: ProcessMessage) -> Result<(), MessagingError> {
let msg_size = message.size();
if self.message_count.total() > 147730 {
return Err(MessagingError::TooManyMessages);
}
match msg_size {
TINY_MESSAGE_COUNT => {
if self.message_count.tiny < TINY_MESSAGE_COUNT {
self.messages.push(message);
self.message_count.tiny += 1;
Ok(())
} else {
Err(MessagingError::TooManyMessages)
}
}
SMALL_MESSAGE_COUNT => {
if self.message_count.small < SMALL_MESSAGE_COUNT {
self.messages.push(message);
self.message_count.small += 1;
Ok(())
} else {
Err(MessagingError::TooManyMessages)
}
}
MEDIUM_MESSAGE_COUNT => {
if self.message_count.medium < MEDIUM_MESSAGE_COUNT {
self.messages.push(message);
self.message_count.medium += 1;
Ok(())
} else {
Err(MessagingError::TooManyMessages)
}
}
LARGE_MESSAGE_COUNT => {
if self.message_count.large < LARGE_MESSAGE_COUNT {
self.messages.push(message);
self.message_count.large += 1;
Ok(())
} else {
Err(MessagingError::TooManyMessages)
}
}
HUGE_MESSAGE_COUNT => {
if self.message_count.huge < HUGE_MESSAGE_COUNT {
self.messages.push(message);
self.message_count.huge += 1;
Ok(())
} else {
return Err(MessagingError::TooManyMessages);
}
}
_ => Err(MessagingError::MessageTooLarge),
}
}
}
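Note that append above matches the message's size in bytes against the *_MESSAGE_COUNT constants, so a 128-byte Tiny message is compared with 131070 and falls through to MessageTooLarge. A sketch of keying on the variant instead (illustrative only, not a change contained in this diff):
impl Mailbox {
    fn append_checked(&mut self, message: ProcessMessage) -> Result<(), MessagingError> {
        let (count, limit) = match message.message {
            Message::Tiny(_) => (&mut self.message_count.tiny, TINY_MESSAGE_COUNT),
            Message::Small(_) => (&mut self.message_count.small, SMALL_MESSAGE_COUNT),
            Message::Medium(_) => (&mut self.message_count.medium, MEDIUM_MESSAGE_COUNT),
            Message::Large(_) => (&mut self.message_count.large, LARGE_MESSAGE_COUNT),
            Message::Huge(_) => (&mut self.message_count.huge, HUGE_MESSAGE_COUNT),
        };
        if *count >= limit {
            return Err(MessagingError::TooManyMessages);
        }
        *count += 1;
        self.messages.push(message);
        Ok(())
    }
}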
/// A proper struct to list the number of messages in the mailbox
pub struct MessageCount {
/// The number of tiny messages in the mailbox
pub tiny: usize,
/// The number of small messages in the mailbox
pub small: usize,
/// The number of medium messages in the mailbox
pub medium: usize,
/// The number of large messages in the mailbox
pub large: usize,
/// The number of huge messages in the mailbox
pub huge: usize,
}
impl MessageCount {
/// Return the total number of messages in the mailbox
pub fn total(&self) -> usize {
self.tiny + self.small + self.medium + self.large + self.huge
}
}
impl Default for MessageCount {
fn default() -> Self {
MessageCount {
tiny: 0,
small: 0,
medium: 0,
large: 0,
huge: 0,
}
}
}

View File

@ -1,11 +0,0 @@
//! Panic-related stuff
use core::panic::PanicInfo;
use log::error;
#[panic_handler]
fn panic_handler(info: &PanicInfo) -> ! {
error!("{}", info);
loop {}
}

View File

@ -1,14 +0,0 @@
//! Platform agnostic process
/// A process ID
pub type PID = u64;
/// Signals that can be sent to a process
#[repr(C)]
pub enum Signals {
/// Terminate the process
Terminate,
/// Shut down the process, allowing it to exit cleanly
Quit,
}

View File

@ -1,26 +0,0 @@
//!
use crate::proccess::{Signals, PID};
/// All possible system calls
pub enum Syscall {
/// Create a new process and return its PID
CreateProcess,
/// Send a signal to a process
SendSignal(PID, Signals),
/// Get the current process ID
GetPID,
/// Get the current time
GetTime,
/// Set the time
SetTime,
// ListInodes,
// CreateInode,
// RemoveInode,
// OpenInode,
// CloseInode,
}

View File

@ -87,16 +87,16 @@ impl Executor {
.map(|t| TaskId(self.tasks.insert(t)))
.or_else(|| self.queue.pop())
{
let task = match self.tasks.get_mut(id.0) {
Some(t) => t,
None => panic!("attempted to get non-extant task with id {}", id.0),
let Some(task) = self.tasks.get_mut(id.0) else {
panic!("attempted to get non-extant task with id {}", id.0)
};
let mut cx = Context::from_waker(
self.wakers
.entry(id)
.or_insert_with(|| TaskWaker::new(id, Arc::clone(&self.queue))),
);
let mut cx = Context::from_waker(self.wakers.entry(id).or_insert_with(|| {
Waker::from(Arc::new(TaskWaker {
id,
queue: Arc::clone(&self.queue),
}))
}));
match task.poll(&mut cx) {
Poll::Ready(()) => {
@ -138,12 +138,6 @@ struct TaskWaker {
queue: TaskQueue,
}
impl TaskWaker {
fn new(id: TaskId, queue: TaskQueue) -> Waker {
Waker::from(Arc::new(Self { id, queue }))
}
}
impl Wake for TaskWaker {
fn wake(self: Arc<Self>) {
self.wake_by_ref();
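The change above leans on alloc::task::Wake: an Arc of any type implementing Wake converts straight into a core Waker via Waker::from, which is what made the TaskWaker::new helper redundant. A standalone sketch of that conversion (NoopWaker is a made-up name for illustration):
use alloc::{sync::Arc, task::Wake};
use core::task::Waker;
struct NoopWaker;
impl Wake for NoopWaker {
    fn wake(self: Arc<Self>) {}
}
fn make_waker() -> Waker {
    // Same pattern as the executor: Arc<impl Wake> -> Waker.
    Waker::from(Arc::new(NoopWaker))
}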

View File

@ -1,10 +0,0 @@
//! Time
/// An internal structure that is used to keep track of the time
pub struct Time {
/// The number of seconds since the kernel was started
pub seconds: u64,
/// The number of nanoseconds since the kernel was started
pub nanoseconds: u32,
}

View File

@ -0,0 +1,22 @@
{
"llvm-target": "x86_64-unknown-none",
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
"arch": "x86_64",
"target-endian": "little",
"target-pointer-width": "64",
"target-c-int-width": "32",
"os": "none",
"executables": true,
"linker-flavor": "ld.lld",
"linker": "rust-lld",
"panic-strategy": "abort",
"disable-redzone": true,
"features": "-mmx,-sse,+soft-float",
"code-model": "kernel",
"pre-link-args": {
"ld.lld": [
"--gc-sections",
"--script=kernel/lds/x86_64.ld"
]
}
}
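This is the custom target specification that the test subcommand below passes via --target; a hedged sketch of driving the same build by hand through std::process::Command (the -Zbuild-std flags are an assumption about the toolchain setup; the json_targets/ path matches the test subcommand below):
use std::process::Command;
fn build_kernel_with_json_target() -> std::io::Result<std::process::ExitStatus> {
    Command::new("cargo")
        .args([
            "build",
            "--target=json_targets/x86_64-ableos.json",
            // Assumed: custom JSON targets typically need core/alloc rebuilt from source.
            "-Zbuild-std=core,alloc",
        ])
        .status()
}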

243
qmp.py
View File

@ -1,243 +0,0 @@
# QEMU Monitor Protocol Python class
#
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import json
import errno
import socket
import logging
class QMPError(Exception):
pass
class QMPConnectError(QMPError):
pass
class QMPCapabilitiesError(QMPError):
pass
class QMPTimeoutError(QMPError):
pass
class QEMUMonitorProtocol(object):
#: Logger object for debugging messages
logger = logging.getLogger('QMP')
#: Socket's error class
error = socket.error
#: Socket's timeout
timeout = socket.timeout
def __init__(self, address, server=False):
"""
Create a QEMUMonitorProtocol class.
@param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
@param server: server mode listens on the socket (bool)
@raise socket.error on socket connection errors
@note No connection is established, this is done by the connect() or
accept() methods
"""
self.__events = []
self.__address = address
self.__sock = self.__get_sock()
self.__sockfile = None
if server:
self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.__sock.bind(self.__address)
self.__sock.listen(1)
def __get_sock(self):
if isinstance(self.__address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def __negotiate_capabilities(self):
greeting = self.__json_read()
if greeting is None or "QMP" not in greeting:
raise QMPConnectError
# Greeting seems ok, negotiate capabilities
resp = self.cmd('qmp_capabilities')
if "return" in resp:
return greeting
raise QMPCapabilitiesError
def __json_read(self, only_event=False):
while True:
data = self.__sockfile.readline()
if not data:
return
resp = json.loads(data)
if 'event' in resp:
self.logger.debug("<<< %s", resp)
self.__events.append(resp)
if not only_event:
continue
return resp
def __get_events(self, wait=False):
"""
Check for new events in the stream and cache them in __events.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
"""
# Check for new events regardless and pull them into the cache:
self.__sock.setblocking(0)
try:
self.__json_read()
except socket.error as err:
if err[0] == errno.EAGAIN:
# No data available
pass
self.__sock.setblocking(1)
# Wait for new events, if needed.
# if wait is 0.0, this means "no wait" and is also implicitly false.
if not self.__events and wait:
if isinstance(wait, float):
self.__sock.settimeout(wait)
try:
ret = self.__json_read(only_event=True)
except socket.timeout:
raise QMPTimeoutError("Timeout waiting for event")
except:
raise QMPConnectError("Error while reading from socket")
if ret is None:
raise QMPConnectError("Error while reading from socket")
self.__sock.settimeout(None)
def connect(self, negotiate=True):
"""
Connect to the QMP Monitor and perform capabilities negotiation.
@return QMP greeting dict
@raise socket.error on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
"""
self.__sock.connect(self.__address)
self.__sockfile = self.__sock.makefile()
if negotiate:
return self.__negotiate_capabilities()
def accept(self):
"""
Await connection from QMP Monitor and perform capabilities negotiation.
@return QMP greeting dict
@raise socket.error on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
"""
self.__sock.settimeout(15)
self.__sock, _ = self.__sock.accept()
self.__sockfile = self.__sock.makefile()
return self.__negotiate_capabilities()
def cmd_obj(self, qmp_cmd):
"""
Send a QMP command to the QMP Monitor.
@param qmp_cmd: QMP command to be sent as a Python dict
@return QMP response as a Python dict or None if the connection has
been closed
"""
self.logger.debug(">>> %s", qmp_cmd)
try:
self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
except socket.error as err:
if err[0] == errno.EPIPE:
return
raise socket.error(err)
resp = self.__json_read()
self.logger.debug("<<< %s", resp)
return resp
def cmd(self, name, args=None, cmd_id=None):
"""
Build a QMP command and send it to the QMP Monitor.
@param name: command name (string)
@param args: command arguments (dict)
@param cmd_id: command id (dict, list, string or int)
"""
qmp_cmd = {'execute': name}
if args:
qmp_cmd['arguments'] = args
if cmd_id:
qmp_cmd['id'] = cmd_id
return self.cmd_obj(qmp_cmd)
def command(self, cmd, **kwds):
"""
Build and send a QMP command to the monitor, report errors if any
"""
ret = self.cmd(cmd, kwds)
if "error" in ret:
raise Exception(ret['error']['desc'])
return ret['return']
def pull_event(self, wait=False):
"""
Pulls a single event.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
@return The first available QMP event, or None.
"""
self.__get_events(wait)
if self.__events:
return self.__events.pop(0)
return None
def get_events(self, wait=False):
"""
Get a list of available QMP events.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
@return The list of available QMP events.
"""
self.__get_events(wait)
return self.__events
def clear_events(self):
"""
Clear current list of pending events.
"""
self.__events = []
def close(self):
self.__sock.close()
self.__sockfile.close()
def settimeout(self, timeout):
self.__sock.settimeout(timeout)
def get_sock_fd(self):
return self.__sock.fileno()
def is_scm_available(self):
return self.__sock.family == socket.AF_UNIX

View File

@ -1,118 +0,0 @@
#!/usr/bin/python
#
# QProfiler is a QEMU profiler based on QMP
#
# Copyright (c) 2019-2022 Matias Vara <matiasevara@gmail.com>
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys, os, re
from qmp import QEMUMonitorProtocol
from time import sleep
import subprocess
def main(args):
path = None
filename = None
# duration of the test in seconds
duration = 5
# sampling frequency in seconds
frequency = 0.05
while len(args):
arg = args[0]
if arg.startswith('--'):
arg = arg[2:]
if arg.find('=') == -1:
value = True
else:
arg, value = arg.split('=', 1)
if arg in ['path']:
if type(value) == str:
path = value
elif arg in ['duration']:
duration = int(value)
elif arg in ['frequency']:
frequency = float(value)
elif arg in ['filename']:
filename = value
else:
print('Unknown argument "%s"' % arg)
return 1
args = args[1:]
else:
break
if not path:
print("Path isn't set, use --path=qmp-monitor-address")
return 1
def do_command(srv, cmd, **kwds):
rsp = srv.cmd(cmd, kwds)
if 'error' in rsp:
raise Exception(rsp['error']['desc'])
return rsp['return']
srv = QEMUMonitorProtocol(path)
srv.connect()
arguments = {}
command = 'human-monitor-command'
r = int(duration // frequency)
rip_hash = {}
for i in range(r):
arguments['command-line'] = 'info registers'
rsp = do_command(srv, command, **arguments)
regs = re.search(r'RIP=([\w]+)\s', rsp)
rip = regs.group(1)
if rip in rip_hash:
rip_hash[rip] += 1
else:
rip_hash[rip] = 1
sleep(frequency)
srv.close()
rip_hash_name = {}
for i in rip_hash:
with open(os.devnull, 'w') as devnull:
# pass
tmp = subprocess.check_output("addr2line --demangle -p -s -f -e "
+ filename
+ " "
+ i , shell=True, stderr=devnull).rstrip()
if tmp in rip_hash_name:
rip_hash_name[tmp] += rip_hash[i]
else:
rip_hash_name[tmp] = rip_hash[i]
for i in rip_hash_name:
print('{:>8} {}'.format(rip_hash_name[i], i))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

View File

@ -7,3 +7,5 @@ edition = "2021"
[dependencies]
colored = "2.0"
udisks = "0.1"
zbus = "2.3"

View File

@ -5,9 +5,16 @@
* SPDX-License-Identifier: MPL-2.0
*/
use std::{fs, process::Command};
use colored::*;
use std::{
fs::{self, File},
os::fd::AsRawFd,
process::Command,
};
use udisks::{
filesystem::{MountOptions, UnMountOptions},
manager::LoopSetupOptions,
};
struct Options {
pub subcommand: Subcommand,
@ -15,6 +22,7 @@ struct Options {
}
enum Subcommand {
BuildImage,
Doc,
Help,
Run,
@ -27,6 +35,7 @@ enum Subcommand {
impl Subcommand {
fn from_str<S: AsRef<str>>(str: S) -> Subcommand {
match str.as_ref() {
"build-image" => Subcommand::BuildImage,
"doc" => Subcommand::Doc,
"help" => Subcommand::Help,
"run" | "r" => Subcommand::Run,
@ -44,10 +53,155 @@ enum MachineType {
Unknown(String),
}
fn main() {
fn main() -> Result<(), Box<dyn std::error::Error>> {
let options = options();
match options.subcommand {
Subcommand::BuildImage => {
let machine_text = options.arguments.get(0).cloned().unwrap_or_default();
match machine(machine_text) {
MachineType::X86_64 => {
// Cleanup
// NOTE: we are not unwrapping these, as we don't want this to fail if they
// don't exist yet, probably not the best idea though.
// FIXME: figure out a better way to ignore errors about these not existing
#[allow(unused_must_use)]
{
fs::remove_dir_all("./limine");
fs::remove_dir_all("./disk");
fs::remove_file("./target/disk.img");
}
// Build ableOS in release mode
Command::new("cargo")
.args(["build", "--release"])
.current_dir(fs::canonicalize("./kernel").unwrap())
.status()
.unwrap();
// Create disk directory
fs::create_dir("./disk").unwrap();
// Clone limine 4.x binaries
Command::new("git")
.arg("clone")
.arg("https://github.com/limine-bootloader/limine.git")
.arg("--branch=v4.x-branch-binary")
.arg("--depth=1")
.status()
.unwrap();
println!("{}", "Building limine".bold());
Command::new("make")
.args(["-C", "limine"])
.status()
.unwrap();
println!("{}", "Allocating new disk image".bold());
Command::new("fallocate")
.args(["-l", "256M", "./target/disk.img"])
.status()
.unwrap();
println!("{}", "Partitioning disk image".bold());
let dbus_conn = zbus::blocking::Connection::system()?;
// Setup loop device
let disk_img = File::options()
.read(true)
.write(true)
.open("./target/disk.img")?;
let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
.loop_setup(
disk_img.as_raw_fd().into(),
LoopSetupOptions {
no_user_interaction: true,
offset: 0,
size: 0,
readonly: false,
no_part_scan: false,
},
)?;
// Create MBR
udisks::block::BlockProxyBlocking::builder(&dbus_conn)
.path(&loopdev)?
.build()?
.format("dos", Default::default())?;
// Create and format partition
let filesystem =
udisks::partition::PartitionTableProxyBlocking::builder(&dbus_conn)
.destination("org.freedesktop.UDisks2")?
.path(&loopdev)?
.build()?
.create_partition_and_format(
0,
0,
"",
"",
Default::default(),
"ext2",
[("take-ownership", true.into())].into_iter().collect(),
)?;
let fsproxy = udisks::filesystem::FilesystemProxyBlocking::builder(&dbus_conn)
.path(&filesystem)?
.build()?;
// Mount the filesystem
let mountpoint = fsproxy.mount(MountOptions {
no_user_interaction: true,
fs_type: String::new(),
mount_options: String::new(),
})?;
// copy ./base/* over to the mounted image
Command::new("sh")
.arg("-c")
.arg(format!("cp -r ./base/* {mountpoint}"))
.status()?;
// copy ./limine/limine.sys over to /boot on the mounted image
Command::new("cp")
.args(["./limine/limine.sys", &format!("{mountpoint}/boot")])
.status()?;
// copy the kernel over to /boot/kernel on the mounted image
Command::new("cp")
.arg("./target/x86_64-ableos/release/kernel")
.arg(&format!("{mountpoint}/boot/kernel"))
.status()?;
// Unmount the filesystem (and the rest of things will follow)
fsproxy.unmount(UnMountOptions {
no_user_interaction: true,
force: false,
})?;
println!("{}", "Deploying limine".bold());
Command::new("./limine/limine-deploy")
.arg("./target/disk.img")
.status()
.unwrap();
}
MachineType::Unknown(unknown) => {
eprintln!(
"{}: unknown machine type `{}`",
"error".red().bold(),
unknown.bold(),
);
eprintln!("expected one of x86_64, riscv64 or aarch64");
}
_ => {
eprintln!(
"{}: build-image not implemented for this machine type",
"error".red().bold(),
);
}
}
}
Subcommand::Test => {
Command::new("cargo")
.args(["test", "--target=json_targets/x86_64-ableos.json"])
@ -100,16 +254,161 @@ fn main() {
match machine(machine_text) {
MachineType::X86_64 if debug => {
// Build ableOS
Command::new("cargo")
.args(["run", "--", "-S", "-gdb", "tcp:9000"])
.current_dir(fs::canonicalize("./ableos").unwrap())
.arg("build")
.current_dir(fs::canonicalize("./kernel").unwrap())
.status()
.unwrap();
// Setup loopback device for disk.img, with partitions
// FIXME: don't do this if running without changes
// Setup loop device
let disk_img = File::options()
.read(true)
.write(true)
.open("./target/disk.img")?;
let dbus_conn = zbus::blocking::Connection::system()?;
let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
.loop_setup(
disk_img.as_raw_fd().into(),
LoopSetupOptions {
no_user_interaction: true,
offset: 0,
size: 0,
readonly: false,
no_part_scan: false,
},
)?;
let parts = udisks::partition::PartitionTableProxyBlocking::builder(&dbus_conn)
.destination("org.freedesktop.UDisks2")?
.path(loopdev)?
.build()?
.partitions()?;
let fsobjpath = parts.get(0).ok_or("missing boot partition")?;
let mountpoint =
udisks::filesystem::FilesystemProxyBlocking::builder(&dbus_conn)
.path(fsobjpath)?
.build()?
.mount(MountOptions {
no_user_interaction: true,
fs_type: String::new(),
mount_options: String::new(),
})?;
// copy the kernel over to /boot/kernel on the mounted image
Command::new("cp")
.arg("./target/x86_64-ableos/debug/kernel")
.arg(format!("{mountpoint}/boot/kernel"))
.status()
.unwrap();
udisks::filesystem::FilesystemProxyBlocking::builder(&dbus_conn)
.path(fsobjpath)?
.build()?
.unmount(UnMountOptions {
no_user_interaction: true,
force: false,
})?;
// run qemu with "-S", "-gdb", "tcp:9000"
Command::new("qemu-system-x86_64")
.args(["-device", "piix4-ide,id=ide"])
.arg("-drive")
.arg("file=./target/disk.img,format=raw,if=none,id=disk")
.args(["-device", "ide-hd,drive=disk,bus=ide.0"])
// .arg("--nodefaults")
.args(["-cpu", "Broadwell-v3"])
.args(["-m", "4G"])
.args(["-serial", "stdio"])
.args(["-smp", "cores=2"])
// .args(["-soundhw", "pcspk"])
// .args(["-device", "VGA"])
// .args(["-device", "virtio-gpu-pci"])
.args(["-device", "vmware-svga"])
.args(["-device", "sb16"])
// .args(["-machine", "pcspk-audiodev=0"])
// .args(["-qmp", "unix:../qmp-sock,server,nowait"])
.args(["-S", "-gdb", "tcp:9000"])
.status()
.unwrap();
}
MachineType::X86_64 => {
// Build ableOS
Command::new("cargo")
.args(["run", "--release"])
.current_dir(fs::canonicalize("./ableos").unwrap())
.args(["build", "--release"])
.current_dir(fs::canonicalize("./kernel").unwrap())
.status()
.unwrap();
// Setup loopback device for disk.img, with partitions
// FIXME: don't do this if running without changes
let disk_img = File::options()
.read(true)
.write(true)
.open("./target/disk.img")?;
let dbus_conn = zbus::blocking::Connection::system()?;
let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
.loop_setup(
disk_img.as_raw_fd().into(),
LoopSetupOptions {
no_user_interaction: true,
offset: 0,
size: 0,
readonly: false,
no_part_scan: false,
},
)?;
let parts = udisks::partition::PartitionTableProxyBlocking::builder(&dbus_conn)
.destination("org.freedesktop.UDisks2")?
.path(loopdev)?
.build()?
.partitions()?;
let fsproxy = udisks::filesystem::FilesystemProxyBlocking::builder(&dbus_conn)
.path(&parts[0])?
.build()?;
// Mount the filesystem
let mountpoint = fsproxy.mount(MountOptions {
no_user_interaction: true,
fs_type: String::new(),
mount_options: String::new(),
})?;
// copy the kernel over to /boot/kernel on the mounted image
Command::new("cp")
.arg("./target/x86_64-ableos/release/kernel")
.arg(format!("{mountpoint}/boot/kernel"))
.status()
.unwrap();
fsproxy.unmount(UnMountOptions {
no_user_interaction: true,
force: false,
})?;
// run qemu
Command::new("qemu-system-x86_64")
.args(["-device", "piix4-ide,id=ide"])
.arg("-drive")
.arg("file=./target/disk.img,format=raw,if=none,id=disk")
.args(["-device", "ide-hd,drive=disk,bus=ide.0"])
// .arg("--nodefaults")
.args(["-cpu", "Broadwell-v3"])
.args(["-m", "4G"])
.args(["-serial", "stdio"])
.args(["-smp", "cores=2"])
// .args(["-soundhw", "pcspk"])
// .args(["-device", "VGA"])
// .args(["-device", "virtio-gpu-pci"])
.args(["-device", "vmware-svga"])
.args(["-device", "sb16"])
// .args(["-machine", "pcspk-audiodev=0"])
// .args(["-qmp", "unix:../qmp-sock,server,nowait"])
.status()
.unwrap();
}
@ -189,6 +488,8 @@ fn main() {
help();
}
}
Ok(())
}
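Both run paths above carry a FIXME about rewriting the kernel into the disk image even when nothing has changed; a minimal sketch of a freshness check that could gate that step (helper name and error handling are illustrative, not part of this diff):
use std::{fs, io, path::Path};
/// Returns true when the freshly built kernel is newer than the copy already
/// inside the mounted image, i.e. when copying is actually needed.
fn needs_copy(built_kernel: &Path, installed_kernel: &Path) -> io::Result<bool> {
    let built = fs::metadata(built_kernel)?.modified()?;
    match fs::metadata(installed_kernel) {
        Ok(meta) => Ok(built > meta.modified()?),
        // No kernel in the image yet, so a copy is definitely needed.
        Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(true),
        Err(e) => Err(e),
    }
}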
fn options() -> Options {

View File

@ -1,2 +0,0 @@
# ableOS userland

View File

@ -1,77 +0,0 @@
// Utterly stolen from stack overflow
/*
* Standalone BogoMips program
*
* Based on code Linux kernel code in init/main.c and
* include/linux/delay.h
*
* For more information on interpreting the results, see the BogoMIPS
* Mini-HOWTO document.
*
* version: 1.3
* author: Jeff Tranter (Jeff_Tranter@Mitel.COM)
*/
#include <stdio.h>
#include <time.h>
#ifdef CLASSIC_BOGOMIPS
/* the original code from the Linux kernel */
static __inline__ void delay(int loops)
{
__asm__(".align 2,0x90\n1:\tdecl %0\n\tjns 1b": :"a" (loops):"ax");
}
#endif
#ifdef QNX_BOGOMIPS
/* version for QNX C compiler */
void delay(int loops);
#pragma aux delay = \
"l1:" \
"dec eax" \
"jns l1" \
parm nomemory [eax] modify exact nomemory [eax];
#endif
#ifdef PORTABLE_BOGOMIPS
/* portable version */
static void delay(int loops)
{
long i;
for (i = loops; i >= 0 ; i--)
;
}
#endif
int
main(void)
{
unsigned long loops_per_sec = 1;
unsigned long ticks;
printf("Calibrating delay loop.. ");
fflush(stdout);
while ((loops_per_sec <<= 1)) {
ticks = clock();
delay(loops_per_sec);
ticks = clock() - ticks;
if (ticks >= CLOCKS_PER_SEC) {
loops_per_sec = (loops_per_sec / ticks) * CLOCKS_PER_SEC;
printf("ok - %lu.%02lu BogoMips\n",
loops_per_sec/500000,
(loops_per_sec/5000) % 100
);
return 0;
}
}
printf("failed\n");
return -1;
}

View File

@ -1,30 +0,0 @@
enum FSReturns {
/// The system call was successful
Ok,
/// The directory can not be created
DirectoryCouldNotBeCreated,
/// The directory could not be removed
DirectoryCouldNotBeRemoved,
///
FileCouldNotBeCreated,
///
FileCouldNotBeRemoved,
/// The file could not be opened
FileCouldNotBeOpened,
///
FileCouldNotBeClosed,
};
int create_directory(path) {
return DirectoryCouldNotBeCreated;
}
///
int remove_directory(path) {
return DirectoryCouldNotBeRemoved;
}

View File

@ -1 +0,0 @@
# The libraries here are simplified examples of the syscall API

View File

@ -1,7 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "rname"
version = "0.1.0"

View File

@ -1,8 +0,0 @@
[package]
name = "rname"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View File

@ -1,44 +0,0 @@
//! An implementation of the uname command.
use crate::Arch::*;
use core::fmt;
// an example string "Darwin Roadrunner.local 10.3.0 Darwin Kernel Version 10.3.0: Fri Feb 26 11:58:09 PST 2010; root:xnu-1504.3.12~1/RELEASE_I386 i386"
pub struct RName {
pub arch: Arch,
}
#[derive(Debug, Clone, Copy)]
pub enum Arch {
X86,
X86_64,
ARM,
ARM64,
PPC,
PPC64,
MIPS,
MIPS64,
SPARC,
SPARC64,
Unknown,
}
impl fmt::Display for Arch {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
fn main() {
let mut rname_string = "".to_string();
rname_string.push_str("ableOS");
let arch = Some(X86_64);
if let Some(arch) = arch {
let fmt_str = format!(" {:?}", arch);
rname_string.push_str(&fmt_str);
}
println!("{}", rname_string);
}

Some files were not shown because too many files have changed in this diff.