forked from AbleOS/ableos

KERNEL: Fixed holeybytes

Erin 2023-08-22 15:52:30 +02:00 committed by ondra05
parent 0f151fdd52
commit cbed32526b
10 changed files with 256 additions and 1148 deletions

Cargo.lock (generated, 975 lines changed): file diff suppressed because it is too large


@@ -1,3 +1,3 @@
[workspace]
resolver = "2"
members = ["kernel", "repbuild"]
members = ["kernel"]


@@ -15,6 +1,7 @@ xml.git = "https://git.ablecorp.us/ableos/ableos_userland"
versioning.git = "https://git.ablecorp.us/ableos/ableos_userland"
able_graphics_library.git = "https://git.ablecorp.us/ableos/ableos_userland"
hashbrown = "*"
kiam = "0.1.1"
[dependencies.limine]
version = "0.1"

kernel/src/holeybytes.rs (new file, 83 lines)

@@ -0,0 +1,83 @@
use core::{future::Future, task::Poll};
use hbvm::{
mem::{softpaging::icache::ICache, Address},
Vm, VmRunError, VmRunOk,
};
use {
alloc::boxed::Box,
hbvm::mem::softpaging::{HandlePageFault, SoftPagedMem},
};
const TIMER_QUOTIENT: usize = 100;
pub struct ExecThread<'p> {
vm: Vm<SoftPagedMem<'p, PageFaultHandler, true>, TIMER_QUOTIENT>,
}
unsafe impl<'p> Send for ExecThread<'p> {}
impl<'p> ExecThread<'p> {
pub unsafe fn new(program: &'p [u8], entrypoint: Address) -> Self {
ExecThread {
vm: unsafe {
Vm::new(
SoftPagedMem {
root_pt: Box::into_raw(Box::default()),
pf_handler: PageFaultHandler,
program,
icache: ICache::default(),
},
entrypoint,
)
},
}
}
}
impl<'p> Future for ExecThread<'p> {
type Output = Result<(), VmRunError>;
fn poll(
mut self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> Poll<Self::Output> {
match self.vm.run() {
Err(err) => return Poll::Ready(Err(err)),
Ok(VmRunOk::End) => return Poll::Ready(Ok(())),
Ok(VmRunOk::Ecall) => {
log::info!("Ecall");
log::info!("{:?}", self.vm.registers);
}
Ok(VmRunOk::Timer) => (),
}
cx.waker().wake_by_ref();
Poll::Pending
}
}
struct PageFaultHandler;
impl HandlePageFault for PageFaultHandler {
fn page_fault(
&mut self,
reason: hbvm::mem::MemoryAccessReason,
pagetable: &mut hbvm::mem::softpaging::paging::PageTable,
vaddr: hbvm::mem::Address,
size: hbvm::mem::softpaging::PageSize,
dataptr: *mut u8,
) -> bool
where
Self: Sized,
{
log::error!(
"REASON: {reason} \
vaddr: {vaddr} \
size: {size:?} \
Dataptr {dataptr:p}",
);
false
}
}
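
A minimal usage sketch for orientation (not part of this commit): it mirrors the kmain.rs change further down, with `program` standing in for a boot module's byte slice and entry point 4 taken from that change.

// Sketch only: drive one HoleyBytes program to completion on the async executor.
// `program: &'static [u8]` is a placeholder; the bytes must stay valid for the VM's lifetime.
let mut executor = crate::task::Executor::default();
executor.spawn(async move {
    let thread = unsafe { ExecThread::new(program, Address::new(4)) };
    if let Err(e) = thread.await {
        log::error!("{e:?}");
    }
});
executor.run();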


@@ -1,90 +0,0 @@
//! the system interface
// use {
// crate::ipc::message::Message,
// alloc::vec::Vec,
// crossbeam_queue::{ArrayQueue, SegQueue},
// // hbvm::engine::Engine,
// log::trace,
// HostError::MemoryError,
// };
/// Host errors
pub enum HostError {
/// A host memory error
MemoryError,
}
// / Check f0 register for the handle
// / check f1 for the message ptr
// / check f2 for the message length
// pub fn ipc_send(engine: &mut Engine) -> Result<(), HostError> {
// let _handle = engine.registers.f0;
// let message_start = engine.registers.f1;
// let message_length = engine.registers.f2;
// let mut ipc_msg: Vec<u8> = alloc::vec![];
// for x in message_start..message_start + message_length {
// let byte = engine.read_mem_addr_8(x);
// match byte {
// Ok(byte) => ipc_msg.push(byte),
// Err(_) => return Err(MemoryError),
// }
// }
// log::trace!("Message bytes {:?}", ipc_msg);
// Ok(())
// }
// // pub fn ipc_recv(_engine: &mut Engine) {}
// /// Check f0 for the buffer type
// /// 0 means an unbound buffer
// /// 1 means a bound buffer
// /// Check f1 if the buffer is bound
// ///
// /// f2 Return a handle to the sender
// /// f3 returns a handle to the receiver
// pub fn ipc_mkbuf(engine: &mut Engine) {
// match engine.registers.f0 as usize {
// 0 => {
// trace!("Making a new ipc unbound buffer");
// let _buf: SegQueue<Message> = SegQueue::new();
// }
// 1 => {
// let buf_len = engine.registers.f1 as usize;
// trace!("Making a new ipc buffer with capacity {}", buf_len);
// let _buf: ArrayQueue<Message> = ArrayQueue::new(buf_len);
// }
// _ => {}
// }
// }
// // pub fn rpc_discover(_engine: &mut Engine) {}
// // pub fn rpc_register(_engine: &mut Engine) {}
// // pub fn rpc_call(_engine: &mut Engine) {}
use hbvm::mem::softpaging::HandlePageFault;
/// AbleOS HBVM traphandler
pub struct TrapHandler;
impl HandlePageFault for TrapHandler {
fn page_fault(
&mut self,
reason: hbvm::mem::MemoryAccessReason,
pagetable: &mut hbvm::mem::softpaging::paging::PageTable,
vaddr: hbvm::mem::Address,
size: hbvm::mem::softpaging::PageSize,
dataptr: *mut u8,
) -> bool
where
Self: Sized,
{
log::error!(
"REASON: {reason} \
vaddr: {vaddr} \
size: {size:?} \
Dataptr {dataptr:p}",
);
false
}
}


@@ -1,11 +1,14 @@
//! AbleOS Kernel Entrypoint
use hbvm::mem::Address;
use crate::holeybytes::ExecThread;
// use crate::arch::sloop;
use {
crate::{
bootmodules::{build_cmd, BootModules},
device_tree::DeviceTree,
scheduler::Scheduler,
},
alloc::format,
log::{debug, info, trace},
@@ -16,7 +19,7 @@ use {
pub fn kmain(cmdline: &str, boot_modules: BootModules) -> ! {
debug!("Entered kmain");
let kcmd = build_cmd("Kernel Command Line", &cmdline);
let kcmd = build_cmd("Kernel Command Line", cmdline);
trace!("Cmdline: {kcmd:?}");
for (i, bm) in boot_modules.iter().enumerate() {
@@ -39,13 +42,20 @@ pub fn kmain(cmdline: &str, boot_modules: BootModules) -> ! {
// capabilities::example();
let mut sched = Scheduler::new();
let mut executor = crate::task::Executor::default();
unsafe {
for module in boot_modules.into_iter().take(2) {
executor.spawn(async move {
if let Err(e) = ExecThread::new(&module.bytes, Address::new(4)).await {
log::error!("{e:?}");
}
});
}
sched.new_process(boot_modules[0].bytes.clone());
sched.new_process(boot_modules[1].bytes.clone());
executor.run();
};
sched.run();
// spin_loop();
crate::arch::spin_loop()
}
pub static DEVICE_TREE: Lazy<Mutex<DeviceTree>> = Lazy::new(|| {


@@ -12,7 +12,6 @@
ptr_sub_ptr,
custom_test_frameworks
)]
#![deny(clippy::pedantic, missing_docs, warnings)]
#![allow(dead_code)]
#![test_runner(crate::test_runner)]
@@ -21,16 +20,16 @@ extern crate alloc;
mod allocator;
mod arch;
mod bootmodules;
pub mod capabilities;
pub mod device_tree;
pub mod handle;
pub mod host;
pub mod ipc;
mod capabilities;
mod device_tree;
mod handle;
mod holeybytes;
mod ipc;
mod kmain;
mod logger;
mod memory;
mod scheduler;
pub mod utils;
mod task;
mod utils;
use versioning::Version;


@@ -1,76 +0,0 @@
use {
alloc::{collections::VecDeque, rc::Rc, slice, vec::Vec},
hbvm::validate::validate,
};
use {crate::host::TrapHandler, hbvm::Vm};
const TIMER_QUOTIENT: usize = 100;
pub struct Scheduler<'a> {
data: VecDeque<Vm<'a, TrapHandler, TIMER_QUOTIENT>>,
}
// NOTE: This is a very simple scheduler and it sucks and should be replaced with a better one
// Written By Yours Truly: Munir
impl Scheduler<'_> {
pub fn new() -> Self {
Self {
data: VecDeque::new(),
}
}
pub fn new_process(&mut self, program: Vec<u8>) {
let prog = program.clone();
let prog_arc = Rc::new(prog);
let binding = Rc::try_unwrap(prog_arc).ok().unwrap();
#[allow(clippy::redundant_else)]
if let Err(e) = validate(&program.as_slice()) {
log::error!("Program validation error: {e:?}");
} else {
log::info!("valid program");
unsafe {
let slice = slice::from_raw_parts(binding.as_ptr(), binding.len());
let mut vm = Vm::new_unchecked(&*slice, TrapHandler);
vm.memory.insert_test_page();
self.data.push_front(vm);
}
}
}
pub fn run(&mut self) -> ! {
loop {
// If there are no programs to run then sleep.
if self.data.is_empty() {
use crate::arch::spin_loop;
spin_loop();
}
let mut prog = self.data.pop_front().unwrap();
let ret = prog.run();
match ret {
Ok(oki) => match oki {
hbvm::VmRunOk::End => {
log::info!(
"Program ended. {} programs remaining.",
// Add one here because we pop a program
self.data.len() + 1
)
}
hbvm::VmRunOk::Timer => {
log::info!("Timer exhausted. Scheduled program");
self.data.push_back(prog);
}
hbvm::VmRunOk::Ecall => {
// panic!();
log::info!("{:?}", prog.registers);
self.data.push_back(prog);
}
},
Err(_) => {}
}
}
}
}

kernel/src/task.rs (new file, 138 lines)

@@ -0,0 +1,138 @@
#![allow(unused)]
use {
alloc::{boxed::Box, collections::BTreeMap, sync::Arc, task::Wake},
core::{
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
crossbeam_queue::SegQueue,
kiam::when,
slab::Slab,
spin::RwLock,
};
static SPAWN_QUEUE: RwLock<Option<SpawnQueue>> = RwLock::new(None);
pub fn spawn(future: impl Future<Output = ()> + Send + 'static) {
match &*SPAWN_QUEUE.read() {
Some(s) => s.push(Task::new(future)),
None => panic!("no task executor is running"),
}
}
pub fn yield_now() -> impl Future<Output = ()> {
struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.0 {
Poll::Ready(())
} else {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
YieldNow(false)
}
#[derive(Default)]
pub struct Executor {
tasks: Slab<Task>,
queue: TaskQueue,
to_spawn: SpawnQueue,
wakers: BTreeMap<TaskId, Waker>,
}
impl Executor {
pub fn spawn(&mut self, future: impl Future<Output = ()> + Send + 'static) {
self.queue
.push(TaskId(self.tasks.insert(Task::new(future))));
}
pub fn run(&mut self) {
{
let mut global_spawner = SPAWN_QUEUE.write();
if global_spawner.is_some() {
panic!("Task executor is already running");
}
*global_spawner = Some(Arc::clone(&self.to_spawn));
}
loop {
when! {
let Some(id) = self
.to_spawn
.pop()
.map(|t| TaskId(self.tasks.insert(t)))
.or_else(|| self.queue.pop())
=> {
let Some(task) = self.tasks.get_mut(id.0) else {
panic!("Attempted to get task from empty slot: {}", id.0);
};
let mut cx = Context::from_waker(self.wakers.entry(id).or_insert_with(|| {
Waker::from(Arc::new(TaskWaker {
id,
queue: Arc::clone(&self.queue),
}))
}));
match task.poll(&mut cx) {
Poll::Ready(()) => {
self.tasks.remove(id.0);
self.wakers.remove(&id);
}
Poll::Pending => (),
}
},
self.tasks.is_empty() => break,
_ => (),
}
}
*SPAWN_QUEUE.write() = None;
}
}
struct Task {
future: Pin<Box<dyn Future<Output = ()> + Send>>,
}
impl Task {
pub fn new(future: impl Future<Output = ()> + Send + 'static) -> Self {
Self {
future: Box::pin(future),
}
}
fn poll(&mut self, cx: &mut Context) -> Poll<()> {
self.future.as_mut().poll(cx)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct TaskId(usize);
type TaskQueue = Arc<SegQueue<TaskId>>;
type SpawnQueue = Arc<SegQueue<Task>>;
struct TaskWaker {
id: TaskId,
queue: TaskQueue,
}
impl Wake for TaskWaker {
fn wake(self: Arc<Self>) {
self.wake_by_ref();
}
fn wake_by_ref(self: &Arc<Self>) {
self.queue.push(self.id);
}
}
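
A short usage sketch of the executor above (the tasks and log messages are illustrative only, not from this commit):

// Sketch only: two cooperative tasks on the single-threaded executor.
let mut executor = Executor::default();
executor.spawn(async {
    for i in 0..3 {
        log::info!("task A, step {i}");
        yield_now().await; // hand control back so other tasks get polled
    }
});
executor.spawn(async {
    log::info!("task B done");
});
executor.run(); // loops until every spawned task has completed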

Binary file not shown.