Added simple task switcher (x86-64)

Erin 2022-05-20 17:11:32 +02:00 committed by ondra05
parent 6f306b9ece
commit 8be1f2410e
10 changed files with 138 additions and 244 deletions

View file

@@ -1,17 +1,10 @@
 // #![allow(clippy::print_literal)]
 use super::{gdt, interrupts};
-use crate::{
-    logger,
-    scheduler::{capabilities::Capabilities, SCHEDULER},
-    serial_println,
-};
+use crate::{logger, serial_println};

 /// x86_64 initialization
 pub fn init() {
-    use crate::{
-        network::socket::SimpleSock, relib::network::socket::Socket,
-        scheduler::priority::Priority::High, stdio::StdIo,
-    };
+    use crate::{network::socket::SimpleSock, relib::network::socket::Socket};

     let mut log_socket_id = SimpleSock::new();
     log_socket_id.register_protocol("Logger".to_string());
@@ -26,16 +19,6 @@ pub fn init() {
     gdt::init();

-    let mut scheduler = SCHEDULER.lock();
-
-    let process_0 = scheduler.new_process(
-        Capabilities::empty(),
-        High,
-        "".to_string(),
-        StdIo::new("null".to_string()),
-    );
-    scheduler.add_process(process_0);
-    drop(scheduler);
-
     interrupts::init_idt();
     unsafe { interrupts::PICS.lock().initialize() };
     x86_64::instructions::interrupts::enable();

View file

@@ -67,11 +67,31 @@ extern "x86-interrupt" fn double_fault_handler(
     );
 }

+#[naked]
 extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: InterruptStackFrame) {
-    kernel::tick();
+    use super::task_switcher;

     unsafe {
-        PICS.lock()
-            .notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
+        asm!(
+            // Kernel tick
+            "push rax",
+            "call {tick}",
+            "pop rax",
+
+            // Push the task's state onto the stack
+            // and save the stack pointer into the scheduler
+            task_switcher::save_tasks_state!(),
+            "mov rdi, rsp",
+            "call {save}",
+
+            // Switch to the next task (the interrupt will return there)
+            "jmp {switch_to_next}",
+
+            tick = sym kernel::tick,
+            save = sym task_switcher::save_and_enqueue,
+            switch_to_next = sym task_switcher::switch_to_next,
+            options(noreturn),
+        );
     }
 }
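For orientation, the state that save_tasks_state! leaves on the stack (and that switch_to_next later pops) can be pictured as the struct below. This is only an illustrative sketch, not part of the commit; the name SavedTaskState is hypothetical.

// Hypothetical view of the saved task state, lowest address first,
// starting at the stack pointer handed to `save_and_enqueue`.
#[repr(C)]
struct SavedTaskState {
    // General-purpose registers, in the order the restore path pops them.
    rax: u64, rbx: u64, rcx: u64, rdx: u64,
    rsi: u64, rdi: u64,
    r8: u64, r9: u64, r10: u64, r11: u64,
    r12: u64, r13: u64, r14: u64, r15: u64,
    // Copy of the interrupt frame the CPU pushed (5 qwords, 40 bytes).
    rip: u64, cs: u64, rflags: u64, rsp: u64, ss: u64,
    // The original CPU-pushed frame sits directly above this copy, which is
    // why the restore path does `add rsp, 80` before rebuilding the frame
    // that `iretq` consumes.
}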

View file

@@ -4,6 +4,8 @@ pub mod init;
 pub mod interrupts;
 pub mod memory;

+mod task_switcher;
+
 use crate::arch::drivers::allocator;
 use bootloader::{entry_point, BootInfo};
 use x86_64::{instructions::hlt, VirtAddr};

View file

@@ -0,0 +1,83 @@
use crate::scheduler::{Task, SCHEDULER};

/// Saves the task's state onto the stack
macro_rules! save_tasks_state {
    () => {
        "
        // Copy the task's interrupt frame
        push [rsp + 32] // SS
        push [rsp + 32] // RSP
        push [rsp + 32] // RFLAGS
        push [rsp + 32] // CS
        push [rsp + 32] // RIP

        // Save the task's general-purpose registers
        push r15
        push r14
        push r13
        push r12
        push r11
        push r10
        push r9
        push r8
        push rdi
        push rsi
        push rdx
        push rcx
        push rbx
        push rax
        "
    };
}
pub(super) use save_tasks_state;

/// Save the provided stack pointer into the scheduler's queue
pub extern "C" fn save_and_enqueue(sp: u64) {
    SCHEDULER.lock().enqueue_suspended(sp);
}

/// Fetch and load the next task
pub unsafe extern "C" fn switch_to_next() -> ! {
    // Fetch next task
    let next = SCHEDULER.lock().pop().expect("no task in the task queue");

    match next {
        Task::Suspended(sp) => asm!(
            // Restore the task's general-purpose registers
            "mov rsp, {}",
            "pop rax",
            "pop rbx",
            "pop rcx",
            "pop rdx",
            "pop rsi",
            "pop rdi",
            "pop r8",
            "pop r9",
            "pop r10",
            "pop r11",
            "pop r12",
            "pop r13",
            "pop r14",
            "pop r15",

            // Copy the saved frame back up over the original interrupt frame
            "add rsp, 80", // Skip the saved copy and the original frame (2 x 40 bytes)
            "push [rsp - 48]", // SS
            "push [rsp - 48]", // RSP
            "push [rsp - 48]", // RFLAGS
            "push [rsp - 48]", // CS
            "push [rsp - 48]", // RIP

            // Signal end of interrupt to the PIC and return
            "push rax",
            "mov al, 32",
            "out 20h, al",
            "pop rax",
            "iretq",

            in(reg) sp,
            options(noreturn),
        ),
        Task::Spawn(_) => todo!("task spawning"),
    }
}
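The Task::Spawn branch is left as a todo! in this commit. Purely as an illustration of how the restore path above could later be reused for it, the sketch below builds a fresh stack image in the same layout, so that iretq would land in the task's entry function. Everything here is an assumption on top of the commit: the helper name prepare_spawn_stack, the stack size, and the selector values 0x08/0x10 are placeholders, not the kernel's actual GDT layout.

use alloc::{boxed::Box, vec};

/// Hypothetical helper, not part of this commit: builds a stack image that
/// the restore path above could resume from. `entry` must never return.
/// Stack alignment and freeing the leaked stack are ignored for brevity.
unsafe fn prepare_spawn_stack(entry: fn()) -> u64 {
    const QWORDS: usize = 4096; // 32 KiB stack, arbitrary size
    let stack: &'static mut [u64] = Box::leak(vec![0u64; QWORDS].into_boxed_slice());
    let top = stack.len();
    let stack_top = stack.as_mut_ptr().wrapping_add(top) as u64;

    // Highest addresses first: 5 scratch qwords where the restore path
    // re-creates the iretq frame, below them the frame copy it reads from,
    // below that 14 zeroed general-purpose registers.
    let frame = top - 10; // index of the copied RIP
    stack[frame] = entry as usize as u64; // RIP: the task's entry point
    stack[frame + 1] = 0x08;              // CS, assumed kernel code selector
    stack[frame + 2] = 0x202;             // RFLAGS with interrupts enabled
    stack[frame + 3] = stack_top;         // RSP after iretq
    stack[frame + 4] = 0x10;              // SS, assumed kernel data selector

    // The 14 GPR slots at top - 24 .. top - 10 are already zero.
    stack.as_mut_ptr().wrapping_add(top - 24) as u64
}

A function prepared this way could then be queued as Task::Suspended and picked up unchanged by the restore code above.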

View file

@@ -1,7 +1,6 @@
 #![allow(clippy::empty_loop)]

 use crate::arch::drivers::sysinfo::master;
-use crate::scheduler::SCHEDULER;
 use crate::{
     arch::{init, sloop},
     relib::network::socket::{SimpleSock, Socket},
@@ -24,12 +23,6 @@ pub fn kernel_main() -> ! {
     } else {
         log::set_max_level(log::LevelFilter::Off);
     }

-    let scheduler = SCHEDULER.lock();
-    for proc in &scheduler.execution_queue {
-        trace!("{:?}", proc);
-    }
-    drop(scheduler);
-
     // start_facepalm();
     scratchpad();

ableos/src/scheduler.rs (new file, 28 additions)
View file

@@ -0,0 +1,28 @@
use alloc::collections::VecDeque;
use spin::{Lazy, Mutex};

pub static SCHEDULER: Lazy<Mutex<Scheduler>> = Lazy::new(|| Mutex::new(Scheduler::default()));

pub enum Task {
    Suspended(u64),
    Spawn(fn()),
}

#[derive(Default)]
pub struct Scheduler {
    task_queue: VecDeque<Task>,
}

impl Scheduler {
    pub fn enqueue_spawn(&mut self, f: fn()) {
        self.task_queue.push_back(Task::Spawn(f));
    }

    pub fn enqueue_suspended(&mut self, sp: u64) {
        self.task_queue.push_back(Task::Suspended(sp));
    }

    pub fn pop(&mut self) -> Option<Task> {
        self.task_queue.pop_front()
    }
}
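To make the intended flow concrete, here is a small usage sketch; task_a and seed_scheduler are hypothetical names, and only SCHEDULER and its methods come from this commit.

// Illustrative only: seed the queue with work during kernel bring-up.
fn task_a() {
    loop {
        // Do work; a spawned task is expected never to return.
    }
}

fn seed_scheduler() {
    SCHEDULER.lock().enqueue_spawn(task_a);
    // Later, the timer interrupt path calls `save_and_enqueue` (which uses
    // `enqueue_suspended`) on the preempted task and `pop` to pick the next one.
}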

View file

@@ -1,87 +0,0 @@
#![allow(missing_docs)]

pub type SoundCardID = u8;
pub type DeviceID = u8;
pub type ControllerID = u8;

#[derive(Clone, Debug, PartialEq)]
pub enum FileAccess {
    All,
    Some(Vec<u8>),
    None,
}

#[derive(Clone, Debug, PartialEq)]
pub enum ControllerAccess {
    All,
    Some(Vec<ControllerID>),
    None,
}

#[derive(Clone, Debug, PartialEq)]
pub enum SoundCardAccess {
    All,
    Some(Vec<SoundCardID>),
    None,
}

#[derive(Clone, Debug, PartialEq)]
pub enum MouseAccess {
    Yes,
    No,
}

#[derive(Clone, Debug, PartialEq)]
pub enum KeyboardAccess {
    Yes,
    No,
}

#[derive(Clone, Debug, PartialEq)]
pub enum NetworkAccess {
    Yes,
    No,
}

/// A set of capabilities that a process has
#[derive(Clone, Debug, PartialEq)]
pub struct Capabilities {
    // TODO: Add more capabilities
    pub files: FileAccess,
    pub mouse: MouseAccess,
    pub keyboard: KeyboardAccess,
    pub controllers: ControllerAccess,
    pub sound_cards: SoundCardAccess,
    pub network_access: NetworkAccess,
}

impl Capabilities {
    /// Generate a set of empty capabilities
    pub fn empty() -> Self {
        Self {
            files: FileAccess::None,
            mouse: MouseAccess::No,
            keyboard: KeyboardAccess::No,
            controllers: ControllerAccess::None,
            sound_cards: SoundCardAccess::None,
            network_access: NetworkAccess::No,
        }
    }

    /// Generate a set of capabilities that allows all access
    /// to all devices
    ///
    /// # Safety
    /// This is a very dangerous function and should not be used
    /// unless you know what you are doing
    pub unsafe fn all() -> Self {
        Self {
            files: FileAccess::All,
            mouse: MouseAccess::Yes,
            keyboard: KeyboardAccess::Yes,
            controllers: ControllerAccess::All,
            sound_cards: SoundCardAccess::All,
            network_access: NetworkAccess::Yes,
        }
    }
}

View file

@@ -1,94 +0,0 @@
pub mod capabilities;
pub mod priority;
pub mod proc;

use crate::scheduler::capabilities::Capabilities;
use crate::{arch::generate_process_pass, stdio::StdIo};
use kernel::proccess::PID;
use priority::Priority;
use proc::Process;

pub static SCHEDULER: spin::Mutex<Scheduler> = spin::Mutex::new(Scheduler::new());

/// Add additional wake conditions to the list
#[derive(Clone, Debug)]
pub enum WakeCondition {
    /// Wake when the process has been blocked for a certain amount of time
    TimerInterrupt(u64),
    SocketRead(PID),
    SocketWrite(PID),
    SocketOpen(PID),
    SocketClose(PID),
    // HardwareEvent,
}

// NOTE: Define what is a sleeping process in the context of the ableOS kernel.
// Blocked processes are processes that are waiting for a certain event to occur.
#[derive(Clone, Debug)]
pub struct BlockedProcess {
    pub pid: PID,
    pub wake_condition: WakeCondition,
}

pub struct Scheduler {
    pub free_pid: PID,
    pub process_exec_time: u64,
    pub execution_queue: Vec<Process>,
    pub sleeping_queue: Vec<BlockedProcess>,
    pub blocked_queue: Vec<BlockedProcess>,
    // All timed processes sorted by wake time
}

impl Scheduler {
    /// Create a new scheduler
    pub const fn new() -> Self {
        Self {
            free_pid: 0,
            process_exec_time: 0,
            execution_queue: Vec::new(),
            sleeping_queue: Vec::new(),
            blocked_queue: Vec::new(),
        }
    }

    /// Change the current process to the next process in the list
    pub fn next_process(&mut self) {
        self.process_exec_time = 0;
        let previous_task = self.execution_queue[0].clone();
        self.execution_queue.remove(0);
        self.execution_queue.push(previous_task);
    }

    pub fn add_process(&mut self, mut process: Process) {
        process.pid = self.free_pid;
        self.free_pid += 1;
        self.execution_queue.push(process);
    }

    pub fn new_process(
        &mut self,
        capabilities: Capabilities,
        priority: Priority,
        working_dir: String,
        stdio: StdIo,
    ) -> Process {
        Process {
            pid: 0,
            priority,
            working_dir,
            stdio,
            password: generate_process_pass(),
            capabilities,
        }
    }

    pub fn sleep_process(&mut self, process: &mut Process) {
        let sleeping_process = BlockedProcess {
            pid: process.pid,
            wake_condition: WakeCondition::TimerInterrupt(0),
        };

        self.sleeping_queue.push(sleeping_process);
        self.execution_queue.remove(0);
    }
}

View file

@@ -1,10 +0,0 @@
/// Scheduler priority model
#[derive(Clone, Copy, Debug)]
pub enum Priority {
    /// Exclusively Kernel space | 20 Timer Tick execution time
    High,
    /// Kernel / User space | 15 Timer Tick execution time
    Medium,
    /// Low priority userspace code | 10 Timer Tick execution time
    Low,
}

View file

@@ -1,24 +0,0 @@
//! Process definition and general utilities surrounding them

use super::capabilities::Capabilities;
use super::priority::Priority;
use crate::stdio::StdIo;
use kernel::proccess::PID;

/// A process
#[derive(Clone, Debug)]
pub struct Process {
    /// Internal PID
    pub pid: PID,
    /// Process password
    pub password: u128,
    /// A process's capabilities
    pub capabilities: Capabilities,
    /// A process's priority
    pub priority: Priority,
    /// A process's current working directory
    pub working_dir: String,
    pub stdio: StdIo,
}