this sucks

griffi-gh 2024-09-02 01:31:29 +02:00
parent 570382520c
commit 61b99409ce
6 changed files with 83 additions and 22 deletions

View file

@@ -116,6 +116,9 @@ impl WorldSaveFile {
       header_modified = true;
       self.allocate_sector()
     });
+    if header_modified {
+      self.header.chunk_map.insert(position, sector);
+    }
     let offset = sector as u64 * SECTOR_SIZE as u64;
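The added check records a freshly allocated sector in the header's chunk map; without it, a chunk written to a new sector could never be looked up again. The lookup itself sits outside the diff context, so the following standalone sketch only assumes a get-or-allocate shape around the shown lines (Header, allocate_sector, the tuple key and the SECTOR_SIZE value here are stand-ins for illustration, not the crate's real definitions):

use std::collections::HashMap;

const SECTOR_SIZE: usize = 4096; // illustrative value; the real constant lives in the save-file module

struct Header {
    chunk_map: HashMap<(i32, i32, i32), u32>, // chunk position -> sector index
    sector_count: u32,
}

impl Header {
    fn allocate_sector(&mut self) -> u32 {
        let sector = self.sector_count;
        self.sector_count += 1;
        sector
    }
}

// Get-or-allocate the sector for a chunk and return its byte offset in the file.
fn chunk_offset(header: &mut Header, position: (i32, i32, i32)) -> u64 {
    let mut header_modified = false;
    let sector = match header.chunk_map.get(&position).copied() {
        Some(sector) => sector,
        None => {
            header_modified = true;
            header.allocate_sector()
        }
    };
    // the fix: remember where the chunk ended up, otherwise later loads miss it
    if header_modified {
        header.chunk_map.insert(position, sector);
    }
    sector as u64 * SECTOR_SIZE as u64
}

fn main() {
    let mut header = Header { chunk_map: HashMap::new(), sector_count: 0 };
    let first = chunk_offset(&mut header, (0, 0, 0));
    // the second call must resolve to the same sector instead of allocating a new one
    assert_eq!(first, chunk_offset(&mut header, (0, 0, 0)));
}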

View file

@@ -2,9 +2,12 @@ use glam::IVec3;
 use flume::{Receiver, Sender, TryIter};
 use shipyard::Unique;
 use crate::chunk::BlockData;
 use super::WorldSaveFile;
 
+// Maximum amount of chunks to save in a single batch before checking if there are any pending read requests
+// may be broken, so currently disabled
+const MAX_SAVE_BATCH_SIZE: usize = usize::MAX;
 
 pub enum IOCommand {
   SaveChunk {
     position: IVec3,
@@ -39,6 +42,7 @@ struct IOThreadContext {
   tx: Sender<IOResponse>,
   rx: Receiver<IOCommand>,
   save: WorldSaveFile,
+  save_queue: Vec<(IVec3, BlockData)>,
 }
 
 //TODO: Implement proper error handling (I/O errors are rlly common)
@@ -54,29 +58,77 @@ impl IOThreadContext {
     save: WorldSaveFile,
   ) -> Self {
     // save.load_data().unwrap();
-    Self { tx, rx, save }
+    let save_queue = Vec::new();
+    Self { tx, rx, save, save_queue }
   }
   pub fn run(mut self) {
     loop {
-      match self.rx.recv().unwrap() {
-        IOCommand::SaveChunk { position, data } => {
-          self.save.save_chunk(position, &data).unwrap();
-        }
-        IOCommand::LoadChunk { position } => {
-          let data = self.save.load_chunk(position).unwrap();
-          self.tx.send(IOResponse::ChunkLoaded { position, data }).unwrap();
-        }
-        IOCommand::Kys => {
-          // Process all pending write commands
-          for cmd in self.rx.try_iter() {
-            let IOCommand::SaveChunk { position, data } = cmd else {
-              continue;
-            };
-            self.save.save_chunk(position, &data).unwrap();
-          }
-          self.tx.send(IOResponse::Terminated).unwrap();
-          return;
-        }
-      }
+      // because we're waiting for the next command, we can't process the save_queue
+      // which breaks batching, so we need to check if there are any pending save requests
+      // and if there are, use non-blocking recv to give them a chance to be processed
+      'rx: while let Some(command) = {
+        if self.save_queue.len() > 0 {
+          self.rx.try_recv().ok()
+        } else {
+          self.rx.recv().ok()
+        }
+      } {
+        match command {
+          IOCommand::SaveChunk { position, data } => {
+            // if chunk already has a save request, overwrite it
+            for (pos, old_data) in self.save_queue.iter_mut() {
+              if *pos == position {
+                *old_data = data;
+                continue 'rx;
+              }
+            }
+            // if not, save to the queue
+            self.save_queue.push((position, data));
+            //log::trace!("amt of unsaved chunks: {}", self.save_queue.len());
+          }
+          IOCommand::LoadChunk { position } => {
+            // first check if the chunk is already in the save queue
+            // if it is, send it and continue
+            // (NOT doing this WILL result in data loss if the user returns to the chunk too quickly)
+            for (pos, data) in self.save_queue.iter() {
+              if *pos == position {
+                self.tx.send(IOResponse::ChunkLoaded { position, data: Some(data.clone()) }).unwrap();
+                continue 'rx;
+              }
+            }
+            let data = self.save.load_chunk(position).unwrap();
+            self.tx.send(IOResponse::ChunkLoaded { position, data }).unwrap();
+          }
+          IOCommand::Kys => {
+            // Process all pending write commands
+            log::info!("info: queue has {} chunks", self.save_queue.len());
+            let mut saved_amount = 0;
+            for (pos, data) in self.save_queue.drain(..) {
+              self.save.save_chunk(pos, &data).unwrap();
+              saved_amount += 1;
+            }
+            log::debug!("now, moving on to the rx queue...");
+            for cmd in self.rx.try_iter() {
+              let IOCommand::SaveChunk { position, data } = cmd else {
+                continue;
+              };
+              self.save.save_chunk(position, &data).unwrap();
+              saved_amount += 1;
+            }
+            log::info!("saved {} chunks on exit", saved_amount);
+            self.tx.send(IOResponse::Terminated).unwrap();
+            return;
+          }
+        }
+      }
+      // between every batch of requests, check if there are any pending save requests
+      if self.save_queue.len() > 0 {
+        let will_drain = MAX_SAVE_BATCH_SIZE.min(self.save_queue.len());
+        log::info!("saving {}/{} chunks with batch size {}...", will_drain, self.save_queue.len(), MAX_SAVE_BATCH_SIZE);
+        for (pos, data) in self.save_queue.drain(..will_drain) {
+          self.save.save_chunk(pos, &data).unwrap();
+        }
+      }
     }
   }
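The queue added above gives two guarantees: repeated SaveChunk commands for the same position collapse into one pending write, and a LoadChunk for a position that is still queued is answered from memory instead of the save file, which is what prevents data loss when a chunk is revisited before the queue is flushed. Below is a standalone sketch of just those two rules, not part of the commit; Pos and Block are stand-ins for IVec3 and BlockData, and SaveQueue is a hypothetical name for the Vec the thread keeps internally:

type Pos = (i32, i32, i32); // stand-in for glam::IVec3
type Block = Vec<u8>;       // stand-in for BlockData

struct SaveQueue(Vec<(Pos, Block)>);

impl SaveQueue {
    // mirrors the IOCommand::SaveChunk arm: overwrite an existing pending write, else push
    fn save(&mut self, pos: Pos, data: Block) {
        for (p, old) in self.0.iter_mut() {
            if *p == pos {
                *old = data;
                return;
            }
        }
        self.0.push((pos, data));
    }

    // mirrors the read-back check in the IOCommand::LoadChunk arm
    fn load_pending(&self, pos: Pos) -> Option<Block> {
        self.0.iter().find(|(p, _)| *p == pos).map(|(_, d)| d.clone())
    }
}

fn main() {
    let mut queue = SaveQueue(Vec::new());
    queue.save((0, 0, 0), vec![1]);
    queue.save((0, 0, 0), vec![2]); // a second save for the same chunk overwrites the first
    assert_eq!(queue.0.len(), 1);
    // a load issued before the queue is flushed to disk still sees the newest data
    assert_eq!(queue.load_pending((0, 0, 0)), Some(vec![2]));
}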

View file

@@ -62,10 +62,10 @@ impl ChunkStorage {
   }
 }
 
-#[derive(Unique)]
-pub struct WorldInfo {
-  pub seed: u32,
-}
+// #[derive(Unique)]
+// pub struct WorldInfo {
+//   pub seed: u32,
+// }
 
 #[derive(Default, Unique)]
 pub struct ChunkMeshStorage {

View file

@@ -57,6 +57,7 @@ pub struct Chunk {
   pub desired_state: DesiredChunkState,
   pub abortion: Option<Arc<Atomic<AbortState>>>,
   pub mesh_dirty: bool,
+  pub data_modified: bool,
 }
 
 impl Chunk {
@@ -69,6 +70,7 @@ impl Chunk {
       desired_state: Default::default(),
       abortion: None,
       mesh_dirty: false,
+      data_modified: false,
     }
   }
 }

View file

@@ -282,6 +282,9 @@ fn process_state_changes(
         //TODO IMPORTANT: WAIT FOR CHUNK TO FINISH SAVING FIRST BEFORE TRANSITIONING TO UNLOADED
         // OTHERWISE WE WILL LOSE THE SAVE DATA IF THE USER COMES BACK TO THE CHUNK TOO QUICKLY
         // ==========================================================
+        //XXX: CHECK IF WE REALLY NEED THIS OR IF WE CAN JUST KILL THE CHUNK RIGHT AWAY
+        //CHANGES TO CHUNK SAVING LOGIC SHOULD HAVE MADE THE ABOVE COMMENT OBSOLETE
         if let Some(io) = &io {
           if let Some(block_data) = &chunk.block_data {
             // log::debug!("issue save command");

View file

@@ -27,6 +27,7 @@ pub fn apply_queued_blocks(
     let (chunk_pos, block_pos) = ChunkStorage::to_chunk_coords(event.position);
     let chunk = world.chunks.get_mut(&chunk_pos).expect("This error should never happen, if it does then something is super fucked up and the whole project needs to be burnt down.");
     chunk.mesh_dirty = true;
+    chunk.data_modified = true;
     //If block pos is close to the border, some neighbors may be dirty!
     const DIRECTIONS: [IVec3; 6] = [
       ivec3(1, 0, 0),