From 61b99409cef63ecccb831bcae02629a719080ee8 Mon Sep 17 00:00:00 2001
From: griffi-gh
Date: Mon, 2 Sep 2024 01:31:29 +0200
Subject: [PATCH] this sucks

---
 kubi-shared/src/data.rs           |  3 ++
 kubi-shared/src/data/io_thread.rs | 88 ++++++++++++++++++++++++-------
 kubi/src/world.rs                 |  8 +--
 kubi/src/world/chunk.rs           |  2 +
 kubi/src/world/loading.rs         |  3 ++
 kubi/src/world/queue.rs           |  1 +
 6 files changed, 83 insertions(+), 22 deletions(-)

diff --git a/kubi-shared/src/data.rs b/kubi-shared/src/data.rs
index da113fe..3b66f8d 100644
--- a/kubi-shared/src/data.rs
+++ b/kubi-shared/src/data.rs
@@ -116,6 +116,9 @@ impl WorldSaveFile {
       header_modified = true;
       self.allocate_sector()
     });
+    if header_modified {
+      self.header.chunk_map.insert(position, sector);
+    }
 
     let offset = sector as u64 * SECTOR_SIZE as u64;

diff --git a/kubi-shared/src/data/io_thread.rs b/kubi-shared/src/data/io_thread.rs
index 931e531..72b3da1 100644
--- a/kubi-shared/src/data/io_thread.rs
+++ b/kubi-shared/src/data/io_thread.rs
@@ -2,9 +2,12 @@ use glam::IVec3;
 use flume::{Receiver, Sender, TryIter};
 use shipyard::Unique;
 use crate::chunk::BlockData;
-
 use super::WorldSaveFile;
 
+// Maximum number of chunks to save in a single batch before checking if there are any pending read requests
+// may be broken, so currently disabled
+const MAX_SAVE_BATCH_SIZE: usize = usize::MAX;
+
 pub enum IOCommand {
   SaveChunk {
     position: IVec3,
@@ -39,6 +42,7 @@ struct IOThreadContext {
   tx: Sender<IOResponse>,
   rx: Receiver<IOCommand>,
   save: WorldSaveFile,
+  save_queue: Vec<(IVec3, BlockData)>,
 }
 
 //TODO: Implement proper error handling (I/O errors are rlly common)
@@ -54,29 +58,77 @@ impl IOThreadContext {
     save: WorldSaveFile,
   ) -> Self {
     // save.load_data().unwrap();
-    Self { tx, rx, save }
+    let save_queue = Vec::new();
+    Self { tx, rx, save, save_queue }
   }
 
   pub fn run(mut self) {
     loop {
-      match self.rx.recv().unwrap() {
-        IOCommand::SaveChunk { position, data } => {
-          self.save.save_chunk(position, &data).unwrap();
-        }
-        IOCommand::LoadChunk { position } => {
-          let data = self.save.load_chunk(position).unwrap();
-          self.tx.send(IOResponse::ChunkLoaded { position, data }).unwrap();
-        }
-        IOCommand::Kys => {
-          // Process all pending write commands
-          for cmd in self.rx.try_iter() {
-            let IOCommand::SaveChunk { position, data } = cmd else {
-              continue;
-            };
-            self.save.save_chunk(position, &data).unwrap();
-          }
-          self.tx.send(IOResponse::Terminated).unwrap();
-          return;
-        }
-      }
+      // because we're waiting for the next command, we can't process the save_queue,
+      // which breaks batching, so we need to check if there are any pending save requests
+      // and if there are, use a non-blocking recv to give them a chance to be processed
+      'rx: while let Some(command) = {
+        if self.save_queue.len() > 0 {
+          self.rx.try_recv().ok()
+        } else {
+          self.rx.recv().ok()
+        }
+      } {
+        match command {
+          IOCommand::SaveChunk { position, data } => {
+            // if the chunk already has a pending save request, overwrite it
+            for (pos, old_data) in self.save_queue.iter_mut() {
+              if *pos == position {
+                *old_data = data;
+                continue 'rx;
+              }
+            }
+            // if not, push it onto the queue
+            self.save_queue.push((position, data));
+            //log::trace!("amt of unsaved chunks: {}", self.save_queue.len());
+          }
+          IOCommand::LoadChunk { position } => {
+            // HOLD ON: first check if the chunk is already in the save queue;
+            // if it is, send that copy and continue
+            // (NOT doing this WILL result in data loss if the user returns to the chunk too quickly)
+            for (pos, data) in self.save_queue.iter() {
+              if *pos == position {
+                self.tx.send(IOResponse::ChunkLoaded { position, data: Some(data.clone()) }).unwrap();
+                continue 'rx;
+              }
+            }
+            let data = self.save.load_chunk(position).unwrap();
+            self.tx.send(IOResponse::ChunkLoaded { position, data }).unwrap();
+          }
+          IOCommand::Kys => {
+            // Process all pending write commands
+            log::info!("exit requested, save queue has {} chunks", self.save_queue.len());
+            let mut saved_amount = 0;
+            for (pos, data) in self.save_queue.drain(..) {
+              self.save.save_chunk(pos, &data).unwrap();
+              saved_amount += 1;
+            }
+            log::debug!("now, moving on to the rx queue...");
+            for cmd in self.rx.try_iter() {
+              let IOCommand::SaveChunk { position, data } = cmd else {
+                continue;
+              };
+              self.save.save_chunk(position, &data).unwrap();
+              saved_amount += 1;
+            }
+            log::info!("saved {} chunks on exit", saved_amount);
+            self.tx.send(IOResponse::Terminated).unwrap();
+            return;
+          }
+        }
+      }
+
+      // between every batch of requests, check if there are any pending save requests
+      if self.save_queue.len() > 0 {
+        let will_drain = MAX_SAVE_BATCH_SIZE.min(self.save_queue.len());
+        log::info!("saving {}/{} chunks with batch size {}...", will_drain, self.save_queue.len(), MAX_SAVE_BATCH_SIZE);
+        for (pos, data) in self.save_queue.drain(..will_drain) {
+          self.save.save_chunk(pos, &data).unwrap();
+        }
+      }
     }

diff --git a/kubi/src/world.rs b/kubi/src/world.rs
index 1626d17..fe1f8ff 100644
--- a/kubi/src/world.rs
+++ b/kubi/src/world.rs
@@ -62,10 +62,10 @@ impl ChunkStorage {
   }
 }
 
-#[derive(Unique)]
-pub struct WorldInfo {
-  pub seed: u32,
-}
+// #[derive(Unique)]
+// pub struct WorldInfo {
+//   pub seed: u32,
+// }
 
 #[derive(Default, Unique)]
 pub struct ChunkMeshStorage {

diff --git a/kubi/src/world/chunk.rs b/kubi/src/world/chunk.rs
index 27c52a9..f728f28 100644
--- a/kubi/src/world/chunk.rs
+++ b/kubi/src/world/chunk.rs
@@ -57,6 +57,7 @@ pub struct Chunk {
   pub desired_state: DesiredChunkState,
   pub abortion: Option<Arc<Atomic<AbortState>>>,
   pub mesh_dirty: bool,
+  pub data_modified: bool,
 }
 
 impl Chunk {
@@ -69,6 +70,7 @@
       desired_state: Default::default(),
       abortion: None,
       mesh_dirty: false,
+      data_modified: false,
     }
   }
 }

diff --git a/kubi/src/world/loading.rs b/kubi/src/world/loading.rs
index 4326e26..2ed2612 100644
--- a/kubi/src/world/loading.rs
+++ b/kubi/src/world/loading.rs
@@ -282,6 +282,9 @@ fn process_state_changes(
           //TODO IMPORTANT: WAIT FOR CHUNK TO FINISH SAVING FIRST BEFORE TRANSITIONING TO UNLOADED
           // OTHERWISE WE WILL LOSE THE SAVE DATA IF THE USER COMES BACK TO THE CHUNK TOO QUICKLY
           // ==========================================================
+          //XXX: CHECK IF WE REALLY NEED THIS OR IF WE CAN JUST KILL THE CHUNK RIGHT AWAY
+          //CHANGES TO CHUNK SAVING LOGIC SHOULD HAVE MADE THE ABOVE COMMENT OBSOLETE
+
           if let Some(io) = &io {
             if let Some(block_data) = &chunk.block_data {
               // log::debug!("issue save command");

diff --git a/kubi/src/world/queue.rs b/kubi/src/world/queue.rs
index 76d6b02..8f46335 100644
--- a/kubi/src/world/queue.rs
+++ b/kubi/src/world/queue.rs
@@ -27,6 +27,7 @@ pub fn apply_queued_blocks(
       let (chunk_pos, block_pos) = ChunkStorage::to_chunk_coords(event.position);
       let chunk = world.chunks.get_mut(&chunk_pos).expect("This error should never happen, if it does then something is super fucked up and the whole project needs to be burnt down.");
       chunk.mesh_dirty = true;
+      chunk.data_modified = true;
       //If block pos is close to the border, some neighbors may be dirty!
       const DIRECTIONS: [IVec3; 6] = [
         ivec3(1, 0, 0),
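
-- 
Notes (not part of the patch):

The data.rs hunk fixes missing bookkeeping: when a chunk was saved for the
first time, a fresh sector was allocated but its index was never written back
into the header's chunk_map, so the mapping was lost and every later save of
that chunk allocated yet another sector. A minimal sketch of the corrected
lookup-or-allocate flow; SaveFile, ChunkPos, chunk_offset, sector_count and
the SECTOR_SIZE value are simplified stand-ins for illustration, not
WorldSaveFile's real layout:

use std::collections::HashMap;

const SECTOR_SIZE: usize = 4096; // illustrative value

type ChunkPos = (i32, i32, i32);

struct SaveFile {
  chunk_map: HashMap<ChunkPos, u32>, // chunk position -> sector index
  sector_count: u32,
}

impl SaveFile {
  // Look up the sector for `position`, allocating a fresh one on first save.
  fn chunk_offset(&mut self, position: ChunkPos) -> u64 {
    let sector = match self.chunk_map.get(&position) {
      Some(&s) => s,
      None => {
        let s = self.sector_count; // allocate_sector() stand-in
        self.sector_count += 1;
        self.chunk_map.insert(position, s); // the previously missing insert
        s
      }
    };
    sector as u64 * SECTOR_SIZE as u64
  }
}

fn main() {
  let mut save = SaveFile { chunk_map: HashMap::new(), sector_count: 0 };
  let a = save.chunk_offset((0, 0, 0));
  let b = save.chunk_offset((0, 0, 0));
  assert_eq!(a, b); // without the insert, the second call would allocate a new sector
}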
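The io_thread.rs hunk turns the IO thread's loop into a write-coalescing queue
with read-through: SaveChunk requests are queued and deduplicated per position,
LoadChunk requests are answered from the unflushed queue before touching the
save file (avoiding the data loss the comments warn about), and the queue is
flushed in batches whenever the channel has no pending commands (with
MAX_SAVE_BATCH_SIZE at usize::MAX the batch limit is effectively disabled).
A self-contained sketch of the same pattern, using std::sync::mpsc in place of
flume and an in-memory HashMap in place of WorldSaveFile; Command, Response,
io_thread and the other names are illustrative, not kubi's actual API:

use std::collections::HashMap;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

type Pos = (i32, i32, i32); // stand-in for glam::IVec3
type Data = Vec<u8>;        // stand-in for BlockData

enum Command {
  Save { pos: Pos, data: Data },
  Load { pos: Pos },
  Exit,
}

enum Response {
  Loaded { pos: Pos, data: Option<Data> },
  Terminated,
}

fn io_thread(rx: Receiver<Command>, tx: Sender<Response>) {
  let mut disk: HashMap<Pos, Data> = HashMap::new(); // stand-in for WorldSaveFile
  let mut save_queue: Vec<(Pos, Data)> = Vec::new();

  loop {
    // Pull commands; block only while there is nothing queued to flush.
    'rx: loop {
      let command = if save_queue.is_empty() {
        match rx.recv() {
          Ok(cmd) => cmd,
          Err(_) => return, // all senders gone, nothing left to flush
        }
      } else {
        match rx.try_recv() {
          Ok(cmd) => cmd,
          Err(_) => break 'rx, // channel idle: go flush the queue
        }
      };
      match command {
        Command::Save { pos, data } => {
          // Coalesce: a newer save for the same chunk replaces the queued one.
          if let Some(entry) = save_queue.iter_mut().find(|(p, _)| *p == pos) {
            entry.1 = data;
          } else {
            save_queue.push((pos, data));
          }
        }
        Command::Load { pos } => {
          // Serve reads from the unflushed queue first; going straight to
          // "disk" here could return stale data for a recently saved chunk.
          let data = save_queue
            .iter()
            .find(|(p, _)| *p == pos)
            .map(|(_, d)| d.clone())
            .or_else(|| disk.get(&pos).cloned());
          tx.send(Response::Loaded { pos, data }).unwrap();
        }
        Command::Exit => {
          // Flush everything before terminating.
          for (pos, data) in save_queue.drain(..) {
            disk.insert(pos, data);
          }
          tx.send(Response::Terminated).unwrap();
          return;
        }
      }
    }
    // The channel went idle: flush queued saves as one batch.
    for (pos, data) in save_queue.drain(..) {
      disk.insert(pos, data);
    }
  }
}

fn main() {
  let (cmd_tx, cmd_rx) = channel();
  let (res_tx, res_rx) = channel();
  let io = thread::spawn(move || io_thread(cmd_rx, res_tx));

  cmd_tx.send(Command::Save { pos: (0, 0, 0), data: vec![1] }).unwrap();
  cmd_tx.send(Command::Save { pos: (0, 0, 0), data: vec![2] }).unwrap();
  cmd_tx.send(Command::Load { pos: (0, 0, 0) }).unwrap();
  match res_rx.recv().unwrap() {
    Response::Loaded { data, .. } => assert_eq!(data, Some(vec![2])),
    Response::Terminated => unreachable!(),
  }
  cmd_tx.send(Command::Exit).unwrap();
  io.join().unwrap();
}

The assertion holds however the commands happen to land in batches: a load
always consults the queue first and the "disk" second, so the latest write
wins even when it has not been flushed yet.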