diff --git a/kubi-server/src/world.rs b/kubi-server/src/world.rs index 0ae5592..4b34562 100644 --- a/kubi-server/src/world.rs +++ b/kubi-server/src/world.rs @@ -64,7 +64,7 @@ pub fn send_chunk_compressed( let ser_message = ser_message.into_boxed_slice(); client.borrow_mut().send( ser_message, - Channel::World as usize, + Channel::WorldData as usize, SendMode::Reliable ); Ok(()) diff --git a/kubi-server/src/world/tasks.rs b/kubi-server/src/world/tasks.rs index 4e45c92..40aa615 100644 --- a/kubi-server/src/world/tasks.rs +++ b/kubi-server/src/world/tasks.rs @@ -41,9 +41,8 @@ impl ChunkTaskManager { self.pool.spawn(move || { sender.send(match task { ChunkTask::LoadChunk { position: chunk_position, seed } => { - let Some((blocks, queue)) = generate_world(chunk_position, seed, None) else { - return - }; + //unwrap is fine because abort is not possible + let (blocks, queue) = generate_world(chunk_position, seed, None).unwrap(); ChunkTaskResponse::ChunkLoaded { chunk_position, blocks, queue } } }).unwrap() diff --git a/kubi-shared/src/networking/channels.rs b/kubi-shared/src/networking/channels.rs index c8f1f53..241d7e0 100644 --- a/kubi-shared/src/networking/channels.rs +++ b/kubi-shared/src/networking/channels.rs @@ -1,9 +1,17 @@ -#[repr(u8)] -pub enum Channel { - Generic = 0, - Auth = 1, - World = 2, - Block = 3, - Move = 4, - SysEvt = 5, -} +#[repr(u8)] +pub enum Channel { + #[deprecated] + Generic = 0, + /// Used during the initial handshake process + Auth = 1, + /// Used for sending chunk data from server to client + WorldData = 2, + /// Used for sending/receiving block place events + Block = 3, + /// Used for sending/receiving player movements + Move = 4, + /// Used for system events, like players joining or leaving + SysEvt = 5, + /// Used for subscribing and unsubscribing from chunks + SubReq = 6, +} diff --git a/kubi/src/world/loading.rs b/kubi/src/world/loading.rs index 5798408..ea29999 100644 --- a/kubi/src/world/loading.rs +++ b/kubi/src/world/loading.rs @@ 
-2,7 +2,7 @@ use std::sync::Arc; use atomic::{Atomic, Ordering}; use glam::{IVec3, ivec3}; use glium::{VertexBuffer, IndexBuffer, index::PrimitiveType}; -use kubi_shared::{networking::messages::ClientToServerMessage, worldgen::AbortState}; +use kubi_shared::{networking::{channels::Channel, messages::ClientToServerMessage}, worldgen::AbortState}; use shipyard::{View, UniqueView, UniqueViewMut, IntoIter, Workload, IntoWorkload, NonSendSync, track}; use uflow::SendMode; use crate::{ @@ -26,8 +26,7 @@ const MAX_CHUNK_OPS: usize = 32; pub fn update_loaded_world_around_player() -> Workload { ( update_chunks_if_player_moved, - unload_downgrade_chunks, - start_required_tasks, + process_state_changes, process_completed_tasks, ).into_sequential_workload() } @@ -91,64 +90,86 @@ pub fn update_chunks_if_player_moved( } } -fn unload_downgrade_chunks( - mut vm_world: UniqueViewMut, - mut vm_meshes: NonSendSync> -) { - if !vm_world.is_modified() { - return - } - //TODO refactor this - //TODO unsubscibe if in multiplayer - vm_world.chunks.retain(|_, chunk| { - if chunk.desired_state == DesiredChunkState::Unloaded { - if let Some(mesh_index) = chunk.mesh_index { - vm_meshes.remove(mesh_index).unwrap(); - } - if let Some(abortion) = &chunk.abortion { - let _ = abortion.compare_exchange( - AbortState::Continue, AbortState::Abort, - Ordering::Relaxed, Ordering::Relaxed - ); - } - false - } else { - match chunk.desired_state { - DesiredChunkState::Nothing if matches!(chunk.current_state, CurrentChunkState::Loading) => { - if let Some(abortion) = &chunk.abortion { - let _ = abortion.compare_exchange( - AbortState::Continue, AbortState::Abort, - Ordering::Relaxed, Ordering::Relaxed - ); - } - }, - DesiredChunkState::Loaded if matches!(chunk.current_state, CurrentChunkState::Rendered | CurrentChunkState::CalculatingMesh | CurrentChunkState::RecalculatingMesh) => { - if let Some(mesh_index) = chunk.mesh_index { - vm_meshes.remove(mesh_index).unwrap(); - } - chunk.mesh_index = None; - 
chunk.current_state = CurrentChunkState::Loaded; - }, - _ => (), - } - true - } - }) -} - -fn start_required_tasks( +fn process_state_changes( task_manager: UniqueView, mut udp_client: Option>, mut world: UniqueViewMut, + mut vm_meshes: NonSendSync>, ) { if !world.is_modified() { return } + //HACK: cant iterate over chunks.keys() or chunk directly! let hashmap_keys: Vec = world.chunks.keys().copied().collect(); for position in hashmap_keys { - let chunk = world.chunks.get(&position).unwrap(); + let chunk = world.chunks.get_mut(&position).unwrap(); + + //If the chunk is being unloaded, it's essentially dead at this point and we shouldn't bother it + if chunk.current_state == CurrentChunkState::Unloading { + continue + } + // If the chunk is already in the desired state, skip it + if chunk.desired_state.matches_current(chunk.current_state) { + continue + } match chunk.desired_state { + // DesiredChunkState::Unloaded | DesiredChunkState::Nothing: + // Loading -> Nothing + DesiredChunkState::Unloaded | DesiredChunkState::Nothing if chunk.current_state == CurrentChunkState::Loading => { + if let Some(abortion) = &chunk.abortion { + let _ = abortion.compare_exchange( + AbortState::Continue, AbortState::Abort, + Ordering::Relaxed, Ordering::Relaxed + ); + } + chunk.abortion = None; + chunk.current_state = CurrentChunkState::Nothing; + }, + + // DesiredChunkState::Unloaded | DesiredChunkState::Nothing: + // (Loaded, CalculatingMesh) -> Nothing + DesiredChunkState::Unloaded | DesiredChunkState::Nothing if matches!( + chunk.current_state, + CurrentChunkState::Loaded | CurrentChunkState::CalculatingMesh, + ) => { + chunk.block_data = None; + chunk.current_state = CurrentChunkState::Nothing; + }, + + // DesiredChunkState::Unloaded | DesiredChunkState::Nothing: + // (Rendered | RecalculatingMesh) -> Nothing + DesiredChunkState::Unloaded | DesiredChunkState::Nothing if matches!( + chunk.current_state, + CurrentChunkState::Rendered | CurrentChunkState::RecalculatingMesh, + ) => { 
+ if let Some(mesh_index) = chunk.mesh_index { + vm_meshes.remove(mesh_index).unwrap(); + } + chunk.mesh_index = None; + chunk.current_state = CurrentChunkState::Nothing; + }, + + // DesiredChunkState::Loaded: + // CalculatingMesh -> Loaded + DesiredChunkState::Loaded if chunk.current_state == CurrentChunkState::CalculatingMesh => { + chunk.current_state = CurrentChunkState::Loaded; + }, + + // DesiredChunkState::Unloaded | DesiredChunkState::Nothing | DesiredChunkState::Loaded: + // (Rendered | RecalculatingMesh) -> Loaded + DesiredChunkState::Unloaded | DesiredChunkState::Nothing | DesiredChunkState::Loaded if matches!( + chunk.current_state, CurrentChunkState::Rendered | CurrentChunkState::RecalculatingMesh + ) => { + if let Some(mesh_index) = chunk.mesh_index { + vm_meshes.remove(mesh_index).unwrap(); + } + chunk.mesh_index = None; + chunk.current_state = CurrentChunkState::Loaded; + }, + + // DesiredChunkState::Loaded | DesiredChunkState::Rendered: + // Nothing -> Loading DesiredChunkState::Loaded | DesiredChunkState::Rendered if chunk.current_state == CurrentChunkState::Nothing => { let mut abortion = None; //start load task @@ -157,7 +178,7 @@ fn start_required_tasks( postcard::to_allocvec(&ClientToServerMessage::ChunkSubRequest { chunk: position, }).unwrap().into_boxed_slice(), - 0, + Channel::SubReq as usize, SendMode::Reliable ); } else { @@ -176,6 +197,10 @@ fn start_required_tasks( // =========== //log::trace!("Started loading chunk {position}"); }, + + // DesiredChunkState::Rendered: + // Loaded -> CalculatingMesh + // Rendered (dirty) -> RecalculatingMesh DesiredChunkState::Rendered if (chunk.current_state == CurrentChunkState::Loaded || chunk.mesh_dirty) => { //get needed data let Some(neighbors) = world.neighbors_all(position) else { @@ -198,9 +223,41 @@ fn start_required_tasks( // =========== //log::trace!("Started generating mesh for chunk {position}"); } - _ => () + + _ => {}, //panic!("Illegal state transition: {:?} -> {:?}", 
chunk.current_state, chunk.desired_state), } } + + //Now, separately process the state change from Nothing to Unloading or Unloaded + world.chunks.retain(|&position, chunk: &mut Chunk| { + if chunk.desired_state == DesiredChunkState::Unloaded { + assert!(chunk.current_state == CurrentChunkState::Nothing, "looks like chunk did not get properly downgraded to Nothing before unloading, this is a bug"); + + chunk.current_state = CurrentChunkState::Unloading; + + //If in multiplayer, send a message to the server to unsubscribe from the chunk + if let Some(client) = &mut udp_client { + client.0.send( + postcard::to_allocvec( + &ClientToServerMessage::ChunkUnsubscribe { chunk: position } + ).unwrap().into_boxed_slice(), + Channel::SubReq as usize, + SendMode::Reliable + ); + // and i think that's it, just kill the chunk right away, the server will take care of the rest + // + // because uflow's reliable packets are ordered, there should be no need to wait for the server to confirm the unsubscription + // because client won't be able to subscribe to it again until the server finishes processing the unsubscription + // :ferrisClueless: + return false + } + + //HACK, since save files are not implemented, just unload immediately + return false + } + true + }); + } fn process_completed_tasks( @@ -215,6 +272,10 @@ fn process_completed_tasks( while let Some(res) = task_manager.receive() { match res { ChunkTaskResponse::LoadedChunk { position, chunk_data, mut queued } => { + //If unwanted chunk is already loaded + //It would be ~~...unethical~~ impossible to abort the operation at this point + //Instead, we'll just throw it away + //check if chunk exists let Some(chunk) = world.chunks.get_mut(&position) else { //to compensate, actually push the ops counter back by one @@ -222,6 +283,8 @@ fn process_completed_tasks( continue }; + chunk.abortion = None; + //check if chunk still wants it if !matches!(chunk.desired_state, DesiredChunkState::Loaded | DesiredChunkState::Rendered)
{ log::warn!("block data discarded: state undesirable: {:?}", chunk.desired_state);