Mirror of https://github.com/griffi-gh/kubi.git (synced 2024-11-16 12:28:42 -06:00)
refactor state transitions
Commit 49753ecc4c (parent 94fa5268fa)

@@ -64,7 +64,7 @@ pub fn send_chunk_compressed(
   let ser_message = ser_message.into_boxed_slice();
   client.borrow_mut().send(
     ser_message,
-    Channel::World as usize,
+    Channel::WorldData as usize,
     SendMode::Reliable
   );
   Ok(())

@@ -41,9 +41,8 @@ impl ChunkTaskManager {
     self.pool.spawn(move || {
       sender.send(match task {
         ChunkTask::LoadChunk { position: chunk_position, seed } => {
-          let Some((blocks, queue)) = generate_world(chunk_position, seed, None) else {
-            return
-          };
+          //unwrap is fine because abort is not possible
+          let (blocks, queue) = generate_world(chunk_position, seed, None).unwrap();
           ChunkTaskResponse::ChunkLoaded { chunk_position, blocks, queue }
         }
       }).unwrap()

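A note on the unwrap above: generate_world seems to return an Option only so that generation can be aborted through the optional AbortState handle, and here None is passed for that handle, so no abort can ever be requested. A minimal standalone sketch of that assumed contract (the real signature lives in kubi_shared::worldgen and is not shown in this diff; all types below are placeholders):

// Sketch only: placeholder types; the real generate_world may differ.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

type BlockData = Vec<u8>;      // placeholder
type QueuedBlock = (i32, i32); // placeholder

fn generate_world(
  _position: (i32, i32, i32),
  _seed: u64,
  abort: Option<Arc<AtomicBool>>, // stand-in for the AbortState handle
) -> Option<(BlockData, Vec<QueuedBlock>)> {
  if let Some(flag) = &abort {
    if flag.load(Ordering::Relaxed) {
      return None // the only way to get None: an abort was requested
    }
  }
  Some((vec![0; 32 * 32 * 32], vec![]))
}

fn main() {
  // With no abort handle, None is unreachable, so unwrap cannot panic.
  let (_blocks, _queue) = generate_world((0, 0, 0), 0xB0B, None).unwrap();
}
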
@@ -1,9 +1,17 @@
 #[repr(u8)]
 pub enum Channel {
+  #[deprecated]
   Generic = 0,
+  /// Used during the initial handshake process
   Auth = 1,
-  World = 2,
+  /// Used for sending chunk data from server to client
+  WorldData = 2,
+  /// Used for sending/receiving block place events
   Block = 3,
+  /// Used for sending/receiving player movements
   Move = 4,
+  /// Used for system events, like players joining or leaving
   SysEvt = 5,
+  /// Used for subscribing and unsubscribing from chunks
+  SubReq = 6,
 }

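For context on how these channel ids get used: uflow addresses channels by usize, so call sites cast the enum, as in the send calls in the other hunks of this commit. A tiny self-contained illustration (enum body copied from the new version above):

#[repr(u8)]
#[derive(Clone, Copy)]
pub enum Channel {
  Generic = 0,
  Auth = 1,
  WorldData = 2,
  Block = 3,
  Move = 4,
  SysEvt = 5,
  SubReq = 6,
}

fn main() {
  // Call sites look like: client.send(data, Channel::SubReq as usize, SendMode::Reliable)
  assert_eq!(Channel::WorldData as usize, 2);
  assert_eq!(Channel::SubReq as usize, 6);
}
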
@@ -2,7 +2,7 @@ use std::sync::Arc;
 use atomic::{Atomic, Ordering};
 use glam::{IVec3, ivec3};
 use glium::{VertexBuffer, IndexBuffer, index::PrimitiveType};
-use kubi_shared::{networking::messages::ClientToServerMessage, worldgen::AbortState};
+use kubi_shared::{networking::{channels::Channel, messages::ClientToServerMessage}, worldgen::AbortState};
 use shipyard::{View, UniqueView, UniqueViewMut, IntoIter, Workload, IntoWorkload, NonSendSync, track};
 use uflow::SendMode;
 use crate::{

@@ -26,8 +26,7 @@ const MAX_CHUNK_OPS: usize = 32;
 pub fn update_loaded_world_around_player() -> Workload {
   (
     update_chunks_if_player_moved,
-    unload_downgrade_chunks,
-    start_required_tasks,
+    process_state_changes,
     process_completed_tasks,
   ).into_sequential_workload()
 }

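This hunk folds unload_downgrade_chunks and start_required_tasks into a single process_state_changes system. What into_sequential_workload provides, shown here as a plain-Rust stand-in rather than the shipyard API, is a fixed per-tick ordering, so state transitions are applied before completed tasks are consumed:

// Plain-Rust stand-in for a sequential workload; shipyard wires this up
// through system borrows instead of a list of function pointers.
fn main() {
  let systems: [(&str, fn(&mut Vec<&'static str>)); 3] = [
    ("update_chunks_if_player_moved", |log| log.push("desired states set")),
    ("process_state_changes", |log| log.push("transitions applied, tasks started")),
    ("process_completed_tasks", |log| log.push("finished tasks consumed")),
  ];
  let mut log = Vec::new();
  for (_name, run) in systems {
    run(&mut log); // strictly in order, one after another
  }
  assert_eq!(log.len(), 3);
}
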
@@ -91,64 +90,86 @@ pub fn update_chunks_if_player_moved(
     }
   }
 }
 
-fn unload_downgrade_chunks(
-  mut vm_world: UniqueViewMut<ChunkStorage>,
-  mut vm_meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>
+fn process_state_changes(
+  task_manager: UniqueView<ChunkTaskManager>,
+  mut udp_client: Option<UniqueViewMut<UdpClient>>,
+  mut world: UniqueViewMut<ChunkStorage>,
+  mut vm_meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>,
 ) {
-  if !vm_world.is_modified() {
+  if !world.is_modified() {
     return
   }
-  //TODO refactor this
-  //TODO unsubscibe if in multiplayer
-  vm_world.chunks.retain(|_, chunk| {
-    if chunk.desired_state == DesiredChunkState::Unloaded {
+  //HACK: cant iterate over chunks.keys() or chunk directly!
+  let hashmap_keys: Vec<IVec3> = world.chunks.keys().copied().collect();
+  for position in hashmap_keys {
+    let chunk = world.chunks.get_mut(&position).unwrap();
+
+    //If the chunk is being unloaded, it's essentially dead at this point and we shouldn't bother it
+    if chunk.current_state == CurrentChunkState::Unloading {
+      continue
+    }
+    // If the chunk is already in the desired state, skip it
+    if chunk.desired_state.matches_current(chunk.current_state) {
+      continue
+    }
+    match chunk.desired_state {
+      // DesiredChunkState::Unloaded | DesiredChunkState::Nothing:
+      // Loading -> Nothing
+      DesiredChunkState::Unloaded | DesiredChunkState::Nothing if chunk.current_state == CurrentChunkState::Loading => {
+        if let Some(abortion) = &chunk.abortion {
+          let _ = abortion.compare_exchange(
+            AbortState::Continue, AbortState::Abort,
+            Ordering::Relaxed, Ordering::Relaxed
+          );
+        }
+        chunk.abortion = None;
+        chunk.current_state = CurrentChunkState::Nothing;
+      },
+
+      // DesiredChunkState::Unloaded | DesiredChunkState::Nothing:
+      // (Loaded, CalculatingMesh) -> Nothing
+      DesiredChunkState::Unloaded | DesiredChunkState::Nothing if matches!(
+        chunk.current_state,
+        CurrentChunkState::Loaded | CurrentChunkState::CalculatingMesh,
+      ) => {
+        chunk.block_data = None;
+        chunk.current_state = CurrentChunkState::Nothing;
+      },
+
+      // DesiredChunkState::Unloaded | DesiredChunkState::Nothing:
+      // (Rendered | RecalculatingMesh) -> Nothing
+      DesiredChunkState::Unloaded | DesiredChunkState::Nothing if matches!(
+        chunk.current_state,
+        CurrentChunkState::Rendered | CurrentChunkState::RecalculatingMesh,
+      ) => {
         if let Some(mesh_index) = chunk.mesh_index {
           vm_meshes.remove(mesh_index).unwrap();
         }
-      if let Some(abortion) = &chunk.abortion {
-        let _ = abortion.compare_exchange(
-          AbortState::Continue, AbortState::Abort,
-          Ordering::Relaxed, Ordering::Relaxed
-        );
-      }
-      false
-    } else {
-      match chunk.desired_state {
-        DesiredChunkState::Nothing if matches!(chunk.current_state, CurrentChunkState::Loading) => {
-          if let Some(abortion) = &chunk.abortion {
-            let _ = abortion.compare_exchange(
-              AbortState::Continue, AbortState::Abort,
-              Ordering::Relaxed, Ordering::Relaxed
-            );
-          }
+        chunk.mesh_index = None;
+        chunk.current_state = CurrentChunkState::Nothing;
       },
-        DesiredChunkState::Loaded if matches!(chunk.current_state, CurrentChunkState::Rendered | CurrentChunkState::CalculatingMesh | CurrentChunkState::RecalculatingMesh) => {
+
+      // DesiredChunkState::Loaded:
+      // CalculatingMesh -> Loaded
+      DesiredChunkState::Loaded if chunk.current_state == CurrentChunkState::CalculatingMesh => {
+        chunk.current_state = CurrentChunkState::Loaded;
+      },
+
+      // DesiredChunkState::Unloaded | DesiredChunkState::Nothing | DesiredChunkState::Loaded:
+      // (Rendered | RecalculatingMesh) -> Loaded
+      DesiredChunkState::Unloaded | DesiredChunkState::Nothing | DesiredChunkState::Loaded if matches!(
+        chunk.current_state, CurrentChunkState::Rendered | CurrentChunkState::RecalculatingMesh
+      ) => {
         if let Some(mesh_index) = chunk.mesh_index {
           vm_meshes.remove(mesh_index).unwrap();
         }
         chunk.mesh_index = None;
         chunk.current_state = CurrentChunkState::Loaded;
       },
-      _ => (),
-      }
-      true
-    }
-  })
-}
 
-fn start_required_tasks(
-  task_manager: UniqueView<ChunkTaskManager>,
-  mut udp_client: Option<UniqueViewMut<UdpClient>>,
-  mut world: UniqueViewMut<ChunkStorage>,
-) {
-  if !world.is_modified() {
-    return
-  }
-  //HACK: cant iterate over chunks.keys() or chunk directly!
-  let hashmap_keys: Vec<IVec3> = world.chunks.keys().copied().collect();
-  for position in hashmap_keys {
-    let chunk = world.chunks.get(&position).unwrap();
-    match chunk.desired_state {
+      // DesiredChunkState::Loaded | DesiredChunkState::Rendered:
+      // Nothing -> Loading
       DesiredChunkState::Loaded | DesiredChunkState::Rendered if chunk.current_state == CurrentChunkState::Nothing => {
         let mut abortion = None;
         //start load task

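The loop above leans on DesiredChunkState::matches_current, which is not part of this diff. A hypothetical sketch of the mapping it presumably performs (variant names copied from the diff; the exact pairing logic is an assumption):

#[derive(Clone, Copy, PartialEq, Debug)]
enum CurrentChunkState { Nothing, Loading, Loaded, CalculatingMesh, Rendered, RecalculatingMesh, Unloading }

#[derive(Clone, Copy, PartialEq, Debug)]
enum DesiredChunkState { Unloaded, Nothing, Loaded, Rendered }

impl DesiredChunkState {
  // Hypothetical: each desired state has one "settled" current state,
  // so any other current state means a transition is still pending.
  fn matches_current(self, current: CurrentChunkState) -> bool {
    matches!(
      (self, current),
      (DesiredChunkState::Nothing, CurrentChunkState::Nothing)
        | (DesiredChunkState::Loaded, CurrentChunkState::Loaded)
        | (DesiredChunkState::Rendered, CurrentChunkState::Rendered)
    )
  }
}

fn main() {
  assert!(DesiredChunkState::Loaded.matches_current(CurrentChunkState::Loaded));
  assert!(!DesiredChunkState::Rendered.matches_current(CurrentChunkState::Loaded));
}
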
@@ -157,7 +178,7 @@ fn start_required_tasks(
         postcard::to_allocvec(&ClientToServerMessage::ChunkSubRequest {
           chunk: position,
         }).unwrap().into_boxed_slice(),
-        0,
+        Channel::SubReq as usize,
         SendMode::Reliable
       );
     } else {

@@ -176,6 +197,10 @@ fn start_required_tasks(
       // ===========
       //log::trace!("Started loading chunk {position}");
     },
+
+    // DesiredChunkState::Rendered:
+    // Loaded -> CalculatingMesh
+    // Rendered (dirty) -> RecalculatingMesh
     DesiredChunkState::Rendered if (chunk.current_state == CurrentChunkState::Loaded || chunk.mesh_dirty) => {
       //get needed data
       let Some(neighbors) = world.neighbors_all(position) else {

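The guard on this arm encodes two entry points into meshing: a chunk that just finished loading, and an already-rendered chunk whose mesh_dirty flag was set (by a block edit, for example; that is an assumption, since the flag's writers are not in this diff). A small illustrative decision function:

#[derive(Clone, Copy, PartialEq)]
enum State { Loaded, Rendered, CalculatingMesh, RecalculatingMesh }

// Illustrative only: pick the next state for a chunk that wants to be Rendered.
fn next_mesh_state(current: State, mesh_dirty: bool) -> Option<State> {
  match current {
    State::Loaded => Some(State::CalculatingMesh),                   // first mesh build
    State::Rendered if mesh_dirty => Some(State::RecalculatingMesh), // rebuild
    _ => None, // nothing to do
  }
}

fn main() {
  assert!(next_mesh_state(State::Loaded, false) == Some(State::CalculatingMesh));
  assert!(next_mesh_state(State::Rendered, true) == Some(State::RecalculatingMesh));
  assert!(next_mesh_state(State::Rendered, false).is_none());
}
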
@@ -198,9 +223,41 @@ fn start_required_tasks(
       // ===========
       //log::trace!("Started generating mesh for chunk {position}");
     }
-      _ => ()
+
+      _ => {}, //panic!("Illegal state transition: {:?} -> {:?}", chunk.current_state, chunk.desired_state),
     }
   }
+
+  //Now, separately process state change the state from Nothing to Unloading or Unloaded
+  world.chunks.retain(|&position, chunk: &mut Chunk| {
+    if chunk.desired_state == DesiredChunkState::Unloaded {
+      assert!(chunk.current_state == CurrentChunkState::Nothing, "looks like chunk did not get properly downgraded to Nothing before unloading, this is a bug");
+
+      chunk.current_state = CurrentChunkState::Unloading;
+
+      //If in multiplayer, send a message to the server to unsubscribe from the chunk
+      if let Some(client) = &mut udp_client {
+        client.0.send(
+          postcard::to_allocvec(
+            &ClientToServerMessage::ChunkUnsubscribe { chunk: position }
+          ).unwrap().into_boxed_slice(),
+          Channel::SubReq as usize,
+          SendMode::Reliable
+        );
+        // and i think that's it, just kill the chunk right away, the server will take care of the rest
+        //
+        // because uflow's reliable packets are ordered, there should be no need to wait for the server to confirm the unsubscription
+        // because client won't be able to subscribe to it again until the server finishes processing the unsubscription
+        // :ferrisClueless:
+        return false
+      }
+
+      //HACK, since save files are not implemented, just unload immediately
+      return false
+    }
+    true
+  });
 }
 
 fn process_completed_tasks(

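The comments in the retain closure argue that no unsubscribe acknowledgement is needed because uflow's reliable packets on a channel arrive in order. A toy model of that argument (ChunkPos and SubMsg are made-up stand-ins for the real message types):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ChunkPos(i32, i32, i32);

enum SubMsg { Subscribe(ChunkPos), Unsubscribe(ChunkPos) }

fn main() {
  let pos = ChunkPos(4, 0, -2);
  // Client unsubscribes, then later wanders back and resubscribes;
  // both travel on the same reliable, ordered channel (Channel::SubReq).
  let channel = vec![SubMsg::Unsubscribe(pos), SubMsg::Subscribe(pos)];

  let mut subs: HashSet<ChunkPos> = HashSet::from([pos]);
  for msg in channel {
    match msg {
      SubMsg::Subscribe(p) => { subs.insert(p); }
      SubMsg::Unsubscribe(p) => { subs.remove(&p); }
    }
  }
  // In-order processing means the resubscribe always lands after the
  // unsubscribe took effect, so the client can drop its state immediately.
  assert!(subs.contains(&pos));
}
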
@@ -215,6 +272,10 @@ fn process_completed_tasks(
   while let Some(res) = task_manager.receive() {
     match res {
       ChunkTaskResponse::LoadedChunk { position, chunk_data, mut queued } => {
+        //If unwanted chunk is already loaded
+        //It would be ~~...unethical~~ impossible to abort the operation at this point
+        //Instead, we'll just throw it away
+
         //check if chunk exists
         let Some(chunk) = world.chunks.get_mut(&position) else {
           //to compensate, actually push the ops counter back by one

@@ -222,6 +283,8 @@ fn process_completed_tasks(
           continue
         };
 
+        chunk.abortion = None;
+
         //check if chunk still wants it
         if !matches!(chunk.desired_state, DesiredChunkState::Loaded | DesiredChunkState::Rendered) {
           log::warn!("block data discarded: state undesirable: {:?}", chunk.desired_state);