Finish upgrading server to uflow

griffi-gh 2023-03-09 03:30:37 +01:00
parent 5723f07a26
commit 3f39f11860
13 changed files with 230 additions and 114 deletions

View file

@@ -1,7 +1,10 @@
 [server]
 address = "0.0.0.0:12345"
-max_clients = 254
+max_clients = 32
 timeout_ms = 10000
 
 [world]
 seed = 0xfeb_face_dead_cafe
+
+[query]
+name = "Kubi Server"

View file

@ -1,40 +1,47 @@
use shipyard::{UniqueView, NonSendSync}; use shipyard::{UniqueView, NonSendSync, EntitiesViewMut, ViewMut, UniqueViewMut};
use uflow::{server::Event as ServerEvent, SendMode}; use uflow::{server::Event as ServerEvent, SendMode};
use kubi_shared::networking::messages::{ use kubi_shared::{
ClientToServerMessage, networking::{
ServerToClientMessage, messages::{
InitData, ClientToServerMessage,
C_CLIENT_HELLO ServerToClientMessage,
InitData,
C_CLIENT_HELLO
},
client::{Client, ClientId}, channels::CHANNEL_AUTH
},
player::Player,
transform::Transform
}; };
use crate::{ use crate::{
server::{ServerEvents, UdpServer, IsMessageOfType}, server::{ServerEvents, UdpServer, IsMessageOfType},
config::ConfigTable config::ConfigTable,
client::{ClientAddress, ClientIdMap, ClientAddressMap}
}; };
pub fn authenticate_players( pub fn authenticate_players(
mut entities: EntitiesViewMut,
mut players: ViewMut<Player>,
mut clients: ViewMut<Client>,
mut client_addrs: ViewMut<ClientAddress>,
mut transforms: ViewMut<Transform>,
mut client_entity_map: UniqueViewMut<ClientIdMap>,
mut client_addr_map: UniqueViewMut<ClientAddressMap>,
server: NonSendSync<UniqueView<UdpServer>>, server: NonSendSync<UniqueView<UdpServer>>,
events: UniqueView<ServerEvents>, events: UniqueView<ServerEvents>,
config: UniqueView<ConfigTable> config: UniqueView<ConfigTable>
) { ) {
for event in &events.0 { for event in &events.0 {
// if let ServerEvent::MessageReceived {
// from,
// message: ClientToServerMessage::ClientHello {
// username,
// password
// }
// } = event {
let ServerEvent::Receive(client_addr, data) = event else{ let ServerEvent::Receive(client_addr, data) = event else{
continue continue
}; };
let Some(client) = server.0.client(client_addr) else {
log::error!("Client doesn't exist");
continue
};
if !event.is_message_of_type::<C_CLIENT_HELLO>() { if !event.is_message_of_type::<C_CLIENT_HELLO>() {
continue continue
} }
let Some(client) = server.0.client(client_addr) else {
log::error!("Client doesn't exist");
continue
};
let Ok(parsed_message) = postcard::from_bytes(data) else { let Ok(parsed_message) = postcard::from_bytes(data) else {
log::error!("Malformed message"); log::error!("Malformed message");
continue continue
@@ -49,38 +56,70 @@ pub fn authenticate_players(
     if let Some(server_password) = &config.server.password {
       if let Some(user_password) = &password {
         if server_password != user_password {
-          let res = postcard::to_allocvec(&ServerToClientMessage::ServerFuckOff {
-            reason: "Passwords don't match".into()
-          }).unwrap().into_boxed_slice();
           client.borrow_mut().send(
-            res, 0, SendMode::Reliable
+            postcard::to_allocvec(&ServerToClientMessage::ServerFuckOff {
+              reason: "Incorrect password".into()
+            }).unwrap().into_boxed_slice(),
+            CHANNEL_AUTH,
+            SendMode::Reliable
           );
           continue
         }
       } else {
-        let res = postcard::to_allocvec(&ServerToClientMessage::ServerFuckOff {
-          reason: "This server is password protected".into()
-        }).unwrap().into_boxed_slice();
         client.borrow_mut().send(
-          res, 0, SendMode::Reliable
+          postcard::to_allocvec(&ServerToClientMessage::ServerFuckOff {
+            reason: "This server is password protected".into()
+          }).unwrap().into_boxed_slice(),
+          CHANNEL_AUTH,
+          SendMode::Reliable
         );
         continue
       }
     }
+    //Find the player ID
+    let max_clients = config.server.max_clients as ClientId;
+    let Some(client_id) = (0..max_clients).into_iter().find(|id| {
+      !client_entity_map.0.contains_key(id)
+    }) else {
+      client.borrow_mut().send(
+        postcard::to_allocvec(&ServerToClientMessage::ServerFuckOff {
+          reason: "Can't find a free spot for you!".into()
+        }).unwrap().into_boxed_slice(),
+        CHANNEL_AUTH,
+        SendMode::Reliable
+      );
+      continue
+    };
     //Spawn the user
-    //TODO Spawn the user on server side
+    let entity_id = entities.add_entity((
+      &mut players,
+      &mut clients,
+      &mut client_addrs,
+      &mut transforms,
+    ), (
+      Player,
+      Client(client_id),
+      ClientAddress(*client_addr),
+      Transform::default(),
+    ));
+    //Add the user to the ClientIdMap and ClientAddressMap
+    client_entity_map.0.insert(client_id, entity_id);
+    client_addr_map.0.insert(*client_addr, entity_id);
     //Approve the user
-    let res = postcard::to_allocvec(&ServerToClientMessage::ServerHello {
-      init: InitData {
-        users: vec![] //TODO create init data
-      }
-    }).unwrap().into_boxed_slice();
     client.borrow_mut().send(
-      res, 0, SendMode::Reliable
+      postcard::to_allocvec(&ServerToClientMessage::ServerHello {
+        init: InitData {
+          users: vec![] //TODO create init data
+        }
+      }).unwrap().into_boxed_slice(),
+      CHANNEL_AUTH,
+      SendMode::Reliable
     );
-    log::info!("{username} joined the game!")
+    log::info!("{username}({client_id}) joined the game!")
   }
 }
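
Note: the is_message_of_type::<C_CLIENT_HELLO>() guard above comes from the IsMessageOfType helper in server.rs, which is outside this diff. A minimal sketch of how such a check can be implemented, assuming postcard writes small enum discriminants as a single leading byte (hypothetical code, not part of the commit):

use uflow::server::Event as ServerEvent;

pub trait IsMessageOfType {
  fn is_message_of_type<const T: u8>(&self) -> bool;
}

impl IsMessageOfType for ServerEvent {
  fn is_message_of_type<const T: u8>(&self) -> bool {
    // Only Receive events carry a payload; everything else never matches.
    let ServerEvent::Receive(_, data) = self else { return false };
    // postcard serializes the enum discriminant first, so peeking at one
    // byte routes the packet without paying for a full deserialize.
    !data.is_empty() && data[0] == T
  }
}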

View file

@@ -1,19 +1,34 @@
-use shipyard::{Component, EntityId};
+use shipyard::{Component, EntityId, Unique, Workload, AllStoragesView};
 use hashbrown::HashMap;
 use nohash_hasher::BuildNoHashHasher;
+use std::net::SocketAddr;
 use kubi_shared::networking::client::ClientId;
 
-#[derive(Component)]
-pub struct Client(ClientId);
+#[derive(Component, Clone, Copy)]
+pub struct ClientAddress(pub SocketAddr);
 
-pub struct ClientMap(HashMap<ClientId, EntityId, BuildNoHashHasher<ClientId>>);
-impl ClientMap {
+#[derive(Unique)]
+pub struct ClientIdMap(pub HashMap<ClientId, EntityId, BuildNoHashHasher<ClientId>>);
+impl ClientIdMap {
   pub fn new() -> Self {
     Self(HashMap::with_hasher(BuildNoHashHasher::default()))
   }
 }
-impl Default for ClientMap {
+impl Default for ClientIdMap {
   fn default() -> Self {
     Self::new()
   }
 }
+
+#[derive(Unique, Default)]
+pub struct ClientAddressMap(pub HashMap<SocketAddr, EntityId>);
+impl ClientAddressMap {
+  pub fn new() -> Self { Self::default() }
+}
+
+pub fn init_client_maps(
+  storages: AllStoragesView
+) {
+  storages.add_unique(ClientIdMap::new());
+  storages.add_unique(ClientAddressMap::new());
+}
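
ClientIdMap keys are plain u16 client IDs, so hashing them would be wasted work; BuildNoHashHasher makes the map use the key value directly. A standalone sketch of that construction (hypothetical, mirrors the types above):

use hashbrown::HashMap;
use nohash_hasher::BuildNoHashHasher;

type ClientId = u16; // mirrors kubi_shared::networking::client::ClientId

fn main() {
  // Valid because u16 implements nohash_hasher::IsEnabled:
  // the integer key doubles as its own hash.
  let mut map: HashMap<ClientId, &str, BuildNoHashHasher<ClientId>> =
    HashMap::with_hasher(BuildNoHashHasher::default());
  map.insert(0, "entity for client 0");
  assert!(map.contains_key(&0));
}

ClientAddressMap keeps the default hasher, since SocketAddr is not an integer key.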

View file

@@ -15,10 +15,16 @@ pub struct ConfigTableWorld {
   pub seed: u64,
 }
 
+#[derive(Serialize, Deserialize)]
+pub struct ConfigTableQuery {
+  pub name: Option<String>
+}
+
 #[derive(Unique, Serialize, Deserialize)]
 pub struct ConfigTable {
   pub server: ConfigTableServer,
   pub world: ConfigTableWorld,
+  pub query: ConfigTableQuery,
 }
 
 pub fn read_config(
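
With this change the [query] table itself becomes required in the config file (hence the Config.toml change above), while its name field stays optional. A hypothetical round-trip of just the new table, assuming the toml crate is what read_config uses:

use serde::{Serialize, Deserialize};

#[derive(Serialize, Deserialize)]
pub struct ConfigTableQuery {
  pub name: Option<String>
}

fn main() {
  // `name` present:
  let q: ConfigTableQuery = toml::from_str(r#"name = "Kubi Server""#).unwrap();
  assert_eq!(q.name.as_deref(), Some("Kubi Server"));
  // `name` absent: Option fields deserialize to None when missing.
  let q: ConfigTableQuery = toml::from_str("").unwrap();
  assert!(q.name.is_none());
}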

View file

@@ -1,23 +1,25 @@
 use shipyard::{World, Workload, IntoWorkload};
 use std::{thread, time::Duration};
 
-pub(crate) mod util;
-pub(crate) mod config;
-pub(crate) mod server;
-pub(crate) mod client;
-//pub(crate) mod world;
-pub(crate) mod auth;
+mod util;
+mod config;
+mod server;
+mod client;
+mod world;
+mod auth;
 
 use config::read_config;
 use server::{bind_server, update_server, log_server_errors};
+use client::init_client_maps;
 use auth::authenticate_players;
-//use world::{update_world, init_world};
+use world::{update_world, init_world};
 
 fn initialize() -> Workload {
   (
     read_config,
     bind_server,
-    //init_world,
+    init_client_maps,
+    init_world,
   ).into_workload()
 }
@@ -27,7 +29,7 @@ fn update() -> Workload {
     (
       log_server_errors,
       authenticate_players,
-      //update_world,
+      update_world,
     ).into_workload()
   ).into_sequential_workload()
 }
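
For reference, a self-contained sketch of the workload pattern used here (shipyard 0.6-style API with hypothetical system names; into_sequential_workload forces the top-level steps to run in order, while the nested into_workload group may be scheduled in parallel):

use shipyard::{World, Workload, IntoWorkload};

fn poll_network() {}
fn handle_auth() {}
fn stream_world() {}

fn update() -> Workload {
  (
    poll_network,
    (
      handle_auth,
      stream_world,
    ).into_workload(),
  ).into_sequential_workload()
}

fn main() {
  let world = World::new();
  world.add_workload(update);
  world.run_workload(update).unwrap();
}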

View file

@@ -1,11 +1,19 @@
-use shipyard::{Unique, UniqueView, UniqueViewMut, Workload, IntoWorkload, AllStoragesView};
+use shipyard::{Unique, UniqueView, UniqueViewMut, Workload, IntoWorkload, AllStoragesView, View, Get, NonSendSync};
 use glam::IVec3;
 use hashbrown::HashMap;
-use kubi_shared::networking::messages::{ClientToServerMessage, ServerToClientMessage};
+use kubi_shared::networking::{
+  messages::{ClientToServerMessage, ServerToClientMessage, C_CHUNK_SUB_REQUEST},
+  channels::CHANNEL_WORLD,
+  client::Client,
+};
+use uflow::{
+  server::Event as ServerEvent,
+  SendMode
+};
 use crate::{
-  server::{UdpServer, ServerEvents},
+  server::{UdpServer, ServerEvents, IsMessageOfType},
   config::ConfigTable,
-  util::log_error,
+  client::{ClientAddress, ClientIdMap, ClientAddressMap},
 };
 
 pub mod chunk;
@@ -26,51 +34,77 @@ impl ChunkManager {
 }
 
 fn process_chunk_requests(
-  mut server: UniqueViewMut<UdpServer>,
+  mut server: NonSendSync<UniqueViewMut<UdpServer>>,
   events: UniqueView<ServerEvents>,
   mut chunk_manager: UniqueViewMut<ChunkManager>,
   task_manager: UniqueView<ChunkTaskManager>,
-  config: UniqueView<ConfigTable>
+  config: UniqueView<ConfigTable>,
+  addr_map: UniqueView<ClientAddressMap>,
+  clients: View<Client>
 ) {
   for event in &events.0 {
-    if let ServerEvent::MessageReceived {
-      from: client_id,
-      message: ClientToServerMessage::ChunkSubRequest {
-        chunk: chunk_position
-      }
-    } = event {
-      let chunk_position = IVec3::from_array(*chunk_position);
-      if let Some(chunk) = chunk_manager.chunks.get_mut(&chunk_position) {
-        chunk.subscriptions.insert(*client_id);
-        //TODO Start task here if status is "Nothing"
-        if let Some(blocks) = &chunk.blocks {
-          server.0.send_message(*client_id, kubi_shared::networking::messages::ServerToClientMessage::ChunkResponse {
-            chunk: chunk_position.to_array(),
-            data: blocks.clone(),
-            queued: Vec::with_capacity(0)
-          }).map_err(log_error).ok();
-        }
-      } else {
-        let mut chunk = Chunk::new(chunk_position);
-        chunk.state = ChunkState::Loading;
-        chunk.subscriptions.insert(*client_id);
-        chunk_manager.chunks.insert(chunk_position, chunk);
-        task_manager.spawn_task(ChunkTask::LoadChunk {
-          position: chunk_position,
-          seed: config.world.seed,
-        });
-      }
-    }
+    let ServerEvent::Receive(client_addr, data) = event else {
+      continue
+    };
+    if !event.is_message_of_type::<C_CHUNK_SUB_REQUEST>() {
+      continue
+    }
+    let Some(client) = server.0.client(client_addr) else {
+      log::error!("Client doesn't exist");
+      continue
+    };
+    let Some(&entity_id) = addr_map.0.get(client_addr) else {
+      log::error!("Client not authenticated");
+      continue
+    };
+    let Ok(&Client(client_id)) = (&clients).get(entity_id) else {
+      log::error!("Entity ID is invalid");
+      continue
+    };
+    let Ok(parsed_message) = postcard::from_bytes(data) else {
+      log::error!("Malformed message");
+      continue
+    };
+    let ClientToServerMessage::ChunkSubRequest { chunk: chunk_position } = parsed_message else {
+      unreachable!()
+    };
+    if let Some(chunk) = chunk_manager.chunks.get_mut(&chunk_position) {
+      chunk.subscriptions.insert(client_id);
+      //TODO Start task here if status is "Nothing"
+      if let Some(blocks) = &chunk.blocks {
+        client.borrow_mut().send(
+          postcard::to_allocvec(&ServerToClientMessage::ChunkResponse {
+            chunk: chunk_position,
+            data: blocks.clone(),
+            queued: Vec::with_capacity(0)
+          }).unwrap().into_boxed_slice(),
+          CHANNEL_WORLD,
+          SendMode::Reliable,
+        );
+      }
+    } else {
+      let mut chunk = Chunk::new(chunk_position);
+      chunk.state = ChunkState::Loading;
+      chunk.subscriptions.insert(client_id);
+      chunk_manager.chunks.insert(chunk_position, chunk);
+      task_manager.spawn_task(ChunkTask::LoadChunk {
+        position: chunk_position,
+        seed: config.world.seed,
+      });
+    }
   }
 }
 
 fn process_finished_tasks(
-  mut server: UniqueViewMut<UdpServer>,
+  mut server: NonSendSync<UniqueViewMut<UdpServer>>,
   task_manager: UniqueView<ChunkTaskManager>,
   mut chunk_manager: UniqueViewMut<ChunkManager>,
+  id_map: UniqueView<ClientIdMap>,
+  client_addr: View<ClientAddress>,
 ) {
   let mut limit: usize = 8;
-  while let Some(res) = task_manager.receive() {
+  'outer: while let Some(res) = task_manager.receive() {
     let ChunkTaskResponse::ChunkLoaded { chunk_position, blocks, queue } = res;
     let Some(chunk) = chunk_manager.chunks.get_mut(&chunk_position) else {
       log::warn!("Chunk discarded: Doesn't exist");
@@ -82,18 +116,29 @@ fn process_finished_tasks(
     }
     chunk.state = ChunkState::Loaded;
     chunk.blocks = Some(blocks.clone());
-    for &subscriber in &chunk.subscriptions {
-      server.0.send_message(subscriber, ServerToClientMessage::ChunkResponse {
-        chunk: chunk_position.to_array(),
-        data: blocks.clone(),
-        queued: queue
-      }).map_err(log_error).ok();
-    }
     log::debug!("Chunk {chunk_position} loaded, {} subs", chunk.subscriptions.len());
-    //HACK: Implement proper flow control/reliable transport in kubi-udp
-    limit -= 1;
-    if limit == 0 {
-      break;
+    for &subscriber in &chunk.subscriptions {
+      let Some(&entity_id) = id_map.0.get(&subscriber) else {
+        log::error!("Invalid subscriber client id");
+        continue 'outer;
+      };
+      let Ok(&ClientAddress(client_addr)) = (&client_addr).get(entity_id) else {
+        log::error!("Invalid subscriber entity id");
+        continue 'outer;
+      };
+      let Some(client) = server.0.client(&client_addr) else {
+        log::error!("Client not connected");
+        continue 'outer;
+      };
+      client.borrow_mut().send(
+        postcard::to_allocvec(&ServerToClientMessage::ChunkResponse {
+          chunk: chunk_position,
+          data: blocks.clone(),
+          queued: queue.clone()
+        }).unwrap().into_boxed_slice(),
+        CHANNEL_WORLD,
+        SendMode::Reliable,
+      );
     }
   }
 }
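
Both systems now use the same lookup-then-send pattern in place of the old server.0.send_message. A reduced sketch of that pattern (assumes the uflow 0.7-style API used throughout this commit; channel constant inlined for brevity):

use std::net::SocketAddr;
use uflow::SendMode;

fn reply(server: &uflow::server::Server, addr: &SocketAddr, payload: Box<[u8]>) {
  // Server::client hands back a shared RefCell'd handle, which is why
  // every call site in this diff goes through borrow_mut() before send().
  if let Some(client) = server.client(addr) {
    client.borrow_mut().send(payload, 2 /*CHANNEL_WORLD*/, SendMode::Reliable);
  }
}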

View file

@@ -1,3 +1,4 @@
 pub mod messages;
 pub mod state;
 pub mod client;
+pub mod channels;

View file

@@ -0,0 +1,3 @@
+pub const CHANNEL_GENERIC: usize = 0;
+pub const CHANNEL_AUTH: usize = 1;
+pub const CHANNEL_WORLD: usize = 2;
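
Splitting traffic across channels means a burst of reliable chunk data can no longer delay a small auth reply, since uflow sequences each channel independently. A hypothetical client-side use of the new constants:

use kubi_shared::networking::channels::{CHANNEL_AUTH, CHANNEL_WORLD};
use uflow::SendMode;

fn send_both(client: &mut uflow::client::Client) {
  // Each channel keeps its own ordering; a reliable send on one channel
  // does not head-of-line block packets queued on the other.
  client.send(b"auth handshake".to_vec().into_boxed_slice(), CHANNEL_AUTH, SendMode::Reliable);
  client.send(b"chunk request".to_vec().into_boxed_slice(), CHANNEL_WORLD, SendMode::Reliable);
}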

View file

@@ -1,3 +1,7 @@
+use shipyard::Component;
+
 pub type ClientId = u16;
 pub type ClientKey = u16;
 
+#[derive(Component, Clone, Copy, Debug)]
+pub struct Client(pub ClientId);

View file

@@ -1,10 +1,7 @@
-use std::num::NonZeroUsize;
+use glam::{Vec3, IVec3, Quat};
 use serde::{Serialize, Deserialize};
 use crate::{chunk::BlockData, queue::QueuedBlock};
-
-pub type IVec3Arr = [i32; 3];
-pub type Vec3Arr = [f32; 3];
-pub type QuatArr = [f32; 3];
+use super::client::ClientId;
 
 pub const PROTOCOL_ID: u16 = 2;
@@ -20,12 +17,12 @@ pub enum ClientToServerMessage {
     password: Option<String>,
   } = C_CLIENT_HELLO,
   PositionChanged {
-    position: Vec3Arr,
-    velocity: Vec3Arr,
-    direction: QuatArr,
+    position: Vec3,
+    velocity: Vec3,
+    direction: Quat,
   } = C_POSITION_CHANGED,
   ChunkSubRequest {
-    chunk: IVec3Arr,
+    chunk: IVec3,
   } = C_CHUNK_SUB_REQUEST,
 }
@@ -45,11 +42,11 @@ pub enum ServerToClientMessage {
   } = S_SERVER_FUCK_OFF,
   PlayerPositionChanged {
     client_id: u8,
-    position: Vec3Arr,
-    direction: QuatArr,
+    position: Vec3,
+    direction: Quat,
   } = S_PLAYER_POSITION_CHANGED,
   ChunkResponse {
-    chunk: IVec3Arr,
+    chunk: IVec3,
     data: BlockData,
     queued: Vec<QueuedBlock>,
   } = S_CHUNK_RESPONSE,
@@ -59,11 +56,11 @@ pub enum ServerToClientMessage {
 
 #[derive(Serialize, Deserialize, Clone)]
 pub struct UserInitData {
-  pub client_id: NonZeroUsize, //maybe use the proper type instead
+  pub client_id: ClientId,
   pub username: String,
-  pub position: Vec3Arr,
-  pub velocity: Vec3Arr,
-  pub direction: QuatArr,
+  pub position: Vec3,
+  pub velocity: Vec3,
+  pub direction: Quat,
 }
 
 #[derive(Serialize, Deserialize, Clone)]
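
Replacing the [f32; 3]-style aliases with glam types depends on glam's optional serde support and incidentally fixes QuatArr, which was declared as [f32; 3] even though a quaternion has four components. A round-trip sketch (assumes glam is built with its "serde" feature):

use glam::IVec3;
use serde::{Serialize, Deserialize};

#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct ChunkSubRequest {
  chunk: IVec3,
}

fn main() {
  let msg = ChunkSubRequest { chunk: IVec3::new(1, -2, 3) };
  // postcard keeps this compact: three varint-encoded components.
  let bytes = postcard::to_allocvec(&msg).unwrap();
  let back: ChunkSubRequest = postcard::from_bytes(&bytes).unwrap();
  assert_eq!(msg, back);
}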

View file

@@ -4,7 +4,8 @@ use std::net::SocketAddr;
 use uflow::client::{Client, Config as ClientConfig, Event as ClientEvent};
 use kubi_shared::networking::{
   messages::{ClientToServerMessage, ServerToClientMessage, S_SERVER_HELLO},
-  state::ClientJoinState
+  state::ClientJoinState,
+  channels::CHANNEL_AUTH,
 };
 use crate::{events::EventComponent, control_flow::SetControlFlow};
@@ -77,7 +78,7 @@ fn say_hello(
         password: None
       }
     ).unwrap().into_boxed_slice(),
-    0,
+    CHANNEL_AUTH,
     uflow::SendMode::Reliable
   );

View file

@@ -138,7 +138,7 @@ fn start_required_tasks(
       if let Some(client) = &mut udp_client {
         client.0.send(
           postcard::to_allocvec(&ClientToServerMessage::ChunkSubRequest {
-            chunk: position.to_array()
+            chunk: position,
           }).unwrap().into_boxed_slice(),
           0,
           SendMode::Reliable

View file

@@ -89,7 +89,7 @@ pub fn inject_network_responses_into_manager_queue(
           chunk, data, queued
         } = postcard::from_bytes(data).expect("Chunk decode failed") else { unreachable!() };
         manager.add_sussy_response(ChunkTaskResponse::LoadedChunk {
-          position: IVec3::from_array(chunk),
+          position: chunk,
           chunk_data: data,
           queued
         });