Mirror of https://github.com/griffi-gh/kubi.git
Synced 2024-11-24 07:48:42 -06:00

Compare commits: 29 commits, 4658a151d0 ... eda6a46875
All commits are by griffi-gh:
eda6a46875, 1251d07c60, a2e5a29e3a, 4b962b9192, 5ebe166002, 0433653ac0,
9216a5a2a8, 1c6ef70121, 6dccc97933, 1b89756648, 63e26e3a5b, 884551089c,
37e68912eb, 64a67d0ffe, 61b99409ce, 570382520c, 2466c02937, be1e24ba0c,
bd299bc7a3, 518b2c3e79, 2849c11621, ddca67d603, 38432eb9b0, 83e187769a,
d0c75d9397, bb4d2a80bf, 097bdc29ff, 8dd32031ce, 1e88b78fa4
.gitignore (vendored, 5 lines changed)

@@ -1,3 +1,5 @@
+.direnv
+
 # Generated by Cargo
 # will have compiled files and executables
 debug/

@@ -15,6 +17,7 @@ _src
 _visualizer.json
 
 *.kubi
+*.kbi
 
 /*_log*.txt
 /*.log

@@ -36,3 +39,5 @@ _visualizer.json
 *.blend1
 
 /mods
+
+
.vscode/settings.json (vendored, 3 lines changed)

@@ -5,5 +5,6 @@
     "unresolved-method",
     "unresolved-import",
     "unresolved-field"
-  ]
+  ],
+  "git.replaceTagsWhenPull": true
 }
Cargo.lock (generated, 410 lines changed)
File diff suppressed because it is too large.
Changes to the server configuration file (TOML):

@@ -4,6 +4,7 @@ max_clients = 32
 timeout_ms = 10000
 
 [world]
+file = "world.kubi"
 seed = 0xfeb_face_dead_cafe
 preheat_radius = 8
flake.lock (new file, 114 lines)

{
  "nodes": {
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1725172314,
        "narHash": "sha256-BtLY9lWu/pe6/ImFwuRRRqMwLacY5AZOKA2hUHUQ64k=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "28b42d01f549c38bd165296fbcb4fe66d98fc24f",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-schemas": {
      "locked": {
        "narHash": "sha256-ifw8Td8kD08J8DxFbYjeIx5naHcDLz7s2IFP3X42I/U=",
        "rev": "c702cbb663d6d70bbb716584a2ee3aeb35017279",
        "revCount": 21,
        "type": "tarball",
        "url": "https://api.flakehub.com/f/pinned/DeterminateSystems/flake-schemas/0.1.1/018a4c59-80e1-708a-bb4d-854930c20f72/source.tar.gz"
      },
      "original": {
        "type": "tarball",
        "url": "https://flakehub.com/f/DeterminateSystems/flake-schemas/0.1.1.tar.gz"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1724819573,
        "narHash": "sha256-GnR7/ibgIH1vhoy8cYdmXE6iyZqKqFxQSVkFgosBh6w=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "71e91c409d1e654808b2621f28a327acfdad8dc2",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "fenix": "fenix",
        "nixpkgs": "nixpkgs",
        "yafas": "yafas"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1725094379,
        "narHash": "sha256-TBujPMMIv8RG6BKlsBEpCln1ePmWz79xTcJOQpU2L18=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "914a1caab54e48a028b2407d0fe6fade89532f67",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    },
    "yafas": {
      "inputs": {
        "flake-schemas": "flake-schemas",
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1695926485,
        "narHash": "sha256-wNFFnItckgSs8XeYhhv8vlJs2WF09fSQaWgw4xkDqHQ=",
        "owner": "UbiqueLambda",
        "repo": "yafas",
        "rev": "7772afd6686458ca0ddbc599a52cf5d337367653",
        "type": "github"
      },
      "original": {
        "owner": "UbiqueLambda",
        "repo": "yafas",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 64 lines)

{
  inputs = {
    nixpkgs = {
      url = "github:NixOS/nixpkgs/nixos-unstable";
    };
    yafas = {
      url = "github:UbiqueLambda/yafas";
    };
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };

  outputs =
    {
      self,
      nixpkgs,
      yafas,
      fenix,
      ...
    }: yafas.allSystems nixpkgs ({ pkgs, system }: {
      devShells.default = pkgs.mkShell.override {
        stdenv = if pkgs.stdenv.isLinux then
          pkgs.stdenvAdapters.useMoldLinker pkgs.clangStdenv
        else
          pkgs.clangStdenv;
      } rec {
        packages = with pkgs; [
          (fenix.packages.${system}.complete.withComponents [
            "cargo"
            "clippy"
            "rustc"
            "rustfmt"
            "rust-src"
            "rust-analyzer"
          ])
          gdb
          lldb
          cmake
          pkg-config
        ];
        buildInputs = with pkgs; [
          libGL
          glslang
          vulkan-tools
          vulkan-headers
          vulkan-loader
          xorg.libX11
          xorg.libXcursor
          xorg.libXi
          xorg.libXrandr
          xorg.libxcb
          libxkbcommon
          wayland
          udev
          openssl
        ];
        LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
        RUSTFLAGS = "-Zthreads=8";
      };
    }
  );
}
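Editor's note (not part of the diff): a flake like the one above exposes its toolchain through devShells.default, so it is typically entered with "nix develop" from the repository root, or automatically via direnv's "use flake" (which would explain the .direnv entry added to .gitignore earlier). The mold-linker stdenv override applies only on Linux hosts.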
Changes to the logger setup:

@@ -9,7 +9,7 @@ pub fn init() {
   use env_logger::{fmt::Color, Builder, Env};
 
   let env = Env::default()
-    .filter_or("RUST_LOG", "trace,gilrs=warn,rusty_xinput=warn,wgpu=warn,wgpu_core=warn,wgpu_hal=warn,hui=info,hui-winit=info,hui-glium=info,hui-wgpu=info,naga=warn");
+    .filter_or("RUST_LOG", "trace,gilrs=warn,rusty_xinput=warn,wgpu=warn,wgpu_core=warn,wgpu_hal=warn,hui=info,hui-winit=info,hui-glium=info,hui-wgpu=info,naga=warn,calloop=warn");
   Builder::from_env(env)
     .format(|buf, record| {
       let mut level_style = buf.style();
Changes to client position syncing on the server:

@@ -50,7 +50,7 @@ pub fn sync_client_positions(
   };
 
   //log movement (annoying duh)
-  log::debug!("dbg: player moved id: {} coords: {} quat: {}", message.client_id, position, direction);
+  // log::debug!("dbg: player moved id: {} coords: {} quat: {}", message.client_id, position, direction);
 
   //Apply position to server-side client
   let mut trans = (&mut transforms).get(message.entity_id).unwrap();
Changes to the server config structs:

@@ -1,6 +1,6 @@
 use shipyard::{AllStoragesView, Unique};
 use serde::{Serialize, Deserialize};
-use std::{fs, net::SocketAddr};
+use std::{fs, net::SocketAddr, path::PathBuf};
 
 #[derive(Serialize, Deserialize)]
 pub struct ConfigTableServer {
@@ -12,6 +12,7 @@ pub struct ConfigTableServer {
 
 #[derive(Serialize, Deserialize)]
 pub struct ConfigTableWorld {
+  pub file: Option<PathBuf>,
   pub seed: u64,
   pub preheat_radius: u32,
 }
Changes to the server's main workloads:

@@ -1,5 +1,6 @@
 use shipyard::{IntoWorkload, Workload, WorkloadModificator, World};
 use std::{thread, time::Duration};
+use kubi_shared::fixed_timestamp::{FixedTimestamp, init_fixed_timestamp_storage};
 
 mod util;
 mod config;
@@ -12,10 +13,11 @@ use config::read_config;
 use server::{bind_server, update_server, log_server_errors};
 use client::{init_client_maps, on_client_disconnect, sync_client_positions};
 use auth::authenticate_players;
-use world::{update_world, init_world};
+use world::{init_world, save::save_modified, update_world};
 
 fn initialize() -> Workload {
   (
+    init_fixed_timestamp_storage,
     read_config,
     bind_server,
     init_client_maps,
@@ -32,7 +34,10 @@ fn update() -> Workload {
       update_world,
       sync_client_positions,
       on_client_disconnect,
-    ).into_workload()
+    ).into_workload(),
+    save_modified
+      .into_workload()
+      .make_fixed(10000, 0),
   ).into_sequential_workload()
 }
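The workload change above schedules save_modified on a fixed 10000 ms timestep via make_fixed. As an illustration only (a standalone sketch, not the project's shipyard-based code; every name here is a hypothetical stand-in), the same dirty-flag autosave pattern looks like this in plain Rust:

use std::collections::HashMap;

struct Chunk {
  blocks: Vec<u8>,
  data_modified: bool, // set by block edits, cleared once the chunk is queued for saving
}

// The server runs the real equivalent of this every 10 s (make_fixed(10000, 0) above):
// only dirty chunks are queued, and the flag is cleared so they are not re-saved.
fn save_modified(chunks: &mut HashMap<(i32, i32, i32), Chunk>) -> usize {
  let mut queued = 0;
  for (position, chunk) in chunks.iter_mut() {
    if chunk.data_modified {
      // the real server sends a ChunkTask::SaveChunk to the IO thread here
      println!("queueing chunk {:?} ({} bytes)", position, chunk.blocks.len());
      chunk.data_modified = false;
      queued += 1;
    }
  }
  queued
}

fn main() {
  let mut chunks = HashMap::new();
  chunks.insert((0, 0, 0), Chunk { blocks: vec![0; 32 * 32 * 32], data_modified: true });
  chunks.insert((1, 0, 0), Chunk { blocks: vec![0; 32 * 32 * 32], data_modified: false });

  assert_eq!(save_modified(&mut chunks), 1); // only the modified chunk is queued
  assert_eq!(save_modified(&mut chunks), 0); // flag was cleared, nothing left to do
}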
Changes to the server world module:

@@ -24,12 +24,13 @@ use crate::{
 
 pub mod chunk;
 pub mod tasks;
+pub mod save;
 
 use chunk::Chunk;
 
 use self::{
   tasks::{ChunkTaskManager, ChunkTask, ChunkTaskResponse, init_chunk_task_manager},
-  chunk::ChunkState
+  chunk::ChunkState,
 };
 
 #[derive(Unique, Default)]
@@ -106,7 +107,7 @@ fn process_chunk_requests(
         chunk.state = ChunkState::Loading;
         chunk.subscriptions.insert(message.client_id);
         chunk_manager.chunks.insert(chunk_position, chunk);
-        task_manager.spawn_task(ChunkTask::LoadChunk {
+        task_manager.run(ChunkTask::LoadChunk {
           position: chunk_position,
           seed: config.world.seed,
         });
@@ -249,7 +250,11 @@ fn process_block_queue(
     let Some(blocks) = &mut chunk.blocks else {
       return true
     };
-    blocks[block_position.x as usize][block_position.y as usize][block_position.z as usize] = item.block_type;
+    let block = &mut blocks[block_position.x as usize][block_position.y as usize][block_position.z as usize];
+    if item.block_type != *block {
+      *block = item.block_type;
+      chunk.data_modified = true;
+    }
     false
   });
   if initial_len != queue.queue.len() {
@@ -278,7 +283,7 @@ pub fn preheat_world(
       let mut chunk = Chunk::new();
       chunk.state = ChunkState::Loading;
       chunk_manager.chunks.insert(chunk_position, chunk);
-      task_manager.spawn_task(ChunkTask::LoadChunk {
+      task_manager.run(ChunkTask::LoadChunk {
        position: chunk_position,
        seed: config.world.seed,
      });
@@ -292,7 +297,7 @@ pub fn init_world() -> Workload {
     init_chunk_manager_and_block_queue.before_all(preheat_world),
     init_chunk_task_manager.before_all(preheat_world),
     preheat_world,
-  ).into_workload()
+  ).into_sequential_workload()
 }
 
 pub fn update_world() -> Workload {
Changes to the server-side chunk struct:

@@ -16,13 +16,16 @@ pub struct Chunk {
   pub state: ChunkState,
   pub blocks: Option<BlockData>,
   pub subscriptions: HashSet<ClientId, BuildNoHashHasher<ClientId>>,
+  pub data_modified: bool,
 }
 
 impl Chunk {
   pub fn new() -> Self {
     Self {
       state: ChunkState::Nothing,
       blocks: None,
       subscriptions: HashSet::with_capacity_and_hasher(4, BuildNoHashHasher::default()),
+      data_modified: false,
     }
   }
 }
kubi-server/src/world/save.rs (new file, 43 lines)

use kubi_shared::data::{io_thread::IOThreadManager, open_local_save_file};
use shipyard::{AllStoragesView, UniqueView, UniqueViewMut};
use crate::config::ConfigTable;
use super::{
  tasks::{ChunkTask, ChunkTaskManager},
  ChunkManager,
};

pub fn init_save_file(storages: &AllStoragesView) -> Option<IOThreadManager> {
  let config = storages.borrow::<UniqueView<ConfigTable>>().unwrap();
  if let Some(file_path) = &config.world.file {
    log::info!("Initializing save file from {:?}", file_path);
    let save = open_local_save_file(&file_path).unwrap();
    Some(IOThreadManager::new(save))
  } else {
    log::warn!("No save file specified, world will not be saved");
    None
  }
}

pub fn save_modified(
  mut chunks: UniqueViewMut<ChunkManager>,
  ctm: UniqueView<ChunkTaskManager>,
) {
  log::info!("Saving...");
  let mut amount_saved = 0;
  for (position, chunk) in chunks.chunks.iter_mut() {
    if chunk.data_modified {
      let Some(data) = chunk.blocks.clone() else {
        continue
      };
      ctm.run(ChunkTask::SaveChunk {
        position: *position,
        data: data,
      });
      chunk.data_modified = false;
      amount_saved += 1;
    }
  }
  if amount_saved > 0 {
    log::info!("Queued {} chunks for saving", amount_saved);
  }
}
Changes to the server chunk task manager:

@@ -4,16 +4,19 @@ use glam::IVec3;
 use rayon::{ThreadPool, ThreadPoolBuilder};
 use anyhow::Result;
 use kubi_shared::{
-  chunk::BlockData,
-  worldgen::generate_world,
-  queue::QueuedBlock,
+  chunk::BlockData, data::io_thread::{IOCommand, IOResponse, IOThreadManager}, queue::QueuedBlock, worldgen::generate_world
 };
+use super::save::init_save_file;
 
 pub enum ChunkTask {
   LoadChunk {
     position: IVec3,
     seed: u64,
-  }
+  },
+  SaveChunk {
+    position: IVec3,
+    data: BlockData,
+  },
 }
 
 pub enum ChunkTaskResponse {
@@ -28,33 +31,74 @@ pub enum ChunkTaskResponse {
 pub struct ChunkTaskManager {
   channel: (Sender<ChunkTaskResponse>, Receiver<ChunkTaskResponse>),
   pool: ThreadPool,
+  iota: Option<IOThreadManager>,
 }
 
 impl ChunkTaskManager {
-  pub fn new() -> Result<Self> {
+  pub fn new(iota: Option<IOThreadManager>) -> Result<Self> {
     Ok(Self {
       channel: unbounded(),
-      pool: ThreadPoolBuilder::new().build()?
+      pool: ThreadPoolBuilder::new().build()?,
+      iota,
     })
   }
-  pub fn spawn_task(&self, task: ChunkTask) {
-    let sender = self.channel.0.clone();
-    self.pool.spawn(move || {
-      sender.send(match task {
-        ChunkTask::LoadChunk { position: chunk_position, seed } => {
-          //unwrap is fine because abort is not possible
-          let (blocks, queue) = generate_world(chunk_position, seed, None).unwrap();
-          ChunkTaskResponse::ChunkLoaded { chunk_position, blocks, queue }
-        }
-      }).unwrap()
-    })
+
+  pub fn run(&self, task: ChunkTask) {
+    match task {
+      ChunkTask::LoadChunk { position: chunk_position, seed } => {
+        // 1. Check if the chunk exists in the save file
+        if let ChunkTask::LoadChunk { position, .. } = &task {
+          if let Some(iota) = &self.iota {
+            if iota.chunk_exists(*position) {
+              iota.send(IOCommand::LoadChunk { position: *position });
+              return
+            }
+          }
+        }
+
+        // 2. Generate the chunk if it doesn't exist
+        let sender = self.channel.0.clone();
+        self.pool.spawn(move || {
+          sender.send({
+            //unwrap is fine because abort is not possible
+            let (blocks, queue) = generate_world(chunk_position, seed, None).unwrap();
+            ChunkTaskResponse::ChunkLoaded { chunk_position, blocks, queue }
+          }).unwrap()
+        });
+      },
+      ChunkTask::SaveChunk { position, data } => {
+        // Save the chunk to the save file
+        if let Some(iota) = &self.iota {
+          iota.send(IOCommand::SaveChunk { position, data });
+        }
+      },
+    }
   }
 
   pub fn receive(&self) -> Option<ChunkTaskResponse> {
-    self.channel.1.try_recv().ok()
+    // Try to receive IO results first
+    // If there are none, try to receive worldgen results
+    self.iota.as_ref().map(|iota| {
+      iota.poll_single().map(|response| match response {
+        IOResponse::ChunkLoaded { position, data } => ChunkTaskResponse::ChunkLoaded {
+          chunk_position: position,
+          blocks: data.expect("chunk data exists in the header, but was not loaded"),
+          queue: Vec::with_capacity(0)
+        },
+        _ => panic!("Unexpected response from IO thread"),
+      })
+    }).flatten().or_else(|| {
+      self.channel.1.try_recv().ok()
+    })
   }
 }
 
 pub fn init_chunk_task_manager(
   storages: AllStoragesView
 ) {
-  storages.add_unique(ChunkTaskManager::new().expect("ChunkTaskManager Init failed"));
+  let iota = init_save_file(&storages);
+  storages.add_unique(
+    ChunkTaskManager::new(iota)
+      .expect("ChunkTaskManager Init failed")
+  );
 }
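ChunkTaskManager::run above gives saved chunk data priority over worldgen: if the save file's header already lists the chunk, a LoadChunk IO command is issued and generation is skipped entirely. A standalone sketch of that decision, with simplified hypothetical types rather than the project's API:

use std::collections::HashMap;

type ChunkPos = (i32, i32, i32);
type Blocks = Vec<u8>;

struct SaveFile { chunks: HashMap<ChunkPos, Blocks> }

impl SaveFile {
  fn chunk_exists(&self, pos: ChunkPos) -> bool { self.chunks.contains_key(&pos) }
  fn load_chunk(&self, pos: ChunkPos) -> Option<Blocks> { self.chunks.get(&pos).cloned() }
}

fn generate_chunk(pos: ChunkPos, seed: u64) -> Blocks {
  // placeholder worldgen: derive a deterministic fill value from position and seed
  let v = (pos.0 as u64 ^ pos.1 as u64 ^ pos.2 as u64 ^ seed) as u8;
  vec![v; 32 * 32 * 32]
}

fn load_or_generate(save: Option<&SaveFile>, pos: ChunkPos, seed: u64) -> Blocks {
  if let Some(save) = save {
    if save.chunk_exists(pos) {
      // same priority as the server: saved data wins over generation
      return save.load_chunk(pos).expect("chunk listed in header but missing");
    }
  }
  generate_chunk(pos, seed)
}

fn main() {
  let mut save = SaveFile { chunks: HashMap::new() };
  save.chunks.insert((0, 0, 0), vec![7; 32 * 32 * 32]);
  let from_disk = load_or_generate(Some(&save), (0, 0, 0), 42);
  let generated = load_or_generate(Some(&save), (1, 0, 0), 42);
  assert_eq!(from_disk[0], 7);
  assert_ne!(generated[0], 7);
}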
Changes to kubi-shared's Cargo.toml:

@@ -14,6 +14,7 @@ serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] }
 serde_with = "3.4"
 bincode = "1.3"
 anyhow = "1.0"
+flume = "0.11"
 fastnoise-lite = { version = "1.1", features = ["std", "f64"] }
 rand = { version = "0.8", default_features = false, features = ["std", "min_const_gen"] }
 rand_xoshiro = "0.6"
@@ -23,6 +24,7 @@ bytemuck = { version = "1.14", features = ["derive"] }
 static_assertions = "1.1"
 nz = "0.4"
 atomic = "0.6"
+log = "0.4"
 
 [features]
 default = []
Changes to the world save-file module (kubi-shared/src/data/mod.rs):

@@ -1,7 +1,8 @@
 use std::{
-  fs::File,
   mem::size_of,
+  fs::{File, OpenOptions},
   io::{Read, Seek, SeekFrom, Write},
+  path::Path,
   borrow::Cow,
   sync::{Arc, RwLock}
 };
@@ -17,6 +18,8 @@ use crate::{
   chunk::{CHUNK_SIZE, BlockDataRef, BlockData}
 };
 
+pub mod io_thread;
+
 const SECTOR_SIZE: usize = CHUNK_SIZE * CHUNK_SIZE * CHUNK_SIZE * size_of::<Block>();
 const RESERVED_SIZE: usize = 1048576; //~1mb (16 sectors assuming 32x32x32 world of 1byte blocks)
 const RESERVED_SECTOR_COUNT: usize = RESERVED_SIZE / SECTOR_SIZE;
@@ -47,19 +50,19 @@ impl Default for WorldSaveDataHeader {
   }
 }
 
+pub type SharedHeader = Arc<RwLock<WorldSaveDataHeader>>;
+
 #[derive(Unique)]
 pub struct WorldSaveFile {
   pub file: File,
-  pub header: WorldSaveDataHeader,
+  pub header: SharedHeader,
 }
 
-pub type SharedSaveFile = Arc<RwLock<WorldSaveFile>>;
-
 impl WorldSaveFile {
   pub fn new(file: File) -> Self {
     WorldSaveFile {
       file,
-      header: WorldSaveDataHeader::default()
+      header: Arc::new(RwLock::new(WorldSaveDataHeader::default())),
     }
   }
 
@@ -76,7 +79,7 @@ impl WorldSaveFile {
     }
 
     let limit = (RESERVED_SIZE - SUBHEADER_SIZE) as u64;
-    self.header = bincode::deserialize_from((&self.file).take(limit))?;
+    *self.header.write().unwrap() = bincode::deserialize_from((&self.file).take(limit))?;
 
     Ok(())
   }
@@ -88,7 +91,7 @@ impl WorldSaveFile {
     //XXX: this can cause the header to destroy chunk data (if it's WAY too long)
     // read has checks against this, but write doesn't
     // 1mb is pretty generous tho, so it's not a *big* deal
-    bincode::serialize_into(&self.file, &self.header)?;
+    bincode::serialize_into(&self.file, &*self.header.read().unwrap())?;
     Ok(())
   }
 
@@ -102,19 +105,28 @@ impl WorldSaveFile {
     Ok(())
   }
 
-  fn allocate_sector(&mut self) -> u32 {
-    let value = self.header.sector_count + 1;
-    self.header.sector_count += 1;
-    value
-  }
+  // fn allocate_sector(&mut self) -> u32 {
+  //   let mut lock = self.header.write().unwrap();
+  //   let value = lock.sector_count + 1;
+  //   lock.sector_count += 1;
+  //   value
+  // }
 
   pub fn save_chunk(&mut self, position: IVec3, data: &BlockDataRef) -> Result<()> {
+    let mut header_lock = self.header.write().unwrap();
+
     let mut header_modified = false;
-    let sector = self.header.chunk_map.get(&position).copied().unwrap_or_else(|| {
+    let sector = header_lock.chunk_map.get(&position).copied().unwrap_or_else(|| {
       header_modified = true;
-      self.allocate_sector()
+      let sector = header_lock.sector_count;
+      header_lock.sector_count += 1;
+      header_lock.chunk_map.insert(position, sector);
+      sector
+      // self.allocate_sector()
     });
 
+    drop(header_lock);
+
     let offset = sector as u64 * SECTOR_SIZE as u64;
 
     const_assert_eq!(size_of::<Block>(), 1);
@@ -136,11 +148,11 @@ impl WorldSaveFile {
   }
 
   pub fn chunk_exists(&self, position: IVec3) -> bool {
-    self.header.chunk_map.contains_key(&position)
+    self.header.read().unwrap().chunk_map.contains_key(&position)
   }
 
   pub fn load_chunk(&mut self, position: IVec3) -> Result<Option<BlockData>> {
-    let Some(&sector) = self.header.chunk_map.get(&position) else {
+    let Some(&sector) = self.header.read().unwrap().chunk_map.get(&position) else {
       return Ok(None);
     };
 
@@ -166,4 +178,26 @@ impl WorldSaveFile {
 
     Ok(Some(data))
   }
+
+  pub fn get_shared_header(&self) -> SharedHeader {
+    Arc::clone(&self.header)
+  }
 }
+
+/// Utility function to open a local save file, creating it if it doesn't exist
+pub fn open_local_save_file(path: &Path) -> Result<WorldSaveFile> {
+  let mut save_file = WorldSaveFile::new({
+    OpenOptions::new()
+      .read(true)
+      .write(true)
+      .create(true)
+      .open(path)?
+  });
+  if save_file.file.metadata().unwrap().len() == 0 {
+    save_file.initialize()?;
+  } else {
+    save_file.load_data()?;
+  }
+  Ok(save_file)
+}
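WorldSaveFile above stores the world as a bincode-serialized header (a chunk-position-to-sector map plus a sector counter) inside a reserved roughly 1 MiB region, followed by fixed-size sectors holding one chunk each. A simplified standalone sketch of that layout, using an in-memory buffer instead of a real file (the types and sizes here are illustrative assumptions, not the crate's actual constants):

use std::collections::HashMap;

const SECTOR_SIZE: usize = 32 * 32 * 32; // one byte per block for a 32^3 chunk
const RESERVED_SIZE: usize = 1_048_576;  // region reserved at the front for the header

#[derive(Default)]
struct Header {
  sector_count: u32,
  chunk_map: HashMap<(i32, i32, i32), u32>, // chunk position -> sector index
}

struct SaveFile {
  header: Header,
  storage: Vec<u8>, // stands in for the backing std::fs::File
}

impl SaveFile {
  fn new() -> Self {
    Self { header: Header::default(), storage: vec![0; RESERVED_SIZE] }
  }

  fn save_chunk(&mut self, position: (i32, i32, i32), data: &[u8]) {
    assert_eq!(data.len(), SECTOR_SIZE);
    // Reuse the chunk's sector if it was saved before, otherwise allocate a new one.
    let sector = match self.header.chunk_map.get(&position) {
      Some(&s) => s,
      None => {
        let s = self.header.sector_count;
        self.header.sector_count += 1;
        self.header.chunk_map.insert(position, s);
        s
      }
    };
    let offset = RESERVED_SIZE + sector as usize * SECTOR_SIZE;
    if self.storage.len() < offset + SECTOR_SIZE {
      self.storage.resize(offset + SECTOR_SIZE, 0);
    }
    self.storage[offset..offset + SECTOR_SIZE].copy_from_slice(data);
  }

  fn load_chunk(&self, position: (i32, i32, i32)) -> Option<Vec<u8>> {
    let sector = *self.header.chunk_map.get(&position)?;
    let offset = RESERVED_SIZE + sector as usize * SECTOR_SIZE;
    Some(self.storage[offset..offset + SECTOR_SIZE].to_vec())
  }
}

fn main() {
  let mut save = SaveFile::new();
  save.save_chunk((0, 0, 0), &vec![7u8; SECTOR_SIZE]);
  assert_eq!(save.load_chunk((0, 0, 0)).unwrap()[0], 7);
  assert!(save.load_chunk((1, 0, 0)).is_none());
}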
kubi-shared/src/data/io_thread.rs (new file, 249 lines)

use glam::IVec3;
use flume::{Receiver, Sender, TryIter};
use shipyard::Unique;
use crate::chunk::BlockData;
use super::{SharedHeader, WorldSaveFile};

// Maximum amount of chunks to save in a single batch before checking if there are any pending read requests
// may be broken, so currently disabled
const MAX_SAVE_BATCH_SIZE: usize = usize::MAX;

pub enum IOCommand {
  SaveChunk {
    position: IVec3,
    data: BlockData,
  },

  /// Load a chunk from the disk and send it to the main thread
  LoadChunk {
    position: IVec3,
  },

  /// Process all pending write commands and make the thread end itself
  /// LoadChunk commands will be ignored after this command is received
  Kys,
}

#[derive(Debug)]
pub enum IOResponse {
  /// A chunk has been loaded from the disk
  /// Or not, in which case the data will be None
  /// and chunk should be generated
  ChunkLoaded {
    position: IVec3,
    data: Option<BlockData>,
  },

  /// The IO thread has been terminated
  Terminated,
}

struct IOThreadContext {
  tx: Sender<IOResponse>,
  rx: Receiver<IOCommand>,
  save: WorldSaveFile,
  save_queue: Vec<(IVec3, BlockData)>,
}

//TODO: Implement proper error handling (I/O errors are rlly common)

impl IOThreadContext {
  /// Should be called ON the IO thread
  ///
  /// Initializes the IO thread context, opening the file at the given path
  /// If there's an error opening the file, the thread will panic (TODO: handle this more gracefully)
  pub fn initialize(
    tx: Sender<IOResponse>,
    rx: Receiver<IOCommand>,
    save: WorldSaveFile,
  ) -> Self {
    // save.load_data().unwrap();
    let save_queue = Vec::new();
    Self { tx, rx, save, save_queue }
  }

  pub fn run(mut self) {
    loop {
      // because were waiting for the next command, we can't process the save_queue
      // which breaks batching, so we need to check if there are any pending save requests
      // and if there are, use non-blocking recv to give them a chance to be processed
      'rx: while let Some(command) = {
        if self.save_queue.len() > 0 {
          self.rx.try_recv().ok()
        } else {
          self.rx.recv().ok()
        }
      } {
        match command {
          IOCommand::SaveChunk { position, data } => {
            // if chunk already has a save request, overwrite it
            for (pos, old_data) in self.save_queue.iter_mut() {
              if *pos == position {
                *old_data = data;
                continue 'rx;
              }
            }
            // if not, save to the queue
            self.save_queue.push((position, data));
            //log::trace!("amt of unsaved chunks: {}", self.save_queue.len());
          }
          IOCommand::LoadChunk { position } => {
            // HOLD ON
            // first check if the chunk is already in the save queue
            // if it is, send it and continue
            // (NOT doing this WILL result in data loss if the user returns to the chunk too quickly)
            for (pos, data) in self.save_queue.iter() {
              if *pos == position {
                self.tx.send(IOResponse::ChunkLoaded { position, data: Some(data.clone()) }).unwrap();
                continue 'rx;
              }
            }
            let data = self.save.load_chunk(position).unwrap();
            self.tx.send(IOResponse::ChunkLoaded { position, data }).unwrap();
          }
          IOCommand::Kys => {
            // Process all pending write commands
            log::info!("info: queue has {} chunks", self.save_queue.len());
            let mut saved_amount = 0;
            for (pos, data) in self.save_queue.drain(..) {
              self.save.save_chunk(pos, &data).unwrap();
              saved_amount += 1;
            }
            log::debug!("now, moving on to the rx queue...");
            for cmd in self.rx.try_iter() {
              let IOCommand::SaveChunk { position, data } = cmd else {
                continue;
              };
              self.save.save_chunk(position, &data).unwrap();
              saved_amount += 1;
            }
            log::info!("saved {} chunks on exit", saved_amount);
            self.tx.send(IOResponse::Terminated).unwrap();
            return;
          }
        }
      }
      // between every betch of requests, check if there are any pending save requests
      if self.save_queue.len() > 0 {
        let will_drain = MAX_SAVE_BATCH_SIZE.min(self.save_queue.len());
        log::info!("saving {}/{} chunks with batch size {}...", will_drain, self.save_queue.len(), MAX_SAVE_BATCH_SIZE);
        for (pos, data) in self.save_queue.drain(..will_drain) {
          self.save.save_chunk(pos, &data).unwrap();
        }
      }
    }
  }
}

pub struct IOSingleThread {
  tx: Sender<IOCommand>,
  rx: Receiver<IOResponse>,
  handle: std::thread::JoinHandle<()>,
  header: SharedHeader,
}

impl IOSingleThread {
  pub fn spawn(save: WorldSaveFile) -> Self {
    // Create channels
    let (command_tx, command_rx) = flume::unbounded();
    let (response_tx, response_rx) = flume::unbounded();

    // Grab a handle to the header
    let header = save.get_shared_header();

    // Spawn the thread
    let builder = std::thread::Builder::new()
      .name("io-thread".into());
    let handle = builder.spawn(move || {
      let context = IOThreadContext::initialize(response_tx, command_rx, save);
      context.run();
    }).unwrap();

    IOSingleThread {
      tx: command_tx,
      rx: response_rx,
      handle,
      header,
    }
  }

  /// Send a command to the IO thread
  pub fn send(&self, cmd: IOCommand) {
    self.tx.send(cmd).unwrap();
  }

  /// Poll the IO thread for a single response (non-blocking)
  pub fn poll_single(&self) -> Option<IOResponse> {
    self.rx.try_recv().ok()
  }

  /// Poll the IO thread for responses (non-blocking)
  pub fn poll(&self) -> TryIter<IOResponse> {
    self.rx.try_iter()
  }

  /// Signal the IO thread to process the remaining requests and wait for it to terminate
  pub fn stop_sync(&self) {
    log::debug!("Stopping IO thread (sync)");

    // Tell the thread to terminate and wait for it to finish
    self.tx.send(IOCommand::Kys).unwrap();
    while !matches!(self.rx.recv().unwrap(), IOResponse::Terminated) {}

    // HACK "we have .join at home"
    while !self.handle.is_finished() {}

    log::debug!("IO thread stopped"); //almost lol
  }

  /// Same as stop_sync but doesn't wait for the IO thread to terminate
  pub fn stop_async(&self) {
    log::debug!("Stopping IO thread (async)");
    self.tx.send(IOCommand::Kys).unwrap();
  }

  pub fn chunk_exists(&self, position: IVec3) -> bool {
    self.header.read().unwrap().chunk_map.contains_key(&position)
  }
}

impl Drop for IOSingleThread {
  fn drop(&mut self) {
    log::trace!("IOSingleThread dropped, about to sync unsaved data...");
    self.stop_sync();
  }
}

/// This is a stub for future implemntation that may use multiple IO threads
#[derive(Unique)]
pub struct IOThreadManager {
  thread: IOSingleThread,
}

impl IOThreadManager {
  pub fn new(save: WorldSaveFile) -> Self {
    Self {
      thread: IOSingleThread::spawn(save)
    }
  }

  pub fn send(&self, cmd: IOCommand) {
    self.thread.send(cmd);
  }

  pub fn poll_single(&self) -> Option<IOResponse> {
    self.thread.poll_single()
  }

  pub fn poll(&self) -> TryIter<IOResponse> {
    self.thread.poll()
  }

  pub fn chunk_exists(&self, position: IVec3) -> bool {
    self.thread.chunk_exists(position)
  }
}

// i think im a girl :3 (noone will ever read this right? :p)
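io_thread.rs above runs a single dedicated IO worker that owns the WorldSaveFile and talks to the game over flume channels, with a terminate command (Kys) that flushes queued writes before the thread exits. A minimal std-only sketch of that command/response pattern (hypothetical types, std::sync::mpsc standing in for flume):

use std::sync::mpsc;
use std::thread;

enum Command {
  Save { position: (i32, i32, i32), data: Vec<u8> },
  Load { position: (i32, i32, i32) },
  Shutdown, // analogous to IOCommand::Kys
}

enum Response {
  Loaded { position: (i32, i32, i32), data: Option<Vec<u8>> },
  Terminated,
}

fn main() {
  let (cmd_tx, cmd_rx) = mpsc::channel::<Command>();
  let (resp_tx, resp_rx) = mpsc::channel::<Response>();

  // The worker owns the "save file" and is the only thread touching it.
  let handle = thread::spawn(move || {
    let mut disk: Vec<((i32, i32, i32), Vec<u8>)> = Vec::new(); // stands in for the save file
    while let Ok(cmd) = cmd_rx.recv() {
      match cmd {
        Command::Save { position, data } => disk.push((position, data)),
        Command::Load { position } => {
          let data = disk.iter().find(|(p, _)| *p == position).map(|(_, d)| d.clone());
          resp_tx.send(Response::Loaded { position, data }).unwrap();
        }
        Command::Shutdown => {
          // a real implementation would flush any queued writes here
          resp_tx.send(Response::Terminated).unwrap();
          return;
        }
      }
    }
  });

  cmd_tx.send(Command::Save { position: (0, 0, 0), data: vec![1, 2, 3] }).unwrap();
  cmd_tx.send(Command::Load { position: (0, 0, 0) }).unwrap();
  if let Ok(Response::Loaded { data, .. }) = resp_rx.recv() {
    assert_eq!(data, Some(vec![1, 2, 3]));
  }
  cmd_tx.send(Command::Shutdown).unwrap();
  while !matches!(resp_rx.recv().unwrap(), Response::Terminated) {}
  handle.join().unwrap();
}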
Changes to the item slot system:

@@ -38,29 +38,194 @@ impl Item {
 pub struct ItemCollection(Option<(Item, NonZeroU8)>);
 
 impl ItemCollection {
-  pub const fn new(item: Item, amount: NonZeroU8) -> Self {
+  /// Create a new item collection with `amount` of `item`
+  ///
+  /// If `amount` is 0, the slot will be empty, and the item will be ignored
+  pub const fn new(item: Item, amount: u8) -> Self {
+    if amount == 0 {
+      return Self::new_empty()
+    }
+    // SAFETY: `amount` guaranteed to be non-zero
+    let amount = unsafe { NonZeroU8::new_unchecked(amount) };
+    Self::new_nonzero(item, amount)
+  }
+
+  /// Create a new item collection with `amount` of `item`
+  pub const fn new_nonzero(item: Item, amount: NonZeroU8) -> Self {
     Self(Some((item, amount)))
   }
 
+  /// Create a new item collection with a single item
   pub const fn new_single(item: Item) -> Self {
     Self(Some((item, nz::u8!(1))))
   }
 
+  /// Create a new empty item collection
   pub const fn new_empty() -> Self {
     Self(None)
   }
 
-  pub const fn with_amount(&self, amount: NonZeroU8) -> Self {
+  /// Set the amount of items in the slot to `amount`\
+  ///
+  /// If `amount` is 0, the slot will be emptied\
+  /// If slot is empty, this will do nothing, even if `amount` is non-zero
+  pub const fn with_amount(&self, amount: u8) -> Self {
+    if amount == 0 {
+      return Self::new_empty()
+    }
+    // SAFETY: `amount` guaranteed to be non-zero
+    let amount = unsafe { NonZeroU8::new_unchecked(amount) };
+    self.with_amount_nonzero(amount)
+  }
+
+  /// Set the amount of items in the slot to `amount`
+  ///
+  /// If slot is empty, this will do nothing
+  pub const fn with_amount_nonzero(&self, amount: NonZeroU8) -> Self {
     Self(match self.0 {
       Some((item, _)) => Some((item, amount)),
       None => None,
     })
   }
 
+  /// Check if the slot is empty (contains no items)
+  pub const fn is_empty(&self) -> bool {
+    self.0.is_none()
+  }
+
+  /// Check if the slot is full (contains the maximum stack size)
+  pub const fn is_full(&self) -> bool {
+    match self.0 {
+      Some((item, amount)) => {
+        amount.get() >= item.descriptor().stack_size.get()
+      },
+      None => false,
+    }
+  }
+
+  /// Get the item in the slot
+  ///
+  /// If the slot is empty, returns None
+  pub const fn item(&self) -> Option<Item> {
+    match self.0 {
+      Some((item, _)) => Some(item),
+      None => None,
+    }
+  }
+
+  /// Get the amount of items in the slot
+  ///
+  /// If the slot is empty, returns 0
+  pub const fn amount(&self) -> u8 {
+    match self.0 {
+      Some((_, amount)) => amount.get(),
+      None => 0,
+    }
+  }
+
+  /// Get the amount of items in the slot
+  ///
+  /// If the slot is empty, returns None
+  pub const fn amount_nonzero(&self) -> Option<NonZeroU8> {
+    match self.0 {
+      Some((_, amount)) => Some(amount),
+      None => None,
+    }
+  }
+
   /// Add items from another slot, copying them\
-  /// Returns the leftover items
+  /// Returns the leftover items (items that could not be added)
   pub fn add(&mut self, from: &Self) -> Self {
-    let Some((item, count)) = from.0 else { return Self::new_empty() };
-    todo!() //TODO finish item slot system
+    // If there are no items to add, return
+    let Some((add_item, add_count)) = from.0 else {
+      return Self::new_empty()
+    };
+    let item_stack_size = add_item.descriptor().stack_size;
+
+    // Add items to the slot
+    let (this_slot, leftovers) = match self.0 {
+      None => (
+        (
+          add_item,
+          add_count.min(item_stack_size)
+        ),
+        match add_count > item_stack_size {
+          true => Self::new_nonzero(
+            add_item,
+            NonZeroU8::new(
+              add_count.get() - item_stack_size.get()
+            ).unwrap(),
+          ),
+          false => Self::new_empty()
+        }
+      ),
+      Some((cur_item, cur_count)) if cur_item == add_item => {
+        let total_count = cur_count.checked_add(add_count.get()).unwrap();
+        (
+          (
+            cur_item,
+            total_count.min(item_stack_size),
+          ),
+          match total_count > item_stack_size {
+            true => Self::new_nonzero(
+              add_item,
+              NonZeroU8::new(
+                total_count.get() - item_stack_size.get()
+              ).unwrap()
+            ),
+            false => Self::new_empty()
+          }
+        )
+      },
+      // If items are different, do not add anything, everything is leftovers
+      _ => return *from,
+    };
+
+    self.0 = Some(this_slot);
+    leftovers
+  }
+
+  /// Move as much as possible items from another slot, removing them
+  ///
+  /// This may not be possible if the slot is full or contains a different item
+  pub fn move_all(&mut self, to: &mut Self) {
+    let leftovers = to.add(self);
+    *self = leftovers;
+  }
+
+  /// Move up to `amount` items from another slot, removing them
+  ///
+  /// If `amount` is 0, nothing will be moved
+  pub fn move_up_to(&mut self, to: &mut Self, limit: u8) {
+    if self.is_empty() { return }
+    // SAFETY: slot is guaranteed to be non-empty
+    let amount = unsafe { self.amount_nonzero().unwrap_unchecked() };
+
+    if limit == 0 { return }
+    // SAFETY: `limit` guaranteed to be non-zero
+    let limit = unsafe { NonZeroU8::new_unchecked(limit) };
+
+    let amount_with_limit = amount.min(limit);
+    let self_with_limit = self.with_amount_nonzero(amount_with_limit);
+
+    let mut leftovers = to.add(&self_with_limit);
+
+    // Compensate for the amount of items that were not moved
+    let amount_difference = amount.get() - amount_with_limit.get();
+    if amount_difference > 0 {
+      let correct_item = self.item().unwrap();
+      let correct_amount = leftovers.amount() + amount_difference;
+      leftovers = Self::new(correct_item, correct_amount);
+    }
+
+    *self = leftovers;
+  }
+
+  /// Try to move a single item from another slot, removing it
+  ///
+  /// This may not be possible if the slot is full or contains a different item
+  pub fn move_single(&mut self, to: &mut Self) {
+    self.move_up_to(to, 1);
   }
 }
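The new ItemCollection::add caps the destination at the item's stack size and returns whatever did not fit as leftovers; move_up_to builds on it by limiting how many items are offered at once. A standalone sketch of that stacking rule (with a hypothetical Item stand-in and a fixed stack size, not the game's item definitions):

use std::num::NonZeroU8;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Item(u8); // stand-in for the game's Item enum

const STACK_SIZE: u8 = 32; // stand-in for item.descriptor().stack_size

#[derive(Clone, Copy, Debug)]
struct Slot(Option<(Item, NonZeroU8)>);

impl Slot {
  fn new(item: Item, amount: u8) -> Self {
    Self(NonZeroU8::new(amount).map(|a| (item, a)))
  }
  fn amount(&self) -> u8 { self.0.map_or(0, |(_, a)| a.get()) }

  /// Add items from `from`, returning the leftovers that did not fit.
  fn add(&mut self, from: &Self) -> Self {
    let Some((add_item, add_count)) = from.0 else { return Self(None) };
    let (cur_item, cur_count) = match self.0 {
      None => (add_item, 0),
      Some((i, c)) if i == add_item => (i, c.get()),
      Some(_) => return *from, // different item: nothing moves, everything is leftovers
    };
    let total = cur_count as u16 + add_count.get() as u16;
    let kept = total.min(STACK_SIZE as u16) as u8;
    let leftover = (total - kept as u16) as u8;
    *self = Self::new(cur_item, kept);
    Self::new(add_item, leftover)
  }
}

fn main() {
  let mut hotbar_slot = Slot::new(Item(1), 30);
  let incoming = Slot::new(Item(1), 10);
  let leftovers = hotbar_slot.add(&incoming);
  assert_eq!(hotbar_slot.amount(), 32); // filled up to the stack size
  assert_eq!(leftovers.amount(), 8);    // 8 items did not fit
}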
Changes to kubi-shared's module list:

@@ -8,3 +8,4 @@ pub mod entity;
 pub mod player;
 pub mod queue;
 pub mod data;
+pub mod fixed_timestamp;
Changes to the kubi crate's Cargo.toml features:

@@ -48,8 +48,11 @@ ndk = "0.9"
 winapi = { version = "0.3", features = ["wincon"] }
 
 [features]
-default = ["raw-evt"]
-raw-evt = [] #required for mouse input, but breaks keyboard on android
+default = ["raw-evt-mouse"]
+raw-evt-keyboard = [] # use raw input for keyboard. works on x11 and windows, breaks keyboard on android and wayland
+raw-evt-mouse = [] # use raw input for mouse movement events. *required* for mouse input
+raw-evt-button = [] # use raw input for mouse button events. used to be the default, but breaks on wayland.
+c-ffi = [] # generate a C-ffi-compatible `kubi_extern_main` entry point (useful if building as a shared library)
 generate_visualizer_data = ["dep:serde_json", "shipyard/serde1"]
 safe_lz4 = ["lz4_flex/safe-encode", "lz4_flex/safe-decode"]
 parallel = ["shipyard/parallel"] # causes some serious issues!
Changes to block picking:

@@ -34,7 +34,7 @@ fn pick_block_with_number_keys(
   mut holding: ViewMut<PlayerHolding>,
   input: UniqueView<RawKbmInputState>,
 ) {
-  let Some((_, mut holding)) = (&main_player, &mut holding).iter().next() else { return };
+  let Some((_, holding)) = (&main_player, &mut holding).iter().next() else { return };
   for &(key, block) in BLOCK_KEY_MAP {
     if input.keyboard_state.contains(key as u32) {
       holding.0 = Some(block);
Changes to frustum updates:

@@ -124,7 +124,7 @@ pub fn update_frustum(
   mut cameras: ViewMut<Camera>,
   transforms: View<Transform, track::All>
 ) {
-  for (mut camera, _) in (&mut cameras, transforms.inserted_or_modified()).iter() {
+  for (camera, _) in (&mut cameras, transforms.inserted_or_modified()).iter() {
     camera.frustum = Frustum::compute(&camera);
   }
 }
Changes to camera matrix updates:

@@ -9,7 +9,7 @@ fn update_view_matrix(
   mut vm_camera: ViewMut<Camera>,
   v_transform: View<Transform, track::All>
 ) {
-  for (mut camera, transform) in (&mut vm_camera, v_transform.inserted_or_modified()).iter() {
+  for (camera, transform) in (&mut vm_camera, v_transform.inserted_or_modified()).iter() {
     let (_, rotation, translation) = transform.0.to_scale_rotation_translation();
     let direction = (rotation.normalize() * Vec3::NEG_Z).normalize();
     camera.view_matrix = Mat4::look_to_rh(translation, direction, camera.up);
@@ -21,7 +21,7 @@ fn update_perspective_matrix(
   ren: UniqueView<Renderer>,
 ) {
   let sz = ren.size_vec2();
-  for mut camera in (&mut vm_camera).iter() {
+  for camera in (&mut vm_camera).iter() {
     camera.perspective_matrix = Mat4::perspective_rh(
       camera.fov,
       sz.x / sz.y,
Changes to client physics:

@@ -106,7 +106,7 @@ pub fn update_client_physics_late(
   world: UniqueView<ChunkStorage>,
   dt: UniqueView<DeltaTime>,
 ) {
-  for (mut actor, mut transform) in (&mut actors, &mut transforms).iter() {
+  for (actor, mut transform) in (&mut actors, &mut transforms).iter() {
     if actor.disable {
       actor.forces = Vec3::ZERO;
       continue;
Changes to the exit handling imports:

@@ -1,5 +1,5 @@
 use shipyard::{UniqueView, UniqueViewMut, Unique, AllStoragesView};
-use winit::{keyboard::KeyCode, event_loop::ControlFlow};
+use winit::keyboard::KeyCode;
 use crate::input::RawKbmInputState;
 
 #[derive(Unique)]
Changes to winit event processing:

@@ -1,7 +1,6 @@
 use glam::UVec2;
-use shipyard::{World, Component, AllStoragesViewMut, SparseSet, NonSendSync, UniqueView};
-use winit::event::{Event, DeviceEvent, DeviceId, WindowEvent, Touch};
-use crate::rendering::Renderer;
+use shipyard::{World, Component, AllStoragesViewMut, SparseSet};
+use winit::event::{Event, DeviceEvent, DeviceId, WindowEvent, Touch, MouseButton};
 
 pub mod player_actions;
 
@@ -35,8 +34,12 @@ pub fn process_winit_events(world: &mut World, event: &Event<()>) {
       ));
     },
 
-    #[cfg(not(feature = "raw-evt"))]
+    #[cfg(not(feature = "raw-evt-keyboard"))]
     WindowEvent::KeyboardInput { device_id, event, .. } => {
+      // HACK: translate KeyboardInput events to raw device events
+      if event.repeat {
+        return;
+      }
       world.add_entity((
         EventComponent,
         InputDeviceEvent {
@@ -49,6 +52,28 @@ pub fn process_winit_events(world: &mut World, event: &Event<()>) {
       ));
     }
 
+    #[cfg(not(feature = "raw-evt-button"))]
+    WindowEvent::MouseInput { device_id, state, button } => {
+      // HACK: translate MouseInput events to raw device events
+      world.add_entity((
+        EventComponent,
+        InputDeviceEvent {
+          device_id: *device_id,
+          event: DeviceEvent::Button {
+            button: match button {
+              MouseButton::Left => 0,
+              MouseButton::Right => 1,
+              MouseButton::Middle => 2,
+              MouseButton::Back => 3,
+              MouseButton::Forward => 4,
+              MouseButton::Other(id) => *id as u32,
+            },
+            state: *state
+          }
+        }
+      ));
+    }
+
     WindowEvent::Touch(touch) => {
       // if matches!(touch.phase, TouchPhase::Started | TouchPhase::Cancelled | TouchPhase::Ended) {
       //   println!("TOUCH ==================== {:#?}", touch);
@@ -64,8 +89,25 @@ pub fn process_winit_events(world: &mut World, event: &Event<()>) {
       _ => ()
     },
 
-    #[cfg(feature = "raw-evt")]
+    #[cfg(any(
+      feature = "raw-evt-keyboard",
+      feature = "raw-evt-mouse",
+      feature = "raw-evt-button",
+    ))]
     Event::DeviceEvent { device_id, event } => {
+      // Filter out events we don't care about
+      match event {
+        #[cfg(feature = "raw-evt-keyboard")]
+        DeviceEvent::Key(_) => (),
+
+        #[cfg(feature = "raw-evt-mouse")]
+        DeviceEvent::MouseMotion { .. } => (),
+
+        #[cfg(feature = "raw-evt-button")]
+        DeviceEvent::Button { .. } => (),
+
+        _ => return,
+      };
       world.add_entity((
         EventComponent,
         InputDeviceEvent {
Changes to the filesystem module imports:

@@ -1,6 +1,6 @@
 use std::{fs::File, path::Path, io::{Read, Seek}};
 use anyhow::Result;
-use shipyard::{Unique, AllStoragesView};
+use shipyard::Unique;
 
 pub trait ReadOnly: Read + Seek {}
 impl<T: Read + Seek> ReadOnly for T {}
Changes to client initialization from CLI arguments:

@@ -1,37 +1,28 @@
 use shipyard::{AllStoragesView, UniqueViewMut};
-use std::{env, net::SocketAddr, fs::OpenOptions, path::Path};
-use anyhow::Result;
+use std::{env, net::SocketAddr, path::Path};
 use crate::{
   networking::{GameType, ServerAddress},
   state::{GameState, NextState}
 };
-use kubi_shared::data::WorldSaveFile;
-
-fn open_local_save_file(path: &Path) -> Result<WorldSaveFile> {
-  let mut save_file = WorldSaveFile::new({
-    OpenOptions::new()
-      .read(true)
-      .write(true)
-      .open("world.kbi")?
-  });
-  if save_file.file.metadata().unwrap().len() == 0 {
-    save_file.initialize()?;
-  } else {
-    save_file.load_data()?;
-  }
-  Ok(save_file)
-}
+use kubi_shared::data::{io_thread::IOThreadManager, open_local_save_file};
 
 pub fn initialize_from_args(
   all_storages: AllStoragesView,
 ) {
+  // If an address is provided, we're in multiplayer mode (the first argument is the address)
+  // Otherwise, we're in singleplayer mode and working with local stuff
   let args: Vec<String> = env::args().collect();
   if args.len() > 1 {
+    // Parse the address and switch the state to connecting
     let address = args[1].parse::<SocketAddr>().expect("invalid address");
     all_storages.add_unique(GameType::Muliplayer);
     all_storages.add_unique(ServerAddress(address));
     all_storages.borrow::<UniqueViewMut<NextState>>().unwrap().0 = Some(GameState::Connecting);
   } else {
+    // Open the local save file
+    let save_file = open_local_save_file(Path::new("./world.kubi")).expect("failed to open save file");
+    all_storages.add_unique(IOThreadManager::new(save_file));
+    // Switch the state and kick off the world loading
     all_storages.add_unique(GameType::Singleplayer);
     all_storages.borrow::<UniqueViewMut<NextState>>().unwrap().0 = Some(GameState::LoadingWorld);
   }
 }
@ -23,7 +23,9 @@ use winit::{
|
||||||
use glam::vec3;
|
use glam::vec3;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
|
//TODO remove these re-exports
|
||||||
pub(crate) use kubi_shared::transform;
|
pub(crate) use kubi_shared::transform;
|
||||||
|
pub(crate) use kubi_shared::fixed_timestamp;
|
||||||
|
|
||||||
mod ui;
|
mod ui;
|
||||||
pub(crate) use ui::{
|
pub(crate) use ui::{
|
||||||
|
@ -51,17 +53,12 @@ pub(crate) mod hui_integration;
|
||||||
pub(crate) mod networking;
|
pub(crate) mod networking;
|
||||||
pub(crate) mod init;
|
pub(crate) mod init;
|
||||||
pub(crate) mod color;
|
pub(crate) mod color;
|
||||||
pub(crate) mod fixed_timestamp;
|
|
||||||
pub(crate) mod filesystem;
|
pub(crate) mod filesystem;
|
||||||
pub(crate) mod client_physics;
|
pub(crate) mod client_physics;
|
||||||
pub(crate) mod chat;
|
pub(crate) mod chat;
|
||||||
|
|
||||||
use world::{
|
use world::{
|
||||||
init_game_world,
|
init_game_world, loading::{save_on_exit, update_loaded_world_around_player}, queue::apply_queued_blocks, raycast::update_raycasts, tasks::ChunkTaskManager
|
||||||
loading::update_loaded_world_around_player,
|
|
||||||
raycast::update_raycasts,
|
|
||||||
queue::apply_queued_blocks,
|
|
||||||
tasks::ChunkTaskManager,
|
|
||||||
};
|
};
|
||||||
use player::{spawn_player, MainPlayer};
|
use player::{spawn_player, MainPlayer};
|
||||||
use prefabs::load_prefabs;
|
use prefabs::load_prefabs;
|
||||||
|
@ -157,7 +154,6 @@ fn update() -> Workload {
|
||||||
kubi_ui_end,
|
kubi_ui_end,
|
||||||
update_state,
|
update_state,
|
||||||
exit_on_esc,
|
exit_on_esc,
|
||||||
disconnect_on_exit.run_if(is_multiplayer),
|
|
||||||
update_rendering_late,
|
update_rendering_late,
|
||||||
).into_sequential_workload()
|
).into_sequential_workload()
|
||||||
}
|
}
|
||||||
|
@ -183,21 +179,44 @@ fn after_render() -> Workload {
|
||||||
).into_sequential_workload()
|
).into_sequential_workload()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn on_exit() -> Workload{
|
||||||
|
(
|
||||||
|
disconnect_on_exit.run_if(is_multiplayer),
|
||||||
|
save_on_exit.run_if(is_singleplayer),
|
||||||
|
).into_sequential_workload().run_if(is_ingame_or_loading)
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(all(windows, not(debug_assertions)))]
|
#[cfg(all(windows, not(debug_assertions)))]
|
||||||
fn attach_console() {
|
fn attach_console() {
|
||||||
use winapi::um::wincon::{AttachConsole, ATTACH_PARENT_PROCESS};
|
use winapi::um::wincon::{AttachConsole, ATTACH_PARENT_PROCESS};
|
||||||
unsafe { AttachConsole(ATTACH_PARENT_PROCESS); }
|
unsafe { AttachConsole(ATTACH_PARENT_PROCESS); }
|
||||||
}
|
}
|
||||||
|
|
||||||
#[no_mangle]
|
|
||||||
#[cfg(target_os = "android")]
|
#[cfg(target_os = "android")]
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
pub fn android_main(app: android_activity::AndroidApp) {
|
pub fn android_main(app: android_activity::AndroidApp) {
|
||||||
use android_activity::WindowManagerFlags;
|
use android_activity::WindowManagerFlags;
|
||||||
app.set_window_flags(WindowManagerFlags::FULLSCREEN, WindowManagerFlags::empty());
|
app.set_window_flags(WindowManagerFlags::FULLSCREEN, WindowManagerFlags::empty());
|
||||||
kubi_main(app)
|
kubi_main(app);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(feature = "c-ffi", target_os = "android"))]
|
||||||
|
compile_error!("the c-ffi feature is not supported on android");
|
||||||
|
|
||||||
|
#[cfg(all(feature = "c-ffi", not(target_os = "android")))]
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub extern "C" fn kubi_extern_main() {
|
||||||
|
// cant let unwinds cross the ffi boundary!
|
||||||
|
// also, hopefully this code should never panic either...
|
||||||
|
let panic = std::panic::catch_unwind(|| {
|
||||||
|
kubi_main();
|
||||||
|
});
|
||||||
|
if panic.is_err() {
|
||||||
|
println!("!!! PANIC CAUGHT ON FFI BOUNDARY !!!");
|
||||||
|
};
|
||||||
|
std::mem::forget(panic); // forget the result, as dropping it will cause unwinding!
|
||||||
}
|
}
|
||||||
|
|
||||||
#[no_mangle]
|
|
||||||
pub fn kubi_main(
|
pub fn kubi_main(
|
||||||
#[cfg(target_os = "android")]
|
#[cfg(target_os = "android")]
|
||||||
app: android_activity::AndroidApp
|
app: android_activity::AndroidApp
|
||||||
|
@ -227,13 +246,18 @@ pub fn kubi_main(
|
||||||
world.add_workload(update);
|
world.add_workload(update);
|
||||||
//world.add_workload(render);
|
//world.add_workload(render);
|
||||||
world.add_workload(after_render);
|
world.add_workload(after_render);
|
||||||
|
world.add_workload(on_exit);
|
||||||
|
|
||||||
//Save _visualizer.json
|
//Save _visualizer.json
|
||||||
#[cfg(feature = "generate_visualizer_data")]
|
#[cfg(feature = "generate_visualizer_data")] {
|
||||||
std::fs::write(
|
std::fs::write(
|
||||||
"_visualizer.json",
|
"_visualizer.json",
|
||||||
serde_json::to_string(&world.workloads_info()).unwrap(),
|
serde_json::to_string(&world.workloads_info()).unwrap(),
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
log::info!("visualizer data written to ./_visualizer.json");
|
||||||
|
log::warn!("game will exit now, as it's built with generate_visualizer_data");
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
//Run pre-startup procedure
|
//Run pre-startup procedure
|
||||||
world.run_workload(pre_startup).unwrap();
|
world.run_workload(pre_startup).unwrap();
|
||||||
|
@ -330,6 +354,11 @@ pub fn kubi_main(
|
||||||
window_target.exit();
|
window_target.exit();
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
||||||
|
Event::LoopExiting => {
|
||||||
|
world.run_workload(on_exit).unwrap();
|
||||||
|
},
|
||||||
|
|
||||||
_ => (),
|
_ => (),
|
||||||
};
|
};
|
||||||
}).unwrap();
|
}).unwrap();
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#![cfg_attr(
|
#![cfg_attr(
|
||||||
all(windows, not(debug_assertions)),
|
all(windows, not(debug_assertions)),
|
||||||
windows_subsystem = "windows"
|
windows_subsystem = "windows"
|
||||||
)]
|
)]
|
||||||
|
|
||||||
|
|
|
@ -1,19 +1,18 @@
|
||||||
use shipyard::{Unique, AllStoragesView, UniqueView, UniqueViewMut, Workload, IntoWorkload, EntitiesViewMut, Component, ViewMut, SystemModificator, View, IntoIter, WorkloadModificator};
|
use shipyard::{Unique, AllStoragesView, UniqueView, UniqueViewMut, Workload, IntoWorkload, EntitiesViewMut, Component, ViewMut, SystemModificator, View, IntoIter, WorkloadModificator};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use uflow::{
|
use uflow::{
|
||||||
client::{Client, Config as ClientConfig, Event as ClientEvent},
|
client::{Client, Config as ClientConfig, Event as ClientEvent},
|
||||||
EndpointConfig
|
EndpointConfig
|
||||||
};
|
};
|
||||||
use kubi_shared::networking::{
|
use kubi_shared::networking::{
|
||||||
messages::ServerToClientMessage,
|
messages::ServerToClientMessage,
|
||||||
state::ClientJoinState,
|
state::ClientJoinState,
|
||||||
client::ClientIdMap,
|
client::ClientIdMap,
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
events::EventComponent,
|
events::EventComponent,
|
||||||
control_flow::RequestExit,
|
world::tasks::ChunkTaskManager,
|
||||||
world::tasks::ChunkTaskManager,
|
state::is_ingame_or_loading,
|
||||||
state::is_ingame_or_loading,
|
|
||||||
fixed_timestamp::FixedTimestamp
|
fixed_timestamp::FixedTimestamp
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -159,19 +158,15 @@ pub fn update_networking_late() -> Workload {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn disconnect_on_exit(
|
pub fn disconnect_on_exit(
|
||||||
exit: UniqueView<RequestExit>,
|
|
||||||
mut client: UniqueViewMut<UdpClient>,
|
mut client: UniqueViewMut<UdpClient>,
|
||||||
) {
|
) {
|
||||||
//TODO check if this works
|
if client.0.is_active() {
|
||||||
if exit.0 {
|
client.0.flush();
|
||||||
if client.0.is_active() {
|
client.0.disconnect();
|
||||||
client.0.flush();
|
while client.0.is_active() { client.0.step().for_each(|_|()); }
|
||||||
client.0.disconnect();
|
log::info!("Client disconnected");
|
||||||
while client.0.is_active() { client.0.step().for_each(|_|()); }
|
} else {
|
||||||
log::info!("Client disconnected");
|
log::info!("Client inactive")
|
||||||
} else {
|
|
||||||
log::info!("Client inactive")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use glam::{Vec3, Mat4};
|
use glam::Mat4;
|
||||||
use shipyard::{UniqueViewMut, View, IntoIter, AllStoragesView, AllStoragesViewMut, UniqueView, ViewMut, Get};
|
use shipyard::{UniqueViewMut, View, IntoIter, AllStoragesView, AllStoragesViewMut, UniqueView, ViewMut, Get};
|
||||||
use uflow::{SendMode, client::Event as ClientEvent};
|
use uflow::{SendMode, client::Event as ClientEvent};
|
||||||
use kubi_shared::{
|
use kubi_shared::{
|
||||||
|
|
|
@ -37,7 +37,7 @@ pub fn inject_network_responses_into_manager_queue(
|
||||||
let ServerToClientMessage::ChunkResponse {
|
let ServerToClientMessage::ChunkResponse {
|
||||||
chunk, data, queued
|
chunk, data, queued
|
||||||
} = packet else { unreachable!() };
|
} = packet else { unreachable!() };
|
||||||
manager.add_sussy_response(ChunkTaskResponse::LoadedChunk {
|
manager.add_sussy_response(ChunkTaskResponse::ChunkWorldgenDone {
|
||||||
position: chunk,
|
position: chunk,
|
||||||
chunk_data: data,
|
chunk_data: data,
|
||||||
queued
|
queued
|
||||||
|
|
|
@ -1,8 +1,15 @@
|
||||||
use glam::{vec3, EulerRot, Mat4, Quat, Vec2, Vec2Swizzles, Vec3, Vec3Swizzles};
|
use glam::{EulerRot, Mat4, Quat, Vec2, Vec3, Vec3Swizzles};
|
||||||
use shipyard::{track, Component, Get, IntoIter, IntoWithId, IntoWorkload, Unique, UniqueView, View, ViewMut, Workload};
|
use shipyard::{track, Component, Get, IntoIter, IntoWithId, IntoWorkload, UniqueView, View, ViewMut, Workload};
|
||||||
use winit::keyboard::KeyCode;
|
use winit::keyboard::KeyCode;
|
||||||
use std::f32::consts::PI;
|
use std::f32::consts::PI;
|
||||||
use crate::{client_physics::ClPhysicsActor, cursor_lock::CursorLock, delta_time::DeltaTime, input::{Inputs, PrevInputs, RawKbmInputState}, settings::GameSettings, transform::Transform};
|
use crate::{
|
||||||
|
client_physics::ClPhysicsActor,
|
||||||
|
cursor_lock::CursorLock,
|
||||||
|
delta_time::DeltaTime,
|
||||||
|
input::{Inputs, PrevInputs, RawKbmInputState},
|
||||||
|
settings::GameSettings,
|
||||||
|
transform::Transform
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
pub enum PlayerControllerType {
|
pub enum PlayerControllerType {
|
||||||
|
@ -121,7 +128,7 @@ pub fn debug_switch_ctl_type(
|
||||||
mut actors: ViewMut<ClPhysicsActor>,
|
mut actors: ViewMut<ClPhysicsActor>,
|
||||||
kbm_state: UniqueView<RawKbmInputState>,
|
kbm_state: UniqueView<RawKbmInputState>,
|
||||||
) {
|
) {
|
||||||
for (mut controller, mut actor) in (&mut controllers, &mut actors).iter() {
|
for (controller, actor) in (&mut controllers, &mut actors).iter() {
|
||||||
if kbm_state.keyboard_state.contains(KeyCode::F4 as u32) {
|
if kbm_state.keyboard_state.contains(KeyCode::F4 as u32) {
|
||||||
*controller = PlayerController::DEFAULT_FPS_CTL;
|
*controller = PlayerController::DEFAULT_FPS_CTL;
|
||||||
actor.disable = false;
|
actor.disable = false;
|
||||||
|
|
|
@ -2,7 +2,7 @@ use glam::UVec2;
|
||||||
use strum::IntoEnumIterator;
|
use strum::IntoEnumIterator;
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use wgpu::util::{DeviceExt, TextureDataOrder};
|
use wgpu::util::{DeviceExt, TextureDataOrder};
|
||||||
use std::{io::{BufReader, Read}, path::{Path, PathBuf}};
|
use std::{io::BufReader, path::{Path, PathBuf}};
|
||||||
use crate::{filesystem::AssetManager, prefabs::ModelVertex, rendering::{BufferPair, Renderer}};
|
use crate::{filesystem::AssetManager, prefabs::ModelVertex, rendering::{BufferPair, Renderer}};
|
||||||
use super::AssetPaths;
|
use super::AssetPaths;
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use shipyard::{AllStoragesViewMut, IntoIter, IntoWorkload, SystemModificator, Unique, UniqueView, UniqueViewMut, View, Workload, WorkloadModificator};
|
use shipyard::{AllStoragesViewMut, IntoIter, IntoWorkload, Unique, UniqueView, UniqueViewMut, View, Workload, WorkloadModificator};
|
||||||
use winit::dpi::PhysicalSize;
|
use winit::dpi::PhysicalSize;
|
||||||
use glam::Vec3;
|
use glam::Vec3;
|
||||||
use crate::{events::WindowResizedEvent, hui_integration::kubi_ui_draw, state::is_ingame};
|
use crate::{events::WindowResizedEvent, hui_integration::kubi_ui_draw, state::is_ingame};
|
||||||
|
|
|
@ -1,8 +1,5 @@
|
||||||
use shipyard::{AllStoragesView, IntoIter, IntoWithId, Unique, UniqueView, View};
|
use shipyard::{AllStoragesView, Unique, UniqueView};
|
||||||
use kubi_shared::{entity::Entity, transform::Transform};
|
use crate::prefabs::GpuPrefabs;
|
||||||
use crate::{
|
|
||||||
camera::Camera, prefabs::GpuPrefabs, settings::GameSettings
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{camera_uniform::CameraUniformBuffer, depth::DepthTexture, RenderCtx};
|
use super::{camera_uniform::CameraUniformBuffer, depth::DepthTexture, RenderCtx};
|
||||||
|
|
||||||
|
|
|
@ -8,6 +8,13 @@ use winit::{
|
||||||
};
|
};
|
||||||
use crate::settings::{GameSettings, FullscreenMode};
|
use crate::settings::{GameSettings, FullscreenMode};
|
||||||
|
|
||||||
|
const fn get_vsync_mode(vsync: bool) -> wgpu::PresentMode {
|
||||||
|
match vsync {
|
||||||
|
true => wgpu::PresentMode::AutoVsync,
|
||||||
|
false => wgpu::PresentMode::AutoNoVsync,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Unique)]
|
#[derive(Unique)]
|
||||||
pub struct Renderer {
|
pub struct Renderer {
|
||||||
window: Arc<Window>,
|
window: Arc<Window>,
|
||||||
|
@ -75,9 +82,12 @@ impl Renderer {
|
||||||
let size = window.inner_size();
|
let size = window.inner_size();
|
||||||
|
|
||||||
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
|
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
|
||||||
backends: wgpu::Backends::all(),
|
backends: wgpu::util::backend_bits_from_env().unwrap_or(
|
||||||
|
wgpu::Backends::all()
|
||||||
|
),
|
||||||
//Disable validation layer
|
//Disable validation layer
|
||||||
flags: wgpu::InstanceFlags::default() & !wgpu::InstanceFlags::VALIDATION,
|
flags: wgpu::InstanceFlags::default() & !wgpu::InstanceFlags::VALIDATION,
|
||||||
|
dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(),
|
||||||
//we're using vulkan on windows
|
//we're using vulkan on windows
|
||||||
// #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
|
// #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
|
||||||
// dx12_shader_compiler: wgpu::Dx12Compiler::Dxc {
|
// dx12_shader_compiler: wgpu::Dx12Compiler::Dxc {
|
||||||
|
@ -97,7 +107,9 @@ impl Renderer {
|
||||||
|
|
||||||
let adapter = instance.request_adapter(
|
let adapter = instance.request_adapter(
|
||||||
&wgpu::RequestAdapterOptions {
|
&wgpu::RequestAdapterOptions {
|
||||||
power_preference: wgpu::PowerPreference::HighPerformance,
|
power_preference: wgpu::util::power_preference_from_env().unwrap_or(
|
||||||
|
wgpu::PowerPreference::HighPerformance
|
||||||
|
),
|
||||||
compatible_surface: Some(&surface),
|
compatible_surface: Some(&surface),
|
||||||
force_fallback_adapter: false,
|
force_fallback_adapter: false,
|
||||||
},
|
},
|
||||||
|
@ -116,12 +128,26 @@ impl Renderer {
|
||||||
None,
|
None,
|
||||||
).block_on().unwrap();
|
).block_on().unwrap();
|
||||||
|
|
||||||
let surface_config = surface.get_default_config(&adapter, size.width, size.height).unwrap();
|
let mut surface_config = surface.get_default_config(&adapter, size.width, size.height).unwrap();
|
||||||
|
surface_config.present_mode = get_vsync_mode(settings.vsync);
|
||||||
surface.configure(&device, &surface_config);
|
surface.configure(&device, &surface_config);
|
||||||
|
|
||||||
Self { window, instance, surface, device, queue, surface_config, size }
|
Self { window, instance, surface, device, queue, surface_config, size }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn reload_settings(&mut self, settings: &GameSettings) {
|
||||||
|
// TODO update fullscreen mode
|
||||||
|
|
||||||
|
let mut should_reconfigure = false;
|
||||||
|
|
||||||
|
should_reconfigure |= get_vsync_mode(settings.vsync) != self.surface_config.present_mode;
|
||||||
|
self.surface_config.present_mode = get_vsync_mode(settings.vsync);
|
||||||
|
|
||||||
|
if should_reconfigure {
|
||||||
|
self.reconfigure();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn resize(&mut self, size: PhysicalSize<u32>) {
|
pub fn resize(&mut self, size: PhysicalSize<u32>) {
|
||||||
if size.width == 0 || size.height == 0 {
|
if size.width == 0 || size.height == 0 {
|
||||||
log::warn!("Ignoring resize event with zero width or height");
|
log::warn!("Ignoring resize event with zero width or height");
|
||||||
|
@ -135,7 +161,7 @@ impl Renderer {
|
||||||
self.size = size;
|
self.size = size;
|
||||||
self.surface_config.width = size.width;
|
self.surface_config.width = size.width;
|
||||||
self.surface_config.height = size.height;
|
self.surface_config.height = size.height;
|
||||||
self.surface.configure(&self.device, &self.surface_config);
|
self.reconfigure();
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn reconfigure(&self) {
|
pub fn reconfigure(&self) {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
use shipyard::{AllStoragesView, Unique, UniqueView};
|
use shipyard::{AllStoragesView, Unique, UniqueView};
|
||||||
use super::{primitives::FstriPrimitive, RenderCtx, Renderer};
|
use super::{primitives::FstriPrimitive, RenderCtx};
|
||||||
|
|
||||||
mod uniform;
|
mod uniform;
|
||||||
mod pipeline;
|
mod pipeline;
|
||||||
|
|
|
@ -11,7 +11,7 @@ pub struct FullscreenSettings {
|
||||||
|
|
||||||
#[derive(Unique)]
|
#[derive(Unique)]
|
||||||
pub struct GameSettings {
|
pub struct GameSettings {
|
||||||
// pub vsync: bool,
|
pub vsync: bool,
|
||||||
pub fullscreen: Option<FullscreenSettings>,
|
pub fullscreen: Option<FullscreenSettings>,
|
||||||
// pub msaa: Option<u8>,
|
// pub msaa: Option<u8>,
|
||||||
// pub max_anisotropy: Option<u16>,
|
// pub max_anisotropy: Option<u16>,
|
||||||
|
@ -24,7 +24,7 @@ pub struct GameSettings {
|
||||||
impl Default for GameSettings {
|
impl Default for GameSettings {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
// vsync: false,
|
vsync: true,
|
||||||
fullscreen: None,
|
fullscreen: None,
|
||||||
// msaa: Some(4),
|
// msaa: Some(4),
|
||||||
// max_anisotropy: Some(16),
|
// max_anisotropy: Some(16),
|
||||||
|
|
|
@ -1,24 +1,76 @@
|
||||||
use hui::{
|
use hui::{
|
||||||
element::{br::Break, container::Container, slider::Slider, text::Text, UiElementExt},
|
element::{br::Break, container::Container, slider::Slider, text::Text, ElementList, UiElementExt},
|
||||||
layout::{Alignment, Direction},
|
layout::{Alignment, Direction},
|
||||||
signal::Signal,
|
signal::Signal,
|
||||||
rect_frame, size,
|
rect_frame,
|
||||||
|
size,
|
||||||
};
|
};
|
||||||
use shipyard::{NonSendSync, UniqueView, UniqueViewMut};
|
use shipyard::{NonSendSync, UniqueView, UniqueViewMut};
|
||||||
use winit::keyboard::KeyCode;
|
use winit::keyboard::KeyCode;
|
||||||
use crate::{hui_integration::UiState, input::RawKbmInputState, rendering::Renderer, settings::GameSettings};
|
use crate::{
|
||||||
|
hui_integration::UiState,
|
||||||
|
input::RawKbmInputState,
|
||||||
|
rendering::Renderer,
|
||||||
|
settings::GameSettings
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Signal)]
|
#[derive(Signal)]
|
||||||
enum SettingsSignal {
|
enum SettingsSignal {
|
||||||
SetRenderDistance(u8),
|
SetRenderDistance(u8),
|
||||||
SetEnableDynamicCrosshair(bool),
|
SetEnableDynamicCrosshair(bool),
|
||||||
|
SetEnableVsync(bool),
|
||||||
SetEnableDebugChunkBorder(bool),
|
SetEnableDebugChunkBorder(bool),
|
||||||
SetMouseSensitivity(f32),
|
SetMouseSensitivity(f32),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hUI doesn't have a checkbox element yet
|
||||||
|
// so we'll have to use sliders for now
|
||||||
|
fn checkbox(
|
||||||
|
ui: &mut ElementList,
|
||||||
|
text: &'static str,
|
||||||
|
value: bool,
|
||||||
|
signal: impl Fn(bool) -> SettingsSignal + 'static,
|
||||||
|
) {
|
||||||
|
const WIDTH_PX: f32 = 50.;
|
||||||
|
const HEIGHT_PX: f32 = WIDTH_PX / 2.;
|
||||||
|
const HANDLE_PX: f32 = HEIGHT_PX;
|
||||||
|
const TRACK_HEIGHT_RATIO: f32 = 0.75;
|
||||||
|
const TRACK_HEIGHT_PX: f32 = HEIGHT_PX * TRACK_HEIGHT_RATIO;
|
||||||
|
|
||||||
|
Container::default()
|
||||||
|
.with_direction(Direction::Horizontal)
|
||||||
|
.with_align(Alignment::Center)
|
||||||
|
.with_gap(5.)
|
||||||
|
.with_children(|ui| {
|
||||||
|
Text::new(text)
|
||||||
|
.add_child(ui);
|
||||||
|
Slider::new(value as u32 as f32)
|
||||||
|
.with_size(size!(WIDTH_PX, HEIGHT_PX))
|
||||||
|
.with_track_height(TRACK_HEIGHT_RATIO)
|
||||||
|
.with_track(rect_frame! {
|
||||||
|
color: (0.5, 0.5, 0.5),
|
||||||
|
corner_radius: TRACK_HEIGHT_PX * 0.5,
|
||||||
|
})
|
||||||
|
.with_track_active(rect_frame! {
|
||||||
|
color: (0., 0., 0.75),
|
||||||
|
corner_radius: TRACK_HEIGHT_PX * 0.5,
|
||||||
|
})
|
||||||
|
.with_handle_size((HANDLE_PX, 1.))
|
||||||
|
.with_handle(rect_frame! {
|
||||||
|
color: (0., 0., 1.),
|
||||||
|
corner_radius: HANDLE_PX * 0.5,
|
||||||
|
})
|
||||||
|
.on_change(move |f| signal(f >= 0.5))
|
||||||
|
.add_child(ui);
|
||||||
|
Text::new(if value { "On" } else { "Off" })
|
||||||
|
.add_child(ui);
|
||||||
|
})
|
||||||
|
.add_child(ui);
|
||||||
|
}
|
||||||
|
|
||||||
pub fn render_settings_ui(
|
pub fn render_settings_ui(
|
||||||
mut ui: NonSendSync<UniqueViewMut<UiState>>,
|
mut ui: NonSendSync<UniqueViewMut<UiState>>,
|
||||||
ren: UniqueView<Renderer>,
|
mut ren: UniqueViewMut<Renderer>,
|
||||||
mut settings: UniqueViewMut<GameSettings>,
|
mut settings: UniqueViewMut<GameSettings>,
|
||||||
kbd: UniqueView<RawKbmInputState>,
|
kbd: UniqueView<RawKbmInputState>,
|
||||||
) {
|
) {
|
||||||
|
@ -64,28 +116,28 @@ pub fn render_settings_ui(
|
||||||
.add_child(ui);
|
.add_child(ui);
|
||||||
Break.add_child(ui);
|
Break.add_child(ui);
|
||||||
|
|
||||||
Text::new("Dynamic Crosshair")
|
checkbox(
|
||||||
.add_child(ui);
|
ui,
|
||||||
Slider::new(settings.dynamic_crosshair as u32 as f32)
|
"Vsync",
|
||||||
.with_size(size!(50, auto))
|
settings.vsync,
|
||||||
.with_track_height(1.)
|
SettingsSignal::SetEnableVsync
|
||||||
.with_handle_size((25., 1.))
|
);
|
||||||
.on_change(|f| SettingsSignal::SetEnableDynamicCrosshair(f >= 0.5))
|
|
||||||
.add_child(ui);
|
|
||||||
Text::new(if settings.dynamic_crosshair { "On" } else { "Off" })
|
|
||||||
.add_child(ui);
|
|
||||||
Break.add_child(ui);
|
Break.add_child(ui);
|
||||||
|
|
||||||
Text::new("Enable debug chunk border")
|
checkbox(
|
||||||
.add_child(ui);
|
ui,
|
||||||
Slider::new(settings.debug_draw_current_chunk_border as u32 as f32)
|
"Dynamic Crosshair",
|
||||||
.with_size(size!(50, (Slider::DEFAULT_HEIGHT)))
|
settings.dynamic_crosshair,
|
||||||
.with_track_height(1.)
|
SettingsSignal::SetEnableDynamicCrosshair
|
||||||
.with_handle_size((25., 1.))
|
);
|
||||||
.on_change(|f| SettingsSignal::SetEnableDebugChunkBorder(f >= 0.5))
|
Break.add_child(ui);
|
||||||
.add_child(ui);
|
|
||||||
Text::new(if settings.debug_draw_current_chunk_border { "On" } else { "Off" })
|
checkbox(
|
||||||
.add_child(ui);
|
ui,
|
||||||
|
"Debug Chunk Border",
|
||||||
|
settings.debug_draw_current_chunk_border,
|
||||||
|
SettingsSignal::SetEnableDebugChunkBorder
|
||||||
|
);
|
||||||
Break.add_child(ui);
|
Break.add_child(ui);
|
||||||
|
|
||||||
Text::new("Mouse Sensitivity")
|
Text::new("Mouse Sensitivity")
|
||||||
|
@ -104,6 +156,10 @@ pub fn render_settings_ui(
|
||||||
ui.hui.process_signals(|signal: SettingsSignal| match signal {
|
ui.hui.process_signals(|signal: SettingsSignal| match signal {
|
||||||
SettingsSignal::SetRenderDistance(value) => settings.render_distance = value,
|
SettingsSignal::SetRenderDistance(value) => settings.render_distance = value,
|
||||||
SettingsSignal::SetEnableDynamicCrosshair(value) => settings.dynamic_crosshair = value,
|
SettingsSignal::SetEnableDynamicCrosshair(value) => settings.dynamic_crosshair = value,
|
||||||
|
SettingsSignal::SetEnableVsync(value) => {
|
||||||
|
settings.vsync = value;
|
||||||
|
ren.reload_settings(&settings);
|
||||||
|
},
|
||||||
SettingsSignal::SetEnableDebugChunkBorder(value) => settings.debug_draw_current_chunk_border = value && cfg!(not(target_os = "android")),
|
SettingsSignal::SetEnableDebugChunkBorder(value) => settings.debug_draw_current_chunk_border = value && cfg!(not(target_os = "android")),
|
||||||
SettingsSignal::SetMouseSensitivity(value) => settings.mouse_sensitivity = value,
|
SettingsSignal::SetMouseSensitivity(value) => settings.mouse_sensitivity = value,
|
||||||
});
|
});
|
||||||
|
|
|
@ -62,10 +62,10 @@ impl ChunkStorage {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Unique)]
|
// #[derive(Unique)]
|
||||||
pub struct WorldInfo {
|
// pub struct WorldInfo {
|
||||||
pub seed: u32,
|
// pub seed: u32,
|
||||||
}
|
// }
|
||||||
|
|
||||||
#[derive(Default, Unique)]
|
#[derive(Default, Unique)]
|
||||||
pub struct ChunkMeshStorage {
|
pub struct ChunkMeshStorage {
|
||||||
|
|
|
@ -2,7 +2,7 @@ use std::sync::Arc;
|
||||||
use glam::IVec3;
|
use glam::IVec3;
|
||||||
use atomic::Atomic;
|
use atomic::Atomic;
|
||||||
use kubi_shared::worldgen::AbortState;
|
use kubi_shared::worldgen::AbortState;
|
||||||
use crate::rendering::{world::ChunkVertex, BufferPair};
|
use crate::rendering::BufferPair;
|
||||||
|
|
||||||
pub use kubi_shared::chunk::{CHUNK_SIZE, BlockData};
|
pub use kubi_shared::chunk::{CHUNK_SIZE, BlockData};
|
||||||
|
|
||||||
|
@ -57,6 +57,7 @@ pub struct Chunk {
|
||||||
pub desired_state: DesiredChunkState,
|
pub desired_state: DesiredChunkState,
|
||||||
pub abortion: Option<Arc<Atomic<AbortState>>>,
|
pub abortion: Option<Arc<Atomic<AbortState>>>,
|
||||||
pub mesh_dirty: bool,
|
pub mesh_dirty: bool,
|
||||||
|
pub data_modified: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Chunk {
|
impl Chunk {
|
||||||
|
@ -69,6 +70,7 @@ impl Chunk {
|
||||||
desired_state: Default::default(),
|
desired_state: Default::default(),
|
||||||
abortion: None,
|
abortion: None,
|
||||||
mesh_dirty: false,
|
mesh_dirty: false,
|
||||||
|
data_modified: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,14 +1,18 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use atomic::{Atomic, Ordering};
|
use atomic::{Atomic, Ordering};
|
||||||
use glam::{IVec3, ivec3};
|
use glam::{IVec3, ivec3};
|
||||||
use kubi_shared::{networking::{channels::Channel, messages::ClientToServerMessage}, worldgen::AbortState};
|
use kubi_shared::{
|
||||||
|
data::io_thread::{IOCommand, IOResponse, IOThreadManager},
|
||||||
|
networking::{channels::Channel, messages::ClientToServerMessage},
|
||||||
|
worldgen::AbortState,
|
||||||
|
};
|
||||||
use shipyard::{View, UniqueView, UniqueViewMut, IntoIter, Workload, IntoWorkload, NonSendSync, track};
|
use shipyard::{View, UniqueView, UniqueViewMut, IntoIter, Workload, IntoWorkload, NonSendSync, track};
|
||||||
use uflow::SendMode;
|
use uflow::SendMode;
|
||||||
use wgpu::util::DeviceExt;
|
use wgpu::util::DeviceExt;
|
||||||
use crate::{
|
use crate::{
|
||||||
networking::UdpClient,
|
networking::UdpClient,
|
||||||
player::MainPlayer,
|
player::MainPlayer,
|
||||||
rendering::{world::ChunkVertex, BufferPair, Renderer},
|
rendering::{BufferPair, Renderer},
|
||||||
settings::GameSettings,
|
settings::GameSettings,
|
||||||
state::GameState,
|
state::GameState,
|
||||||
transform::Transform,
|
transform::Transform,
|
||||||
|
@ -16,10 +20,12 @@ use crate::{
|
||||||
use super::{
|
use super::{
|
||||||
ChunkStorage, ChunkMeshStorage,
|
ChunkStorage, ChunkMeshStorage,
|
||||||
chunk::{Chunk, DesiredChunkState, CHUNK_SIZE, ChunkMesh, CurrentChunkState, ChunkData},
|
chunk::{Chunk, DesiredChunkState, CHUNK_SIZE, ChunkMesh, CurrentChunkState, ChunkData},
|
||||||
tasks::{ChunkTaskManager, ChunkTaskResponse, ChunkTask},
|
tasks::{ChunkTaskManager, ChunkTaskResponse, ChunkTask},
|
||||||
queue::BlockUpdateQueue,
|
queue::BlockUpdateQueue,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const WORLD_SEED: u64 = 0xfeb_face_dead_cafe;
|
||||||
|
|
||||||
const MAX_CHUNK_OPS_INGAME: usize = 8;
|
const MAX_CHUNK_OPS_INGAME: usize = 8;
|
||||||
const MAX_CHUNK_OPS: usize = 32;
|
const MAX_CHUNK_OPS: usize = 32;
|
||||||
|
|
||||||
|
@ -92,6 +98,7 @@ pub fn update_chunks_if_player_moved(
|
||||||
|
|
||||||
fn process_state_changes(
|
fn process_state_changes(
|
||||||
task_manager: UniqueView<ChunkTaskManager>,
|
task_manager: UniqueView<ChunkTaskManager>,
|
||||||
|
io: Option<UniqueView<IOThreadManager>>,
|
||||||
mut udp_client: Option<UniqueViewMut<UdpClient>>,
|
mut udp_client: Option<UniqueViewMut<UdpClient>>,
|
||||||
mut world: UniqueViewMut<ChunkStorage>,
|
mut world: UniqueViewMut<ChunkStorage>,
|
||||||
mut vm_meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>,
|
mut vm_meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>,
|
||||||
|
@ -135,7 +142,7 @@ fn process_state_changes(
|
||||||
chunk.current_state,
|
chunk.current_state,
|
||||||
CurrentChunkState::Loaded | CurrentChunkState::CalculatingMesh,
|
CurrentChunkState::Loaded | CurrentChunkState::CalculatingMesh,
|
||||||
) => {
|
) => {
|
||||||
chunk.block_data = None;
|
// chunk.block_data = None; //HACK when downgrading, keep the data so we can save it
|
||||||
chunk.current_state = CurrentChunkState::Nothing;
|
chunk.current_state = CurrentChunkState::Nothing;
|
||||||
},
|
},
|
||||||
|
|
||||||
|
@ -184,18 +191,38 @@ fn process_state_changes(
|
||||||
SendMode::Reliable
|
SendMode::Reliable
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
let atomic = Arc::new(Atomic::new(AbortState::Continue));
|
|
||||||
task_manager.spawn_task(ChunkTask::LoadChunk {
|
// If the chunk exists in the save file (and save file is there in the first place),
|
||||||
seed: 0xbeef_face_dead_cafe,
|
// ... we'll try to load it
|
||||||
position,
|
// Otherwise, we'll run worldgen
|
||||||
abortion: Some(Arc::clone(&atomic)),
|
|
||||||
});
|
let mut should_run_worldgen = true;
|
||||||
abortion = Some(atomic);
|
|
||||||
|
if let Some(io) = &io {
|
||||||
|
if io.chunk_exists(position) {
|
||||||
|
// Try to load the chunk from the save file
|
||||||
|
// In case that fails, we will run worldgen once the IO thread responds
|
||||||
|
io.send(IOCommand::LoadChunk { position });
|
||||||
|
should_run_worldgen = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if should_run_worldgen {
|
||||||
|
let atomic = Arc::new(Atomic::new(AbortState::Continue));
|
||||||
|
task_manager.spawn_task(ChunkTask::ChunkWorldgen {
|
||||||
|
seed: WORLD_SEED,
|
||||||
|
position,
|
||||||
|
abortion: Some(Arc::clone(&atomic)),
|
||||||
|
});
|
||||||
|
abortion = Some(atomic);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//Update chunk state
|
//Update chunk state
|
||||||
let chunk = world.chunks.get_mut(&position).unwrap();
|
let chunk = world.chunks.get_mut(&position).unwrap();
|
||||||
chunk.current_state = CurrentChunkState::Loading;
|
chunk.current_state = CurrentChunkState::Loading;
|
||||||
chunk.abortion = abortion;
|
chunk.abortion = abortion;
|
||||||
|
|
||||||
// ===========
|
// ===========
|
||||||
//log::trace!("Started loading chunk {position}");
|
//log::trace!("Started loading chunk {position}");
|
||||||
},
|
},
|
||||||
|
@ -254,7 +281,29 @@ fn process_state_changes(
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
//HACK, since save files are not implemented, just unload immediately
|
// If in singleplayer and have an open save file, we need to save the chunk to the disk
|
||||||
|
|
||||||
|
// ==========================================================
|
||||||
|
//TODO IMPORTANT: WAIT FOR CHUNK TO FINISH SAVING FIRST BEFORE TRANSITIONING TO UNLOADED
|
||||||
|
// OTHERWISE WE WILL LOSE THE SAVE DATA IF THE USER COMES BACK TO THE CHUNK TOO QUICKLY
|
||||||
|
// ==========================================================
|
||||||
|
//XXX: CHECK IF WE REALLY NEED THIS OR IF WE CAN JUST KILL THE CHUNK RIGHT AWAY
|
||||||
|
//CHANGES TO CHUNK SAVING LOGIC SHOULD HAVE MADE THE ABOVE COMMENT OBSOLETE
|
||||||
|
|
||||||
|
if let Some(io) = &io {
|
||||||
|
if let Some(block_data) = &chunk.block_data {
|
||||||
|
// Only save the chunk if it has been modified
|
||||||
|
if chunk.data_modified {
|
||||||
|
// log::debug!("issue save command");
|
||||||
|
chunk.data_modified = false;
|
||||||
|
io.send(IOCommand::SaveChunk {
|
||||||
|
position,
|
||||||
|
data: block_data.blocks.clone(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
true
|
true
|
||||||
|
@ -264,6 +313,7 @@ fn process_state_changes(
|
||||||
|
|
||||||
fn process_completed_tasks(
|
fn process_completed_tasks(
|
||||||
task_manager: UniqueView<ChunkTaskManager>,
|
task_manager: UniqueView<ChunkTaskManager>,
|
||||||
|
io: Option<UniqueView<IOThreadManager>>,
|
||||||
mut world: UniqueViewMut<ChunkStorage>,
|
mut world: UniqueViewMut<ChunkStorage>,
|
||||||
mut meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>,
|
mut meshes: NonSendSync<UniqueViewMut<ChunkMeshStorage>>,
|
||||||
renderer: UniqueView<Renderer>,
|
renderer: UniqueView<Renderer>,
|
||||||
|
@ -271,9 +321,69 @@ fn process_completed_tasks(
|
||||||
mut queue: UniqueViewMut<BlockUpdateQueue>,
|
mut queue: UniqueViewMut<BlockUpdateQueue>,
|
||||||
) {
|
) {
|
||||||
let mut ops: usize = 0;
|
let mut ops: usize = 0;
|
||||||
while let Some(res) = task_manager.receive() {
|
|
||||||
|
//TODO reduce code duplication between loaded/generated chunks
|
||||||
|
|
||||||
|
// Process IO first
|
||||||
|
if let Some(io) = &io {
|
||||||
|
for response in io.poll() {
|
||||||
|
let IOResponse::ChunkLoaded { position, data } = response else {
|
||||||
|
//TODO this is bad
|
||||||
|
panic!("Unexpected IO response: {:?}", response);
|
||||||
|
};
|
||||||
|
|
||||||
|
//check if chunk exists
|
||||||
|
let Some(chunk) = world.chunks.get_mut(&position) else {
|
||||||
|
log::warn!("LOADED blocks data discarded: chunk doesn't exist");
|
||||||
|
continue
|
||||||
|
};
|
||||||
|
|
||||||
|
//we cannot have abortion here but just in case, reset it
|
||||||
|
chunk.abortion = None;
|
||||||
|
|
||||||
|
//check if chunk still wants it
|
||||||
|
if !matches!(chunk.desired_state, DesiredChunkState::Loaded | DesiredChunkState::Rendered) {
|
||||||
|
log::warn!("LOADED block data discarded: state undesirable: {:?}", chunk.desired_state);
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if we actually got the data
|
||||||
|
if let Some(data) = data {
|
||||||
|
// If we did get the data, yay :3
|
||||||
|
chunk.block_data = Some(ChunkData {
|
||||||
|
blocks: data
|
||||||
|
});
|
||||||
|
chunk.current_state = CurrentChunkState::Loaded;
|
||||||
|
} else {
|
||||||
|
// If we didn't get the data, we need to run worldgen
|
||||||
|
// XXX: will this ever happen? we should always have the data in the save file
|
||||||
|
let atomic = Arc::new(Atomic::new(AbortState::Continue));
|
||||||
|
task_manager.spawn_task(ChunkTask::ChunkWorldgen {
|
||||||
|
seed: WORLD_SEED,
|
||||||
|
position,
|
||||||
|
abortion: Some(Arc::clone(&atomic)),
|
||||||
|
});
|
||||||
|
chunk.abortion = Some(atomic);
|
||||||
|
}
|
||||||
|
|
||||||
|
ops += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
//return early if we've reached the limit
|
||||||
|
if ops >= match *state {
|
||||||
|
GameState::InGame => MAX_CHUNK_OPS_INGAME,
|
||||||
|
_ => MAX_CHUNK_OPS,
|
||||||
|
} { return }
|
||||||
|
// XXX: this will completely skip polling the task manager if we've reached the limit
|
||||||
|
// this is probably fine, but it might be worth revisiting later
|
||||||
|
}
|
||||||
|
|
||||||
|
for res in task_manager.poll() {
|
||||||
match res {
|
match res {
|
||||||
ChunkTaskResponse::LoadedChunk { position, chunk_data, mut queued } => {
|
ChunkTaskResponse::ChunkWorldgenDone { position, chunk_data, mut queued } => {
|
||||||
|
//TODO this can fuck shit up really badly if io op gets overwritten by worldgen chunk
|
||||||
|
//TODO only accept if loading stage, not loaded
|
||||||
|
|
||||||
//If unwanted chunk is already loaded
|
//If unwanted chunk is already loaded
|
||||||
//It would be ~~...unethical~~ impossible to abort the operation at this point
|
//It would be ~~...unethical~~ impossible to abort the operation at this point
|
||||||
//Instead, we'll just throw it away
|
//Instead, we'll just throw it away
|
||||||
|
@ -308,7 +418,7 @@ fn process_completed_tasks(
|
||||||
//increase ops counter
|
//increase ops counter
|
||||||
ops += 1;
|
ops += 1;
|
||||||
},
|
},
|
||||||
ChunkTaskResponse::GeneratedMesh {
|
ChunkTaskResponse::GenerateMeshDone {
|
||||||
position,
|
position,
|
||||||
vertices, indices,
|
vertices, indices,
|
||||||
trans_vertices, trans_indices,
|
trans_vertices, trans_indices,
|
||||||
|
@ -392,3 +502,20 @@ fn process_completed_tasks(
|
||||||
} { break }
|
} { break }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Save all modified chunks to the disk
|
||||||
|
pub fn save_on_exit(
|
||||||
|
io: UniqueView<IOThreadManager>,
|
||||||
|
world: UniqueView<ChunkStorage>,
|
||||||
|
) {
|
||||||
|
for (&position, chunk) in &world.chunks {
|
||||||
|
if let Some(block_data) = &chunk.block_data {
|
||||||
|
if chunk.data_modified {
|
||||||
|
io.send(IOCommand::SaveChunk {
|
||||||
|
position,
|
||||||
|
data: block_data.blocks.clone(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use glam::{ivec3, IVec3, Vec3};
|
use glam::{ivec3, IVec3};
|
||||||
use strum::IntoEnumIterator;
|
use strum::IntoEnumIterator;
|
||||||
use kubi_shared::block::{Block, RenderType, Transparency};
|
use kubi_shared::block::{Block, RenderType, Transparency};
|
||||||
use crate::world::chunk::CHUNK_SIZE;
|
use crate::world::chunk::CHUNK_SIZE;
|
||||||
|
|
|
@ -22,11 +22,15 @@ pub fn apply_queued_blocks(
|
||||||
if event.soft && *block != Block::Air {
|
if event.soft && *block != Block::Air {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
if event.block_type == *block {
|
||||||
|
return false
|
||||||
|
}
|
||||||
*block = event.block_type;
|
*block = event.block_type;
|
||||||
//mark chunk as dirty
|
//mark chunk as dirty
|
||||||
let (chunk_pos, block_pos) = ChunkStorage::to_chunk_coords(event.position);
|
let (chunk_pos, block_pos) = ChunkStorage::to_chunk_coords(event.position);
|
||||||
let chunk = world.chunks.get_mut(&chunk_pos).expect("This error should never happen, if it does then something is super fucked up and the whole project needs to be burnt down.");
|
let chunk = world.chunks.get_mut(&chunk_pos).expect("This error should never happen, if it does then something is super fucked up and the whole project needs to be burnt down.");
|
||||||
chunk.mesh_dirty = true;
|
chunk.mesh_dirty = true;
|
||||||
|
chunk.data_modified = true;
|
||||||
//If block pos is close to the border, some neighbors may be dirty!
|
//If block pos is close to the border, some neighbors may be dirty!
|
||||||
const DIRECTIONS: [IVec3; 6] = [
|
const DIRECTIONS: [IVec3; 6] = [
|
||||||
ivec3(1, 0, 0),
|
ivec3(1, 0, 0),
|
||||||
|
|
|
@ -57,7 +57,7 @@ pub fn update_raycasts(
|
||||||
if !(world.is_inserted_or_modified() || (transform.inserted_or_modified(), &raycast).iter().next().is_some()) {
|
if !(world.is_inserted_or_modified() || (transform.inserted_or_modified(), &raycast).iter().next().is_some()) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for (transform, mut report) in (&transform, &mut raycast).iter() {
|
for (transform, report) in (&transform, &mut raycast).iter() {
|
||||||
let (_, rotation, position) = transform.0.to_scale_rotation_translation();
|
let (_, rotation, position) = transform.0.to_scale_rotation_translation();
|
||||||
let direction = (rotation.normalize() * Vec3::NEG_Z).normalize();
|
let direction = (rotation.normalize() * Vec3::NEG_Z).normalize();
|
||||||
*report = LookingAtBlock(world.raycast(position, direction, Some(30.)));
|
*report = LookingAtBlock(world.raycast(position, direction, Some(30.)));
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use atomic::Atomic;
|
use atomic::Atomic;
|
||||||
use flume::{Sender, Receiver};
|
use flume::{Receiver, Sender, TryIter};
|
||||||
use glam::IVec3;
|
use glam::IVec3;
|
||||||
use kubi_shared::{queue::QueuedBlock, worldgen::AbortState};
|
use kubi_shared::{queue::QueuedBlock, worldgen::AbortState};
|
||||||
use shipyard::Unique;
|
use shipyard::Unique;
|
||||||
|
@ -13,7 +13,7 @@ use super::{
|
||||||
use crate::rendering::world::ChunkVertex;
|
use crate::rendering::world::ChunkVertex;
|
||||||
|
|
||||||
pub enum ChunkTask {
|
pub enum ChunkTask {
|
||||||
LoadChunk {
|
ChunkWorldgen {
|
||||||
seed: u64,
|
seed: u64,
|
||||||
position: IVec3,
|
position: IVec3,
|
||||||
abortion: Option<Arc<Atomic<AbortState>>>,
|
abortion: Option<Arc<Atomic<AbortState>>>,
|
||||||
|
@ -23,13 +23,14 @@ pub enum ChunkTask {
|
||||||
data: MeshGenData
|
data: MeshGenData
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub enum ChunkTaskResponse {
|
pub enum ChunkTaskResponse {
|
||||||
LoadedChunk {
|
ChunkWorldgenDone {
|
||||||
position: IVec3,
|
position: IVec3,
|
||||||
chunk_data: BlockData,
|
chunk_data: BlockData,
|
||||||
queued: Vec<QueuedBlock>
|
queued: Vec<QueuedBlock>
|
||||||
},
|
},
|
||||||
GeneratedMesh {
|
GenerateMeshDone {
|
||||||
position: IVec3,
|
position: IVec3,
|
||||||
vertices: Vec<ChunkVertex>,
|
vertices: Vec<ChunkVertex>,
|
||||||
indices: Vec<u32>,
|
indices: Vec<u32>,
|
||||||
|
@ -43,6 +44,7 @@ pub struct ChunkTaskManager {
|
||||||
channel: (Sender<ChunkTaskResponse>, Receiver<ChunkTaskResponse>),
|
channel: (Sender<ChunkTaskResponse>, Receiver<ChunkTaskResponse>),
|
||||||
pool: ThreadPool,
|
pool: ThreadPool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ChunkTaskManager {
|
impl ChunkTaskManager {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
|
@ -50,11 +52,17 @@ impl ChunkTaskManager {
|
||||||
pool: ThreadPoolBuilder::new().num_threads(4).build().unwrap()
|
pool: ThreadPoolBuilder::new().num_threads(4).build().unwrap()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//TODO get rid of add_sussy_response
|
||||||
|
|
||||||
|
/// Add a response to the queue, to be picked up by the main thread
|
||||||
|
/// Used by the multiplayer netcode, a huge hack
|
||||||
pub fn add_sussy_response(&self, response: ChunkTaskResponse) {
|
pub fn add_sussy_response(&self, response: ChunkTaskResponse) {
|
||||||
// this WILL get stuck if the channel is bounded
|
// this WILL get stuck if the channel is bounded
|
||||||
// don't make the channel bounded ever
|
// don't make the channel bounded ever
|
||||||
self.channel.0.send(response).unwrap()
|
self.channel.0.send(response).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn spawn_task(&self, task: ChunkTask) {
|
pub fn spawn_task(&self, task: ChunkTask) {
|
||||||
let sender = self.channel.0.clone();
|
let sender = self.channel.0.clone();
|
||||||
self.pool.spawn(move || {
|
self.pool.spawn(move || {
|
||||||
|
@ -64,23 +72,29 @@ impl ChunkTaskManager {
|
||||||
(vertices, indices),
|
(vertices, indices),
|
||||||
(trans_vertices, trans_indices),
|
(trans_vertices, trans_indices),
|
||||||
) = generate_mesh(position, data);
|
) = generate_mesh(position, data);
|
||||||
ChunkTaskResponse::GeneratedMesh {
|
ChunkTaskResponse::GenerateMeshDone {
|
||||||
position,
|
position,
|
||||||
vertices, indices,
|
vertices, indices,
|
||||||
trans_vertices, trans_indices,
|
trans_vertices, trans_indices,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
ChunkTask::LoadChunk { position, seed, abortion } => {
|
ChunkTask::ChunkWorldgen { position, seed, abortion } => {
|
||||||
let Some((chunk_data, queued)) = generate_world(position, seed, abortion) else {
|
let Some((chunk_data, queued)) = generate_world(position, seed, abortion) else {
|
||||||
log::warn!("aborted operation");
|
log::warn!("aborted operation");
|
||||||
return
|
return
|
||||||
};
|
};
|
||||||
ChunkTaskResponse::LoadedChunk { position, chunk_data, queued }
|
ChunkTaskResponse::ChunkWorldgenDone { position, chunk_data, queued }
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[deprecated(note = "use poll instead")]
|
||||||
pub fn receive(&self) -> Option<ChunkTaskResponse> {
|
pub fn receive(&self) -> Option<ChunkTaskResponse> {
|
||||||
self.channel.1.try_recv().ok()
|
self.channel.1.try_recv().ok()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn poll(&self) -> TryIter<ChunkTaskResponse> {
|
||||||
|
self.channel.1.try_iter()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|