// kubi/kubi-shared/src/data.rs

use std::{
  fs::File,
  mem::size_of,
  io::{Read, Seek, SeekFrom, Write},
  borrow::Cow,
  sync::{Arc, RwLock}
};
use num_enum::TryFromPrimitive;
use serde::{Serialize, Deserialize};
use glam::IVec3;
use hashbrown::HashMap;
use anyhow::Result;
use shipyard::Unique;
use static_assertions::const_assert_eq;
use crate::{
  block::Block,
  chunk::{CHUNK_SIZE, BlockDataRef, BlockData}
};
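
/// Size in bytes of a single on-disk sector; one sector stores exactly one chunk's block data.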
const SECTOR_SIZE: usize = CHUNK_SIZE * CHUNK_SIZE * CHUNK_SIZE * size_of::<Block>();
const RESERVED_SIZE: usize = 1048576; //~1 MiB reserved for the header (32 sectors, assuming 32x32x32 chunks of 1-byte blocks)
const RESERVED_SECTOR_COUNT: usize = RESERVED_SIZE / SECTOR_SIZE;
//magic = "KUBI" (4 bytes) + IDENTITY (4 bytes, big-endian)
const HEADER_MAGIC_SIZE: usize = 8;
const HEADER_MAGIC_STR: [u8; 4] = *b"KUBI";
const HEADER_MAGIC_IDENTITY: u32 = 1;
// #[repr(transparent)]
// struct IVec3Hash(IVec3);
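
/// World save file header: world metadata plus the map from chunk position to the
/// index of the on-disk sector holding that chunk's block data. Serialized with
/// bincode into the reserved region right after the 8-byte magic.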
#[derive(Serialize, Deserialize)]
pub struct WorldSaveDataHeader {
  pub name: Cow<'static, str>,
  pub seed: u64,
  sector_count: u32,
  chunk_map: HashMap<IVec3, u32>,
}

impl Default for WorldSaveDataHeader {
  fn default() -> Self {
    Self {
      name: "World".into(),
      seed: 0,
      sector_count: RESERVED_SECTOR_COUNT as u32,
      chunk_map: HashMap::new()
    }
  }
}
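
/// An open world save file together with its in-memory [`WorldSaveDataHeader`].
///
/// Usage sketch (hypothetical file name; assumes this module is reachable as
/// `kubi_shared::data` and that the file is opened with read and write access):
///
/// ```ignore
/// use std::fs::OpenOptions;
/// use kubi_shared::data::WorldSaveFile;
///
/// let file = OpenOptions::new()
///   .read(true).write(true).create(true)
///   .open("world.kubi")
///   .expect("failed to open save file");
/// let mut save = WorldSaveFile::new(file);
/// //use `initialize` for a brand new file, `load_data` for an existing one
/// save.initialize().expect("failed to write save header");
/// ```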
#[derive(Unique)]
pub struct WorldSaveFile {
  pub file: File,
  pub header: WorldSaveDataHeader,
}
pub type SharedSaveFile = Arc<RwLock<WorldSaveFile>>;
impl WorldSaveFile {
  pub fn new(file: File) -> Self {
    WorldSaveFile {
      file,
      header: WorldSaveDataHeader::default()
    }
  }
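
  /// Read and validate the file magic, then deserialize the [`WorldSaveDataHeader`]
  /// from the reserved region at the start of the file.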
  fn read_header(&mut self) -> Result<()> {
    self.file.rewind()?;
    let mut subheader = [0u8; HEADER_MAGIC_SIZE];
    self.file.read_exact(&mut subheader)?;
    if subheader[0..4] != HEADER_MAGIC_STR {
      return Err(anyhow::anyhow!("invalid file header"));
    }
    if subheader[4..8] != HEADER_MAGIC_IDENTITY.to_be_bytes() {
      return Err(anyhow::anyhow!("this save file cannot be loaded by this version of the game"));
    }
    let limit = (RESERVED_SIZE - HEADER_MAGIC_SIZE) as u64;
    self.header = bincode::deserialize_from((&self.file).take(limit))?;
    Ok(())
  }
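
  /// Write the file magic followed by the bincode-serialized header at the start of the file.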
  fn write_header(&mut self) -> Result<()> {
    self.file.rewind()?;
    self.file.write_all(&HEADER_MAGIC_STR)?;
    self.file.write_all(&HEADER_MAGIC_IDENTITY.to_be_bytes())?;
    //XXX: if the serialized header ever outgrows the reserved region, writing it here would
    //overwrite chunk data; read_header limits its read to the reserved region, but this write
    //path doesn't check the length. The ~1 MiB reservation is generous, so it's not a *big* deal.
    bincode::serialize_into(&self.file, &self.header)?;
    Ok(())
  }
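
  /// Set up a freshly created save file by writing the current in-memory header to it.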
  pub fn initialize(&mut self) -> Result<()> {
    self.write_header()?;
    Ok(())
  }
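
  /// Load an existing save file by reading and validating its header.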
  pub fn load_data(&mut self) -> Result<()> {
    self.read_header()?;
    Ok(())
  }
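
  /// Reserve a new sector index for chunk data and bump the header's sector count.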
  fn allocate_sector(&mut self) -> u32 {
    let value = self.header.sector_count + 1;
    self.header.sector_count += 1;
    value
  }
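
  /// Write a chunk's block data into its sector (allocating one if needed), update the
  /// header when the chunk map changes, and flush the written data to disk.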
  pub fn save_chunk(&mut self, position: IVec3, data: &BlockDataRef) -> Result<()> {
    let mut header_modified = false;
    let sector = self.header.chunk_map.get(&position).copied().unwrap_or_else(|| {
      header_modified = true;
      self.allocate_sector()
    });
    let offset = sector as u64 * SECTOR_SIZE as u64;
    const_assert_eq!(size_of::<Block>(), 1);
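    //reinterpret the chunk's block data as one sector's worth of raw bytes
    //(relies on Block being a single byte, asserted above)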
    let data: &[u8; SECTOR_SIZE] = unsafe { std::mem::transmute(data) };
    self.file.seek(SeekFrom::Start(offset))?;
    self.file.write_all(data)?;
    if header_modified {
      //record the newly allocated sector in the chunk map before persisting the header
      self.header.chunk_map.insert(position, sector);
      self.write_header()?;
    }
    self.file.sync_data()?;
    Ok(())
  }
  ///TODO partial chunk commit (No need to write the whole 32 KiB sector for a single block change!)
  pub fn chunk_set_block() {
    todo!()
  }
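
  /// Returns true if the save file contains data for the chunk at `position`.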
  pub fn chunk_exists(&self, position: IVec3) -> bool {
    self.header.chunk_map.contains_key(&position)
  }
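
  /// Read a chunk's block data from its sector, validating every byte as a known block id.
  /// Returns `Ok(None)` if the chunk has never been saved.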
  pub fn load_chunk(&mut self, position: IVec3) -> Result<Option<BlockData>> {
    let Some(&sector) = self.header.chunk_map.get(&position) else {
      return Ok(None);
    };
    let mut buffer = Box::new([0u8; SECTOR_SIZE]);
    let offset = sector as u64 * SECTOR_SIZE as u64;
    self.file.seek(SeekFrom::Start(offset))?;
    self.file.read_exact(&mut buffer[..])?;
    //should be safe under these conditions:
    // - Block is a single byte
    // - all block data bytes are in valid range
    const_assert_eq!(size_of::<Block>(), 1);
    for &byte in &buffer[..] {
      let block = Block::try_from_primitive(byte);
      match block {
        //sanity check, not actually required (should NEVER happen)
        Ok(block) => debug_assert_eq!(byte, block as u8),
        Err(_) => anyhow::bail!("invalid block data"),
      }
    }
    let data: BlockData = unsafe { std::mem::transmute(buffer) };
    Ok(Some(data))
  }
}