Merge pull request #3 from pi-pi3/dev

Merge Dev
Szymon Walter 2018-03-21 18:46:08 +01:00 committed by GitHub
commit ec479815aa
16 changed files with 2027 additions and 2 deletions

5
.gitignore vendored Normal file

@@ -0,0 +1,5 @@
/target/
**/*.rs.bk
Cargo.lock
.*.sw?

12
Cargo.toml Normal file

@@ -0,0 +1,12 @@
[package]
name = "ext2"
version = "0.1.0"
authors = ["Szymon Walter <walter.szymon.98@gmail.com>"]

[dependencies]
bitflags = "1.0"
rlibc = { version = "1.0", optional = true }

[features]
default = ["no_std"]
no_std = ["rlibc"]

22
LICENSE.md Normal file

@@ -0,0 +1,22 @@
# ext2-rs
## an ext2 implementation
Copyright © 2018, Szymon Walter
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from
the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not
be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
#### walter.szymon.98@gmail.com

3
README.md Normal file

@@ -0,0 +1,3 @@
# ext2-rs
An OS- and architecture-independent implementation of ext2 in pure Rust.

BIN
ext2.img Normal file

Binary file not shown.

2
rustfmt.toml Normal file

@@ -0,0 +1,2 @@
max_width = 80
wrap_comments = true

54
src/error.rs Normal file

@@ -0,0 +1,54 @@
#[cfg(any(test, not(feature = "no_std")))]
use std::io;
/// The set of all possible errors
#[derive(Debug)]
pub enum Error {
BadMagic(u16),
OutOfBounds(usize),
AddressOutOfBounds(u32, u32, usize),
BadBlockGroupCount(u32, u32),
#[cfg(any(test, not(feature = "no_std")))]
Io(io::Error),
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Error {
unreachable!()
}
}
#[cfg(any(test, not(feature = "no_std")))]
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
impl PartialEq for Error {
fn eq(&self, rhs: &Error) -> bool {
match (self, rhs) {
(&Error::BadMagic(a), &Error::BadMagic(b)) => a == b,
(&Error::OutOfBounds(a), &Error::OutOfBounds(b)) => a == b,
(
&Error::BadBlockGroupCount(a1, a2),
&Error::BadBlockGroupCount(b1, b2),
) => a1 == b1 && a2 == b2,
_ => false,
}
}
fn ne(&self, rhs: &Error) -> bool {
match (self, rhs) {
(&Error::BadMagic(a), &Error::BadMagic(b)) => a != b,
(&Error::OutOfBounds(a), &Error::OutOfBounds(b)) => a != b,
(
&Error::BadBlockGroupCount(a1, a2),
&Error::BadBlockGroupCount(b1, b2),
) => a1 != b1 || a2 != b2,
_ => true,
}
}
}
pub enum Infallible {}

501
src/fs.rs Normal file

@@ -0,0 +1,501 @@
use core::mem;
use core::fmt::{self, Debug};
use core::nonzero::NonZero;
use alloc::Vec;
use error::Error;
use sector::{Address, Size};
use volume::{Volume, VolumeSlice};
use sys::superblock::Superblock;
use sys::block_group::BlockGroupDescriptor;
use sys::inode::Inode as RawInode;
struct Struct<T, S: Size> {
pub inner: T,
pub offset: Address<S>,
}
impl<T, S: Size> From<(T, Address<S>)> for Struct<T, S> {
#[inline]
fn from((inner, offset): (T, Address<S>)) -> Struct<T, S> {
Struct { inner, offset }
}
}
/// Safe wrapper for raw sys structs
pub struct Ext2<S: Size, V: Volume<u8, Address<S>>> {
volume: V,
superblock: Struct<Superblock, S>,
block_groups: Struct<Vec<BlockGroupDescriptor>, S>,
}
impl<S: Size + Copy, V: Volume<u8, Address<S>>> Ext2<S, V>
where
Error: From<V::Error>,
{
pub fn new(volume: V) -> Result<Ext2<S, V>, Error> {
let superblock = unsafe { Struct::from(Superblock::find(&volume)?) };
let block_groups_offset = Address::with_block_size(
superblock.inner.first_data_block + 1,
0,
superblock.inner.log_block_size + 10,
);
let block_groups_count = superblock
.inner
.block_group_count()
.map(|count| count as usize)
.map_err(|(a, b)| Error::BadBlockGroupCount(a, b))?;
let block_groups = unsafe {
BlockGroupDescriptor::find_descriptor_table(
&volume,
block_groups_offset,
block_groups_count,
)?
};
let block_groups = Struct::from(block_groups);
Ok(Ext2 {
volume,
superblock,
block_groups,
})
}
#[allow(dead_code)]
fn update_global(&mut self) -> Result<(), Error> {
// superblock
{
let slice = VolumeSlice::from_cast(
&self.superblock.inner,
self.superblock.offset,
);
let commit = slice.commit();
self.volume.commit(commit).map_err(|err| Error::from(err))?;
}
// block group descriptors
let mut offset = self.block_groups.offset;
for descr in &self.block_groups.inner {
let slice = VolumeSlice::from_cast(descr, offset);
let commit = slice.commit();
self.volume.commit(commit).map_err(|err| Error::from(err))?;
offset =
offset + Address::from(mem::size_of::<BlockGroupDescriptor>());
}
Ok(())
}
pub fn read_inode<'a>(
&'a self,
buf: &mut [u8],
inode: &Inode<'a, S, V>,
) -> Result<usize, Error> {
let total_size = inode.size();
let mut read_size = 0;
let block_size = self.block_size();
for (data, _) in InodeBlocks::new(&inode) {
// advance through `buf` by what has already been read so each
// block lands after the previous one
let data_size = block_size
.min(total_size - read_size)
.min(buf.len() - read_size);
let end = read_size + data_size;
buf[read_size..end].copy_from_slice(&data[..data_size]);
read_size += data_size;
}
Ok(read_size)
}
pub fn write_inode<'a>(
&'a self,
_inode: &(Inode<'a, S, V>, Address<S>),
_buf: &[u8],
) -> Result<usize, Error> {
unimplemented!()
}
pub fn root_inode<'a>(&'a self) -> (Inode<'a, S, V>, Address<S>) {
self.inode_nth(2).unwrap()
}
pub fn inode_nth<'a>(
&'a self,
index: usize,
) -> Option<(Inode<'a, S, V>, Address<S>)> {
self.inodes_nth(index).next()
}
pub fn inodes<'a>(&'a self) -> Inodes<'a, S, V> {
self.inodes_nth(1)
}
pub fn inodes_nth<'a>(&'a self, index: usize) -> Inodes<'a, S, V> {
assert!(index > 0, "inodes are 1-indexed");
Inodes {
fs: self,
block_groups: &self.block_groups.inner,
log_block_size: self.log_block_size(),
inode_size: self.inode_size(),
inodes_per_group: self.inodes_count(),
inodes_count: self.total_inodes_count(),
index,
}
}
}
impl<S: Size + Copy, V: Volume<u8, Address<S>>> Ext2<S, V> {
fn superblock(&self) -> &Superblock {
&self.superblock.inner
}
#[allow(dead_code)]
fn superblock_mut(&mut self) -> &mut Superblock {
&mut self.superblock.inner
}
pub fn version(&self) -> (u32, u16) {
(self.superblock().rev_major, self.superblock().rev_minor)
}
pub fn inode_size<'a>(&'a self) -> usize {
if self.version().0 == 0 {
mem::size_of::<Inode<'a, S, V>>()
} else {
// note: inodes larger than 128 bytes are not supported
self.superblock().inode_size as usize
}
}
pub fn inodes_count(&self) -> usize {
self.superblock().inodes_per_group as _
}
pub fn total_inodes_count(&self) -> usize {
self.superblock().inodes_count as _
}
pub fn block_group_count(&self) -> Result<usize, Error> {
self.superblock()
.block_group_count()
.map(|count| count as usize)
.map_err(|(a, b)| Error::BadBlockGroupCount(a, b))
}
pub fn total_block_count(&self) -> usize {
self.superblock().blocks_count as _
}
pub fn free_block_count(&self) -> usize {
self.superblock().free_blocks_count as _
}
pub fn block_size(&self) -> usize {
self.superblock().block_size()
}
pub fn log_block_size(&self) -> u32 {
self.superblock().log_block_size + 10
}
pub fn sector_size(&self) -> usize {
S::SIZE
}
pub fn log_sector_size(&self) -> u32 {
S::LOG_SIZE
}
}
impl<S: Size, V: Volume<u8, Address<S>>> Debug for Ext2<S, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Ext2<{}>", S::SIZE)
}
}
pub struct Inodes<'a, S: 'a + Size, V: 'a + Volume<u8, Address<S>>> {
fs: &'a Ext2<S, V>,
block_groups: &'a [BlockGroupDescriptor],
log_block_size: u32,
inode_size: usize,
inodes_per_group: usize,
inodes_count: usize,
index: usize,
}
impl<'a, S: Size + Copy, V: 'a + Volume<u8, Address<S>>> Iterator
for Inodes<'a, S, V>
where
Error: From<V::Error>,
{
type Item = (Inode<'a, S, V>, Address<S>);
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.inodes_count {
let block_group = (self.index - 1) / self.inodes_per_group;
let index = (self.index - 1) % self.inodes_per_group;
self.index += 1;
let inodes_block = self.block_groups[block_group].inode_table_block;
let offset = Address::with_block_size(
inodes_block,
(index * self.inode_size) as i32,
self.log_block_size,
);
let raw = unsafe {
RawInode::find_inode(&self.fs.volume, offset, self.inode_size)
.ok()
};
raw.map(|(raw, offset)| (Inode::new(self.fs, raw), offset))
} else {
None
}
}
}
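// Editor's sketch (illustrative, not part of the original commit): inode
// numbers are 1-based, so the iterator above maps inode `n` to entry
// `(n - 1) % inodes_per_group` of block group `(n - 1) / inodes_per_group`.
// The group size of 1832 is only an assumed example value.
#[cfg(test)]
#[allow(dead_code)]
fn inode_group_and_index_example() {
    let inodes_per_group = 1832_usize;
    let locate = |inode: usize| {
        ((inode - 1) / inodes_per_group, (inode - 1) % inodes_per_group)
    };
    assert_eq!(locate(1), (0, 0)); // first inode of the first group
    assert_eq!(locate(1832), (0, 1831)); // last inode of the first group
    assert_eq!(locate(1833), (1, 0)); // first inode of the second group
}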
#[derive(Debug, Clone)]
pub struct Inode<'a, S: 'a + Size, V: 'a + Volume<u8, Address<S>>> {
fs: &'a Ext2<S, V>,
inner: RawInode,
}
impl<'a, S: 'a + Size + Copy, V: 'a + Volume<u8, Address<S>>> Inode<'a, S, V> {
pub fn new(fs: &'a Ext2<S, V>, inner: RawInode) -> Inode<'a, S, V> {
Inode { fs, inner }
}
pub fn block(&self, index: usize) -> Option<NonZero<u32>> {
// number of blocks in direct table: 12
// number of blocks in indirect table: block_size/4
// why?
// - a block is n bytes long
// - a block address occupies 32 bits, or 4 bytes
// - thus, n/4
// number of blocks in doubly table: (block_size/4)^2
// why?
// - every entry in the doubly table points to another block
// - that's n/4 blocks, where n is the block size
// - every block contains n/4 block pointers
// - that's n/4 blocks with n/4 pointers each = (n/4)^2
// number of blocks in triply table: (block_size/4)^3
let bs4 = self.fs.block_size() / 4;
if index < 12 {
NonZero::new(self.inner.direct_pointer[index])
} else if index < bs4 + 12 {
let block = self.inner.indirect_pointer;
let offset = index - 12;
let addr = Address::with_block_size(
block,
offset as i32,
self.fs.log_block_size(),
);
let size = Address::from(4_u64);
let slice = self.fs.volume.slice(addr..addr + size);
slice.and_then(|slice| unsafe {
NonZero::new(u32::from_le(slice.dynamic_cast::<u32>().0))
})
} else if index < bs4 * bs4 + bs4 + 12 {
unimplemented!("doubly indirect pointer table");
} else if index < bs4 * bs4 * bs4 + bs4 * bs4 + bs4 + 12 {
unimplemented!("triply indirect pointer table");
} else {
None
}
}
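// Editor's sketch (illustrative, not part of the original commit): the
// thresholds computed in `block` above, worked out for an assumed
// 1024-byte block size, where bs4 = 1024 / 4 = 256 pointers fit in one block.
#[cfg(test)]
#[allow(dead_code)]
fn block_index_ranges_example() {
    let bs4 = 1024_usize / 4;
    assert_eq!(bs4, 256);
    // direct pointers cover indices 0..12,
    // the singly indirect table covers indices 12..268,
    assert_eq!(bs4 + 12, 268);
    // the doubly indirect table covers indices 268..65_804,
    assert_eq!(bs4 * bs4 + bs4 + 12, 65_804);
    // and the triply indirect table covers indices 65_804..16_843_020
    assert_eq!(bs4 * bs4 * bs4 + bs4 * bs4 + bs4 + 12, 16_843_020);
}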
pub fn in_use(&self) -> bool {
self.inner.hard_links > 0
}
pub fn uid(&self) -> u16 {
self.inner.uid
}
pub fn sectors(&self) -> usize {
self.inner.sectors_count as usize
}
pub fn size32(&self) -> u32 {
self.inner.size_low
}
pub fn size64(&self) -> u64 {
self.inner.size_low as u64 | (self.inner.size_high as u64) << 32
}
#[cfg(target_pointer_width = "64")]
#[inline]
pub fn size(&self) -> usize {
self.size64() as usize
}
#[cfg(target_pointer_width = "32")]
#[inline]
pub fn size(&self) -> usize {
self.size32() as usize
}
}
pub struct InodeBlocks<'a: 'b, 'b, S: 'a + Size, V: 'a + Volume<u8, Address<S>>>
{
inode: &'b Inode<'a, S, V>,
index: usize,
}
impl<'a, 'b, S: Size + Copy, V: 'a + Volume<u8, Address<S>>>
InodeBlocks<'a, 'b, S, V>
where
Error: From<V::Error>,
{
pub fn new(inode: &'b Inode<'a, S, V>) -> InodeBlocks<'a, 'b, S, V> {
InodeBlocks { inode, index: 0 }
}
}
impl<'a, 'b, S: Size + Copy, V: 'a + Volume<u8, Address<S>>> Iterator
for InodeBlocks<'a, 'b, S, V>
where
Error: From<V::Error>,
{
type Item = (VolumeSlice<'a, u8, Address<S>>, Address<S>);
fn next(&mut self) -> Option<Self::Item> {
let block = self.inode.block(self.index);
block
.map(|block| {
let block = block.get();
self.index += 1;
Address::with_block_size(
block,
0,
self.inode.fs.log_block_size(),
)
..Address::with_block_size(
block + 1,
0,
self.inode.fs.log_block_size(),
)
})
.and_then(|block| {
let offset = block.start;
self.inode
.fs
.volume
.slice(block)
.map(|slice| (slice, offset))
})
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::cell::RefCell;
use sector::{Address, Size512};
use volume::Volume;
use super::{Ext2, InodeBlocks};
#[test]
fn file_len() {
let file = RefCell::new(File::open("ext2.img").unwrap());
assert_eq!(
Address::<Size512>::from(2048_u64)
- Address::<Size512>::from(1024_u64),
Address::<Size512>::new(2, 0)
);
assert_eq!(
unsafe {
file.slice_unchecked(
Address::<Size512>::from(1024_u64)
..Address::<Size512>::from(2048_u64),
).len()
},
1024
);
}
#[test]
fn file() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Ext2::<Size512, _>::new(file);
assert!(
fs.is_ok(),
"Err({:?})",
fs.err().unwrap_or_else(|| unreachable!()),
);
let fs = fs.unwrap();
let vers = fs.version();
println!("version: {}.{}", vers.0, vers.1);
assert_eq!(128, fs.inode_size());
}
#[test]
fn inodes() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Ext2::<Size512, _>::new(file);
assert!(
fs.is_ok(),
"Err({:?})",
fs.err().unwrap_or_else(|| unreachable!()),
);
let fs = fs.unwrap();
let inodes = fs.inodes().filter(|inode| inode.0.in_use());
for inode in inodes {
println!("{:?}", inode);
}
}
#[test]
fn inode_blocks() {
use std::str;
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Ext2::<Size512, _>::new(file).unwrap();
let inodes = fs.inodes().filter(|inode| {
inode.0.in_use() && inode.0.uid() == 1000 && inode.0.size() < 1024
});
for inode in inodes {
println!("{:?}", inode.0);
let size = inode.0.size();
for block in InodeBlocks::new(&inode.0) {
let (data, _) = block;
assert_eq!(data.len(), fs.block_size());
println!("{:?}", &data[..size]);
let _ = str::from_utf8(&data[..size])
.map(|string| println!("{}", string));
}
}
}
#[test]
fn read_inode() {
let file = RefCell::new(File::open("ext2.img").unwrap());
let fs = Ext2::<Size512, _>::new(file).unwrap();
let inodes = fs.inodes().filter(|inode| {
inode.0.in_use() && inode.0.uid() == 1000 && inode.0.size() < 1024
});
for (inode, _) in inodes {
let mut buf = Vec::with_capacity(inode.size());
unsafe {
buf.set_len(inode.size());
}
let size = fs.read_inode(&mut buf[..], &inode);
assert_eq!(size, Ok(inode.size()));
unsafe {
buf.set_len(size.unwrap());
}
}
}
}

src/lib.rs

@@ -1,4 +1,36 @@
#![cfg_attr(not(test), no_std)]
#![feature(alloc)]
#![feature(specialization)]
#![feature(swap_with_slice)]
#![feature(macro_lifetime_matcher)]
#![feature(const_fn)]
#![feature(step_trait)]
#![feature(nonzero)]
#![cfg_attr(all(not(test), feature = "no_std"), no_std)]
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate bitflags;
#[cfg(any(test, not(feature = "no_std")))]
extern crate core;
pub mod error;
pub mod sys;
pub mod sector;
pub mod volume;
pub mod fs;
#[cfg(test)]
mod tests {}
mod tests {
use sys::superblock::*;
use sys::block_group::*;
use sys::inode::*;
#[test]
fn sizes() {
use std::mem::size_of;
assert_eq!(size_of::<Superblock>(), 1024);
assert_eq!(size_of::<BlockGroupDescriptor>(), 32);
assert_eq!(size_of::<Inode>(), 128);
}
}

233
src/sector.rs Normal file

@@ -0,0 +1,233 @@
use core::mem;
use core::marker::PhantomData;
use core::ops::{Add, Sub};
use core::fmt::{self, Debug, Display, LowerHex};
use core::iter::Step;
pub trait Size: PartialOrd {
// log_sector_size = log_2(sector_size)
const LOG_SIZE: u32;
const SIZE: usize = 1 << Self::LOG_SIZE;
const OFFSET_MASK: u32 = (Self::SIZE - 1) as u32;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Size512;
impl Size for Size512 {
const LOG_SIZE: u32 = 9;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Size1024;
impl Size for Size1024 {
const LOG_SIZE: u32 = 10;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Size2048;
impl Size for Size2048 {
const LOG_SIZE: u32 = 11;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Size4096;
impl Size for Size4096 {
const LOG_SIZE: u32 = 12;
}
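// Editor's sketch (illustrative, not part of the original commit): the
// derived constants above split a byte index into a sector number and an
// in-sector offset; for 512-byte sectors that is (index >> 9, index & 0x1ff).
#[cfg(test)]
#[allow(dead_code)]
fn size_constants_example() {
    assert_eq!(Size512::SIZE, 512);
    assert_eq!(Size512::OFFSET_MASK, 0x1ff);
    assert_eq!(Size4096::SIZE, 4096);
    assert_eq!(1234 >> Size512::LOG_SIZE, 2); // byte 1234 lies in sector 2...
    assert_eq!(1234 & Size512::OFFSET_MASK, 210); // ...at offset 210
}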
/// Address in a physical sector
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Address<S: Size> {
sector: u32,
offset: u32,
_phantom: PhantomData<S>,
}
impl<S: Size> Address<S> {
pub unsafe fn new_unchecked(sector: u32, offset: u32) -> Address<S> {
assert!((offset as usize) < S::SIZE, "offset out of sector bounds");
let _phantom = PhantomData;
Address {
sector,
offset,
_phantom,
}
}
pub fn new(sector: u32, offset: i32) -> Address<S> {
let sector = (sector as i32 + (offset >> S::LOG_SIZE)) as u32;
let offset = offset.abs() as u32 & S::OFFSET_MASK;
unsafe { Address::new_unchecked(sector, offset) }
}
pub fn with_block_size(
block: u32,
offset: i32,
log_block_size: u32,
) -> Address<S> {
let block = (block as i32 + (offset >> log_block_size)) as u32;
let offset = offset.abs() as u32 & ((1 << log_block_size) - 1);
let log_diff = log_block_size as i32 - S::LOG_SIZE as i32;
let top_offset = offset >> S::LOG_SIZE;
let offset = offset & ((1 << S::LOG_SIZE) - 1);
let sector = block << log_diff | top_offset;
unsafe { Address::new_unchecked(sector, offset) }
}
pub fn into_index(&self) -> u64 {
((self.sector as u64) << S::LOG_SIZE) + self.offset as u64
}
pub const fn sector_size(&self) -> usize {
S::SIZE
}
pub const fn log_sector_size(&self) -> u32 {
S::LOG_SIZE
}
pub fn sector(&self) -> u32 {
self.sector
}
pub fn offset(&self) -> u32 {
self.offset
}
}
impl<S: Size + Clone + PartialOrd> Step for Address<S> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if end.sector >= start.sector {
Some(end.sector as usize - start.sector as usize)
} else {
None
}
}
fn replace_one(&mut self) -> Self {
mem::replace(self, Address::new(1, 0))
}
fn replace_zero(&mut self) -> Self {
mem::replace(self, Address::new(0, 0))
}
fn add_one(&self) -> Self {
Address::new(self.sector + 1, 0)
}
fn sub_one(&self) -> Self {
Address::new(self.sector - 1, 0)
}
fn add_usize(&self, n: usize) -> Option<Self> {
self.sector
.checked_add(n as u32)
.map(|sector| Address::new(sector, 0))
}
}
impl<S: Size> Debug for Address<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let name = format!("Address<{}>", S::SIZE);
f.debug_struct(&name)
.field("sector", &self.sector)
.field("offset", &self.offset)
.finish()
}
}
impl<S: Size> Display for Address<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.sector, self.offset)
}
}
impl<S: Size> LowerHex for Address<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:x}:{:x}", self.sector, self.offset)
}
}
impl<S: Size> From<u64> for Address<S> {
fn from(idx: u64) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK as u64;
Address::new(sector as u32, offset as i32)
}
}
impl<S: Size> From<usize> for Address<S> {
fn from(idx: usize) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK as usize;
Address::new(sector as u32, offset as i32)
}
}
impl<S: Size> Add for Address<S> {
type Output = Address<S>;
fn add(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector + rhs.sector,
(self.offset + rhs.offset) as i32,
)
}
}
impl<S: Size> Sub for Address<S> {
type Output = Address<S>;
fn sub(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector - rhs.sector,
self.offset as i32 - rhs.offset as i32,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn conv() {
assert_eq!(Address::<Size512>::new(0, 1024).into_index(), 1024);
assert_eq!(Address::<Size512>::from(1024_u64).into_index(), 1024);
assert_eq!(
Address::<Size512>::with_block_size(1, 256, 10).into_index(),
1024 + 256
);
assert_eq!(
Address::<Size512>::with_block_size(2, 0, 10).into_index(),
2048
);
assert_eq!(
Address::<Size512>::with_block_size(0, 1792, 10).into_index(),
1792
);
}
#[test]
fn arithmetic() {
assert_eq!(
Address::<Size512>::new(0, 512),
Address::<Size512>::new(1, 0),
);
assert_eq!(
Address::<Size512>::new(2, -256),
Address::<Size512>::new(1, 256),
);
let a = Address::<Size2048>::new(0, 1024);
let b = Address::<Size2048>::new(0, 1024);
assert_eq!(a + b, Address::<Size2048>::new(1, 0));
assert_eq!((a + b).into_index(), 2048);
let a = Address::<Size512>::new(0, 2048);
let b = Address::<Size512>::new(0, 256);
assert_eq!(a - b, Address::<Size512>::new(3, 256));
assert_eq!((a - b).into_index(), 1792);
}
}

139
src/sys/block_group.rs Normal file

@@ -0,0 +1,139 @@
use core::mem;
use core::fmt::{self, Debug};
use alloc::Vec;
use error::Error;
use sector::{Address, Size};
use volume::Volume;
/// The Block Group Descriptor Table contains a descriptor for each block group
/// within the file system. The number of block groups within the file system,
/// and correspondingly, the number of entries in the Block Group Descriptor
/// Table, is determined from the Superblock. Each descriptor contains information regarding
/// where important data structures for that group are located.
///
/// The (`BlockGroupDescriptor`) table is located in the block immediately
/// following the Superblock. So if the block size (determined from a field in
/// the superblock) is 1024 bytes per block, the Block Group Descriptor Table
/// will begin at block 2. For any other block size, it will begin at block 1.
/// Remember that blocks are numbered starting at 0, and that block numbers
/// don't usually correspond to physical block addresses.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct BlockGroupDescriptor {
/// Block address of block usage bitmap
pub block_usage_addr: u32,
/// Block address of inode usage bitmap
pub inode_usage_addr: u32,
/// Starting block address of inode table
pub inode_table_block: u32,
/// Number of unallocated blocks in group
pub free_blocks_count: u16,
/// Number of unallocated inodes in group
pub free_inodes_count: u16,
/// Number of directories in group
pub dirs_count: u16,
#[doc(hidden)]
_reserved: [u8; 14],
}
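// Editor's sketch (illustrative, not part of the original commit): the
// starting byte of the descriptor table follows from the rule above, since
// the table sits in the block right after the one holding the superblock
// (`first_data_block` is 1 for 1024-byte blocks and 0 for larger block sizes).
#[cfg(test)]
#[allow(dead_code)]
fn descriptor_table_offset_example() {
    let table_start = |block_size: usize, first_data_block: usize| {
        (first_data_block + 1) * block_size
    };
    assert_eq!(table_start(1024, 1), 2048); // 1024-byte blocks: block 2
    assert_eq!(table_start(4096, 0), 4096); // 4096-byte blocks: block 1
}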
impl Debug for BlockGroupDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BlockGroupDescriptor")
.field("block_usage_addr", unsafe { &self.block_usage_addr })
.field("inode_usage_addr", unsafe { &self.inode_usage_addr })
.field("inode_table_block", unsafe { &self.inode_table_block })
.field("free_blocks_count", unsafe { &self.free_blocks_count })
.field("free_inodes_count", unsafe { &self.free_inodes_count })
.field("dirs_count", unsafe { &self.dirs_count })
.finish()
}
}
impl BlockGroupDescriptor {
pub unsafe fn find_descriptor<
S: Size + Copy + PartialOrd,
V: Volume<u8, Address<S>>,
>(
haystack: &V,
offset: Address<S>,
) -> Result<(BlockGroupDescriptor, Address<S>), Error>
where
Error: From<V::Error>,
{
let end =
offset + Address::from(mem::size_of::<BlockGroupDescriptor>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds(
end.sector(),
end.offset(),
end.sector_size(),
));
}
let descr = haystack
.slice_unchecked(offset..end)
.dynamic_cast::<BlockGroupDescriptor>();
Ok(descr)
}
pub unsafe fn find_descriptor_table<
S: Size + Copy + PartialOrd,
V: Volume<u8, Address<S>>,
>(
haystack: &V,
offset: Address<S>,
count: usize,
) -> Result<(Vec<BlockGroupDescriptor>, Address<S>), Error>
where
Error: From<V::Error>,
{
let end = offset
+ Address::from(count * mem::size_of::<BlockGroupDescriptor>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds(
end.sector(),
end.offset(),
end.sector_size(),
));
}
let mut vec = Vec::with_capacity(count);
for i in 0..count {
let offset = offset
+ Address::from(i * mem::size_of::<BlockGroupDescriptor>());
vec.push({
BlockGroupDescriptor::find_descriptor(haystack, offset)?.0
});
}
Ok((vec, offset))
}
}
#[cfg(test)]
mod tests {
use sector::{Address, Size512};
use super::*;
#[test]
fn find() {
let volume = vec![0_u8; 4096];
let table = unsafe {
BlockGroupDescriptor::find_descriptor_table(
&volume,
Address::<Size512>::new(4, 0),
8,
)
};
assert!(
table.is_ok(),
"Err({:?})",
table.err().unwrap_or_else(|| unreachable!()),
);
let table = table.unwrap_or_else(|_| unreachable!());
assert_eq!(table.0.len(), 8);
}
}

200
src/sys/inode.rs Normal file

@@ -0,0 +1,200 @@
use core::mem;
use core::fmt::{self, Debug};
use error::Error;
use sector::{Address, Size};
use volume::Volume;
/// An inode is a structure on the disk that represents a file, directory,
/// symbolic link, etc. Inodes do not contain the data of the file / directory /
/// etc. that they represent. Instead, they link to the blocks that actually
/// contain the data. This lets the inodes themselves have a well-defined size
/// which lets them be placed in easily indexed arrays. Each block group has an
/// array of inodes it is responsible for, and conversely every inode within a
/// file system belongs to one such table (and one such block group).
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct Inode {
/// Type and Permissions (see below)
pub type_perm: u16,
/// User ID
pub uid: u16,
/// Lower 32 bits of size in bytes
pub size_low: u32,
/// Last Access Time (in POSIX time)
pub atime: u32,
/// Creation Time (in POSIX time)
pub ctime: u32,
/// Last Modification time (in POSIX time)
pub mtime: u32,
/// Deletion time (in POSIX time)
pub dtime: u32,
/// Group ID
pub gid: u16,
/// Count of hard links (directory entries) to this inode. When this
/// reaches 0, the data blocks are marked as unallocated.
pub hard_links: u16,
/// Count of disk sectors (not Ext2 blocks) in use by this inode, not
/// counting the actual inode structure nor directory entries linking
/// to the inode.
pub sectors_count: u32,
/// Flags
pub flags: u32,
/// Operating System Specific value #1
pub _os_specific_1: [u8; 4],
/// Direct block pointers
pub direct_pointer: [u32; 12],
/// Singly Indirect Block Pointer (Points to a block that is a list of
/// block pointers to data)
pub indirect_pointer: u32,
/// Doubly Indirect Block Pointer (Points to a block that is a list of
/// block pointers to Singly Indirect Blocks)
pub doubly_indirect: u32,
/// Triply Indirect Block Pointer (Points to a block that is a list of
/// block pointers to Doubly Indirect Blocks)
pub triply_indirect: u32,
/// Generation number (Primarily used for NFS)
pub gen_number: u32,
/// In Ext2 version 0, this field is reserved. In version >= 1,
/// Extended attribute block (File ACL).
pub ext_attribute_block: u32,
/// In Ext2 version 0, this field is reserved. In version >= 1, Upper
/// 32 bits of file size (if feature bit set) if it's a file,
/// Directory ACL if it's a directory
pub size_high: u32,
/// Block address of fragment
pub frag_block_addr: u32,
/// Operating System Specific Value #2
pub _os_specific_2: [u8; 12],
}
impl Debug for Inode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Inode")
.field("type_perm", unsafe { &self.type_perm })
.field("uid", unsafe { &self.uid })
.field("size_low", unsafe { &self.size_low })
.field("atime", unsafe { &self.atime })
.field("ctime", unsafe { &self.ctime })
.field("mtime", unsafe { &self.mtime })
.field("dtime", unsafe { &self.dtime })
.field("gid", unsafe { &self.gid })
.field("hard_links", unsafe { &self.hard_links })
.field("sectors_count", unsafe { &self.sectors_count })
.field("flags", unsafe { &self.flags })
.field("os_specific_1", &self._os_specific_1)
.field("direct_pointer", unsafe { &self.direct_pointer })
.field("indirect_pointer", unsafe { &self.indirect_pointer })
.field("doubly_indirect", unsafe { &self.doubly_indirect })
.field("triply_indirect", unsafe { &self.triply_indirect })
.field("gen_number", unsafe { &self.gen_number })
.field("ext_attribute_block", unsafe { &self.ext_attribute_block })
.field("size_high", unsafe { &self.size_high })
.field("frag_block_addr", unsafe { &self.frag_block_addr })
.field("os_specific_2", &self._os_specific_2)
.finish()
}
}
impl Inode {
pub unsafe fn find_inode<
S: Size + Copy + PartialOrd,
V: Volume<u8, Address<S>>,
>(
haystack: &V,
offset: Address<S>,
size: usize,
) -> Result<(Inode, Address<S>), Error>
where
Error: From<V::Error>,
{
if size != mem::size_of::<Inode>() {
unimplemented!("inodes with a size != 128");
}
let end = offset + Address::from(size);
if haystack.size() < end {
return Err(Error::AddressOutOfBounds(
end.sector(),
end.offset(),
end.sector_size(),
));
}
let inode = haystack
.slice_unchecked(offset..end)
.dynamic_cast::<Inode>();
Ok(inode)
}
}
bitflags! {
pub struct TypePerm: u16 {
/// FIFO
const FIFO = 0x1000;
/// Character device
const CHAR_DEVICE = 0x2000;
/// Directory
const DIRECTORY = 0x4000;
/// Block device
const BLOCK_DEVICE = 0x6000;
/// Regular file
const FILE = 0x8000;
/// Symbolic link
const SYMLINK = 0xA000;
/// Unix socket
const SOCKET = 0xC000;
/// Other—execute permission
const O_EXEC = 0x001;
/// Other—write permission
const O_WRITE = 0x002;
/// Other—read permission
const O_READ = 0x004;
/// Group—execute permission
const G_EXEC = 0x008;
/// Group—write permission
const G_WRITE = 0x010;
/// Group—read permission
const G_READ = 0x020;
/// User—execute permission
const U_EXEC = 0x040;
/// User—write permission
const U_WRITE = 0x080;
/// User—read permission
const U_READ = 0x100;
/// Sticky Bit
const STICKY = 0x200;
/// Set group ID
const SET_GID = 0x400;
/// Set user ID
const SET_UID = 0x800;
}
}
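// Editor's sketch (illustrative, not part of the original commit):
// `type_perm` packs the file type and the Unix permission bits into one
// field, so a regular file with mode 644 (rw-r--r--) is stored as 0x81a4.
#[cfg(test)]
#[allow(dead_code)]
fn type_perm_example() {
    let mode = TypePerm::FILE
        | TypePerm::U_READ
        | TypePerm::U_WRITE
        | TypePerm::G_READ
        | TypePerm::O_READ;
    assert_eq!(mode.bits(), 0x81a4);
    assert!(mode.contains(TypePerm::FILE));
}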
bitflags! {
pub struct InodeFlags: u32 {
/// Secure deletion (not used)
const SECURE_DEL = 0x00000001;
/// Keep a copy of data when deleted (not used)
const KEEP_COPY = 0x00000002;
/// File compression (not used)
const COMPRESSION = 0x00000004;
/// Synchronous updates—new data is written immediately to disk
const SYNC_UPDATE = 0x00000008;
/// Immutable file (content cannot be changed)
const IMMUTABLE = 0x00000010;
/// Append only
const APPEND_ONLY = 0x00000020;
/// File is not included in 'dump' command
const NODUMP = 0x00000040;
/// Last accessed time should not be updated
const DONT_ATIME = 0x00000080;
/// Hash indexed directory
const HASH_DIR = 0x00010000;
/// AFS directory
const AFS_DIR = 0x00020000;
/// Journal file data
const JOURNAL_DATA = 0x00040000;
}
}

3
src/sys/mod.rs Normal file

@@ -0,0 +1,3 @@
pub mod superblock;
pub mod block_group;
pub mod inode;

329
src/sys/superblock.rs Normal file

@@ -0,0 +1,329 @@
use core::mem;
use core::fmt::{self, Debug};
use error::Error;
use sector::{Address, Size};
use volume::Volume;
/// Ext2 signature (0xef53), used to help confirm the presence of Ext2 on a
/// volume
pub const EXT2_MAGIC: u16 = 0xef53;
/// Filesystem is free of errors
pub const FS_CLEAN: u16 = 1;
/// Filesystem has errors
pub const FS_ERR: u16 = 2;
/// Ignore errors
pub const ERR_IGNORE: u16 = 1;
/// Remount as read-only on error
pub const ERR_RONLY: u16 = 2;
/// Panic on error
pub const ERR_PANIC: u16 = 3;
/// Creator OS is Linux
pub const OS_LINUX: u32 = 0;
/// Creator OS is Hurd
pub const OS_HURD: u32 = 1;
/// Creator OS is Masix
pub const OS_MASIX: u32 = 2;
/// Creator OS is FreeBSD
pub const OS_FREEBSD: u32 = 3;
/// Creator OS is a BSD4.4-Lite derivative
pub const OS_LITE: u32 = 4;
/// The Superblock contains all information about the layout of the file system
/// and possibly contains other important information like what optional
/// features were used to create the file system.
///
/// The Superblock is always located at byte 1024 from the beginning of the
/// volume and is exactly 1024 bytes in length. For example, if the disk uses
/// 512-byte sectors, the Superblock will begin at LBA 2 and will occupy all of
/// sectors 2 and 3.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct Superblock {
// taken from https://wiki.osdev.org/Ext2
/// Total number of inodes in file system
pub inodes_count: u32,
/// Total number of blocks in file system
pub blocks_count: u32,
/// Number of blocks reserved for superuser (see offset 80)
pub r_blocks_count: u32,
/// Total number of unallocated blocks
pub free_blocks_count: u32,
/// Total number of unallocated inodes
pub free_inodes_count: u32,
/// Block number of the block containing the superblock
pub first_data_block: u32,
/// log2 (block size) - 10. (In other words, the number to shift 1,024
/// to the left by to obtain the block size)
pub log_block_size: u32,
/// log2 (fragment size) - 10. (In other words, the number to shift
/// 1,024 to the left by to obtain the fragment size)
pub log_frag_size: i32,
/// Number of blocks in each block group
pub blocks_per_group: u32,
/// Number of fragments in each block group
pub frags_per_group: u32,
/// Number of inodes in each block group
pub inodes_per_group: u32,
/// Last mount time (in POSIX time)
pub mtime: u32,
/// Last written time (in POSIX time)
pub wtime: u32,
/// Number of times the volume has been mounted since its last
/// consistency check (fsck)
pub mnt_count: u16,
/// Number of mounts allowed before a consistency check (fsck) must be
/// done
pub max_mnt_count: i16,
/// Ext2 signature (0xef53), used to help confirm the presence of Ext2
/// on a volume
pub magic: u16,
/// File system state (see `FS_CLEAN` and `FS_ERR`)
pub state: u16,
/// What to do when an error is detected (see `ERR_IGNORE`, `ERR_RONLY` and
/// `ERR_PANIC`)
pub errors: u16,
/// Minor portion of version (combine with Major portion below to
/// construct full version field)
pub rev_minor: u16,
/// POSIX time of last consistency check (fsck)
pub lastcheck: u32,
/// Interval (in POSIX time) between forced consistency checks (fsck)
pub checkinterval: u32,
/// Operating system ID from which the filesystem on this volume was
/// created
pub creator_os: u32,
/// Major portion of version (combine with Minor portion above to
/// construct full version field)
pub rev_major: u32,
/// User ID that can use reserved blocks
pub block_uid: u16,
/// Group ID that can use reserved blocks
pub block_gid: u16,
/// First non-reserved inode in file system.
pub first_inode: u32,
/// Size of each inode structure in bytes.
pub inode_size: u16,
/// Block group that this superblock is part of (if backup copy)
pub block_group: u16,
/// Optional features present (features that are not required to read
/// or write, but usually result in a performance increase)
pub features_opt: FeaturesOptional,
/// Required features present (features that are required to be
/// supported to read or write)
pub features_req: FeaturesRequired,
/// Features that, if not supported, require the volume to be mounted
/// read-only
pub features_ronly: FeaturesROnly,
/// File system ID (what is output by blkid)
pub fs_id: [u8; 16],
/// Volume name (C-style string: characters terminated by a 0 byte)
pub volume_name: [u8; 16],
/// Path volume was last mounted to (C-style string: characters
/// terminated by a 0 byte)
pub last_mnt_path: [u8; 64],
/// Compression algorithms used (see Required features above)
pub compression: u32,
/// Number of blocks to preallocate for files
pub prealloc_blocks_files: u8,
/// Number of blocks to preallocate for directories
pub prealloc_blocks_dirs: u8,
#[doc(hidden)]
_unused: [u8; 2],
/// Journal ID (same style as the File system ID above)
pub journal_id: [u8; 16],
/// Journal inode
pub journal_inode: u32,
/// Journal device
pub journal_dev: u32,
/// Head of orphan inode list
pub journal_orphan_head: u32,
#[doc(hidden)]
_reserved: [u8; 788],
}
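// Editor's sketch (illustrative, not part of the original commit): with
// 512-byte sectors the 1024-byte superblock described above spans LBA 2
// and 3, i.e. bytes 1024..2048 of the volume.
#[cfg(test)]
#[allow(dead_code)]
fn superblock_location_example() {
    use sector::Size512;
    assert_eq!(Address::<Size512>::from(1024_usize).sector(), 2);
    assert_eq!(Address::<Size512>::from(2047_usize).sector(), 3);
}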
impl Debug for Superblock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Superblock")
.field("inodes_count", unsafe { &self.inodes_count })
.field("blocks_count", unsafe { &self.blocks_count })
.field("r_blocks_count", unsafe { &self.r_blocks_count })
.field("free_blocks_count", unsafe { &self.free_blocks_count })
.field("free_inodes_count", unsafe { &self.free_inodes_count })
.field("first_data_block", unsafe { &self.first_data_block })
.field("log_block_size", unsafe { &self.log_block_size })
.field("log_frag_size", unsafe { &self.log_frag_size })
.field("blocks_per_group", unsafe { &self.blocks_per_group })
.field("frags_per_group", unsafe { &self.frags_per_group })
.field("inodes_per_group", unsafe { &self.inodes_per_group })
.field("mtime", unsafe { &self.mtime })
.field("wtime", unsafe { &self.wtime })
.field("mnt_count", unsafe { &self.mnt_count })
.field("max_mnt_count", unsafe { &self.max_mnt_count })
.field("magic", unsafe { &self.magic })
.field("state", unsafe { &self.state })
.field("errors", unsafe { &self.errors })
.field("rev_minor", unsafe { &self.rev_minor })
.field("lastcheck", unsafe { &self.lastcheck })
.field("checkinterval", unsafe { &self.checkinterval })
.field("creator_os", unsafe { &self.creator_os })
.field("rev_major", unsafe { &self.rev_major })
.field("block_uid", unsafe { &self.block_uid })
.field("block_gid", unsafe { &self.block_gid })
.field("first_inode", unsafe { &self.first_inode })
.field("inode_size", unsafe { &self.inode_size })
.field("block_group", unsafe { &self.block_group })
.field("features_opt", unsafe { &self.features_opt })
.field("features_req", unsafe { &self.features_req })
.field("features_ronly", unsafe { &self.features_ronly })
.field("fs_id", &self.fs_id)
.field("volume_name", &self.volume_name)
.field("last_mnt_path", &self.last_mnt_path.as_ref())
.field("compression", unsafe { &self.compression })
.field("prealloc_blocks_files", &self.prealloc_blocks_files)
.field("prealloc_blocks_dirs", &self.prealloc_blocks_dirs)
.field("journal_id", &self.journal_id)
.field("journal_inode", unsafe { &self.journal_inode })
.field("journal_dev", unsafe { &self.journal_dev })
.field("journal_orphan_head", unsafe { &self.journal_orphan_head })
.finish()
}
}
impl Superblock {
pub unsafe fn find<S: Size + Copy + PartialOrd, V: Volume<u8, Address<S>>>(
haystack: &V,
) -> Result<(Superblock, Address<S>), Error>
where
Error: From<V::Error>,
{
let offset = Address::from(1024_usize);
let end = offset + Address::from(mem::size_of::<Superblock>());
if haystack.size() < end {
return Err(Error::AddressOutOfBounds(
end.sector(),
end.offset(),
end.sector_size(),
));
}
let superblock = {
haystack
.slice_unchecked(offset..end)
.dynamic_cast::<Superblock>()
};
if superblock.0.magic != EXT2_MAGIC {
Err(Error::BadMagic(superblock.0.magic))
} else {
Ok(superblock)
}
}
#[inline]
pub fn block_size(&self) -> usize {
1024 << self.log_block_size
}
#[inline]
pub fn frag_size(&self) -> usize {
1024 << self.log_frag_size
}
pub fn block_group_count(&self) -> Result<u32, (u32, u32)> {
let blocks_mod = self.blocks_count % self.blocks_per_group;
let inodes_mod = self.inodes_count % self.inodes_per_group;
let blocks_inc = if blocks_mod == 0 { 0 } else { 1 };
let inodes_inc = if inodes_mod == 0 { 0 } else { 1 };
let by_blocks = self.blocks_count / self.blocks_per_group + blocks_inc;
let by_inodes = self.inodes_count / self.inodes_per_group + inodes_inc;
if by_blocks == by_inodes {
Ok(by_blocks)
} else {
Err((by_blocks, by_inodes))
}
}
}
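// Editor's sketch (illustrative, not part of the original commit):
// `block_size` is 1024 shifted left by `log_block_size`, and
// `block_group_count` is a ceiling division that must agree whether it is
// computed from block counts or inode counts. The figures below are assumed
// example values, not taken from ext2.img.
#[cfg(test)]
#[allow(dead_code)]
fn superblock_math_example() {
    assert_eq!(1024_usize << 0, 1024); // log_block_size = 0
    assert_eq!(1024_usize << 2, 4096); // log_block_size = 2
    // 8193 blocks at 8192 blocks per group round up to 2 block groups
    let (blocks_count, blocks_per_group) = (8193_u32, 8192_u32);
    let round_up = if blocks_count % blocks_per_group == 0 { 0 } else { 1 };
    assert_eq!(blocks_count / blocks_per_group + round_up, 2);
}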
bitflags! {
/// Optional features
pub struct FeaturesOptional: u32 {
/// Preallocate some number of (contiguous?) blocks (see
/// `Superblock::prealloc_blocks_dirs`) to a directory when creating a new one
const PREALLOCATE = 0x0001;
/// AFS server inodes exist
const AFS = 0x0002;
/// File system has a journal (Ext3)
const JOURNAL = 0x0004;
/// Inodes have extended attributes
const EXTENDED_INODE = 0x0008;
/// File system can resize itself for larger partitions
const SELF_RESIZE = 0x0010;
/// Directories use hash index
const HASH_INDEX = 0x0020;
}
}
bitflags! {
/// Required features. If these are not supported, the volume can't be mounted
pub struct FeaturesRequired: u32 {
/// Compression is used
const REQ_COMPRESSION = 0x0001;
/// Directory entries contain a type field
const REQ_DIRECTORY_TYPE = 0x0002;
/// File system needs to replay its journal
const REQ_REPLAY_JOURNAL = 0x0004;
/// File system uses a journal device
const REQ_JOURNAL_DEVICE = 0x0008;
}
}
bitflags! {
/// Read-only features. If these are not supported, the volume must be remounted read-only
pub struct FeaturesROnly: u32 {
/// Sparse superblocks and group descriptor tables
const RONLY_SPARSE = 0x0001;
/// File system uses a 64-bit file size
const RONLY_FILE_SIZE_64 = 0x0002;
/// Directory contents are stored in the form of a Binary Tree
const RONLY_BTREE_DIRECTORY = 0x0004;
}
}
#[cfg(test)]
mod tests {
use sector::Size512;
use super::*;
#[test]
fn find() {
let mut volume = vec![0_u8; 4096];
// magic
volume[1024 + 56] = EXT2_MAGIC as u8;
volume[1024 + 57] = (EXT2_MAGIC >> 8) as u8;
let superblock = unsafe { Superblock::find::<Size512, _>(&volume) };
assert!(
superblock.is_ok(),
"Err({:?})",
superblock.err().unwrap_or_else(|| unreachable!()),
);
}
#[test]
fn superblock() {
use std::cell::RefCell;
use std::fs::File;
let file = RefCell::new(File::open("ext2.img").unwrap());
let superblock = unsafe { Superblock::find::<Size512, _>(&file) };
assert!(
superblock.is_ok(),
"Err({:?})",
superblock.err().unwrap_or_else(|| unreachable!()),
);
}
}

97
src/volume/length.rs Normal file

@@ -0,0 +1,97 @@
use core::fmt::{self, Debug, Display};
use core::cmp::Ordering;
#[derive(Clone, Copy, Debug, Hash)]
pub enum Length<Idx> {
Unbounded,
Bounded(Idx),
}
impl<Idx: Copy> Length<Idx> {
pub fn try_len(&self) -> Option<Idx> {
match *self {
Length::Unbounded => None,
Length::Bounded(n) => Some(n),
}
}
pub unsafe fn len(&self) -> Idx {
match *self {
Length::Unbounded => panic!(
"attempt to convert `Length::Unbounded` to `Length::Idx`"
),
Length::Bounded(n) => n,
}
}
}
impl<Idx> Length<Idx> {
pub fn is_bounded(&self) -> bool {
match *self {
Length::Unbounded => false,
Length::Bounded(_) => true,
}
}
}
impl<Idx: Debug> Display for Length<Idx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(self, f)
}
}
impl<Idx: PartialEq> PartialEq for Length<Idx> {
fn eq(&self, rhs: &Self) -> bool {
match (self, rhs) {
(&Length::Unbounded, _) => false,
(_, &Length::Unbounded) => false,
(&Length::Bounded(ref a), &Length::Bounded(ref b)) => a.eq(b),
}
}
fn ne(&self, rhs: &Self) -> bool {
match (self, rhs) {
(&Length::Unbounded, _) => true,
(_, &Length::Unbounded) => true,
(&Length::Bounded(ref a), &Length::Bounded(ref b)) => a.ne(b),
}
}
}
impl<Idx: PartialEq> PartialEq<Idx> for Length<Idx> {
fn eq(&self, rhs: &Idx) -> bool {
match *self {
Length::Unbounded => false,
Length::Bounded(ref n) => n.eq(rhs),
}
}
fn ne(&self, rhs: &Idx) -> bool {
match *self {
Length::Unbounded => true,
Length::Bounded(ref n) => n.ne(rhs),
}
}
}
impl<Idx: PartialOrd> PartialOrd for Length<Idx> {
fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
match (self, rhs) {
(&Length::Unbounded, &Length::Unbounded) => None,
(&Length::Unbounded, _) => Some(Ordering::Greater),
(_, &Length::Unbounded) => Some(Ordering::Less),
(&Length::Bounded(ref a), &Length::Bounded(ref b)) => {
a.partial_cmp(b)
}
}
}
}
impl<Idx: PartialOrd> PartialOrd<Idx> for Length<Idx> {
fn partial_cmp(&self, rhs: &Idx) -> Option<Ordering> {
match *self {
Length::Unbounded => Some(Ordering::Greater),
Length::Bounded(ref n) => n.partial_cmp(rhs),
}
}
}

393
src/volume/mod.rs Normal file

@@ -0,0 +1,393 @@
use core::mem;
use core::slice;
use core::ops::{Deref, DerefMut, Range};
use alloc::Vec;
use alloc::boxed::Box;
use alloc::borrow::{Cow, ToOwned};
use error::Infallible;
use sector::{Address, Size};
pub mod length;
use self::length::Length;
pub trait Volume<T, Idx>
where
[T]: ToOwned,
Idx: PartialEq + PartialOrd,
{
type Error;
fn size(&self) -> Length<Idx>;
fn commit(
&mut self,
slice: Option<VolumeCommit<T, Idx>>,
) -> Result<(), Self::Error>;
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Idx>,
) -> VolumeSlice<'a, T, Idx>;
fn slice<'a>(
&'a self,
range: Range<Idx>,
) -> Option<VolumeSlice<'a, T, Idx>> {
if self.size() >= range.end && self.size() > range.start {
unsafe { Some(self.slice_unchecked(range)) }
} else {
None
}
}
}
pub struct VolumeSlice<'a, T: 'a, Idx>
where
[T]: ToOwned,
{
inner: Cow<'a, [T]>,
index: Idx,
}
impl<T, Idx: Default> VolumeSlice<'static, T, Idx>
where
[T]: ToOwned,
{
pub fn with_static(inner: &'static [T]) -> VolumeSlice<'static, T, Idx> {
VolumeSlice {
inner: Cow::Borrowed(inner),
index: Idx::default(),
}
}
}
impl<T, Idx> VolumeSlice<'static, T, Idx>
where
[T]: ToOwned,
{
pub fn new_owned(
inner: <[T] as ToOwned>::Owned,
index: Idx,
) -> VolumeSlice<'static, T, Idx> {
VolumeSlice {
inner: Cow::Owned(inner),
index,
}
}
}
impl<'a, T, Idx> VolumeSlice<'a, T, Idx>
where
[T]: ToOwned,
{
pub fn new(inner: &'a [T], index: Idx) -> VolumeSlice<'a, T, Idx> {
VolumeSlice {
inner: Cow::Borrowed(inner),
index,
}
}
pub fn is_mutated(&self) -> bool {
match self.inner {
Cow::Borrowed(_) => false,
Cow::Owned(_) => true,
}
}
pub fn at_index(&self) -> &Idx {
&self.index
}
}
impl<'a, Idx: Copy> VolumeSlice<'a, u8, Idx> {
pub unsafe fn dynamic_cast<T: Copy>(&self) -> (T, Idx) {
assert!(self.inner.len() >= mem::size_of::<T>());
let index = self.index;
let cast = mem::transmute_copy(self.inner.as_ptr().as_ref().unwrap());
(cast, index)
}
pub fn from_cast<T: Copy>(
cast: &'a T,
index: Idx,
) -> VolumeSlice<'a, u8, Idx> {
let len = mem::size_of::<T>();
let ptr = cast as *const T as *const u8;
let slice = unsafe { slice::from_raw_parts(ptr, len) };
VolumeSlice::new(slice, index)
}
}
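// Editor's sketch (illustrative, not part of the original commit):
// `from_cast` views any `Copy` struct as its raw in-memory bytes and
// `dynamic_cast` reads one back out of a byte slice; this round trip is how
// the on-disk sys structs are loaded and committed. `Pair` is a made-up type
// for the example.
#[cfg(test)]
#[allow(dead_code)]
fn cast_roundtrip_example() {
    #[repr(C)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Pair {
        a: u32,
        b: u32,
    }
    let value = Pair { a: 1, b: 2 };
    let slice: VolumeSlice<u8, usize> = VolumeSlice::from_cast(&value, 0);
    assert_eq!(slice.len(), 8);
    let (back, index) = unsafe { slice.dynamic_cast::<Pair>() };
    assert_eq!(back, value);
    assert_eq!(index, 0);
}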
impl<'a, T, Idx> VolumeSlice<'a, T, Idx>
where
[T]: ToOwned<Owned = Vec<T>>,
{
pub fn commit(self) -> Option<VolumeCommit<T, Idx>> {
if self.is_mutated() {
Some(VolumeCommit::new(self.inner.into_owned(), self.index))
} else {
None
}
}
}
impl<'a, T, Idx> AsRef<[T]> for VolumeSlice<'a, T, Idx>
where
[T]: ToOwned,
{
fn as_ref(&self) -> &[T] {
self.inner.as_ref()
}
}
impl<'a, T, Idx> AsMut<[T]> for VolumeSlice<'a, T, Idx>
where
[T]: ToOwned,
<[T] as ToOwned>::Owned: AsMut<[T]>,
{
fn as_mut(&mut self) -> &mut [T] {
self.inner.to_mut().as_mut()
}
}
impl<'a, T, Idx> Deref for VolumeSlice<'a, T, Idx>
where
[T]: ToOwned,
{
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<'a, T, Idx> DerefMut for VolumeSlice<'a, T, Idx>
where
[T]: ToOwned,
<[T] as ToOwned>::Owned: AsMut<[T]>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut()
}
}
pub struct VolumeCommit<T, Idx> {
inner: Vec<T>,
index: Idx,
}
impl<T, Idx: Default> VolumeCommit<T, Idx> {
pub fn with_vec(inner: Vec<T>) -> VolumeCommit<T, Idx> {
VolumeCommit {
inner,
index: Idx::default(),
}
}
}
impl<T, Idx> VolumeCommit<T, Idx> {
pub fn new(inner: Vec<T>, index: Idx) -> VolumeCommit<T, Idx> {
VolumeCommit { inner, index }
}
pub fn into_inner(self) -> Vec<T> {
self.inner
}
pub fn at_index(&self) -> &Idx {
&self.index
}
}
impl<T, Idx> AsRef<[T]> for VolumeCommit<T, Idx> {
fn as_ref(&self) -> &[T] {
self.inner.as_ref()
}
}
impl<T, Idx> AsMut<[T]> for VolumeCommit<T, Idx> {
fn as_mut(&mut self) -> &mut [T] {
self.inner.as_mut()
}
}
impl<T, Idx> Deref for VolumeCommit<T, Idx> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<T, Idx> DerefMut for VolumeCommit<T, Idx> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut()
}
}
macro_rules! impl_slice {
(@inner $volume:ty $( , $lt:lifetime )* ) => {
impl<$( $lt, )* S: Size + PartialOrd + Copy, T> Volume<T, Address<S>>
for $volume
where
T: Clone,
[T]: ToOwned,
{
type Error = Infallible;
fn size(&self) -> Length<Address<S>> {
Length::Bounded(
Address::from(<Self as AsRef<[T]>>::as_ref(self).len())
)
}
fn commit(
&mut self,
slice: Option<VolumeCommit<T, Address<S>>>,
) -> Result<(), Infallible> {
slice.map(|slice| {
let index = slice.at_index().into_index() as usize;
let end = index + slice.as_ref().len();
// XXX: it would be much better to drop the contents of dst
// and move the contents of slice instead of cloning
let dst =
&mut <Self as AsMut<[T]>>::as_mut(self)[index..end];
dst.clone_from_slice(slice.as_ref());
});
Ok(())
}
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Address<S>>,
) -> VolumeSlice<'a, T, Address<S>> {
let index = range.start;
let range = range.start.into_index() as usize
..range.end.into_index() as usize;
VolumeSlice::new(
<Self as AsRef<[T]>>::as_ref(self).get_unchecked(range),
index,
)
}
}
};
($volume:ty) => {
impl_slice!(@inner $volume);
};
($volume:ty $( , $lt:lifetime )* ) => {
impl_slice!(@inner $volume $( , $lt )* );
};
}
impl_slice!(&'b mut [T], 'b);
impl_slice!(Vec<T>);
impl_slice!(Box<[T]>);
#[cfg(any(test, not(feature = "no_std")))]
mod file {
use std::ops::Range;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::fs::File;
use std::cell::RefCell;
use sector::{Address, Size};
use super::{Volume, VolumeCommit, VolumeSlice};
use super::length::Length;
impl<S: Size + PartialOrd + Copy> Volume<u8, Address<S>> for RefCell<File> {
type Error = io::Error;
fn size(&self) -> Length<Address<S>> {
Length::Bounded(
self.borrow()
.metadata()
.map(|data| Address::from(data.len()))
.unwrap_or(Address::new(0, 0)),
)
}
fn commit(
&mut self,
slice: Option<VolumeCommit<u8, Address<S>>>,
) -> Result<(), Self::Error> {
slice
.map(|slice| {
let index = *slice.at_index();
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.write(slice.as_ref()))
.map(|_| ())
})
.unwrap_or(Ok(()))
}
unsafe fn slice_unchecked<'a>(
&'a self,
range: Range<Address<S>>,
) -> VolumeSlice<'a, u8, Address<S>> {
let index = range.start;
let len = range.end - range.start;
let mut vec = Vec::with_capacity(len.into_index() as usize);
vec.set_len(len.into_index() as usize);
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.unwrap_or_else(|err| {
panic!("could't read from File Volume: {:?}", err)
});
VolumeSlice::new_owned(vec, index)
}
fn slice<'a>(
&'a self,
range: Range<Address<S>>,
) -> Option<VolumeSlice<'a, u8, Address<S>>> {
let index = range.start;
let mut vec = Vec::with_capacity((range.end - range.start)
.into_index()
as usize);
unsafe {
vec.set_len((range.end - range.start).into_index() as usize);
}
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.map(move |_| VolumeSlice::new_owned(vec, index))
.ok()
}
}
}
#[cfg(test)]
mod tests {
use sector::{Address, Size512};
use super::*;
#[test]
fn volume() {
let mut volume = vec![0; 1024];
let commit = {
let mut slice = volume
.slice(
Address::<Size512>::from(256_u64)
..Address::<Size512>::from(512_u64),
)
.unwrap();
slice.iter_mut().for_each(|x| *x = 1);
slice.commit()
};
assert!(volume.commit(commit).is_ok());
for (i, &x) in volume.iter().enumerate() {
if i < 256 || i >= 512 {
assert_eq!(x, 0);
} else {
assert_eq!(x, 1);
}
}
}
}