Change sector::Address implementation to use u32

This way a `u64` is guaranteed to be able to index any specific byte in a
`Volume`: with `sector` and `offset` both stored as `u32`, the combined byte
index `(sector << LOG_SIZE) + offset` can never overflow a `u64`.
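
For context, here is a minimal, self-contained sketch (not part of the commit; `Size512` and the `main` function below are illustrative) of why the `u32` fields make the unchecked `u64` conversion safe:

```rust
use core::marker::PhantomData;

// Sector-size marker, mirroring the trait in the diff.
pub trait Size {
    const LOG_SIZE: u32;                     // log2 of the sector size
    const SIZE: usize = 1 << Self::LOG_SIZE; // sector size in bytes
}

// One concrete size, for illustration only.
pub struct Size512;
impl Size for Size512 {
    const LOG_SIZE: u32 = 9;
}

// The new layout: both fields are u32 instead of usize.
pub struct Address<S: Size> {
    sector: u32,
    offset: u32, // always < S::SIZE
    _phantom: PhantomData<S>,
}

impl<S: Size> Address<S> {
    // Worst case is ((u32::MAX as u64) << LOG_SIZE) + (SIZE - 1), which is
    // far below u64::MAX for any realistic sector size, so no checked
    // arithmetic (or Option return type) is needed here.
    pub fn into_index(&self) -> u64 {
        ((self.sector as u64) << S::LOG_SIZE) + self.offset as u64
    }
}

fn main() {
    let addr = Address::<Size512> {
        sector: u32::MAX,
        offset: 511,
        _phantom: PhantomData,
    };
    // The largest possible Size512 address is only about 2^41 bytes.
    assert_eq!(addr.into_index(), ((u32::MAX as u64) << 9) + 511);
}
```

Previously `into_index` had to return `Option<usize>` and rely on `checked_shl`/`checked_add`; with `u32` fields the commit can collapse the old `index64`/`into_index` pair into the single infallible `into_index` shown in the diff.
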
Szymon Walter 2018-03-21 18:17:36 +01:00
parent e6218401ce
commit d5423d7199
4 changed files with 59 additions and 69 deletions


@@ -6,7 +6,7 @@ use std::io;
pub enum Error {
BadMagic(u16),
OutOfBounds(usize),
AddressOutOfBounds(usize, usize, usize),
AddressOutOfBounds(u32, u32, usize),
BadBlockGroupCount(u32, u32),
#[cfg(any(test, not(feature = "no_std")))]
Io(io::Error),


@@ -37,7 +37,7 @@ where
pub fn new(volume: V) -> Result<Ext2<S, V>, Error> {
let superblock = unsafe { Struct::from(Superblock::find(&volume)?) };
let block_groups_offset = Address::with_block_size(
superblock.inner.first_data_block as usize + 1,
superblock.inner.first_data_block + 1,
0,
superblock.inner.log_block_size + 10,
);
@@ -237,12 +237,11 @@ where
let index = (self.index - 1) % self.inodes_per_group;
self.index += 1;
let inodes_block =
self.block_groups[block_group].inode_table_block as usize;
let inodes_block = self.block_groups[block_group].inode_table_block;
let offset = Address::with_block_size(
inodes_block,
(index * self.inode_size) as isize,
(index * self.inode_size) as i32,
self.log_block_size,
);
let raw = unsafe {
@@ -267,7 +266,7 @@ impl<'a, S: 'a + Size + Copy, V: 'a + Volume<u8, Address<S>>> Inode<'a, S, V> {
Inode { fs, inner }
}
pub fn block(&self, index: usize) -> Option<NonZero<usize>> {
pub fn block(&self, index: usize) -> Option<NonZero<u32>> {
// number of blocks in direct table: 12
// number of blocks in indirect table: block_size/4
// why?
@@ -284,19 +283,19 @@ impl<'a, S: 'a + Size + Copy, V: 'a + Volume<u8, Address<S>>> Inode<'a, S, V> {
let bs4 = self.fs.block_size() / 4;
if index < 12 {
NonZero::new(self.inner.direct_pointer[index] as usize)
NonZero::new(self.inner.direct_pointer[index])
} else if index < bs4 + 12 {
let block = self.inner.indirect_pointer as usize;
let block = self.inner.indirect_pointer;
let offset = index - 12;
let addr = Address::with_block_size(
block,
offset as isize,
offset as i32,
self.fs.log_block_size(),
);
let size = Address::from(4_usize);
let size = Address::from(4_u64);
let slice = self.fs.volume.slice(addr..addr + size);
slice.and_then(|slice| unsafe {
NonZero::new(u32::from_le(slice.dynamic_cast::<u32>().0) as usize)
NonZero::new(u32::from_le(slice.dynamic_cast::<u32>().0))
})
} else if index < bs4 * bs4 + bs4 + 12 {
unimplemented!("doubly indirect pointer table");
@@ -405,15 +404,15 @@ mod tests {
fn file_len() {
let file = RefCell::new(File::open("ext2.img").unwrap());
assert_eq!(
Address::<Size512>::from(2048_usize)
- Address::<Size512>::from(1024_usize),
Address::<Size512>::from(2048_u64)
- Address::<Size512>::from(1024_u64),
Address::<Size512>::new(2, 0)
);
assert_eq!(
unsafe {
file.slice_unchecked(
Address::<Size512>::from(1024_usize)
..Address::<Size512>::from(2048_usize),
Address::<Size512>::from(1024_u64)
..Address::<Size512>::from(2048_u64),
).len()
},
1024


@@ -8,7 +8,7 @@ pub trait Size: PartialOrd {
// log_sector_size = log_2(sector_size)
const LOG_SIZE: u32;
const SIZE: usize = 1 << Self::LOG_SIZE;
const OFFSET_MASK: usize = Self::SIZE - 1;
const OFFSET_MASK: u32 = (Self::SIZE - 1) as u32;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
@@ -38,14 +38,14 @@ impl Size for Size4096 {
/// Address in a physical sector
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Address<S: Size> {
sector: usize,
offset: usize,
sector: u32,
offset: u32,
_phantom: PhantomData<S>,
}
impl<S: Size> Address<S> {
pub unsafe fn new_unchecked(sector: usize, offset: usize) -> Address<S> {
assert!(offset < S::SIZE, "offset out of sector bounds");
pub unsafe fn new_unchecked(sector: u32, offset: u32) -> Address<S> {
assert!((offset as usize) < S::SIZE, "offset out of sector bounds");
let _phantom = PhantomData;
Address {
sector,
@@ -54,37 +54,31 @@ impl<S: Size> Address<S> {
}
}
pub fn new(sector: usize, offset: isize) -> Address<S> {
let sector = (sector as isize + (offset >> S::LOG_SIZE)) as usize;
let offset = offset.abs() as usize & S::OFFSET_MASK;
pub fn new(sector: u32, offset: i32) -> Address<S> {
let sector = (sector as i32 + (offset >> S::LOG_SIZE)) as u32;
let offset = offset.abs() as u32 & S::OFFSET_MASK;
unsafe { Address::new_unchecked(sector, offset) }
}
pub fn with_block_size(
block: usize,
offset: isize,
block: u32,
offset: i32,
log_block_size: u32,
) -> Address<S> {
let block = (block as isize + (offset >> log_block_size)) as usize;
let offset = offset.abs() as usize & ((1 << log_block_size) - 1);
let block = (block as i32 + (offset >> log_block_size)) as u32;
let offset = offset.abs() as u32 & ((1 << log_block_size) - 1);
let log_diff = log_block_size as isize - S::LOG_SIZE as isize;
let log_diff = log_block_size as i32 - S::LOG_SIZE as i32;
let top_offset = offset >> S::LOG_SIZE;
let offset = offset & ((1 << S::LOG_SIZE) - 1);
let sector = block << log_diff | top_offset;
unsafe { Address::new_unchecked(sector, offset) }
}
pub fn index64(&self) -> u64 {
pub fn into_index(&self) -> u64 {
((self.sector as u64) << S::LOG_SIZE) + self.offset as u64
}
pub fn into_index(&self) -> Option<usize> {
self.sector
.checked_shl(S::LOG_SIZE)
.and_then(|sector| sector.checked_add(self.offset))
}
pub const fn sector_size(&self) -> usize {
S::SIZE
}
@@ -93,11 +87,11 @@ impl<S: Size> Address<S> {
S::LOG_SIZE
}
pub fn sector(&self) -> usize {
pub fn sector(&self) -> u32 {
self.sector
}
pub fn offset(&self) -> usize {
pub fn offset(&self) -> u32 {
self.offset
}
}
@@ -105,7 +99,7 @@ impl<S: Size> Address<S> {
impl<S: Size + Clone + PartialOrd> Step for Address<S> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if end.sector >= start.sector {
Some(end.sector - start.sector)
Some(end.sector as usize - start.sector as usize)
} else {
None
}
@@ -129,7 +123,7 @@ impl<S: Size + Clone + PartialOrd> Step for Address<S> {
fn add_usize(&self, n: usize) -> Option<Self> {
self.sector
.checked_add(n)
.checked_add(n as u32)
.map(|sector| Address::new(sector, 0))
}
}
@@ -160,15 +154,15 @@ impl<S: Size> From<u64> for Address<S> {
fn from(idx: u64) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK as u64;
Address::new(sector as usize, offset as isize)
Address::new(sector as u32, offset as i32)
}
}
impl<S: Size> From<usize> for Address<S> {
fn from(idx: usize) -> Address<S> {
let sector = idx >> S::LOG_SIZE;
let offset = idx & S::OFFSET_MASK;
Address::new(sector, offset as isize)
let offset = idx & S::OFFSET_MASK as usize;
Address::new(sector as u32, offset as i32)
}
}
@@ -177,7 +171,7 @@ impl<S: Size> Add for Address<S> {
fn add(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector + rhs.sector,
(self.offset + rhs.offset) as isize,
(self.offset + rhs.offset) as i32,
)
}
}
@@ -187,7 +181,7 @@ impl<S: Size> Sub for Address<S> {
fn sub(self, rhs: Address<S>) -> Address<S> {
Address::new(
self.sector - rhs.sector,
self.offset as isize - rhs.offset as isize,
self.offset as i32 - rhs.offset as i32,
)
}
}
@@ -198,22 +192,19 @@ mod tests {
#[test]
fn conv() {
assert_eq!(Address::<Size512>::new(0, 1024).into_index(), Some(1024));
assert_eq!(
Address::<Size512>::from(1024_usize).into_index(),
Some(1024)
);
assert_eq!(Address::<Size512>::new(0, 1024).into_index(), 1024);
assert_eq!(Address::<Size512>::from(1024_u64).into_index(), 1024);
assert_eq!(
Address::<Size512>::with_block_size(1, 256, 10).into_index(),
Some(1024 + 256)
1024 + 256
);
assert_eq!(
Address::<Size512>::with_block_size(2, 0, 10).into_index(),
Some(2048)
2048
);
assert_eq!(
Address::<Size512>::with_block_size(0, 1792, 10).into_index(),
Some(1792)
1792
);
}
@@ -232,11 +223,11 @@ mod tests {
let a = Address::<Size2048>::new(0, 1024);
let b = Address::<Size2048>::new(0, 1024);
assert_eq!(a + b, Address::<Size2048>::new(1, 0));
assert_eq!((a + b).into_index(), Some(2048));
assert_eq!((a + b).into_index(), 2048);
let a = Address::<Size512>::new(0, 2048);
let b = Address::<Size512>::new(0, 256);
assert_eq!(a - b, Address::<Size512>::new(3, 256));
assert_eq!((a - b).into_index(), Some(1792));
assert_eq!((a - b).into_index(), 1792);
}
}


@@ -246,7 +246,7 @@ macro_rules! impl_slice {
slice: Option<VolumeCommit<T, Address<S>>>,
) -> Result<(), Infallible> {
slice.map(|slice| {
let index = slice.at_index().index64() as usize;
let index = slice.at_index().into_index() as usize;
let end = index + slice.as_ref().len();
// XXX: it would be much better to drop the contents of dst
// and move the contents of slice instead of cloning
@@ -262,8 +262,8 @@ macro_rules! impl_slice {
range: Range<Address<S>>,
) -> VolumeSlice<'a, T, Address<S>> {
let index = range.start;
let range = range.start.index64() as usize
..range.end.index64() as usize;
let range = range.start.into_index() as usize
..range.end.into_index() as usize;
VolumeSlice::new(
<Self as AsRef<[T]>>::as_ref(self).get_unchecked(range),
index,
@@ -303,7 +303,7 @@ mod file {
self.borrow()
.metadata()
.map(|data| Address::from(data.len()))
.unwrap_or(Address::from(0_usize)),
.unwrap_or(Address::new(0, 0)),
)
}
@@ -316,7 +316,7 @@ mod file {
let index = *slice.at_index();
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.index64()))
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.write(slice.as_ref()))
.map(|_| ())
})
@@ -329,11 +329,11 @@ mod file {
) -> VolumeSlice<'a, u8, Address<S>> {
let index = range.start;
let len = range.end - range.start;
let mut vec = Vec::with_capacity(len.index64() as usize);
vec.set_len(len.index64() as usize);
let mut vec = Vec::with_capacity(len.into_index() as usize);
vec.set_len(len.into_index() as usize);
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.index64()))
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.unwrap_or_else(|err| {
panic!("could't read from File Volume: {:?}", err)
@@ -346,15 +346,15 @@ mod file {
range: Range<Address<S>>,
) -> Option<VolumeSlice<'a, u8, Address<S>>> {
let index = range.start;
let mut vec = Vec::with_capacity(
(range.end - range.start).index64() as usize,
);
let mut vec = Vec::with_capacity((range.end - range.start)
.into_index()
as usize);
unsafe {
vec.set_len((range.end - range.start).index64() as usize);
vec.set_len((range.end - range.start).into_index() as usize);
}
let mut refmut = self.borrow_mut();
refmut
.seek(SeekFrom::Start(index.index64()))
.seek(SeekFrom::Start(index.into_index()))
.and_then(|_| refmut.read_exact(&mut vec[..]))
.map(move |_| VolumeSlice::new_owned(vec, index))
.ok()
@@ -373,8 +373,8 @@ mod tests {
let commit = {
let mut slice = volume
.slice(
Address::<Size512>::from(256_usize)
..Address::<Size512>::from(512_usize),
Address::<Size512>::from(256_u64)
..Address::<Size512>::from(512_u64),
)
.unwrap();
slice.iter_mut().for_each(|x| *x = 1);