Skip to content

Commit

Permalink
Merge c6ee550 into 490e55d
Browse files Browse the repository at this point in the history
  • Loading branch information
wcampbell0x2a authored Sep 12, 2024
2 parents 490e55d + c6ee550 commit 7819e84
Show file tree
Hide file tree
Showing 9 changed files with 106 additions and 63 deletions.
4 changes: 2 additions & 2 deletions backhand-cli/src/bin/unsquashfs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -468,8 +468,8 @@ fn extract_all<'a, S: ParallelIterator<Item = &'a Node<SquashfsFileReader>>>(

// write to file
let fd = File::create(&filepath).unwrap();
let mut writer = BufWriter::with_capacity(file.basic.file_size as usize, &fd);
let file = filesystem.file(&file.basic);
let mut writer = BufWriter::with_capacity(file.file_len(), &fd);
let file = filesystem.file(file);
let mut reader = file.reader();

match io::copy(&mut reader, &mut writer) {
Expand Down
13 changes: 5 additions & 8 deletions backhand-test/tests/non_standard.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,9 @@ fn full_test(
{
let file = BufReader::new(File::open(og_path).unwrap());
info!("calling from_reader");
let og_filesystem = FilesystemReader::from_reader_with_offset_and_kind(
file,
offset,
Kind::from_kind(&kind),
)
.unwrap();
let og_filesystem =
FilesystemReader::from_reader_with_offset_and_kind(file, offset, Kind::from_kind(kind))
.unwrap();
let mut new_filesystem = FilesystemWriter::from_fs_reader(&og_filesystem).unwrap();
if let Some(pad) = pad {
new_filesystem.set_kib_padding(pad);
Expand All @@ -57,7 +54,7 @@ fn full_test(
let _new_filesystem = FilesystemReader::from_reader_with_offset_and_kind(
created_file,
offset,
Kind::from_kind(&kind),
Kind::from_kind(kind),
)
.unwrap();
}
Expand Down Expand Up @@ -140,7 +137,7 @@ fn test_custom_compressor() {
if let Compressor::Gzip = compressor {
out.resize(out.capacity(), 0);
let mut decompressor = libdeflater::Decompressor::new();
let amt = decompressor.zlib_decompress(&bytes, out).unwrap();
let amt = decompressor.zlib_decompress(bytes, out).unwrap();
out.truncate(amt);
} else {
unimplemented!();
Expand Down
1 change: 0 additions & 1 deletion backhand-test/tests/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -507,7 +507,6 @@ fn test_socket_fifo() {
#[test]
#[cfg(any(feature = "zstd"))]
fn no_qemu_test_crates_zstd() {
tracing::trace!("nice");
const FILE_NAME: &str = "crates-io.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
Expand Down
5 changes: 2 additions & 3 deletions backhand/src/data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ impl<'a> DataWriter<'a> {
mut writer: W,
) -> Result<(usize, Added), BackhandError> {
// just clone it, because block sizes were never modified, just copy it
let mut block_sizes = reader.file.basic.block_sizes.clone();
let mut block_sizes = reader.file.file.block_sizes().to_vec();
let mut read_buf = vec![];
let mut decompress_buf = vec![];

Expand Down Expand Up @@ -166,7 +166,6 @@ impl<'a> DataWriter<'a> {
return Ok((decompress_buf.len(), Added::Fragment { frag_index, block_offset }));
}

//if is a block, just copy it
writer.write_all(&read_buf)?;
while let Some(block) = reader.next_block(&mut read_buf) {
let block = block?;
Expand All @@ -190,7 +189,7 @@ impl<'a> DataWriter<'a> {
writer.write_all(&read_buf)?;
}
}
let file_size = reader.file.basic.file_size as usize;
let file_size = reader.file.file.file_len();
Ok((file_size, Added::Data { blocks_start, block_sizes }))
}

Expand Down
46 changes: 42 additions & 4 deletions backhand/src/filesystem/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ use std::sync::{Arc, Mutex};

use super::normalize_squashfs_path;
use crate::data::Added;
use crate::inode::{BasicFile, InodeHeader};
use crate::{BackhandError, FilesystemReaderFile, Id};
use crate::inode::{BasicFile, ExtendedFile, InodeHeader};
use crate::{BackhandError, DataSize, FilesystemReaderFile, Id};

/// File information for Node
#[derive(Debug, PartialEq, Eq, Default, Clone, Copy)]
Expand Down Expand Up @@ -91,8 +91,46 @@ pub enum InnerNode<T> {

/// Unread file for filesystem
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct SquashfsFileReader {
pub basic: BasicFile,
pub enum SquashfsFileReader {
Basic(BasicFile),
Extended(ExtendedFile),
}

impl SquashfsFileReader {
pub fn file_len(&self) -> usize {
match self {
SquashfsFileReader::Basic(basic) => basic.file_size as usize,
SquashfsFileReader::Extended(extended) => extended.file_size as usize,
}
}

pub fn frag_index(&self) -> usize {
match self {
SquashfsFileReader::Basic(basic) => basic.frag_index as usize,
SquashfsFileReader::Extended(extended) => extended.frag_index as usize,
}
}

pub fn block_sizes(&self) -> &[DataSize] {
match self {
SquashfsFileReader::Basic(basic) => &basic.block_sizes,
SquashfsFileReader::Extended(extended) => &extended.block_sizes,
}
}

pub fn blocks_start(&self) -> u64 {
match self {
SquashfsFileReader::Basic(basic) => basic.blocks_start as u64,
SquashfsFileReader::Extended(extended) => extended.blocks_start,
}
}

pub fn block_offset(&self) -> u32 {
match self {
SquashfsFileReader::Basic(basic) => basic.block_offset,
SquashfsFileReader::Extended(extended) => extended.block_offset,
}
}
}

/// Read file from other SquashfsFile or an user file
Expand Down
69 changes: 44 additions & 25 deletions backhand/src/filesystem/reader.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
use std::io::{Read, SeekFrom};
use std::io::{Read, SeekFrom, Write};
use std::sync::{Mutex, RwLock};

use tracing::trace;

use super::node::Nodes;
use crate::compressor::{CompressionOptions, Compressor};
use crate::data::DataSize;
use crate::error::BackhandError;
use crate::fragment::Fragment;
use crate::id::Id;
use crate::inode::BasicFile;
use crate::kinds::Kind;
use crate::reader::BufReadSeek;
use crate::squashfs::Cache;
Expand Down Expand Up @@ -130,8 +131,9 @@ impl<'b> FilesystemReader<'b> {
}

/// Return a file handler for this file
pub fn file<'a>(&'a self, basic_file: &'a BasicFile) -> FilesystemReaderFile<'a, 'b> {
FilesystemReaderFile::new(self, basic_file)
/// Return a [`FilesystemReaderFile`] handle for reading `file`'s contents
/// from this filesystem.
pub fn file<'a>(&'a self, file: &'a SquashfsFileReader) -> FilesystemReaderFile<'a, 'b> {
// debug-level visibility into which inode a handle is built for
trace!("returning handle for {file:02x?}");
FilesystemReaderFile::new(self, file)
}

/// Iterator of all files, including the root
Expand All @@ -154,7 +156,7 @@ impl<'b> FilesystemReader<'b> {
/// match &node.inner {
/// InnerNode::File(file) => {
/// let mut reader = filesystem
/// .file(&file.basic)
/// .file(&file)
/// .reader();
/// // Then, do something with the reader
/// },
Expand All @@ -171,12 +173,12 @@ impl<'b> FilesystemReader<'b> {
#[derive(Copy, Clone)]
pub struct FilesystemReaderFile<'a, 'b> {
pub(crate) system: &'a FilesystemReader<'b>,
pub(crate) basic: &'a BasicFile,
pub(crate) file: &'a SquashfsFileReader,
}

impl<'a, 'b> FilesystemReaderFile<'a, 'b> {
pub fn new(system: &'a FilesystemReader<'b>, basic: &'a BasicFile) -> Self {
Self { system, basic }
pub fn new(system: &'a FilesystemReader<'b>, file: &'a SquashfsFileReader) -> Self {
Self { system, file }
}

/// Create [`SquashfsReadFile`] that impls [`std::io::Read`] from [`FilesystemReaderFile`].
Expand All @@ -190,18 +192,15 @@ impl<'a, 'b> FilesystemReaderFile<'a, 'b> {
}

pub fn fragment(&self) -> Option<&'a Fragment> {
if self.basic.frag_index == 0xffffffff {
if self.file.frag_index() == 0xffffffff {
None
} else {
self.system
.fragments
.as_ref()
.map(|fragments| &fragments[self.basic.frag_index as usize])
self.system.fragments.as_ref().map(|fragments| &fragments[self.file.frag_index()])
}
}

pub(crate) fn raw_data_reader(&self) -> SquashfsRawData<'a, 'b> {
SquashfsRawData::new(Self { system: self.system, basic: self.basic })
SquashfsRawData::new(Self { system: self.system, file: self.file })
}
}

Expand All @@ -210,7 +209,7 @@ impl<'a, 'b> IntoIterator for FilesystemReaderFile<'a, 'b> {
type Item = <BlockIterator<'a> as Iterator>::Item;

fn into_iter(self) -> Self::IntoIter {
BlockIterator { blocks: &self.basic.block_sizes, fragment: self.fragment() }
BlockIterator { blocks: self.file.block_sizes(), fragment: self.fragment() }
}
}

Expand Down Expand Up @@ -247,14 +246,18 @@ pub(crate) struct RawDataBlock {
pub(crate) struct SquashfsRawData<'a, 'b> {
pub(crate) file: FilesystemReaderFile<'a, 'b>,
current_block: BlockIterator<'a>,
block_len: usize,
blocks_parsed: usize,
pub(crate) pos: u64,
}

impl<'a, 'b> SquashfsRawData<'a, 'b> {
pub fn new(file: FilesystemReaderFile<'a, 'b>) -> Self {
let pos = file.basic.blocks_start.into();
let pos = file.file.blocks_start();
let current_block = file.into_iter();
Self { file, current_block, pos }
let block_len = file.into_iter().count();
let blocks_parsed = 0;
Self { file, current_block, block_len, blocks_parsed, pos }
}

fn read_raw_data(
Expand All @@ -264,6 +267,7 @@ impl<'a, 'b> SquashfsRawData<'a, 'b> {
) -> Result<RawDataBlock, BackhandError> {
match block {
BlockFragment::Block(block) => {
let mut sparse = false;
let block_size = block.size() as usize;
data.resize(block_size, 0);
//NOTE: storing/restoring the file-pos is not required at the
Expand All @@ -272,9 +276,21 @@ impl<'a, 'b> SquashfsRawData<'a, 'b> {
let mut reader = self.file.system.reader.lock().unwrap();
reader.seek(SeekFrom::Start(self.pos))?;
reader.read_exact(data)?;
// Sparse file
trace!("is is sparse? : {:02x?}", self.file.system.block_size);
if self.blocks_parsed != self.block_len {
trace!("it is sparse!!!!");
if data.len() < self.file.system.block_size as usize {
let sparse_len = self.file.system.block_size as usize - data.len();
trace!("writing {sparse_len:02x?}");
data.write_all(&mut vec![0x00; sparse_len])?;
sparse = true;
}
}

self.pos = reader.stream_position()?;
}
Ok(RawDataBlock { fragment: false, uncompressed: block.uncompressed() })
Ok(RawDataBlock { fragment: false, uncompressed: sparse | block.uncompressed() })
}
BlockFragment::Fragment(fragment) => {
// if in the cache, just read from the cache bytes and return the fragment bytes
Expand Down Expand Up @@ -326,16 +342,18 @@ impl<'a, 'b> SquashfsRawData<'a, 'b> {

#[inline]
pub fn next_block(&mut self, buf: &mut Vec<u8>) -> Option<Result<RawDataBlock, BackhandError>> {
self.current_block.next().map(|next| self.read_raw_data(buf, &next))
let res = self.current_block.next().map(|next| self.read_raw_data(buf, &next));
self.blocks_parsed += 1;
res
}

#[inline]
fn fragment_range(&self) -> std::ops::Range<usize> {
let block_len = self.file.system.block_size as usize;
let block_num = self.file.basic.block_sizes.len();
let file_size = self.file.basic.file_size as usize;
let block_num = self.file.file.block_sizes().len();
let file_size = self.file.file.file_len();
let frag_len = file_size - (block_num * block_len);
let frag_start = self.file.basic.block_offset as usize;
let frag_start = self.file.file.block_offset() as usize;
let frag_end = frag_start + frag_len;
frag_start..frag_end
}
Expand Down Expand Up @@ -381,7 +399,7 @@ impl<'a, 'b> SquashfsRawData<'a, 'b> {
#[inline]
pub fn into_reader(self) -> SquashfsReadFile<'a, 'b> {
let block_size = self.file.system.block_size as usize;
let bytes_available = self.file.basic.file_size as usize;
let bytes_available = self.file.file.file_len();
SquashfsReadFile::new(block_size, self, 0, bytes_available)
}
}
Expand Down Expand Up @@ -420,6 +438,7 @@ impl<'a, 'b> SquashfsReadFile<'a, 'b> {
fn read_available(&mut self, buf: &mut [u8]) -> usize {
let available = self.available();
let read_len = buf.len().min(available.len()).min(self.bytes_available);
trace!("{:02x?}", read_len);
buf[..read_len].copy_from_slice(&available[..read_len]);
self.bytes_available -= read_len;
self.last_read += read_len;
Expand Down Expand Up @@ -448,12 +467,12 @@ impl<'a, 'b> Read for SquashfsReadFile<'a, 'b> {
self.buf_decompress.clear();
return Ok(0);
}
//no data available, read the next block
// no data available, read the next block
if self.available().is_empty() {
self.read_next_block()?;
}

//return data from the read block/fragment
// return data from the read block/fragment
Ok(self.read_available(buf))
}
}
2 changes: 1 addition & 1 deletion backhand/src/filesystem/writer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ impl<'a, 'b, 'c> FilesystemWriter<'a, 'b, 'c> {
.map(|node| {
let inner = match &node.inner {
InnerNode::File(file) => {
let reader = reader.file(&file.basic);
let reader = reader.file(file);
InnerNode::File(SquashfsFileWriter::SquashfsFile(reader))
}
InnerNode::Symlink(x) => InnerNode::Symlink(x.clone()),
Expand Down
16 changes: 2 additions & 14 deletions backhand/src/inode.rs
Original file line number Diff line number Diff line change
Expand Up @@ -181,26 +181,14 @@ pub struct BasicFile {
pub block_sizes: Vec<DataSize>,
}

impl From<&ExtendedFile> for BasicFile {
fn from(ex_file: &ExtendedFile) -> Self {
Self {
blocks_start: ex_file.blocks_start as u32,
frag_index: ex_file.frag_index,
block_offset: ex_file.block_offset,
file_size: ex_file.file_size as u32,
block_sizes: ex_file.block_sizes.clone(),
}
}
}

#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(
endian = "endian",
ctx = "endian: deku::ctx::Endian, bytes_used: u64, block_size: u32, block_log: u16"
ctx = "endian: deku::ctx::Endian, _bytes_used: u64, block_size: u32, block_log: u16"
)]
pub struct ExtendedFile {
pub blocks_start: u64,
#[deku(assert = "((*file_size as u128) < TiB1) && (*file_size < bytes_used)")]
#[deku(assert = "((*file_size as u128) < TiB1)")]
pub file_size: u64,
pub sparse: u64,
pub link_count: u32,
Expand Down
13 changes: 8 additions & 5 deletions backhand/src/squashfs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -538,17 +538,20 @@ impl<'b> Squashfs<'b> {
}
// BasicFile
InodeId::BasicFile => {
trace!("before_file: {:#02x?}", entry);
let basic = match &found_inode.inner {
InodeInner::BasicFile(file) => file.clone(),
InodeInner::ExtendedFile(file) => file.into(),
let inner = match &found_inode.inner {
InodeInner::BasicFile(file) => {
SquashfsFileReader::Basic(file.clone())
}
InodeInner::ExtendedFile(file) => {
SquashfsFileReader::Extended(file.clone())
}
_ => {
return Err(BackhandError::UnexpectedInode(
found_inode.inner.clone(),
))
}
};
InnerNode::File(SquashfsFileReader { basic })
InnerNode::File(inner)
}
// Basic Symlink
InodeId::BasicSymlink => {
Expand Down

0 comments on commit 7819e84

Please sign in to comment.