let count = index.index_count();
for pos in 0..count {
- let (start, end, digest) = index.chunk_info(pos)?;
- let size = (end - start) as u32;
- env.register_chunk(digest, size)?;
+ let info = index.chunk_info(pos)?;
+ let size = info.size() as u32;
+ env.register_chunk(info.digest, size)?;
}
let reader = DigestListEncoder::new(Box::new(index));
-use anyhow::{bail, format_err, Error};
-use std::fmt;
+use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr};
-use std::os::unix::ffi::OsStrExt;
+use std::fmt;
use std::io::{Read, Write, Seek, SeekFrom};
-use std::convert::TryFrom;
+use std::os::unix::ffi::OsStrExt;
+use anyhow::{bail, format_err, Error};
use chrono::offset::{TimeZone, Local};
-use proxmox::tools::io::ReadExt;
+use pathpatterns::{MatchList, MatchType};
use proxmox::sys::error::io_err_other;
+use proxmox::tools::io::ReadExt;
-use crate::pxar::catalog::BackupCatalogWriter;
-use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
+use crate::pxar::catalog::BackupCatalogWriter;
use crate::tools::runtime::block_on;
#[repr(u8)]
}
/// Used to specify additional attributes inside DirEntry
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
pub enum DirEntryAttribute {
Directory { start: u64 },
File { size: u64, mtime: u64 },
}
}
+ /// Get file mode bits for this entry to be used with the `MatchList` API.
+ pub fn get_file_mode(&self) -> Option<u32> {
+ Some(
+ match self.attr {
+ DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
+ DirEntryAttribute::File { .. } => pxar::mode::IFREG,
+ DirEntryAttribute::Symlink => pxar::mode::IFLNK,
+ DirEntryAttribute::Hardlink => return None,
+ DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
+ DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
+ DirEntryAttribute::Fifo => pxar::mode::IFIFO,
+ DirEntryAttribute::Socket => pxar::mode::IFSOCK,
+ }
+ as u32
+ )
+ }
+
/// Check if DirEntry is a directory
pub fn is_directory(&self) -> bool {
match self.attr {
&mut self,
parent: &DirEntry,
filename: &[u8],
- ) -> Result<DirEntry, Error> {
+ ) -> Result<Option<DirEntry>, Error> {
let start = match parent.attr {
DirEntryAttribute::Directory { start } => start,
Ok(false) // stop parsing
})?;
- match item {
- None => bail!("no such file"),
- Some(entry) => Ok(entry),
- }
+ Ok(item)
}
/// Read the raw directory info block from current reader position.
/// provided callback on them.
pub fn find(
&mut self,
- mut entry: &mut Vec<DirEntry>,
- pattern: &[MatchPatternSlice],
- callback: &Box<fn(&[DirEntry])>,
+ parent: &DirEntry,
+ file_path: &mut Vec<u8>,
+ match_list: &impl MatchList, //&[MatchEntry],
+ callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
) -> Result<(), Error> {
- let parent = entry.last().unwrap();
- if !parent.is_directory() {
- return Ok(())
- }
-
+ let file_len = file_path.len();
for e in self.read_dir(parent)? {
- match MatchPatternSlice::match_filename_include(
- &CString::new(e.name.clone())?,
- e.is_directory(),
- pattern,
- )? {
- (MatchType::Positive, _) => {
- entry.push(e);
- callback(&entry);
- let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
- let child_pattern = vec![pattern.as_slice()];
- self.find(&mut entry, &child_pattern, callback)?;
- entry.pop();
- }
- (MatchType::PartialPositive, child_pattern)
- | (MatchType::PartialNegative, child_pattern) => {
- entry.push(e);
- self.find(&mut entry, &child_pattern, callback)?;
- entry.pop();
- }
- _ => {}
+ let is_dir = e.is_directory();
+ file_path.truncate(file_len);
+ if !e.name.starts_with(b"/") {
+ file_path.reserve(e.name.len() + 1);
+ file_path.push(b'/');
+ }
+ file_path.extend(&e.name);
+ match match_list.matches(&file_path, e.get_file_mode()) {
+ Some(MatchType::Exclude) => continue,
+ Some(MatchType::Include) => callback(&file_path)?,
+ None => (),
+ }
+ if is_dir {
+ self.find(&e, file_path, match_list, callback)?;
}
}
+ file_path.truncate(file_len);
Ok(())
}
-use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryFrom;
-use std::ffi::{CString, OsStr};
+use std::ffi::{CStr, CString, OsStr, OsString};
+use std::future::Future;
use std::io::Write;
+use std::mem;
use std::os::unix::ffi::OsStrExt;
-use std::path::{Component, Path, PathBuf};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
-use chrono::{Utc, offset::TimeZone};
use anyhow::{bail, format_err, Error};
-use nix::sys::stat::{Mode, SFlag};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
-use proxmox::api::{cli::*, *};
-use proxmox::sys::linux::tty;
+use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
+use proxmox::api::api;
+use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
+use proxmox::c_result;
+use proxmox::tools::fs::{create_path, CreateOptions};
+use pxar::{EntryKind, Metadata};
-use super::catalog::{CatalogReader, DirEntry};
-use crate::pxar::*;
-use crate::tools;
+use crate::backup::catalog::{self, DirEntryAttribute};
+// FIXME: Remove lookup_self() calls by putting Directory into the dir stack
+use crate::pxar::dir_stack::PxarDirStack;
+use crate::pxar::flags;
+use crate::pxar::fuse::{Accessor, FileEntry};
+use crate::pxar::metadata;
-const PROMPT_PREFIX: &str = "pxar:";
-const PROMPT: &str = ">";
+type CatalogReader = crate::backup::CatalogReader<std::fs::File>;
-/// Interactive shell for interacton with the catalog.
-pub struct Shell {
- /// Readline instance handling input and callbacks
- rl: rustyline::Editor<CliHelper>,
- prompt: String,
-}
+const MAX_SYMLINK_COUNT: usize = 40;
+
+static mut SHELL: Option<usize> = None;
/// This list defines all the shell commands and their properties
/// using the api schema
pub fn catalog_shell_cli() -> CommandLineInterface {
-
- let map = CliCommandMap::new()
- .insert("pwd", CliCommand::new(&API_METHOD_PWD_COMMAND))
- .insert(
- "cd",
- CliCommand::new(&API_METHOD_CD_COMMAND)
- .arg_param(&["path"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert(
- "ls",
- CliCommand::new(&API_METHOD_LS_COMMAND)
- .arg_param(&["path"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert(
- "stat",
- CliCommand::new(&API_METHOD_STAT_COMMAND)
- .arg_param(&["path"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert(
- "select",
- CliCommand::new(&API_METHOD_SELECT_COMMAND)
- .arg_param(&["path"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert(
- "deselect",
- CliCommand::new(&API_METHOD_DESELECT_COMMAND)
- .arg_param(&["path"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert(
- "clear-selected",
- CliCommand::new(&API_METHOD_CLEAR_SELECTED_COMMAND)
- )
- .insert(
- "restore-selected",
- CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
- .arg_param(&["target"])
- .completion_cb("target", tools::complete_file_name)
- )
- .insert(
- "list-selected",
- CliCommand::new(&API_METHOD_LIST_SELECTED_COMMAND),
- )
- .insert(
- "restore",
- CliCommand::new(&API_METHOD_RESTORE_COMMAND)
- .arg_param(&["target"])
- .completion_cb("target", tools::complete_file_name)
- )
- .insert(
- "find",
- CliCommand::new(&API_METHOD_FIND_COMMAND)
- .arg_param(&["path", "pattern"])
- .completion_cb("path", Shell::complete_path)
- )
- .insert_help();
-
- CommandLineInterface::Nested(map)
+ CommandLineInterface::Nested(
+ CliCommandMap::new()
+ .insert("pwd", CliCommand::new(&API_METHOD_PWD_COMMAND))
+ .insert(
+ "cd",
+ CliCommand::new(&API_METHOD_CD_COMMAND)
+ .arg_param(&["path"])
+ .completion_cb("path", complete_path),
+ )
+ .insert(
+ "ls",
+ CliCommand::new(&API_METHOD_LS_COMMAND)
+ .arg_param(&["path"])
+ .completion_cb("path", complete_path),
+ )
+ .insert(
+ "stat",
+ CliCommand::new(&API_METHOD_STAT_COMMAND)
+ .arg_param(&["path"])
+ .completion_cb("path", complete_path),
+ )
+ .insert(
+ "select",
+ CliCommand::new(&API_METHOD_SELECT_COMMAND)
+ .arg_param(&["path"])
+ .completion_cb("path", complete_path),
+ )
+ .insert(
+ "deselect",
+ CliCommand::new(&API_METHOD_DESELECT_COMMAND)
+ .arg_param(&["path"])
+ .completion_cb("path", complete_path),
+ )
+ .insert(
+ "clear-selected",
+ CliCommand::new(&API_METHOD_CLEAR_SELECTED_COMMAND),
+ )
+ .insert(
+ "list-selected",
+ CliCommand::new(&API_METHOD_LIST_SELECTED_COMMAND),
+ )
+ .insert(
+ "restore-selected",
+ CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
+ .arg_param(&["target"])
+ .completion_cb("target", crate::tools::complete_file_name),
+ )
+ .insert(
+ "restore",
+ CliCommand::new(&API_METHOD_RESTORE_COMMAND)
+ .arg_param(&["target"])
+ .completion_cb("target", crate::tools::complete_file_name),
+ )
+ .insert(
+ "find",
+ CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
+ )
+ .insert_help(),
+ )
}
-impl Shell {
- /// Create a new shell for the given catalog and pxar archive.
- pub fn new(
- mut catalog: CatalogReader<std::fs::File>,
- archive_name: &str,
- decoder: Decoder,
- ) -> Result<Self, Error> {
- let catalog_root = catalog.root()?;
- // The root for the given archive as stored in the catalog
- let archive_root = catalog.lookup(&catalog_root, archive_name.as_bytes())?;
- let path = CatalogPathStack::new(archive_root);
-
- CONTEXT.with(|handle| {
- let mut ctx = handle.borrow_mut();
- *ctx = Some(Context {
- catalog,
- selected: Vec::new(),
- decoder,
- path,
- });
- });
-
- let cli_helper = CliHelper::new(catalog_shell_cli());
- let mut rl = rustyline::Editor::<CliHelper>::new();
- rl.set_helper(Some(cli_helper));
-
- Context::with(|ctx| {
- Ok(Self {
- rl,
- prompt: ctx.generate_prompt()?,
- })
- })
- }
-
- /// Start the interactive shell loop
- pub fn shell(mut self) -> Result<(), Error> {
- while let Ok(line) = self.rl.readline(&self.prompt) {
- let helper = self.rl.helper().unwrap();
- let args = match shellword_split(&line) {
- Ok(args) => args,
- Err(err) => {
- println!("Error: {}", err);
- continue;
- }
- };
-
- let rpcenv = CliEnvironment::new();
- let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);
- self.rl.add_history_entry(line);
- self.update_prompt()?;
+fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<String> {
+ let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
+ match shell.complete_path(complete_me) {
+ Ok(list) => list,
+ Err(err) => {
+ eprintln!("error during completion: {}", err);
+ Vec::new()
}
- Ok(())
- }
-
- /// Update the prompt to the new working directory
- fn update_prompt(&mut self) -> Result<(), Error> {
- Context::with(|ctx| {
- self.prompt = ctx.generate_prompt()?;
- Ok(())
- })
- }
-
- /// Completions for paths by lookup in the catalog
- fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<String> {
- Context::with(|ctx| {
- let (base, to_complete) = match complete_me.rfind('/') {
- // Split at ind + 1 so the slash remains on base, ok also if
- // ends in slash as split_at accepts up to length as index.
- Some(ind) => complete_me.split_at(ind + 1),
- None => ("", complete_me),
- };
-
- let current = if base.is_empty() {
- ctx.path.last().clone()
- } else {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(base), &mut ctx.decoder, &mut ctx.catalog, false)?;
- local.last().clone()
- };
-
- let entries = match ctx.catalog.read_dir(¤t) {
- Ok(entries) => entries,
- Err(_) => return Ok(Vec::new()),
- };
-
- let mut list = Vec::new();
- for entry in &entries {
- let mut name = String::from(base);
- if entry.name.starts_with(to_complete.as_bytes()) {
- name.push_str(std::str::from_utf8(&entry.name)?);
- if entry.is_directory() {
- name.push('/');
- }
- list.push(name);
- }
- }
- Ok(list)
- })
- .unwrap_or_default()
}
}
#[api(input: { properties: {} })]
/// List the current working directory.
-fn pwd_command() -> Result<(), Error> {
- Context::with(|ctx| {
- let path = ctx.path.generate_cstring()?;
- let mut out = std::io::stdout();
- out.write_all(&path.as_bytes())?;
- out.write_all(&[b'\n'])?;
- out.flush()?;
- Ok(())
- })
+async fn pwd_command() -> Result<(), Error> {
+ Shell::with(move |shell| shell.pwd()).await
}
#[api(
}
)]
/// Change the current working directory to the new directory
-fn cd_command(path: Option<String>) -> Result<(), Error> {
- Context::with(|ctx| {
- let path = path.unwrap_or_default();
- if path.is_empty() {
- ctx.path.clear();
- return Ok(());
- }
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, true)?;
- if !local.last().is_directory() {
- local.pop();
- eprintln!("not a directory, fallback to parent directory");
- }
- ctx.path = local;
- Ok(())
- })
+async fn cd_command(path: Option<String>) -> Result<(), Error> {
+ let path = path.as_ref().map(Path::new);
+ Shell::with(move |shell| shell.cd(path)).await
}
#[api(
}
)]
/// List the content of working directory or given path.
-fn ls_command(path: Option<String>) -> Result<(), Error> {
- Context::with(|ctx| {
- let parent = if let Some(ref path) = path {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, false)?;
- local.last().clone()
- } else {
- ctx.path.last().clone()
- };
-
- let list = if parent.is_directory() {
- ctx.catalog.read_dir(&parent)?
- } else {
- vec![parent.clone()]
- };
-
- if list.is_empty() {
- return Ok(());
- }
- let max = list.iter().max_by(|x, y| x.name.len().cmp(&y.name.len()));
- let max = match max {
- Some(dir_entry) => dir_entry.name.len() + 1,
- None => 0,
- };
-
- let (_rows, mut cols) = tty::stdout_terminal_size();
- cols /= max;
-
- let mut out = std::io::stdout();
- for (index, item) in list.iter().enumerate() {
- out.write_all(&item.name)?;
- // Fill with whitespaces
- out.write_all(&vec![b' '; max - item.name.len()])?;
- if index % cols == (cols - 1) {
- out.write_all(&[b'\n'])?;
- }
- }
- // If the last line is not complete, add the newline
- if list.len() % cols != cols - 1 {
- out.write_all(&[b'\n'])?;
- }
- out.flush()?;
- Ok(())
- })
+async fn ls_command(path: Option<String>) -> Result<(), Error> {
+ let path = path.as_ref().map(Path::new);
+ Shell::with(move |shell| shell.ls(path)).await
}
#[api(
)]
/// Read the metadata for a given directory entry.
///
-/// This is expensive because the data has to be read from the pxar `Decoder`,
-/// which means reading over the network.
-fn stat_command(path: String) -> Result<(), Error> {
- Context::with(|ctx| {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, false)?;
- let canonical = local.canonical(&mut ctx.decoder, &mut ctx.catalog, false)?;
- let item = canonical.lookup(&mut ctx.decoder)?;
- let mut out = std::io::stdout();
- out.write_all(b" File:\t")?;
- out.write_all(item.filename.as_bytes())?;
- out.write_all(b"\n")?;
- out.write_all(format!(" Size:\t{}\t\t", item.size).as_bytes())?;
- out.write_all(b"Type:\t")?;
-
- let mut mode_out = vec![b'-'; 10];
- match SFlag::from_bits_truncate(item.entry.mode as u32) {
- SFlag::S_IFDIR => {
- mode_out[0] = b'd';
- out.write_all(b"directory\n")?;
- }
- SFlag::S_IFREG => {
- mode_out[0] = b'-';
- out.write_all(b"regular file\n")?;
- }
- SFlag::S_IFLNK => {
- mode_out[0] = b'l';
- out.write_all(b"symbolic link\n")?;
- }
- SFlag::S_IFBLK => {
- mode_out[0] = b'b';
- out.write_all(b"block special file\n")?;
- }
- SFlag::S_IFCHR => {
- mode_out[0] = b'c';
- out.write_all(b"character special file\n")?;
- }
- _ => out.write_all(b"unknown\n")?,
- };
-
- let mode = Mode::from_bits_truncate(item.entry.mode as u32);
- if mode.contains(Mode::S_IRUSR) {
- mode_out[1] = b'r';
- }
- if mode.contains(Mode::S_IWUSR) {
- mode_out[2] = b'w';
- }
- match (mode.contains(Mode::S_IXUSR), mode.contains(Mode::S_ISUID)) {
- (false, false) => mode_out[3] = b'-',
- (true, false) => mode_out[3] = b'x',
- (false, true) => mode_out[3] = b'S',
- (true, true) => mode_out[3] = b's',
- }
-
- if mode.contains(Mode::S_IRGRP) {
- mode_out[4] = b'r';
- }
- if mode.contains(Mode::S_IWGRP) {
- mode_out[5] = b'w';
- }
- match (mode.contains(Mode::S_IXGRP), mode.contains(Mode::S_ISGID)) {
- (false, false) => mode_out[6] = b'-',
- (true, false) => mode_out[6] = b'x',
- (false, true) => mode_out[6] = b'S',
- (true, true) => mode_out[6] = b's',
- }
-
- if mode.contains(Mode::S_IROTH) {
- mode_out[7] = b'r';
- }
- if mode.contains(Mode::S_IWOTH) {
- mode_out[8] = b'w';
- }
- match (mode.contains(Mode::S_IXOTH), mode.contains(Mode::S_ISVTX)) {
- (false, false) => mode_out[9] = b'-',
- (true, false) => mode_out[9] = b'x',
- (false, true) => mode_out[9] = b'T',
- (true, true) => mode_out[9] = b't',
- }
-
- if !item.xattr.xattrs.is_empty() {
- mode_out.push(b'+');
- }
-
- out.write_all(b"Access:\t")?;
- out.write_all(&mode_out)?;
- out.write_all(b"\t")?;
- out.write_all(format!(" Uid:\t{}\t", item.entry.uid).as_bytes())?;
- out.write_all(format!("Gid:\t{}\n", item.entry.gid).as_bytes())?;
-
- let time = i64::try_from(item.entry.mtime)?;
- let sec = time / 1_000_000_000;
- let nsec = u32::try_from(time % 1_000_000_000)?;
- let dt = Utc.timestamp(sec, nsec);
- out.write_all(format!("Modify:\t{}\n", dt.to_rfc2822()).as_bytes())?;
- out.flush()?;
- Ok(())
- })
+/// This is expensive because the data has to be read from the pxar archive, which means reading
+/// over the network.
+async fn stat_command(path: String) -> Result<(), Error> {
+ Shell::with(move |shell| shell.stat(PathBuf::from(path))).await
}
#[api(
///
/// This will return an error if the entry is already present in the list or
/// if an invalid path was provided.
-fn select_command(path: String) -> Result<(), Error> {
- Context::with(|ctx| {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, false)?;
- let canonical = local.canonical(&mut ctx.decoder, &mut ctx.catalog, false)?;
- let pattern = MatchPattern::from_line(canonical.generate_cstring()?.as_bytes())?
- .ok_or_else(|| format_err!("encountered invalid match pattern"))?;
- if ctx.selected.iter().find(|p| **p == pattern).is_none() {
- ctx.selected.push(pattern);
- }
- Ok(())
- })
+async fn select_command(path: String) -> Result<(), Error> {
+ Shell::with(move |shell| shell.select(PathBuf::from(path))).await
}
#[api(
///
/// This will return an error if the entry was not found in the list of entries
/// selected for restore.
-fn deselect_command(path: String) -> Result<(), Error> {
- Context::with(|ctx| {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, false)?;
- let canonical = local.canonical(&mut ctx.decoder, &mut ctx.catalog, false)?;
- println!("{:?}", canonical.generate_cstring()?);
- let mut pattern = MatchPattern::from_line(canonical.generate_cstring()?.as_bytes())?
- .ok_or_else(|| format_err!("encountered invalid match pattern"))?;
- if let Some(last) = ctx.selected.last() {
- if last == &pattern {
- ctx.selected.pop();
- return Ok(());
- }
- }
- pattern.invert();
- ctx.selected.push(pattern);
- Ok(())
- })
+async fn deselect_command(path: String) -> Result<(), Error> {
+ Shell::with(move |shell| shell.deselect(PathBuf::from(path))).await
}
#[api( input: { properties: { } })]
/// Clear the list of files selected for restore.
-fn clear_selected_command() -> Result<(), Error> {
- Context::with(|ctx| {
- ctx.selected.clear();
- Ok(())
- })
+async fn clear_selected_command() -> Result<(), Error> {
+ Shell::with(move |shell| shell.deselect_all()).await
}
#[api(
input: {
properties: {
- target: {
- type: String,
- description: "target path for restore on local filesystem."
+ patterns: {
+ type: Boolean,
+ description: "List match patterns instead of the matching files.",
+ optional: true,
+ default: false,
}
}
}
)]
-/// Restore the selected entries to the given target path.
-///
-/// Target must not exist on the clients filesystem.
-fn restore_selected_command(target: String) -> Result<(), Error> {
- Context::with(|ctx| {
- if ctx.selected.is_empty() {
- bail!("no entries selected for restore");
- }
-
- // Entry point for the restore is always root here as the provided match
- // patterns are relative to root as well.
- let start_dir = ctx.decoder.root()?;
- ctx.decoder
- .restore(&start_dir, &Path::new(&target), &ctx.selected)?;
- Ok(())
- })
+/// List entries currently selected for restore.
+async fn list_selected_command(patterns: bool) -> Result<(), Error> {
+ Shell::with(move |shell| shell.list_selected(patterns)).await
}
#[api(
input: {
properties: {
pattern: {
- type: Boolean,
- description: "List match patterns instead of the matching files.",
+ type: String,
+ description: "Match pattern for matching files in the catalog."
+ },
+ select: {
+ type: bool,
optional: true,
+ default: false,
+ description: "Add matching filenames to list for restore."
}
}
}
)]
-/// List entries currently selected for restore.
-fn list_selected_command(pattern: Option<bool>) -> Result<(), Error> {
- Context::with(|ctx| {
- let mut out = std::io::stdout();
- if let Some(true) = pattern {
- out.write_all(&MatchPattern::to_bytes(ctx.selected.as_slice()))?;
- } else {
- let mut slices = Vec::with_capacity(ctx.selected.len());
- for pattern in &ctx.selected {
- slices.push(pattern.as_slice());
- }
- let mut dir_stack = vec![ctx.path.root()];
- ctx.catalog.find(
- &mut dir_stack,
- &slices,
- &Box::new(|path: &[DirEntry]| println!("{:?}", Context::generate_cstring(path).unwrap()))
- )?;
- }
- out.flush()?;
- Ok(())
- })
+/// Find entries in the catalog matching the given match pattern.
+async fn find_command(pattern: String, select: bool) -> Result<(), Error> {
+ Shell::with(move |shell| shell.find(pattern, select)).await
}
#[api(
target: {
type: String,
description: "target path for restore on local filesystem."
- },
- pattern: {
- type: String,
- optional: true,
- description: "match pattern to limit files for restore."
}
}
}
)]
-/// Restore the sub-archive given by the current working directory to target.
+/// Restore the selected entries to the given target path.
///
-/// By further providing a pattern, the restore can be limited to a narrower
-/// subset of this sub-archive.
-/// If pattern is not present or empty, the full archive is restored to target.
-fn restore_command(target: String, pattern: Option<String>) -> Result<(), Error> {
- Context::with(|ctx| {
- let pattern = pattern.unwrap_or_default();
- let match_pattern = match pattern.as_str() {
- "" | "/" | "." => Vec::new(),
- _ => vec![MatchPattern::from_line(pattern.as_bytes())?.unwrap()],
- };
- // Decoder entry point for the restore.
- let start_dir = if pattern.starts_with("/") {
- ctx.decoder.root()?
- } else {
- // Get the directory corresponding to the working directory from the
- // archive.
- let cwd = ctx.path.clone();
- cwd.lookup(&mut ctx.decoder)?
- };
-
- ctx.decoder
- .restore(&start_dir, &Path::new(&target), &match_pattern)?;
- Ok(())
- })
+/// Target must not exist on the client's filesystem.
+async fn restore_selected_command(target: String) -> Result<(), Error> {
+ Shell::with(move |shell| shell.restore_selected(PathBuf::from(target))).await
}
#[api(
input: {
properties: {
- path: {
+ target: {
type: String,
- description: "Path to node from where to start the search."
+ description: "target path for restore on local filesystem."
},
pattern: {
type: String,
- description: "Match pattern for matching files in the catalog."
- },
- select: {
- type: bool,
optional: true,
- description: "Add matching filenames to list for restore."
+ description: "match pattern to limit files for restore."
}
}
}
)]
-/// Find entries in the catalog matching the given match pattern.
-fn find_command(path: String, pattern: String, select: Option<bool>) -> Result<(), Error> {
- Context::with(|ctx| {
- let mut local = ctx.path.clone();
- local.traverse(&PathBuf::from(path), &mut ctx.decoder, &mut ctx.catalog, false)?;
- let canonical = local.canonical(&mut ctx.decoder, &mut ctx.catalog, false)?;
- if !local.last().is_directory() {
- bail!("path should be a directory, not a file!");
- }
- let select = select.unwrap_or(false);
-
- let cpath = canonical.generate_cstring().unwrap();
- let pattern = if pattern.starts_with("!") {
- let mut buffer = vec![b'!'];
- buffer.extend_from_slice(cpath.as_bytes());
- buffer.extend_from_slice(pattern[1..pattern.len()].as_bytes());
- buffer
- } else {
- let mut buffer = cpath.as_bytes().to_vec();
- buffer.extend_from_slice(pattern.as_bytes());
- buffer
- };
+/// Restore the sub-archive given by the current working directory to target.
+///
+/// By further providing a pattern, the restore can be limited to a narrower
+/// subset of this sub-archive.
+/// If pattern is not present or empty, the full archive is restored to target.
+async fn restore_command(target: String, pattern: Option<String>) -> Result<(), Error> {
+ Shell::with(move |shell| shell.restore(PathBuf::from(target), pattern)).await
+}
- let pattern = MatchPattern::from_line(&pattern)?
- .ok_or_else(|| format_err!("invalid match pattern"))?;
- let slice = vec![pattern.as_slice()];
-
- // The match pattern all contain the prefix of the entry path in order to
- // store them if selected, so the entry point for find is always the root
- // directory.
- let mut dir_stack = vec![ctx.path.root()];
- ctx.catalog.find(
- &mut dir_stack,
- &slice,
- &Box::new(|path: &[DirEntry]| println!("{:?}", Context::generate_cstring(path).unwrap()))
- )?;
+/// FIXME: Should we use this to fix `step()`?
+///
+/// The `Path` type's component iterator does not tell us anything about trailing slashes or
+/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
+/// here:
+enum PathComponent<'a> {
+ Root,
+ CurDir,
+ ParentDir,
+ Normal(&'a OsStr),
+ TrailingSlash,
+}
+
+struct PathComponentIter<'a> {
+ path: &'a [u8],
+ state: u8, // 0=beginning, 1=ongoing, 2=trailing, 3=finished (fused)
+}
- // Insert if matches should be selected.
- // Avoid duplicate entries of the same match pattern.
- if select && ctx.selected.iter().find(|p| **p == pattern).is_none() {
- ctx.selected.push(pattern);
+impl std::iter::FusedIterator for PathComponentIter<'_> {}
+
+impl<'a> Iterator for PathComponentIter<'a> {
+ type Item = PathComponent<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.path.is_empty() {
+ return None;
}
- Ok(())
- })
-}
+ if self.state == 0 {
+ self.state = 1;
+ if self.path[0] == b'/' {
+ // absolute path
+ self.path = &self.path[1..];
+ return Some(PathComponent::Root);
+ }
+ }
-std::thread_local! {
- static CONTEXT: RefCell<Option<Context>> = RefCell::new(None);
+ // skip slashes
+ let had_slashes = self.path[0] == b'/';
+ while self.path.get(0).copied() == Some(b'/') {
+ self.path = &self.path[1..];
+ }
+
+ Some(match self.path {
+ [] if had_slashes => PathComponent::TrailingSlash,
+ [] => return None,
+ [b'.'] | [b'.', b'/', ..] => {
+ self.path = &self.path[1..];
+ PathComponent::CurDir
+ }
+ [b'.', b'.'] | [b'.', b'.', b'/', ..] => {
+ self.path = &self.path[2..];
+ PathComponent::ParentDir
+ }
+ _ => {
+ let end = self
+ .path
+ .iter()
+ .position(|&b| b == b'/')
+ .unwrap_or(self.path.len());
+ let (out, rest) = self.path.split_at(end);
+ self.path = rest;
+ PathComponent::Normal(OsStr::from_bytes(out))
+ }
+ })
+ }
}
-/// Holds the context needed for access to catalog and decoder
-struct Context {
+pub struct Shell {
+ /// Readline instance handling input and callbacks
+ rl: rustyline::Editor<CliHelper>,
+
+ /// Interactive prompt.
+ prompt: String,
+
/// Catalog reader instance to navigate
- catalog: CatalogReader<std::fs::File>,
+ catalog: CatalogReader,
+
/// List of selected paths for restore
- selected: Vec<MatchPattern>,
- /// Decoder instance for the current pxar archive
- decoder: Decoder,
- /// Handle catalog stuff
- path: CatalogPathStack,
+ selected: HashMap<OsString, MatchEntry>,
+
+ /// pxar accessor instance for the current pxar archive
+ accessor: Accessor,
+
+ /// The current position in the archive.
+ position: Vec<PathStackEntry>,
}
-impl Context {
- /// Execute `call` within a context providing a mut ref to `Context` instance.
- fn with<T, F>(call: F) -> Result<T, Error>
+#[derive(Clone)]
+struct PathStackEntry {
+ /// This is always available. We mainly navigate through the catalog.
+ catalog: catalog::DirEntry,
+
+ /// Whenever we need something from the actual archive we fill this out. This is cached along
+ /// the entire path.
+ pxar: Option<FileEntry>,
+}
+
+impl PathStackEntry {
+ fn new(dir_entry: catalog::DirEntry) -> Self {
+ Self {
+ pxar: None,
+ catalog: dir_entry,
+ }
+ }
+}
+
+impl Shell {
+ /// Create a new shell for the given catalog and pxar archive.
+ pub async fn new(
+ mut catalog: CatalogReader,
+ archive_name: &str,
+ archive: Accessor,
+ ) -> Result<Self, Error> {
+ let cli_helper = CliHelper::new(catalog_shell_cli());
+ let mut rl = rustyline::Editor::<CliHelper>::new();
+ rl.set_helper(Some(cli_helper));
+
+ let catalog_root = catalog.root()?;
+ let archive_root = catalog
+ .lookup(&catalog_root, archive_name.as_bytes())?
+ .ok_or_else(|| format_err!("archive not found in catalog"))?;
+ let position = vec![PathStackEntry::new(archive_root)];
+
+ let mut this = Self {
+ rl,
+ prompt: String::new(),
+ catalog,
+ selected: HashMap::new(),
+ accessor: archive,
+ position,
+ };
+ this.update_prompt();
+ Ok(this)
+ }
+
+ async fn with<'a, Fut, R, F>(call: F) -> Result<R, Error>
where
- F: FnOnce(&mut Context) -> Result<T, Error>,
+ F: FnOnce(&'a mut Shell) -> Fut,
+ Fut: Future<Output = Result<R, Error>>,
+ F: 'a,
+ Fut: 'a,
+ R: 'static,
{
- CONTEXT.with(|cell| {
- let mut ctx = cell.borrow_mut();
- call(&mut ctx.as_mut().unwrap())
- })
+ let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
+ let result = call(&mut *shell).await;
+ result
}
- /// Generate CString from provided stack of `DirEntry`s.
- fn generate_cstring(dir_stack: &[DirEntry]) -> Result<CString, Error> {
- let mut path = vec![b'/'];
- // Skip the archive root, the '/' is displayed for it instead
- for component in dir_stack.iter().skip(1) {
- path.extend_from_slice(&component.name);
- if component.is_directory() {
- path.push(b'/');
+ pub async fn shell(mut self) -> Result<(), Error> {
+ let this = &mut self;
+ unsafe {
+ SHELL = Some(this as *mut Shell as usize);
+ }
+ while let Ok(line) = this.rl.readline(&this.prompt) {
+ let helper = this.rl.helper().unwrap();
+ let args = match cli::shellword_split(&line) {
+ Ok(args) => args,
+ Err(err) => {
+ println!("Error: {}", err);
+ continue;
+ }
+ };
+
+ let _ =
+ cli::handle_command_future(helper.cmd_def(), "", args, cli::CliEnvironment::new())
+ .await;
+ this.rl.add_history_entry(line);
+ this.update_prompt();
+ }
+ Ok(())
+ }
+
+ fn update_prompt(&mut self) {
+ self.prompt = "pxar:".to_string();
+ if self.position.len() <= 1 {
+ self.prompt.push('/');
+ } else {
+ for p in self.position.iter().skip(1) {
+ if !p.catalog.name.starts_with(b"/") {
+ self.prompt.push('/');
+ }
+ match std::str::from_utf8(&p.catalog.name) {
+ Ok(entry) => self.prompt.push_str(entry),
+ Err(_) => self.prompt.push_str("<non-utf8-dir>"),
+ }
}
}
- Ok(unsafe { CString::from_vec_unchecked(path) })
+ self.prompt.push_str(" > ");
+ }
+
+ async fn pwd(&mut self) -> Result<(), Error> {
+ let stack = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ None,
+ &mut Some(0),
+ )
+ .await?;
+ let path = Self::format_path_stack(&stack);
+ println!("{:?}", path);
+ Ok(())
}
- /// Generate the CString to display by readline based on
- /// PROMPT_PREFIX, PROMPT and the current working directory.
- fn generate_prompt(&self) -> Result<String, Error> {
- let prompt = format!(
- "{}{} {} ",
- PROMPT_PREFIX,
- self.path.generate_cstring()?.to_string_lossy(),
- PROMPT,
- );
- Ok(prompt)
+ fn new_path_stack(&self) -> Vec<PathStackEntry> {
+ self.position[..1].to_vec()
}
-}
-/// A valid path in the catalog starting from root.
-///
-/// Symlinks are stored by pushing the symlink entry and the target entry onto
-/// the stack. Allows to resolve all symlink in order to generate a canonical
-/// path needed for reading from the archive.
-#[derive(Clone)]
-struct CatalogPathStack {
- stack: Vec<DirEntry>,
- root: DirEntry,
-}
+ async fn resolve_symlink(
+ stack: &mut Vec<PathStackEntry>,
+ catalog: &mut CatalogReader,
+ accessor: &Accessor,
+ follow_symlinks: &mut Option<usize>,
+ ) -> Result<(), Error> {
+ if let Some(ref mut symlink_count) = follow_symlinks {
+ *symlink_count += 1;
+ if *symlink_count > MAX_SYMLINK_COUNT {
+ bail!("too many levels of symbolic links");
+ }
-impl CatalogPathStack {
- /// Create a new stack with given root entry.
- fn new(root: DirEntry) -> Self {
- Self {
- stack: Vec::new(),
- root,
+ let file = Self::walk_pxar_archive(accessor, &mut stack[..]).await?;
+
+ let path = match file.entry().kind() {
+ EntryKind::Symlink(symlink) => Path::new(symlink.as_os_str()),
+ _ => bail!("symlink in the catalog was not a symlink in the archive"),
+ };
+
+ let new_stack =
+ Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
+
+ *stack = new_stack;
+
+ Ok(())
+ } else {
+ bail!("target is a symlink");
}
}
- /// Get a clone of the root directories entry.
- fn root(&self) -> DirEntry {
- self.root.clone()
+    /// Process a single path component, updating the path stack accordingly.
+    ///
+    /// If the symlink count is used, symlinks will be followed until we hit the cap and error
+    /// out.
+ async fn step(
+ stack: &mut Vec<PathStackEntry>,
+ catalog: &mut CatalogReader,
+ accessor: &Accessor,
+ component: std::path::Component<'_>,
+ follow_symlinks: &mut Option<usize>,
+ ) -> Result<(), Error> {
+ use std::path::Component;
+ match component {
+ Component::Prefix(_) => bail!("invalid path component (prefix)"),
+ Component::RootDir => stack.truncate(1),
+ Component::CurDir => {
+ if stack.last().unwrap().catalog.is_symlink() {
+ Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
+ }
+ }
+ Component::ParentDir => drop(stack.pop()),
+ Component::Normal(entry) => {
+ if stack.last().unwrap().catalog.is_symlink() {
+ Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
+ }
+ match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
+ Some(dir) => stack.push(PathStackEntry::new(dir)),
+ None => bail!("no such file or directory: {:?}", entry),
+ }
+ }
+ }
+
+ Ok(())
}
- /// Remove all entries from the stack.
- ///
- /// This equals to being at the root directory.
- fn clear(&mut self) {
- self.stack.clear();
+ fn step_nofollow(
+ stack: &mut Vec<PathStackEntry>,
+ catalog: &mut CatalogReader,
+ component: std::path::Component<'_>,
+ ) -> Result<(), Error> {
+ use std::path::Component;
+ match component {
+ Component::Prefix(_) => bail!("invalid path component (prefix)"),
+ Component::RootDir => stack.truncate(1),
+ Component::CurDir => {
+ if stack.last().unwrap().catalog.is_symlink() {
+ bail!("target is a symlink");
+ }
+ }
+ Component::ParentDir => drop(stack.pop()),
+ Component::Normal(entry) => {
+ if stack.last().unwrap().catalog.is_symlink() {
+ bail!("target is a symlink");
+ } else {
+ match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
+ Some(dir) => stack.push(PathStackEntry::new(dir)),
+ None => bail!("no such file or directory: {:?}", entry),
+ }
+ }
+ }
+ }
+ Ok(())
+ }
+
+    /// Walk the catalog along the given path; the pxar accessor is required to resolve symbolic links.
+ async fn walk_catalog(
+ stack: &mut Vec<PathStackEntry>,
+ catalog: &mut CatalogReader,
+ accessor: &Accessor,
+ path: &Path,
+ follow_symlinks: &mut Option<usize>,
+ ) -> Result<(), Error> {
+ for c in path.components() {
+ Self::step(stack, catalog, accessor, c, follow_symlinks).await?;
+ }
+ Ok(())
+ }
+
+ /// Non-async version cannot follow symlinks.
+ fn walk_catalog_nofollow(
+ stack: &mut Vec<PathStackEntry>,
+ catalog: &mut CatalogReader,
+ path: &Path,
+ ) -> Result<(), Error> {
+ for c in path.components() {
+ Self::step_nofollow(stack, catalog, c)?;
+ }
+ Ok(())
+ }
+
+ /// This assumes that there are no more symlinks in the path stack.
+ async fn walk_pxar_archive(
+ accessor: &Accessor,
+ mut stack: &mut [PathStackEntry],
+ ) -> Result<FileEntry, Error> {
+ if stack[0].pxar.is_none() {
+ stack[0].pxar = Some(accessor.open_root().await?.lookup_self().await?);
+ }
+
+ // Now walk the directory stack:
+ let mut at = 1;
+ while at < stack.len() {
+ if stack[at].pxar.is_some() {
+ at += 1;
+ continue;
+ }
+
+ let parent = stack[at - 1].pxar.as_ref().unwrap();
+ let dir = parent.enter_directory().await?;
+ let name = Path::new(OsStr::from_bytes(&stack[at].catalog.name));
+ stack[at].pxar = Some(
+ dir.lookup(name)
+ .await?
+ .ok_or_else(|| format_err!("no such entry in pxar file: {:?}", name))?,
+ );
+
+ at += 1;
+ }
+
+ Ok(stack.last().unwrap().pxar.clone().unwrap())
+ }
+
+ fn complete_path(&mut self, input: &str) -> Result<Vec<String>, Error> {
+ let mut tmp_stack;
+ let (parent, base, part) = match input.rfind('/') {
+ Some(ind) => {
+ let (base, part) = input.split_at(ind + 1);
+ let path = PathBuf::from(base);
+ if path.is_absolute() {
+ tmp_stack = self.new_path_stack();
+ } else {
+ tmp_stack = self.position.clone();
+ }
+ Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
+ (&tmp_stack.last().unwrap().catalog, base, part)
+ }
+ None => (&self.position.last().unwrap().catalog, "", input),
+ };
+
+ let entries = self.catalog.read_dir(parent)?;
+
+ let mut out = Vec::new();
+ for entry in entries {
+ let mut name = base.to_string();
+ if entry.name.starts_with(part.as_bytes()) {
+ name.push_str(std::str::from_utf8(&entry.name)?);
+ if entry.is_directory() {
+ name.push('/');
+ }
+ out.push(name);
+ }
+ }
+
+ Ok(out)
+ }
+
+ // Break async recursion here: lookup -> walk_catalog -> step -> lookup
+ fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
+ stack: &'s [PathStackEntry],
+ catalog: &'c mut CatalogReader,
+ accessor: &'a Accessor,
+ path: Option<&'p Path>,
+ follow_symlinks: &'y mut Option<usize>,
+ ) -> Pin<Box<dyn Future<Output = Result<Vec<PathStackEntry>, Error>> + Send + 'future>>
+ where
+ 's: 'future,
+ 'c: 'future,
+ 'a: 'future,
+ 'p: 'future,
+ 'y: 'future,
+ {
+ Box::pin(async move {
+ Ok(match path {
+ None => stack.to_vec(),
+ Some(path) => {
+ let mut stack = if path.is_absolute() {
+ stack[..1].to_vec()
+ } else {
+ stack.to_vec()
+ };
+ Self::walk_catalog(&mut stack, catalog, accessor, path, follow_symlinks)
+ .await?;
+ stack
+ }
+ })
+ })
+ }
+
+ async fn ls(&mut self, path: Option<&Path>) -> Result<(), Error> {
+ let stack = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ path,
+ &mut Some(0),
+ )
+ .await?;
+
+ let last = stack.last().unwrap();
+ if last.catalog.is_directory() {
+ let items = self.catalog.read_dir(&stack.last().unwrap().catalog)?;
+ let mut out = std::io::stdout();
+ // FIXME: columnize
+ for item in items {
+ out.write_all(&item.name)?;
+ out.write_all(b"\n")?;
+ }
+ } else {
+ let mut out = std::io::stdout();
+ out.write_all(&last.catalog.name)?;
+ out.write_all(b"\n")?;
+ }
+ Ok(())
}
- /// Get a reference to the last entry on the stack.
- fn last(&self) -> &DirEntry {
- self.stack.last().unwrap_or(&self.root)
+ async fn stat(&mut self, path: PathBuf) -> Result<(), Error> {
+ let mut stack = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ Some(&path),
+ &mut Some(0),
+ )
+ .await?;
+
+ let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
+ std::io::stdout()
+ .write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
+ Ok(())
+ }
+
+ async fn cd(&mut self, path: Option<&Path>) -> Result<(), Error> {
+ match path {
+ Some(path) => {
+ let new_position = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ Some(path),
+ &mut None,
+ )
+ .await?;
+ if !new_position.last().unwrap().catalog.is_directory() {
+ bail!("not a directory");
+ }
+ self.position = new_position;
+ }
+ None => self.position.truncate(1),
+ }
+ self.update_prompt();
+ Ok(())
+ }
+
+ /// This stack must have been canonicalized already!
+ fn format_path_stack(stack: &[PathStackEntry]) -> OsString {
+ if stack.len() <= 1 {
+ return OsString::from("/");
+ }
+
+ let mut out = OsString::new();
+ for c in stack.iter().skip(1) {
+ out.push("/");
+ out.push(OsStr::from_bytes(&c.catalog.name));
+ }
+
+ out
+ }
+
+ async fn select(&mut self, path: PathBuf) -> Result<(), Error> {
+ let stack = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ Some(&path),
+ &mut Some(0),
+ )
+ .await?;
+
+ let path = Self::format_path_stack(&stack);
+ let entry = MatchEntry::include(MatchPattern::Literal(path.as_bytes().to_vec()));
+ if self.selected.insert(path.clone(), entry).is_some() {
+ println!("path already selected: {:?}", path);
+ } else {
+ println!("added path: {:?}", path);
+ }
+
+ Ok(())
}
- /// Check if the last entry is a symlink.
- fn last_is_symlink(&self) -> bool {
- self.last().is_symlink()
+ async fn deselect(&mut self, path: PathBuf) -> Result<(), Error> {
+ let stack = Self::lookup(
+ &self.position,
+ &mut self.catalog,
+ &self.accessor,
+ Some(&path),
+ &mut Some(0),
+ )
+ .await?;
+
+ let path = Self::format_path_stack(&stack);
+
+ if self.selected.remove(&path).is_some() {
+ println!("removed path from selection: {:?}", path);
+ } else {
+ println!("path not selected: {:?}", path);
+ }
+
+ Ok(())
}
- /// Check if the last entry is a directory.
- fn last_is_directory(&self) -> bool {
- self.last().is_directory()
+ async fn deselect_all(&mut self) -> Result<(), Error> {
+ self.selected.clear();
+ println!("cleared selection");
+ Ok(())
}
- /// Remove a component, if it was a symlink target,
- /// this removes also the symlink entry.
- fn pop(&mut self) -> Option<DirEntry> {
- let entry = self.stack.pop()?;
- if self.last_is_symlink() {
- self.stack.pop()
+ async fn list_selected(&mut self, patterns: bool) -> Result<(), Error> {
+ if patterns {
+ self.list_selected_patterns().await
} else {
- Some(entry)
+ self.list_matching_files().await
}
}
- /// Add a component to the stack.
- fn push(&mut self, entry: DirEntry) {
- self.stack.push(entry)
+ async fn list_selected_patterns(&self) -> Result<(), Error> {
+ for entry in self.selected.keys() {
+ println!("{:?}", entry);
+ }
+ Ok(())
}
- /// Check if pushing the given entry onto the CatalogPathStack would create a
- /// loop by checking if the same entry is already present.
- fn creates_loop(&self, entry: &DirEntry) -> bool {
- self.stack.iter().any(|comp| comp.eq(entry))
+ fn build_match_list(&self) -> Vec<MatchEntry> {
+ let mut list = Vec::with_capacity(self.selected.len());
+ for entry in self.selected.values() {
+ list.push(entry.clone());
+ }
+ list
}
- /// Starting from this path, traverse the catalog by the provided `path`.
- fn traverse(
+ async fn list_matching_files(&mut self) -> Result<(), Error> {
+ let matches = self.build_match_list();
+
+ self.catalog.find(
+ &self.position[0].catalog,
+ &mut Vec::new(),
+ &matches,
+ &mut |path: &[u8]| -> Result<(), Error> {
+ let mut out = std::io::stdout();
+ out.write_all(path)?;
+ out.write_all(b"\n")?;
+ Ok(())
+ },
+ )?;
+
+ Ok(())
+ }
+
+ async fn find(&mut self, pattern: String, select: bool) -> Result<(), Error> {
+ let pattern_os = OsString::from(pattern.clone());
+ let pattern_entry =
+ MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;
+
+ let mut found_some = false;
+ self.catalog.find(
+ &self.position[0].catalog,
+ &mut Vec::new(),
+ &[&pattern_entry],
+ &mut |path: &[u8]| -> Result<(), Error> {
+ found_some = true;
+ let mut out = std::io::stdout();
+ out.write_all(path)?;
+ out.write_all(b"\n")?;
+ Ok(())
+ },
+ )?;
+
+ if found_some && select {
+ self.selected.insert(pattern_os, pattern_entry);
+ }
+
+ Ok(())
+ }
+
+ async fn restore_selected(&mut self, destination: PathBuf) -> Result<(), Error> {
+ if self.selected.is_empty() {
+ bail!("no entries selected");
+ }
+
+ let match_list = self.build_match_list();
+
+ self.restore_with_match_list(destination, &match_list).await
+ }
+
+ async fn restore(
&mut self,
- path: &PathBuf,
- mut decoder: &mut Decoder,
- mut catalog: &mut CatalogReader<std::fs::File>,
- follow_final: bool,
+ destination: PathBuf,
+ pattern: Option<String>,
) -> Result<(), Error> {
- for component in path.components() {
- match component {
- Component::RootDir => self.clear(),
- Component::CurDir => continue,
- Component::ParentDir => { self.pop(); }
- Component::Normal(comp) => {
- let entry = catalog.lookup(self.last(), comp.as_bytes())?;
- if self.creates_loop(&entry) {
- bail!("loop detected, will not follow");
- }
- self.push(entry);
- if self.last_is_symlink() && follow_final {
- let mut canonical = self.canonical(&mut decoder, &mut catalog, follow_final)?;
- let target = canonical.pop().unwrap();
- self.push(target);
- }
- }
- Component::Prefix(_) => bail!("encountered prefix component. Non unix systems not supported."),
+ let tmp;
+ let match_list: &[MatchEntry] = match pattern {
+ None => &[],
+ Some(pattern) => {
+ tmp = [MatchEntry::parse_pattern(
+ pattern,
+ PatternFlag::PATH_NAME,
+ MatchType::Include,
+ )?];
+ &tmp
+ }
+ };
+
+ self.restore_with_match_list(destination, match_list).await
+ }
+
+ async fn restore_with_match_list(
+ &mut self,
+ destination: PathBuf,
+ match_list: &[MatchEntry],
+ ) -> Result<(), Error> {
+ create_path(
+ &destination,
+ None,
+ Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
+ )
+ .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
+
+ let rootdir = Dir::open(
+ &destination,
+ OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
+ Mode::empty(),
+ )
+ .map_err(|err| {
+ format_err!("unable to open target directory {:?}: {}", destination, err,)
+ })?;
+
+ let mut dir_stack = self.new_path_stack();
+ Self::walk_pxar_archive(&self.accessor, &mut dir_stack).await?;
+ let root_meta = dir_stack
+ .last()
+ .unwrap()
+ .pxar
+ .as_ref()
+ .unwrap()
+ .entry()
+ .metadata()
+ .clone();
+ let pxar_dir_stack = PxarDirStack::new(rootdir, root_meta);
+
+ let mut extractor = ExtractorState::new(
+ flags::DEFAULT,
+ &mut self.catalog,
+ dir_stack,
+ pxar_dir_stack,
+ &match_list,
+ &self.accessor,
+ )?;
+
+ extractor.extract().await
+ }
+}
+
+enum LoopState {
+ Break,
+ Continue,
+}
+
+struct ExtractorState<'a> {
+ path: Vec<u8>,
+ path_len: usize,
+ path_len_stack: Vec<usize>,
+
+ dir_stack: Vec<PathStackEntry>,
+
+ matches: bool,
+ matches_stack: Vec<bool>,
+
+ read_dir: <Vec<catalog::DirEntry> as IntoIterator>::IntoIter,
+ read_dir_stack: Vec<<Vec<catalog::DirEntry> as IntoIterator>::IntoIter>,
+
+ pxar_dir_stack: PxarDirStack,
+
+ catalog: &'a mut CatalogReader,
+ feature_flags: u64,
+ match_list: &'a [MatchEntry],
+ accessor: &'a Accessor,
+}
+
+impl<'a> ExtractorState<'a> {
+ pub fn new(
+ feature_flags: u64,
+ catalog: &'a mut CatalogReader,
+ dir_stack: Vec<PathStackEntry>,
+ pxar_dir_stack: PxarDirStack,
+ match_list: &'a [MatchEntry],
+ accessor: &'a Accessor,
+ ) -> Result<Self, Error> {
+ let read_dir = catalog
+ .read_dir(&dir_stack.last().unwrap().catalog)?
+ .into_iter();
+ Ok(Self {
+ path: Vec::new(),
+ path_len: 0,
+ path_len_stack: Vec::new(),
+
+ dir_stack,
+
+ matches: match_list.is_empty(),
+ matches_stack: Vec::new(),
+
+ read_dir,
+ read_dir_stack: Vec::new(),
+
+ pxar_dir_stack,
+
+ catalog,
+ feature_flags,
+ match_list,
+ accessor,
+ })
+ }
+
+ pub async fn extract(&mut self) -> Result<(), Error> {
+ loop {
+ let entry = match self.read_dir.next() {
+ Some(entry) => entry,
+ None => match self.handle_end_of_directory()? {
+ LoopState::Break => break, // done with root directory
+ LoopState::Continue => continue,
+ },
+ };
+
+ self.path.truncate(self.path_len);
+ if !entry.name.starts_with(b"/") {
+ self.path.reserve(entry.name.len() + 1);
+ self.path.push(b'/');
}
+ self.path.extend(&entry.name);
+
+ self.handle_entry(entry).await?;
}
- if path.as_os_str().as_bytes().ends_with(b"/") && !self.last_is_directory() {
- bail!("entry is not a directory");
+
+ Ok(())
+ }
+
+ fn handle_end_of_directory(&mut self) -> Result<LoopState, Error> {
+ // go up a directory:
+ self.read_dir = match self.read_dir_stack.pop() {
+ Some(r) => r,
+ None => return Ok(LoopState::Break), // out of root directory
+ };
+
+ self.matches = self
+ .matches_stack
+ .pop()
+ .ok_or_else(|| format_err!("internal iterator error (matches_stack)"))?;
+
+ self.dir_stack
+ .pop()
+ .ok_or_else(|| format_err!("internal iterator error (dir_stack)"))?;
+
+ let dir = self
+ .pxar_dir_stack
+ .pop()?
+ .ok_or_else(|| format_err!("internal iterator error (pxar_dir_stack)"))?;
+
+ self.path_len = self
+ .path_len_stack
+ .pop()
+ .ok_or_else(|| format_err!("internal iterator error (path_len_stack)"))?;
+
+ self.path.push(0);
+ let dirname = CStr::from_bytes_with_nul(&self.path[(self.path_len + 1)..])?;
+
+ if let Some(fd) = dir.try_as_raw_fd() {
+ // the directory was created, so apply the metadata:
+ metadata::apply(self.feature_flags, dir.metadata(), fd, dirname)?;
}
+
+ Ok(LoopState::Continue)
+ }
+
+ async fn handle_new_directory(
+ &mut self,
+ entry: catalog::DirEntry,
+ match_result: Option<MatchType>,
+ ) -> Result<(), Error> {
+ // enter a new directory:
+ self.read_dir_stack.push(mem::replace(
+ &mut self.read_dir,
+ self.catalog.read_dir(&entry)?.into_iter(),
+ ));
+ self.matches_stack.push(self.matches);
+ self.dir_stack.push(PathStackEntry::new(entry));
+ self.path_len_stack.push(self.path_len);
+ self.path_len = self.path.len();
+
+ Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+ let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
+ let dir_meta = dir_pxar.entry().metadata().clone();
+ self.pxar_dir_stack
+ .push(dir_pxar.file_name().to_os_string(), dir_meta)?;
+
+ if self.matches && match_result != Some(MatchType::Exclude) {
+ todo!("create this directory");
+ }
+
Ok(())
}
- /// Create a canonical version of this path with symlinks resolved.
- ///
- /// If resolve final is true, follow also an eventual symlink of the last
- /// path component.
- fn canonical(
- &self,
- mut decoder: &mut Decoder,
- mut catalog: &mut CatalogReader<std::fs::File>,
- resolve_final: bool,
- ) -> Result<Self, Error> {
- let mut canonical = CatalogPathStack::new(self.root.clone());
- let mut iter = self.stack.iter().enumerate();
- while let Some((index, component)) = iter.next() {
- if component.is_directory() {
- canonical.push(component.clone());
- } else if component.is_symlink() {
- canonical.push(component.clone());
- if index != self.stack.len() - 1 || resolve_final {
- // Get the symlink target by traversing the canonical path
- // in the archive up to the symlink.
- let archive_entry = canonical.lookup(&mut decoder)?;
- canonical.pop();
- // Resolving target means also ignoring the target in the iterator, so get it.
- iter.next();
- let target = archive_entry.target
- .ok_or_else(|| format_err!("expected entry with symlink target."))?;
- canonical.traverse(&target, &mut decoder, &mut catalog, resolve_final)?;
+ pub async fn handle_entry(&mut self, entry: catalog::DirEntry) -> Result<(), Error> {
+ let match_result = self.match_list.matches(&self.path, entry.get_file_mode());
+ let did_match = match match_result {
+ Some(MatchType::Include) => true,
+ Some(MatchType::Exclude) => false,
+ None => self.matches,
+ };
+
+ match (did_match, &entry.attr) {
+ (_, DirEntryAttribute::Directory { .. }) => {
+ self.handle_new_directory(entry, match_result).await?;
+ }
+ (true, DirEntryAttribute::File { .. }) => {
+ self.dir_stack.push(PathStackEntry::new(entry));
+ let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+ self.extract_file(file).await?;
+ self.dir_stack.pop();
+ }
+ (true, DirEntryAttribute::Symlink)
+ | (true, DirEntryAttribute::BlockDevice)
+ | (true, DirEntryAttribute::CharDevice)
+ | (true, DirEntryAttribute::Fifo)
+ | (true, DirEntryAttribute::Socket)
+ | (true, DirEntryAttribute::Hardlink) => {
+ let attr = entry.attr.clone();
+ self.dir_stack.push(PathStackEntry::new(entry));
+ let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+ self.extract_special(file, attr).await?;
+ self.dir_stack.pop();
+ }
+ (false, _) => (), // skip
+ }
+
+ Ok(())
+ }
+
+ fn path(&self) -> &OsStr {
+ OsStr::from_bytes(&self.path)
+ }
+
+ async fn extract_file(&mut self, entry: FileEntry) -> Result<(), Error> {
+ match entry.kind() {
+ pxar::EntryKind::File { size, .. } => {
+ let mut contents = entry.contents().await?;
+
+ let parent = self.pxar_dir_stack.last_dir_fd(true)?;
+ let mut file = tokio::fs::File::from_std(unsafe {
+ std::fs::File::from_raw_fd(nix::fcntl::openat(
+ parent,
+ entry.file_name(),
+ OFlag::O_CREAT | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
+ Mode::from_bits(0o600).unwrap(),
+ )?)
+ });
+
+ let extracted = tokio::io::copy(&mut contents, &mut file).await?;
+ if *size != extracted {
+ bail!("extracted {} bytes of a file of {} bytes", extracted, size);
}
- } else if index != self.stack.len() - 1 {
- bail!("intermitten node is not symlink nor directory");
- } else {
- canonical.push(component.clone());
+
+ metadata::apply_with_path(
+ flags::DEFAULT,
+ entry.metadata(),
+ file.as_raw_fd(),
+ entry.file_name(),
+ )?;
+
+ Ok(())
+ }
+ _ => {
+ bail!(
+ "catalog file {:?} not a regular file in the archive",
+ self.path()
+ );
}
}
- Ok(canonical)
}
- /// Lookup this path in the archive using the provided decoder.
- fn lookup(&self, decoder: &mut Decoder) -> Result<DirectoryEntry, Error> {
- let mut current = decoder.root()?;
- for component in self.stack.iter() {
- match decoder.lookup(¤t, &OsStr::from_bytes(&component.name))? {
- Some(item) => current = item,
- // This should not happen if catalog an archive are consistent.
- None => bail!("no such file or directory in archive - inconsistent catalog"),
+ async fn extract_special(
+ &mut self,
+ entry: FileEntry,
+ catalog_attr: DirEntryAttribute,
+ ) -> Result<(), Error> {
+ match (catalog_attr, entry.kind()) {
+ (DirEntryAttribute::Symlink, pxar::EntryKind::Symlink(symlink)) => {
+ self.extract_symlink(entry.file_name(), symlink.as_os_str(), entry.metadata())
+ }
+ (DirEntryAttribute::Symlink, _) => {
+ bail!(
+ "catalog symlink {:?} not a symlink in the archive",
+ self.path()
+ );
+ }
+
+ (DirEntryAttribute::Hardlink, pxar::EntryKind::Hardlink(hardlink)) => {
+ self.extract_hardlink(entry.file_name(), hardlink.as_os_str(), entry.metadata())
+ }
+ (DirEntryAttribute::Hardlink, _) => {
+ bail!(
+ "catalog hardlink {:?} not a hardlink in the archive",
+ self.path()
+ );
}
+
+ (ref attr, pxar::EntryKind::Device(device)) => {
+ self.extract_device(attr.clone(), entry.file_name(), device, entry.metadata())
+ }
+
+ (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => {
+ self.extract_node(entry.file_name(), 0, entry.metadata())
+ }
+ (DirEntryAttribute::Fifo, _) => {
+ bail!("catalog fifo {:?} not a fifo in the archive", self.path());
+ }
+
+ (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => {
+ self.extract_node(entry.file_name(), 0, entry.metadata())
+ }
+ (DirEntryAttribute::Socket, _) => {
+ bail!(
+ "catalog socket {:?} not a socket in the archive",
+ self.path()
+ );
+ }
+
+ attr => bail!("unhandled file type {:?} for {:?}", attr, self.path()),
}
- Ok(current)
}
- /// Generate a CString from this.
- fn generate_cstring(&self) -> Result<CString, Error> {
- let mut path = vec![b'/'];
- let mut iter = self.stack.iter().enumerate();
- while let Some((index, component)) = iter.next() {
- if component.is_symlink() && index != self.stack.len() - 1 {
- let (_, next) = iter.next()
- .ok_or_else(|| format_err!("unresolved symlink encountered"))?;
- // Display the name of the link, not the target
- path.extend_from_slice(&component.name);
- if next.is_directory() {
- path.push(b'/');
+ fn extract_symlink(
+ &mut self,
+ file_name: &OsStr,
+ target: &OsStr,
+ metadata: &Metadata,
+ ) -> Result<(), Error> {
+ let parent = self.pxar_dir_stack.last_dir_fd(true)?;
+ nix::unistd::symlinkat(target, Some(parent), file_name)?;
+
+ metadata::apply_at(
+ self.feature_flags,
+ metadata,
+ parent,
+ &CString::new(file_name.as_bytes())?,
+ )?;
+
+ Ok(())
+ }
+
+ fn extract_hardlink(
+ &mut self,
+ file_name: &OsStr,
+ target: &OsStr,
+ _metadata: &Metadata,
+ ) -> Result<(), Error> {
+ crate::pxar::tools::assert_relative_path(target)?;
+
+ let parent = self.pxar_dir_stack.last_dir_fd(true)?;
+ let root = self.pxar_dir_stack.root_dir_fd()?;
+ nix::unistd::linkat(
+ Some(root),
+ target,
+ Some(parent),
+ file_name,
+ nix::unistd::LinkatFlags::NoSymlinkFollow,
+ )?;
+
+ Ok(())
+ }
+
+ fn extract_device(
+ &mut self,
+ attr: DirEntryAttribute,
+ file_name: &OsStr,
+ device: &pxar::format::Device,
+ metadata: &Metadata,
+ ) -> Result<(), Error> {
+ match attr {
+ DirEntryAttribute::BlockDevice => {
+ if !metadata.stat.is_blockdev() {
+ bail!(
+ "catalog block device {:?} is not a block device in the archive",
+ self.path(),
+ );
}
- } else {
- path.extend_from_slice(&component.name);
- if component.is_directory() {
- path.push(b'/');
+ }
+ DirEntryAttribute::CharDevice => {
+ if !metadata.stat.is_chardev() {
+ bail!(
+ "catalog character device {:?} is not a character device in the archive",
+ self.path(),
+ );
}
}
+ _ => {
+ bail!(
+ "unexpected file type for {:?} in the catalog, \
+ which is a device special file in the archive",
+ self.path(),
+ );
+ }
}
- Ok(unsafe { CString::from_vec_unchecked(path) })
+ self.extract_node(file_name, device.to_dev_t(), metadata)
+ }
+
+ fn extract_node(
+ &mut self,
+ file_name: &OsStr,
+ device: libc::dev_t,
+ metadata: &Metadata,
+ ) -> Result<(), Error> {
+ let mode = metadata.stat.mode;
+ let mode = u32::try_from(mode).map_err(|_| {
+ format_err!(
+ "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
+ mode,
+ mode,
+ )
+ })?;
+
+ let parent = self.pxar_dir_stack.last_dir_fd(true)?;
+ let file_name = CString::new(file_name.as_bytes())?;
+ unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
+ .map_err(|err| format_err!("failed to create device node: {}", err))?;
+
+ metadata::apply_at(self.feature_flags, metadata, parent, &file_name)
}
}
use std::convert::TryInto;
use std::fs::File;
use std::io::{BufWriter, Seek, SeekFrom, Write};
+use std::ops::Range;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use super::chunk_stat::ChunkStat;
use super::chunk_store::ChunkStore;
+use super::index::ChunkReadInfo;
use super::read_chunk::ReadChunk;
use super::Chunker;
use super::IndexFile;
}
#[allow(clippy::cast_ptr_alignment)]
- pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
+ pub fn chunk_info(&self, pos: usize) -> Result<ChunkReadInfo, Error> {
if pos >= self.index_entries {
bail!("chunk index out of range");
}
);
}
- Ok((start, end, unsafe { digest.assume_init() }))
+ Ok(ChunkReadInfo {
+ range: start..end,
+ digest: unsafe { digest.assume_init() },
+ })
}
#[inline]
}
}
+struct CachedChunk {
+ range: Range<u64>,
+ data: Vec<u8>,
+}
+
+impl CachedChunk {
+    /// Create a `CachedChunk`, verifying that the data length matches the size of the range.
+ pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
+ if data.len() as u64 != range.end - range.start {
+ bail!(
+ "read chunk with wrong size ({} != {})",
+ data.len(),
+ range.end - range.start,
+ );
+ }
+ Ok(Self { range, data })
+ }
+}
+
pub struct BufferedDynamicReader<S> {
store: S,
index: DynamicIndexReader,
buffered_chunk_idx: usize,
buffered_chunk_start: u64,
read_offset: u64,
- lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
+ lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
}
struct ChunkCacher<'a, S> {
index: &'a DynamicIndexReader,
}
-impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
- fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
- let (start, end, digest) = self.index.chunk_info(index)?;
- self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
+impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
+ fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
+ let info = self.index.chunk_info(index)?;
+ let range = info.range;
+ let data = self.store.read_chunk(&info.digest)?;
+ CachedChunk::new(range, data).map(Some)
}
}
}
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
- let (start, end, data) = self.lru_cache.access(
+ let cached_chunk = self.lru_cache.access(
idx,
&mut ChunkCacher {
store: &mut self.store,
},
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
- if (*end - *start) != data.len() as u64 {
- bail!(
- "read chunk with wrong size ({} != {}",
- (*end - *start),
- data.len()
- );
- }
-
// fixme: avoid copy
self.read_buffer.clear();
- self.read_buffer.extend_from_slice(&data);
+ self.read_buffer.extend_from_slice(&cached_chunk.data);
self.buffered_chunk_idx = idx;
- self.buffered_chunk_start = *start;
+ self.buffered_chunk_start = cached_chunk.range.start;
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
Ok(())
}
use std::collections::HashMap;
+use std::ops::Range;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::*;
+/// Location and identity of a chunk inside an index file: its byte range in
+/// the archive and its 32-byte content digest.
+pub struct ChunkReadInfo {
+ /// Byte range the chunk covers in the logical archive stream.
+ pub range: Range<u64>,
+ /// 32-byte digest identifying the chunk's content.
+ pub digest: [u8; 32],
+}
+
+impl ChunkReadInfo {
+ /// Size of the chunk in bytes (length of `range`).
+ #[inline]
+ pub fn size(&self) -> u64 {
+ self.range.end - self.range.start
+ }
+}
+
/// Trait to get digest list from index files
///
/// To allow easy iteration over all used chunks.
-use anyhow::{bail, format_err, Error};
-use nix::unistd::{fork, ForkResult, pipe};
-use std::os::unix::io::RawFd;
-use chrono::{Local, DateTime, Utc, TimeZone};
-use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
-use std::io::{Write, Seek, SeekFrom};
+use std::io::{self, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
+use std::os::unix::io::RawFd;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::{Context, Poll};
+
+use anyhow::{bail, format_err, Error};
+use chrono::{Local, DateTime, Utc, TimeZone};
+use futures::future::FutureExt;
+use futures::select;
+use futures::stream::{StreamExt, TryStreamExt};
+use nix::unistd::{fork, ForkResult, pipe};
+use serde_json::{json, Value};
+use tokio::signal::unix::{signal, SignalKind};
+use tokio::sync::mpsc;
+use xdg::BaseDirectories;
+use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::sys::linux::tty;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
-use proxmox_backup::pxar::{ self, catalog::* };
-
-use serde_json::{json, Value};
-//use hyper::Body;
-use std::sync::{Arc, Mutex};
-//use regex::Regex;
-use xdg::BaseDirectories;
-
-use futures::*;
-use tokio::sync::mpsc;
+use proxmox_backup::pxar::catalog::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
skip_lost_and_found: bool,
crypt_config: Option<Arc<CryptConfig>>,
catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
- exclude_pattern: Vec<pxar::MatchPattern>,
+ exclude_pattern: Vec<MatchEntry>,
entries_max: usize,
) -> Result<BackupStats, Error> {
type: Integer,
description: "Max number of entries to hold in memory.",
optional: true,
- default: pxar::ENCODER_MAX_ENTRIES as isize,
+ default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
},
"verbose": {
type: Boolean,
let include_dev = param["include-dev"].as_array();
- let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+ let entries_max = param["entries-max"].as_u64()
+ .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
let empty = Vec::new();
- let arg_pattern = param["exclude"].as_array().unwrap_or(&empty);
-
- let mut pattern_list = Vec::with_capacity(arg_pattern.len());
- for s in arg_pattern {
- let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
- let p = pxar::MatchPattern::from_line(l.as_bytes())?
- .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
- pattern_list.push(p);
+ let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
+
+ let mut exclude_list = Vec::with_capacity(exclude_args.len());
+ for entry in exclude_args {
+ let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
+ exclude_list.push(
+ MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
+ .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
+ );
}
let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
skip_lost_and_found,
crypt_config.clone(),
catalog.clone(),
- pattern_list.clone(),
+ exclude_list.clone(),
entries_max as usize,
).await?;
manifest.add_file(target, stats.size, stats.csum)?;
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
if let Some(target) = target {
-
- let feature_flags = pxar::flags::DEFAULT;
- let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
- decoder.set_callback(move |path| {
- if verbose {
- eprintln!("{:?}", path);
- }
- Ok(())
- });
- decoder.set_allow_existing_dirs(allow_existing_dirs);
-
- decoder.restore(Path::new(target), &Vec::new())?;
+ proxmox_backup::pxar::extract_archive(
+ pxar::decoder::Decoder::from_std(reader)?,
+ Path::new(target),
+ &[],
+ proxmox_backup::pxar::flags::DEFAULT,
+ allow_existing_dirs,
+ |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ },
+ )
+ .map_err(|err| format_err!("error extracting archive - {}", err))?;
} else {
let mut writer = std::fs::OpenOptions::new()
.write(true)
}
}
+use proxmox_backup::client::RemoteChunkReader;
+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+struct BufferedDynamicReadAt {
+ inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
+}
+
+impl BufferedDynamicReadAt {
+ fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
+ Self {
+ inner: Mutex::new(inner),
+ }
+ }
+}
+
+impl pxar::accessor::ReadAt for BufferedDynamicReadAt {
+ fn poll_read_at(
+ self: Pin<&Self>,
+ _cx: &mut Context,
+ buf: &mut [u8],
+ offset: u64,
+ ) -> Poll<io::Result<usize>> {
+ use std::io::Read;
+ tokio::task::block_in_place(move || {
+ let mut reader = self.inner.lock().unwrap();
+ reader.seek(SeekFrom::Start(offset))?;
+ Poll::Ready(Ok(reader.read(buf)?))
+ })
+ }
+}
+
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
let repo = extract_repository_from_value(¶m)?;
let archive_name = tools::required_string_param(¶m, "archive-name")?;
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader);
- let decoder = pxar::Decoder::new(reader)?;
+ let archive_size = reader.archive_size();
+ let reader: proxmox_backup::pxar::fuse::Reader =
+ Arc::new(BufferedDynamicReadAt::new(reader));
+ let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
let options = OsStr::new("ro,default_permissions");
- let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
- .map_err(|err| format_err!("pxar mount failed: {}", err))?;
- // Mount the session but not call fuse deamonize as this will cause
- // issues with the runtime after the fork
- let deamonize = false;
- session.mount(&Path::new(target), deamonize)?;
+ let session = proxmox_backup::pxar::fuse::Session::mount(
+ decoder,
+ &options,
+ false,
+ Path::new(target),
+ )
+ .map_err(|err| format_err!("pxar mount failed: {}", err))?;
if let Some(pipe) = pipe {
nix::unistd::chdir(Path::new("/")).unwrap();
nix::unistd::close(pipe).unwrap();
}
- let multithreaded = true;
- session.run_loop(multithreaded)?;
+ let mut interrupt = signal(SignalKind::interrupt())?;
+ select! {
+ res = session.fuse() => res?,
+ _ = interrupt.recv().fuse() => {
+ // exit on interrupted
+ }
+ }
} else {
bail!("unknown archive file extension (expected .pxar)");
}
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader);
- let mut decoder = pxar::Decoder::new(reader)?;
- decoder.set_callback(|path| {
- println!("{:?}", path);
- Ok(())
- });
+ let archive_size = reader.archive_size();
+ let reader: proxmox_backup::pxar::fuse::Reader =
+ Arc::new(BufferedDynamicReadAt::new(reader));
+ let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)
catalog_reader,
&server_archive_name,
decoder,
- )?;
+ ).await?;
println!("Starting interactive shell");
- state.shell()?;
+ state.shell().await?;
record_repository(&repo);
-extern crate proxmox_backup;
-
-use anyhow::{format_err, Error};
-
-use proxmox::{sortable, identity};
-use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
-use proxmox::api::schema::*;
-use proxmox::api::cli::*;
-
-use proxmox_backup::tools;
-
-use serde_json::{Value};
-
-use std::io::Write;
-use std::path::{Path, PathBuf};
-use std::fs::OpenOptions;
+use std::collections::HashSet;
use std::ffi::OsStr;
+use std::fs::OpenOptions;
use std::os::unix::fs::OpenOptionsExt;
-use std::os::unix::io::AsRawFd;
-use std::collections::HashSet;
-
-use proxmox_backup::pxar;
-
-fn dump_archive_from_reader<R: std::io::Read>(
- reader: &mut R,
- feature_flags: u64,
- verbose: bool,
-) -> Result<(), Error> {
- let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
-
- let stdout = std::io::stdout();
- let mut out = stdout.lock();
-
- let mut path = PathBuf::new();
- decoder.dump_entry(&mut path, verbose, &mut out)?;
-
- Ok(())
-}
-
-fn dump_archive(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+use std::path::{Path, PathBuf};
- let archive = tools::required_string_param(¶m, "archive")?;
- let verbose = param["verbose"].as_bool().unwrap_or(false);
+use anyhow::{format_err, Error};
+use futures::future::FutureExt;
+use futures::select;
+use tokio::signal::unix::{signal, SignalKind};
- let feature_flags = pxar::flags::DEFAULT;
+use pathpatterns::{MatchEntry, MatchType, PatternFlag};
- if archive == "-" {
- let stdin = std::io::stdin();
- let mut reader = stdin.lock();
- dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
- } else {
- if verbose { println!("PXAR dump: {}", archive); }
- let file = std::fs::File::open(archive)?;
- let mut reader = std::io::BufReader::new(file);
- dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
- }
+use proxmox::api::cli::*;
+use proxmox::api::api;
- Ok(Value::Null)
-}
+use proxmox_backup::tools;
+use proxmox_backup::pxar::{flags, fuse, format_single_line_entry, ENCODER_MAX_ENTRIES};
fn extract_archive_from_reader<R: std::io::Read>(
reader: &mut R,
feature_flags: u64,
allow_existing_dirs: bool,
verbose: bool,
- pattern: Option<Vec<pxar::MatchPattern>>
+ match_list: &[MatchEntry],
) -> Result<(), Error> {
- let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
- decoder.set_callback(move |path| {
- if verbose {
- println!("{:?}", path);
- }
- Ok(())
- });
- decoder.set_allow_existing_dirs(allow_existing_dirs);
-
- let pattern = pattern.unwrap_or_else(Vec::new);
- decoder.restore(Path::new(target), &pattern)?;
-
- Ok(())
+ proxmox_backup::pxar::extract_archive(
+ pxar::decoder::Decoder::from_std(reader)?,
+ Path::new(target),
+ &match_list,
+ feature_flags,
+ allow_existing_dirs,
+ |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ },
+ )
}
+#[api(
+ input: {
+ properties: {
+ archive: {
+ description: "Archive name.",
+ },
+ pattern: {
+ description: "List of paths or pattern matching files to restore",
+ type: Array,
+ items: {
+ type: String,
+ description: "Path or pattern matching files to restore.",
+ },
+ optional: true,
+ },
+ target: {
+ description: "Target directory",
+ optional: true,
+ },
+ verbose: {
+ description: "Verbose output.",
+ optional: true,
+ default: false,
+ },
+ "no-xattrs": {
+ description: "Ignore extended file attributes.",
+ optional: true,
+ default: false,
+ },
+ "no-fcaps": {
+ description: "Ignore file capabilities.",
+ optional: true,
+ default: false,
+ },
+ "no-acls": {
+ description: "Ignore access control list entries.",
+ optional: true,
+ default: false,
+ },
+ "allow-existing-dirs": {
+ description: "Allows directories to already exist on restore.",
+ optional: true,
+ default: false,
+ },
+ "files-from": {
+ description: "File containing match pattern for files to restore.",
+ optional: true,
+ },
+ "no-device-nodes": {
+ description: "Ignore device nodes.",
+ optional: true,
+ default: false,
+ },
+ "no-fifos": {
+ description: "Ignore fifos.",
+ optional: true,
+ default: false,
+ },
+ "no-sockets": {
+ description: "Ignore sockets.",
+ optional: true,
+ default: false,
+ },
+ },
+ },
+)]
+/// Extract an archive.
fn extract_archive(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let archive = tools::required_string_param(¶m, "archive")?;
- let target = param["target"].as_str().unwrap_or(".");
- let verbose = param["verbose"].as_bool().unwrap_or(false);
- let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
- let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
- let no_acls = param["no-acls"].as_bool().unwrap_or(false);
- let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
- let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
- let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
- let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
- let files_from = param["files-from"].as_str();
- let empty = Vec::new();
- let arg_pattern = param["pattern"].as_array().unwrap_or(&empty);
-
- let mut feature_flags = pxar::flags::DEFAULT;
+ archive: String,
+ pattern: Option<Vec<String>>,
+ target: Option<String>,
+ verbose: bool,
+ no_xattrs: bool,
+ no_fcaps: bool,
+ no_acls: bool,
+ allow_existing_dirs: bool,
+ files_from: Option<String>,
+ no_device_nodes: bool,
+ no_fifos: bool,
+ no_sockets: bool,
+) -> Result<(), Error> {
+ let mut feature_flags = flags::DEFAULT;
if no_xattrs {
- feature_flags ^= pxar::flags::WITH_XATTRS;
+ feature_flags ^= flags::WITH_XATTRS;
}
if no_fcaps {
- feature_flags ^= pxar::flags::WITH_FCAPS;
+ feature_flags ^= flags::WITH_FCAPS;
}
if no_acls {
- feature_flags ^= pxar::flags::WITH_ACL;
+ feature_flags ^= flags::WITH_ACL;
}
if no_device_nodes {
- feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
+ feature_flags ^= flags::WITH_DEVICE_NODES;
}
if no_fifos {
- feature_flags ^= pxar::flags::WITH_FIFOS;
+ feature_flags ^= flags::WITH_FIFOS;
}
if no_sockets {
- feature_flags ^= pxar::flags::WITH_SOCKETS;
+ feature_flags ^= flags::WITH_SOCKETS;
}
- let mut pattern_list = Vec::new();
- if let Some(filename) = files_from {
- let dir = nix::dir::Dir::open("./", nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty())?;
- if let Some((mut pattern, _, _)) = pxar::MatchPattern::from_file(dir.as_raw_fd(), filename)? {
- pattern_list.append(&mut pattern);
+ let pattern = pattern.unwrap_or_else(Vec::new);
+ let target = target.as_ref().map_or_else(|| ".", String::as_str);
+
+ let mut match_list = Vec::new();
+ if let Some(filename) = &files_from {
+ for line in proxmox_backup::tools::file_get_non_comment_lines(filename)? {
+ let line = line
+ .map_err(|err| format_err!("error reading {}: {}", filename, err))?;
+ match_list.push(
+ MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Include)
+ .map_err(|err| format_err!("bad pattern in file '{}': {}", filename, err))?,
+ );
}
}
- for s in arg_pattern {
- let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
- let p = pxar::MatchPattern::from_line(l.as_bytes())?
- .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
- pattern_list.push(p);
+ for entry in pattern {
+ match_list.push(
+ MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include)
+ .map_err(|err| format_err!("error in pattern: {}", err))?,
+ );
}
- let pattern = if pattern_list.is_empty() {
- None
- } else {
- Some(pattern_list)
- };
-
if archive == "-" {
let stdin = std::io::stdin();
let mut reader = stdin.lock();
- extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
+ extract_archive_from_reader(
+ &mut reader,
+ &target,
+ feature_flags,
+ allow_existing_dirs,
+ verbose,
+ &match_list,
+ )?;
} else {
- if verbose { println!("PXAR extract: {}", archive); }
+ if verbose {
+ println!("PXAR extract: {}", archive);
+ }
let file = std::fs::File::open(archive)?;
let mut reader = std::io::BufReader::new(file);
- extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
+ extract_archive_from_reader(
+ &mut reader,
+ &target,
+ feature_flags,
+ allow_existing_dirs,
+ verbose,
+ &match_list,
+ )?;
}
- Ok(Value::Null)
+ Ok(())
}
+#[api(
+ input: {
+ properties: {
+ archive: {
+ description: "Archive name.",
+ },
+ source: {
+ description: "Source directory.",
+ },
+ verbose: {
+ description: "Verbose output.",
+ optional: true,
+ default: false,
+ },
+ "no-xattrs": {
+ description: "Ignore extended file attributes.",
+ optional: true,
+ default: false,
+ },
+ "no-fcaps": {
+ description: "Ignore file capabilities.",
+ optional: true,
+ default: false,
+ },
+ "no-acls": {
+ description: "Ignore access control list entries.",
+ optional: true,
+ default: false,
+ },
+ "all-file-systems": {
+ description: "Include mounted subdirectories.",
+ optional: true,
+ default: false,
+ },
+ "no-device-nodes": {
+ description: "Ignore device nodes.",
+ optional: true,
+ default: false,
+ },
+ "no-fifos": {
+ description: "Ignore fifos.",
+ optional: true,
+ default: false,
+ },
+ "no-sockets": {
+ description: "Ignore sockets.",
+ optional: true,
+ default: false,
+ },
+ exclude: {
+ description: "List of paths or pattern matching files to exclude.",
+ optional: true,
+ type: Array,
+ items: {
+ description: "Path or pattern matching files to exclude.",
+ type: String,
+ },
+ },
+ "entries-max": {
+ description: "Max number of entries loaded at once into memory",
+ optional: true,
+ default: ENCODER_MAX_ENTRIES as isize,
+ minimum: 0,
+ maximum: std::isize::MAX,
+ },
+ },
+ },
+)]
+/// Create a new .pxar archive.
fn create_archive(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let archive = tools::required_string_param(¶m, "archive")?;
- let source = tools::required_string_param(¶m, "source")?;
- let verbose = param["verbose"].as_bool().unwrap_or(false);
- let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
- let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
- let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
- let no_acls = param["no-acls"].as_bool().unwrap_or(false);
- let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
- let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
- let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
- let empty = Vec::new();
- let exclude_pattern = param["exclude"].as_array().unwrap_or(&empty);
- let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+ archive: String,
+ source: String,
+ verbose: bool,
+ no_xattrs: bool,
+ no_fcaps: bool,
+ no_acls: bool,
+ all_file_systems: bool,
+ no_device_nodes: bool,
+ no_fifos: bool,
+ no_sockets: bool,
+ exclude: Option<Vec<String>>,
+ entries_max: isize,
+) -> Result<(), Error> {
+ let exclude_list = {
+ let input = exclude.unwrap_or_else(Vec::new);
+ let mut exclude = Vec::with_capacity(input.len());
+ for entry in input {
+ exclude.push(
+ MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
+ .map_err(|err| format_err!("error in exclude pattern: {}", err))?,
+ );
+ }
+ exclude
+ };
- let devices = if all_file_systems { None } else { Some(HashSet::new()) };
+ let device_set = if all_file_systems {
+ None
+ } else {
+ Some(HashSet::new())
+ };
let source = PathBuf::from(source);
- let mut dir = nix::dir::Dir::open(
- &source, nix::fcntl::OFlag::O_NOFOLLOW, nix::sys::stat::Mode::empty())?;
+ let dir = nix::dir::Dir::open(
+ &source,
+ nix::fcntl::OFlag::O_NOFOLLOW,
+ nix::sys::stat::Mode::empty(),
+ )?;
let file = OpenOptions::new()
.create_new(true)
.mode(0o640)
.open(archive)?;
- let mut writer = std::io::BufWriter::with_capacity(1024*1024, file);
- let mut feature_flags = pxar::flags::DEFAULT;
+ let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
+ let mut feature_flags = flags::DEFAULT;
if no_xattrs {
- feature_flags ^= pxar::flags::WITH_XATTRS;
+ feature_flags ^= flags::WITH_XATTRS;
}
if no_fcaps {
- feature_flags ^= pxar::flags::WITH_FCAPS;
+ feature_flags ^= flags::WITH_FCAPS;
}
if no_acls {
- feature_flags ^= pxar::flags::WITH_ACL;
+ feature_flags ^= flags::WITH_ACL;
}
if no_device_nodes {
- feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
+ feature_flags ^= flags::WITH_DEVICE_NODES;
}
if no_fifos {
- feature_flags ^= pxar::flags::WITH_FIFOS;
+ feature_flags ^= flags::WITH_FIFOS;
}
if no_sockets {
- feature_flags ^= pxar::flags::WITH_SOCKETS;
- }
-
- let mut pattern_list = Vec::new();
- for s in exclude_pattern {
- let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
- let p = pxar::MatchPattern::from_line(l.as_bytes())?
- .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
- pattern_list.push(p);
+ feature_flags ^= flags::WITH_SOCKETS;
}
- let catalog = None::<&mut pxar::catalog::DummyCatalogWriter>;
- pxar::Encoder::encode(
- source,
- &mut dir,
- &mut writer,
- catalog,
- devices,
- verbose,
- false,
+ let writer = pxar::encoder::sync::StandardWriter::new(writer);
+ proxmox_backup::pxar::create_archive(
+ dir,
+ writer,
+ exclude_list,
feature_flags,
- pattern_list,
+ device_set,
+ true,
+ |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ Ok(())
+ },
entries_max as usize,
+ None,
)?;
- writer.flush()?;
-
- Ok(Value::Null)
+ Ok(())
}
+#[api(
+ input: {
+ properties: {
+ archive: { description: "Archive name." },
+ mountpoint: { description: "Mountpoint for the file system." },
+ verbose: {
+ description: "Verbose output, running in the foreground (for debugging).",
+ optional: true,
+ default: false,
+ },
+ },
+ },
+)]
/// Mount the archive to the provided mountpoint via FUSE.
-fn mount_archive(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
- let archive = tools::required_string_param(¶m, "archive")?;
- let mountpoint = tools::required_string_param(¶m, "mountpoint")?;
- let verbose = param["verbose"].as_bool().unwrap_or(false);
- let no_mt = param["no-mt"].as_bool().unwrap_or(false);
-
- let archive = Path::new(archive);
- let mountpoint = Path::new(mountpoint);
+async fn mount_archive(
+ archive: String,
+ mountpoint: String,
+ verbose: bool,
+) -> Result<(), Error> {
+ let archive = Path::new(&archive);
+ let mountpoint = Path::new(&mountpoint);
let options = OsStr::new("ro,default_permissions");
- let mut session = pxar::fuse::Session::from_path(&archive, &options, verbose)
+
+ let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
+ .await
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
- // Mount the session and deamonize if verbose is not set
- session.mount(&mountpoint, !verbose)?;
- session.run_loop(!no_mt)?;
- Ok(Value::Null)
-}
+ let mut interrupt = signal(SignalKind::interrupt())?;
-#[sortable]
-const API_METHOD_CREATE_ARCHIVE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&create_archive),
- &ObjectSchema::new(
- "Create new .pxar archive.",
- &sorted!([
- (
- "archive",
- false,
- &StringSchema::new("Archive name").schema()
- ),
- (
- "source",
- false,
- &StringSchema::new("Source directory.").schema()
- ),
- (
- "verbose",
- true,
- &BooleanSchema::new("Verbose output.")
- .default(false)
- .schema()
- ),
- (
- "no-xattrs",
- true,
- &BooleanSchema::new("Ignore extended file attributes.")
- .default(false)
- .schema()
- ),
- (
- "no-fcaps",
- true,
- &BooleanSchema::new("Ignore file capabilities.")
- .default(false)
- .schema()
- ),
- (
- "no-acls",
- true,
- &BooleanSchema::new("Ignore access control list entries.")
- .default(false)
- .schema()
- ),
- (
- "all-file-systems",
- true,
- &BooleanSchema::new("Include mounted sudirs.")
- .default(false)
- .schema()
- ),
- (
- "no-device-nodes",
- true,
- &BooleanSchema::new("Ignore device nodes.")
- .default(false)
- .schema()
- ),
- (
- "no-fifos",
- true,
- &BooleanSchema::new("Ignore fifos.")
- .default(false)
- .schema()
- ),
- (
- "no-sockets",
- true,
- &BooleanSchema::new("Ignore sockets.")
- .default(false)
- .schema()
- ),
- (
- "exclude",
- true,
- &ArraySchema::new(
- "List of paths or pattern matching files to exclude.",
- &StringSchema::new("Path or pattern matching files to restore.").schema()
- ).schema()
- ),
- (
- "entries-max",
- true,
- &IntegerSchema::new("Max number of entries loaded at once into memory")
- .default(pxar::ENCODER_MAX_ENTRIES as isize)
- .minimum(0)
- .maximum(std::isize::MAX)
- .schema()
- ),
- ]),
- )
-);
+ select! {
+ res = session.fuse() => res?,
+ _ = interrupt.recv().fuse() => {
+ if verbose {
+ eprintln!("interrupted");
+ }
+ }
+ }
-#[sortable]
-const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&extract_archive),
- &ObjectSchema::new(
- "Extract an archive.",
- &sorted!([
- (
- "archive",
- false,
- &StringSchema::new("Archive name.").schema()
- ),
- (
- "pattern",
- true,
- &ArraySchema::new(
- "List of paths or pattern matching files to restore",
- &StringSchema::new("Path or pattern matching files to restore.").schema()
- ).schema()
- ),
- (
- "target",
- true,
- &StringSchema::new("Target directory.").schema()
- ),
- (
- "verbose",
- true,
- &BooleanSchema::new("Verbose output.")
- .default(false)
- .schema()
- ),
- (
- "no-xattrs",
- true,
- &BooleanSchema::new("Ignore extended file attributes.")
- .default(false)
- .schema()
- ),
- (
- "no-fcaps",
- true,
- &BooleanSchema::new("Ignore file capabilities.")
- .default(false)
- .schema()
- ),
- (
- "no-acls",
- true,
- &BooleanSchema::new("Ignore access control list entries.")
- .default(false)
- .schema()
- ),
- (
- "allow-existing-dirs",
- true,
- &BooleanSchema::new("Allows directories to already exist on restore.")
- .default(false)
- .schema()
- ),
- (
- "files-from",
- true,
- &StringSchema::new("Match pattern for files to restore.").schema()
- ),
- (
- "no-device-nodes",
- true,
- &BooleanSchema::new("Ignore device nodes.")
- .default(false)
- .schema()
- ),
- (
- "no-fifos",
- true,
- &BooleanSchema::new("Ignore fifos.")
- .default(false)
- .schema()
- ),
- (
- "no-sockets",
- true,
- &BooleanSchema::new("Ignore sockets.")
- .default(false)
- .schema()
- ),
- ]),
- )
-);
+ Ok(())
+}
-#[sortable]
-const API_METHOD_MOUNT_ARCHIVE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&mount_archive),
- &ObjectSchema::new(
- "Mount the archive as filesystem via FUSE.",
- &sorted!([
- (
- "archive",
- false,
- &StringSchema::new("Archive name.").schema()
- ),
- (
- "mountpoint",
- false,
- &StringSchema::new("Mountpoint for the filesystem root.").schema()
- ),
- (
- "verbose",
- true,
- &BooleanSchema::new("Verbose output, keeps process running in foreground (for debugging).")
- .default(false)
- .schema()
- ),
- (
- "no-mt",
- true,
- &BooleanSchema::new("Run in single threaded mode (for debugging).")
- .default(false)
- .schema()
- ),
- ]),
- )
-);
+#[api(
+ input: {
+ properties: {
+ archive: {
+ description: "Archive name.",
+ },
+ verbose: {
+ description: "Verbose output.",
+ optional: true,
+ default: false,
+ },
+ },
+ },
+)]
+/// List the contents of an archive.
+fn dump_archive(archive: String, verbose: bool) -> Result<(), Error> {
+ for entry in pxar::decoder::Decoder::open(archive)? {
+ let entry = entry?;
-#[sortable]
-const API_METHOD_DUMP_ARCHIVE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&dump_archive),
- &ObjectSchema::new(
- "List the contents of an archive.",
- &sorted!([
- ( "archive", false, &StringSchema::new("Archive name.").schema()),
- ( "verbose", true, &BooleanSchema::new("Verbose output.")
- .default(false)
- .schema()
- ),
- ])
- )
-);
+ if verbose {
+ println!("{}", format_single_line_entry(&entry));
+ } else {
+ println!("{:?}", entry.path());
+ }
+ }
+ Ok(())
+}
fn main() {
-
let cmd_def = CliCommandMap::new()
- .insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
- .arg_param(&["archive", "source"])
- .completion_cb("archive", tools::complete_file_name)
- .completion_cb("source", tools::complete_file_name)
+ .insert(
+ "create",
+ CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
+ .arg_param(&["archive", "source"])
+ .completion_cb("archive", tools::complete_file_name)
+ .completion_cb("source", tools::complete_file_name),
+ )
+ .insert(
+ "extract",
+ CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
+ .arg_param(&["archive", "target"])
+ .completion_cb("archive", tools::complete_file_name)
+ .completion_cb("target", tools::complete_file_name)
+ .completion_cb("files-from", tools::complete_file_name),
)
- .insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
- .arg_param(&["archive", "target"])
- .completion_cb("archive", tools::complete_file_name)
- .completion_cb("target", tools::complete_file_name)
- .completion_cb("files-from", tools::complete_file_name)
- )
- .insert("mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
- .arg_param(&["archive", "mountpoint"])
- .completion_cb("archive", tools::complete_file_name)
- .completion_cb("mountpoint", tools::complete_file_name)
+ .insert(
+ "mount",
+ CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
+ .arg_param(&["archive", "mountpoint"])
+ .completion_cb("archive", tools::complete_file_name)
+ .completion_cb("mountpoint", tools::complete_file_name),
)
- .insert("list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
- .arg_param(&["archive"])
- .completion_cb("archive", tools::complete_file_name)
+ .insert(
+ "list",
+ CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
+ .arg_param(&["archive"])
+ .completion_cb("archive", tools::complete_file_name),
);
let rpcenv = CliEnvironment::new();
- run_cli_command(cmd_def, rpcenv, None);
+ run_cli_command(cmd_def, rpcenv, Some(|future| {
+ proxmox_backup::tools::runtime::main(future)
+ }));
}
//! This library implements the client side to access the backups
//! server using https.
-pub mod pipe_to_stream;
mod merge_known_chunks;
+pub mod pipe_to_stream;
mod http_client;
-pub use http_client::*;
+pub use http_client::*;
mod task_log;
pub use task_log::*;
mod pxar_backup_stream;
pub use pxar_backup_stream::*;
-mod pxar_decode_writer;
-pub use pxar_decode_writer::*;
-
mod backup_repo;
pub use backup_repo::*;
use anyhow::{format_err, Error};
use futures::stream::Stream;
-
+use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
-use nix::dir::Dir;
-use crate::pxar;
+use pathpatterns::MatchEntry;
+
use crate::backup::CatalogWriter;
/// Stream implementation to encode and upload .pxar archives.
}
impl Drop for PxarBackupStream {
-
fn drop(&mut self) {
self.rx = None;
self.child.take().unwrap().join().unwrap();
}
impl PxarBackupStream {
-
pub fn new<W: Write + Send + 'static>(
- mut dir: Dir,
- path: PathBuf,
+ dir: Dir,
+ _path: PathBuf,
device_set: Option<HashSet<u64>>,
- verbose: bool,
+ _verbose: bool,
skip_lost_and_found: bool,
catalog: Arc<Mutex<CatalogWriter<W>>>,
- exclude_pattern: Vec<pxar::MatchPattern>,
+ exclude_pattern: Vec<MatchEntry>,
entries_max: usize,
) -> Result<Self, Error> {
-
let (tx, rx) = std::sync::mpsc::sync_channel(10);
- let buffer_size = 256*1024;
+ let buffer_size = 256 * 1024;
let error = Arc::new(Mutex::new(None));
- let error2 = error.clone();
-
- let catalog = catalog.clone();
- let child = std::thread::Builder::new().name("PxarBackupStream".to_string()).spawn(move || {
- let mut guard = catalog.lock().unwrap();
- let mut writer = std::io::BufWriter::with_capacity(buffer_size, crate::tools::StdChannelWriter::new(tx));
-
- if let Err(err) = pxar::Encoder::encode(
- path,
- &mut dir,
- &mut writer,
- Some(&mut *guard),
- device_set,
- verbose,
- skip_lost_and_found,
- pxar::flags::DEFAULT,
- exclude_pattern,
- entries_max,
- ) {
- let mut error = error2.lock().unwrap();
- *error = Some(err.to_string());
- }
- })?;
+ let child = std::thread::Builder::new()
+ .name("PxarBackupStream".to_string())
+ .spawn({
+ let error = Arc::clone(&error);
+ move || {
+ let mut catalog_guard = catalog.lock().unwrap();
+ let writer = std::io::BufWriter::with_capacity(
+ buffer_size,
+ crate::tools::StdChannelWriter::new(tx),
+ );
+
+ let writer = pxar::encoder::sync::StandardWriter::new(writer);
+ if let Err(err) = crate::pxar::create_archive(
+ dir,
+ writer,
+ exclude_pattern,
+ crate::pxar::flags::DEFAULT,
+ device_set,
+ skip_lost_and_found,
+ |_| Ok(()),
+ entries_max,
+ Some(&mut *catalog_guard),
+ ) {
+ let mut error = error.lock().unwrap();
+ *error = Some(err.to_string());
+ }
+ }
+ })?;
Ok(Self {
rx: Some(rx),
verbose: bool,
skip_lost_and_found: bool,
catalog: Arc<Mutex<CatalogWriter<W>>>,
- exclude_pattern: Vec<pxar::MatchPattern>,
+ exclude_pattern: Vec<MatchEntry>,
entries_max: usize,
) -> Result<Self, Error> {
-
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
let path = std::path::PathBuf::from(dirname);
- Self::new(dir, path, device_set, verbose, skip_lost_and_found, catalog, exclude_pattern, entries_max)
+ Self::new(
+ dir,
+ path,
+ device_set,
+ verbose,
+ skip_lost_and_found,
+ catalog,
+ exclude_pattern,
+ entries_max,
+ )
}
}
impl Stream for PxarBackupStream {
-
type Item = Result<Vec<u8>, Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
- { // limit lock scope
+ {
+ // limit lock scope
let error = self.error.lock().unwrap();
if let Some(ref msg) = *error {
return Poll::Ready(Some(Err(format_err!("{}", msg))));
+++ /dev/null
-use anyhow::{Error};
-
-use std::thread;
-use std::os::unix::io::FromRawFd;
-use std::path::{Path, PathBuf};
-use std::io::Write;
-
-use crate::pxar;
-
-/// Writer implementation to deccode a .pxar archive (download).
-
-pub struct PxarDecodeWriter {
- pipe: Option<std::fs::File>,
- child: Option<thread::JoinHandle<()>>,
-}
-
-impl Drop for PxarDecodeWriter {
-
- fn drop(&mut self) {
- drop(self.pipe.take());
- self.child.take().unwrap().join().unwrap();
- }
-}
-
-impl PxarDecodeWriter {
-
- pub fn new(base: &Path, verbose: bool) -> Result<Self, Error> {
- let (rx, tx) = nix::unistd::pipe()?;
-
- let base = PathBuf::from(base);
-
- let child = thread::spawn(move|| {
- let mut reader = unsafe { std::fs::File::from_raw_fd(rx) };
- let mut decoder = pxar::SequentialDecoder::new(&mut reader, pxar::flags::DEFAULT);
- decoder.set_callback(move |path| {
- if verbose {
- println!("{:?}", path);
- }
- Ok(())
- });
-
- if let Err(err) = decoder.restore(&base, &Vec::new()) {
- eprintln!("pxar decode failed - {}", err);
- }
- });
-
- let pipe = unsafe { std::fs::File::from_raw_fd(tx) };
-
- Ok(Self { pipe: Some(pipe), child: Some(child) })
- }
-}
-
-impl Write for PxarDecodeWriter {
-
- fn write(&mut self, buffer: &[u8]) -> Result<usize, std::io::Error> {
- let pipe = match self.pipe {
- Some(ref mut pipe) => pipe,
- None => unreachable!(),
- };
- pipe.write(buffer)
- }
-
- fn flush(&mut self) -> Result<(), std::io::Error> {
- let pipe = match self.pipe {
- Some(ref mut pipe) => pipe,
- None => unreachable!(),
- };
- pipe.flush()
- }
-}
//! (user, group, acl, ...) because this is already defined by the
//! linked `ENTRY`.
-mod binary_search_tree;
-pub use binary_search_tree::*;
-
+pub mod catalog;
+pub(crate) mod create;
+pub(crate) mod dir_stack;
+pub(crate) mod extract;
+pub(crate) mod metadata;
pub mod flags;
-pub use flags::*;
-
-mod format_definition;
-pub use format_definition::*;
-
-mod encoder;
-pub use encoder::*;
-
-mod sequential_decoder;
-pub use sequential_decoder::*;
-
-mod decoder;
-pub use decoder::*;
-
-mod match_pattern;
-pub use match_pattern::*;
-
-mod dir_stack;
-pub use dir_stack::*;
-
pub mod fuse;
-pub use fuse::*;
+pub(crate) mod tools;
-pub mod catalog;
+pub use create::create_archive;
+pub use extract::extract_archive;
+
+/// The format requires to build sorted directory lookup tables in
+/// memory, so we restrict the number of allowed entries to limit
+/// maximum memory usage.
+pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
-mod helper;
+pub use tools::{format_multi_line_entry, format_single_line_entry};
+++ /dev/null
-//! Helpers to generate a binary search tree stored in an array from a
-//! sorted array.
-//!
-//! Specifically, for any given sorted array 'input' permute the
-//! array so that the following rule holds:
-//!
-//! For each array item with index i, the item at 2i+1 is smaller and
-//! the item 2i+2 is larger.
-//!
-//! This structure permits efficient (meaning: O(log(n)) binary
-//! searches: start with item i=0 (i.e. the root of the BST), compare
-//! the value with the searched item, if smaller proceed at item
-//! 2i+1, if larger proceed at item 2i+2, and repeat, until either
-//! the item is found, or the indexes grow beyond the array size,
-//! which means the entry does not exist.
-//!
-//! Effectively this implements bisection, but instead of jumping
-//! around wildly in the array during a single search we only search
-//! with strictly monotonically increasing indexes.
-//!
-//! Algorithm is from casync (camakebst.c), simplified and optimized
-//! for rust. Permutation function originally by L. Bressel, 2017. We
-//! pass permutation info to user provided callback, which actually
-//! implements the data copy.
-//!
-//! The Wikipedia Artikel for [Binary
-//! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
-//! intro howto store binary trees using an array.
-
-use std::cmp::Ordering;
-
-#[allow(clippy::many_single_char_names)]
-fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
- copy_func: &mut F,
- // we work on input array input[o..o+n]
- n: usize,
- o: usize,
- e: usize,
- i: usize,
-) {
- let p = 1 << e;
-
- let t = p + (p>>1) - 1;
-
- let m = if n > t {
- // |...........p.............t....n........(2p)|
- p - 1
- } else {
- // |...........p.....n.......t.............(2p)|
- p - 1 - (t-n)
- };
-
- (copy_func)(o+m, i);
-
- if m > 0 {
- copy_binary_search_tree_inner(copy_func, m, o, e-1, i*2+1);
- }
-
- if (m + 1) < n {
- copy_binary_search_tree_inner(copy_func, n-m-1, o+m+1, e-1, i*2+2);
- }
-}
-
-/// This function calls the provided `copy_func()` with the permutation
-/// info.
-///
-/// ```
-/// # use proxmox_backup::pxar::copy_binary_search_tree;
-/// copy_binary_search_tree(5, |src, dest| {
-/// println!("Copy {} to {}", src, dest);
-/// });
-/// ```
-///
-/// This will produce the following output:
-///
-/// ```no-compile
-/// Copy 3 to 0
-/// Copy 1 to 1
-/// Copy 0 to 3
-/// Copy 2 to 4
-/// Copy 4 to 2
-/// ```
-///
-/// So this generates the following permutation: `[3,1,4,0,2]`.
-
-pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
- n: usize,
- mut copy_func: F,
-) {
- if n == 0 { return };
- let e = (64 - n.leading_zeros() - 1) as usize; // fast log2(n)
-
- copy_binary_search_tree_inner(&mut copy_func, n, 0, e, 0);
-}
-
-
-/// This function searches for the index where the comparison by the provided
-/// `compare()` function returns `Ordering::Equal`.
-/// The order of the comparison matters (noncommutative) and should be search
-/// value compared to value at given index as shown in the examples.
-/// The parameter `skip_multiples` defines the number of matches to ignore while
-/// searching before returning the index in order to lookup duplicate entries in
-/// the tree.
-///
-/// ```
-/// # use proxmox_backup::pxar::{copy_binary_search_tree, search_binary_tree_by};
-/// let mut vals = vec![0,1,2,2,2,3,4,5,6,6,7,8,8,8];
-///
-/// let clone = vals.clone();
-/// copy_binary_search_tree(vals.len(), |s, d| {
-/// vals[d] = clone[s];
-/// });
-/// let should_be = vec![5,2,8,1,3,6,8,0,2,2,4,6,7,8];
-/// assert_eq!(vals, should_be);
-///
-/// let find = 8;
-/// let skip_multiples = 0;
-/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(2));
-///
-/// let find = 8;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(2, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(6));
-///
-/// let find = 8;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(6, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(13));
-///
-/// let find = 5;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-///
-/// let find = 5;
-/// let skip_multiples = 0;
-/// // if start index is equal to the array length, `None` is returned.
-/// let idx = search_binary_tree_by(vals.len(), vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-///
-/// let find = 5;
-/// let skip_multiples = 0;
-/// // if start index is larger than length, `None` is returned.
-/// let idx = search_binary_tree_by(vals.len() + 1, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-/// ```
-
-pub fn search_binary_tree_by<F: Copy + Fn(usize) -> Ordering>(
- start: usize,
- size: usize,
- skip_multiples: usize,
- compare: F
-) -> Option<usize> {
- if start >= size {
- return None;
- }
-
- let mut skip = skip_multiples;
- let cmp = compare(start);
- if cmp == Ordering::Equal {
- if skip == 0 {
- // Found matching hash and want this one
- return Some(start);
- }
- // Found matching hash, but we should skip the first `skip_multiple`,
- // so continue search with reduced skip count.
- skip -= 1;
- }
-
- if cmp == Ordering::Less || cmp == Ordering::Equal {
- let res = search_binary_tree_by(2 * start + 1, size, skip, compare);
- if res.is_some() {
- return res;
- }
- }
-
- if cmp == Ordering::Greater || cmp == Ordering::Equal {
- let res = search_binary_tree_by(2 * start + 2, size, skip, compare);
- if res.is_some() {
- return res;
- }
- }
-
- None
-}
-
-#[test]
-fn test_binary_search_tree() {
-
- fn run_test(len: usize) -> Vec<usize> {
-
- const MARKER: usize = 0xfffffff;
- let mut output = vec![];
- for _i in 0..len { output.push(MARKER); }
- copy_binary_search_tree(len, |s, d| {
- assert!(output[d] == MARKER);
- output[d] = s;
- });
- if len < 32 { println!("GOT:{}:{:?}", len, output); }
- for i in 0..len {
- assert!(output[i] != MARKER);
- }
- output
- }
-
- assert!(run_test(0).len() == 0);
- assert!(run_test(1) == [0]);
- assert!(run_test(2) == [1,0]);
- assert!(run_test(3) == [1,0,2]);
- assert!(run_test(4) == [2,1,3,0]);
- assert!(run_test(5) == [3,1,4,0,2]);
- assert!(run_test(6) == [3,1,5,0,2,4]);
- assert!(run_test(7) == [3,1,5,0,2,4,6]);
- assert!(run_test(8) == [4,2,6,1,3,5,7,0]);
- assert!(run_test(9) == [5,3,7,1,4,6,8,0,2]);
- assert!(run_test(10) == [6,3,8,1,5,7,9,0,2,4]);
- assert!(run_test(11) == [7,3,9,1,5,8,10,0,2,4,6]);
- assert!(run_test(12) == [7,3,10,1,5,9,11,0,2,4,6,8]);
- assert!(run_test(13) == [7,3,11,1,5,9,12,0,2,4,6,8,10]);
- assert!(run_test(14) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12]);
- assert!(run_test(15) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12,14]);
- assert!(run_test(16) == [8,4,12,2,6,10,14,1,3,5,7,9,11,13,15,0]);
- assert!(run_test(17) == [9,5,13,3,7,11,15,1,4,6,8,10,12,14,16,0,2]);
-
- for len in 18..1000 {
- run_test(len);
- }
-}
--- /dev/null
+use std::collections::{HashSet, HashMap};
+use std::convert::TryFrom;
+use std::ffi::{CStr, CString, OsStr};
+use std::fmt;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::path::{Path, PathBuf};
+
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::errno::Errno;
+use nix::fcntl::OFlag;
+use nix::sys::stat::{FileStat, Mode};
+
+use pathpatterns::{MatchEntry, MatchList, MatchType, PatternFlag};
+use pxar::Metadata;
+use pxar::encoder::LinkOffset;
+
+use proxmox::sys::error::SysError;
+use proxmox::tools::fd::RawFdNum;
+
+use crate::pxar::catalog::BackupCatalogWriter;
+use crate::pxar::flags;
+use crate::pxar::tools::assert_relative_path;
+use crate::tools::{acl, fs, xattr, Fd};
+
+/// Return the file system magic number (`f_type` from `fstatfs(2)`) for `fd`.
+fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
+    let mut fs_stat = std::mem::MaybeUninit::uninit();
+    // SAFETY: fstatfs fills the buffer on success; we only assume_init() after
+    // Errno::result() confirmed the call succeeded.
+    let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
+    Errno::result(res)?;
+    let fs_stat = unsafe { fs_stat.assume_init() };
+
+    Ok(fs_stat.f_type)
+}
+
+/// Whether `magic` (an `fstatfs(2)` file system magic) denotes a kernel-provided
+/// virtual file system (proc, sysfs, cgroup, ...) whose contents are not backed
+/// by real data and therefore must not be archived.
+pub fn is_virtual_file_system(magic: i64) -> bool {
+    use proxmox::sys::linux::magic::*;
+
+    match magic {
+        BINFMTFS_MAGIC |
+        CGROUP2_SUPER_MAGIC |
+        CGROUP_SUPER_MAGIC |
+        CONFIGFS_MAGIC |
+        DEBUGFS_MAGIC |
+        DEVPTS_SUPER_MAGIC |
+        EFIVARFS_MAGIC |
+        FUSE_CTL_SUPER_MAGIC |
+        HUGETLBFS_MAGIC |
+        MQUEUE_MAGIC |
+        NFSD_MAGIC |
+        PROC_SUPER_MAGIC |
+        PSTOREFS_MAGIC |
+        RPCAUTH_GSSMAGIC |
+        SECURITYFS_MAGIC |
+        SELINUX_MAGIC |
+        SMACK_MAGIC |
+        SYSFS_MAGIC => true,
+        _ => false
+    }
+}
+
+/// Error wrapper attaching the archive path at which the underlying error
+/// occurred; `Archiver::wrap_err` uses the `downcast_ref` check to make sure
+/// an error is only wrapped once (at the innermost failing path).
+#[derive(Debug)]
+struct ArchiveError {
+    path: PathBuf,
+    error: Error,
+}
+
+impl ArchiveError {
+    fn new(path: PathBuf, error: Error) -> Self {
+        Self { path, error }
+    }
+}
+
+impl std::error::Error for ArchiveError {}
+
+impl fmt::Display for ArchiveError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "error at {:?}: {}", self.path, self.error)
+    }
+}
+
+/// Inode identity (device + inode number) used as hash key to detect
+/// hard links to already-archived regular files.
+#[derive(Eq, PartialEq, Hash)]
+struct HardLinkInfo {
+    st_dev: u64,
+    st_ino: u64,
+}
+
+/// Mutable state of a single `create_archive()` run: feature flags of the
+/// file system currently being walked, exclusion patterns, the current path
+/// (for error reporting), entry accounting and the hard-link table.
+struct Archiver<'a, 'b> {
+    /// FIXME: use bitflags!() for feature_flags
+    feature_flags: u64,
+    fs_feature_flags: u64,
+    fs_magic: i64,
+    excludes: &'a [MatchEntry],
+    callback: &'a mut dyn FnMut(&Path) -> Result<(), Error>,
+    catalog: Option<&'b mut dyn BackupCatalogWriter>,
+    // Path of the entry currently being processed, relative to the archive root.
+    path: PathBuf,
+    entry_counter: usize,
+    entry_limit: usize,
+    // st_dev of the directory being walked; used to notice device boundaries.
+    current_st_dev: libc::dev_t,
+    device_set: Option<HashSet<u64>>,
+    // Maps inode identity to (archive path, encoder offset) of the first copy.
+    hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
+}
+
+// Shorthand for the pxar encoder writing through a type-erased sequential writer.
+type Encoder<'a, 'b> = pxar::encoder::Encoder<'a, &'b mut dyn pxar::encoder::SeqWrite>;
+
+/// Create a pxar archive from the contents of `source_dir`, writing the
+/// encoded stream to `writer`.
+///
+/// * `excludes` - match patterns for entries to skip.
+/// * `feature_flags` - requested metadata features (masked per file system).
+/// * `device_set` - if `Some`, only descend into these devices (the source
+///   directory's own device is always added).
+/// * `skip_lost_and_found` - adds an implicit `**/lost+found` exclude.
+/// * `callback` - invoked with each entry's file name before it is archived.
+/// * `entry_limit` - maximum number of entries (bounds memory usage).
+/// * `catalog` - optional catalog writer fed alongside the archive.
+///
+/// Fails immediately when `source_dir` resides on a virtual file system.
+pub fn create_archive<T, F>(
+    source_dir: Dir,
+    mut writer: T,
+    mut excludes: Vec<MatchEntry>,
+    feature_flags: u64,
+    mut device_set: Option<HashSet<u64>>,
+    skip_lost_and_found: bool,
+    mut callback: F,
+    entry_limit: usize,
+    catalog: Option<&mut dyn BackupCatalogWriter>,
+) -> Result<(), Error>
+where
+    T: pxar::encoder::SeqWrite,
+    F: FnMut(&Path) -> Result<(), Error>,
+{
+    let fs_magic = detect_fs_type(source_dir.as_raw_fd())?;
+    if is_virtual_file_system(fs_magic) {
+        bail!("refusing to backup a virtual file system");
+    }
+
+    let fs_feature_flags = flags::feature_flags_from_magic(fs_magic);
+
+    let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?;
+    let metadata = get_metadata(
+        source_dir.as_raw_fd(),
+        &stat,
+        feature_flags & fs_feature_flags,
+        fs_magic,
+    )
+    .map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;
+
+    // The root device always belongs to the archive, even with an explicit set.
+    if let Some(ref mut set) = device_set {
+        set.insert(stat.st_dev);
+    }
+
+    let writer = &mut writer as &mut dyn pxar::encoder::SeqWrite;
+    let mut encoder = Encoder::new(writer, &metadata)?;
+
+    if skip_lost_and_found {
+        excludes.push(MatchEntry::parse_pattern(
+            "**/lost+found",
+            PatternFlag::PATH_NAME,
+            MatchType::Exclude,
+        )?);
+    }
+
+    let mut archiver = Archiver {
+        feature_flags,
+        fs_feature_flags,
+        fs_magic,
+        callback: &mut callback,
+        excludes: &excludes,
+        catalog,
+        path: PathBuf::new(),
+        entry_counter: 0,
+        entry_limit,
+        current_st_dev: stat.st_dev,
+        device_set,
+        hardlinks: HashMap::new(),
+    };
+
+    archiver.archive_dir_contents(&mut encoder, source_dir)?;
+    encoder.finish()?;
+    Ok(())
+}
+
+/// One directory entry collected by `generate_directory_file_list`:
+/// raw file name, full archive-relative path and its lstat result.
+struct FileListEntry {
+    name: CString,
+    path: PathBuf,
+    stat: FileStat,
+}
+
+impl<'a, 'b> Archiver<'a, 'b> {
+    /// Effective feature flags: the requested flags limited to what the
+    /// current file system supports.
+    fn flags(&self) -> u64 {
+        self.feature_flags & self.fs_feature_flags
+    }
+
+    /// Attach the current path to `err` unless it already carries one
+    /// (so the innermost failing path wins).
+    fn wrap_err(&self, err: Error) -> Error {
+        if err.downcast_ref::<ArchiveError>().is_some() {
+            err
+        } else {
+            ArchiveError::new(self.path.clone(), err).into()
+        }
+    }
+
+    /// Archive every entry of `dir` into `encoder`. `path` and
+    /// `entry_counter` are saved and restored around the directory, so the
+    /// counter bounds the entries collected per directory listing.
+    fn archive_dir_contents(&mut self, encoder: &mut Encoder, mut dir: Dir) -> Result<(), Error> {
+        let entry_counter = self.entry_counter;
+
+        let file_list = self.generate_directory_file_list(&mut dir)?;
+
+        let dir_fd = dir.as_raw_fd();
+
+        let old_path = std::mem::take(&mut self.path);
+        for file_entry in file_list {
+            (self.callback)(Path::new(OsStr::from_bytes(file_entry.name.to_bytes())))?;
+            self.path = file_entry.path;
+            self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat)
+                .map_err(|err| self.wrap_err(err))?;
+        }
+        self.path = old_path;
+        self.entry_counter = entry_counter;
+
+        Ok(())
+    }
+
+    /// Read `dir`, drop `.`/`..` and excluded entries, lstat the rest and
+    /// return them sorted by name (the pxar format requires sorted
+    /// directory lookup tables, see the module docs).
+    fn generate_directory_file_list(&mut self, dir: &mut Dir) -> Result<Vec<FileListEntry>, Error> {
+        let dir_fd = dir.as_raw_fd();
+
+        let mut file_list = Vec::new();
+
+        for file in dir.iter() {
+            let file = file?;
+
+            let file_name = file.file_name().to_owned();
+            let file_name_bytes = file_name.to_bytes();
+            if file_name_bytes == b"." || file_name_bytes == b".." {
+                continue;
+            }
+
+            // FIXME: deal with `.pxarexclude-cli`
+
+            if file_name_bytes == b".pxarexclude" {
+                // FIXME: handle this file!
+                continue;
+            }
+
+            let os_file_name = OsStr::from_bytes(file_name_bytes);
+            assert_relative_path(os_file_name)?;
+            let full_path = self.path.join(os_file_name);
+
+            // lstat (AT_SYMLINK_NOFOLLOW): a vanished entry is simply skipped.
+            let stat = match nix::sys::stat::fstatat(
+                dir_fd,
+                file_name.as_c_str(),
+                nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
+            ) {
+                Ok(stat) => stat,
+                Err(ref err) if err.not_found() => continue,
+                Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
+            };
+
+            if self
+                .excludes
+                .matches(full_path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
+                == Some(MatchType::Exclude)
+            {
+                continue;
+            }
+
+            self.entry_counter += 1;
+            if self.entry_counter > self.entry_limit {
+                bail!("exceeded allowed number of file entries (> {})",self.entry_limit);
+            }
+
+            file_list.push(FileListEntry {
+                name: file_name,
+                path: full_path,
+                stat
+            });
+        }
+
+        file_list.sort_unstable_by(|a, b| a.name.cmp(&b.name));
+
+        Ok(file_list)
+    }
+
+    /// Encode a single directory entry: open it, collect its metadata and
+    /// dispatch on the file type. Regular files are deduplicated via the
+    /// hard-link table; directories recurse through `add_directory`.
+    fn add_entry(
+        &mut self,
+        encoder: &mut Encoder,
+        parent: RawFd,
+        c_file_name: &CStr,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        use pxar::format::mode;
+
+        // Only regular files and directories can be opened for reading;
+        // everything else (symlinks, devices, ...) is opened O_PATH.
+        let file_mode = stat.st_mode & libc::S_IFMT;
+        let open_mode = if !(file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR) {
+            OFlag::O_PATH
+        } else {
+            OFlag::empty()
+        };
+
+        let fd = Fd::openat(
+            &unsafe { RawFdNum::from_raw_fd(parent) },
+            c_file_name,
+            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
+            Mode::empty(),
+        )?;
+
+        let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic)?;
+
+        // NOTE(review): this repeats the exclusion check already done in
+        // generate_directory_file_list for the same path/mode.
+        if self
+            .excludes
+            .matches(self.path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
+            == Some(MatchType::Exclude)
+        {
+            return Ok(());
+        }
+
+        let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
+        match metadata.file_type() {
+            mode::IFREG => {
+                let link_info = HardLinkInfo {
+                    st_dev: stat.st_dev,
+                    st_ino: stat.st_ino,
+                };
+
+                // If this inode was already archived, emit a hard link instead
+                // of encoding the payload again.
+                if stat.st_nlink > 1 {
+                    if let Some((path, offset)) = self.hardlinks.get(&link_info) {
+                        if let Some(ref mut catalog) = self.catalog {
+                            catalog.add_hardlink(c_file_name)?;
+                        }
+
+                        encoder.add_hardlink(file_name, path, *offset)?;
+
+                        return Ok(());
+                    }
+                }
+
+                let file_size = stat.st_size as u64;
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_file(c_file_name, file_size, metadata.stat.mtime)?;
+                }
+
+                let offset: LinkOffset =
+                    self.add_regular_file(encoder, fd, file_name, &metadata, file_size)?;
+
+                // Remember the encoder offset so later links can reference it.
+                if stat.st_nlink > 1 {
+                    self.hardlinks.insert(link_info, (self.path.clone(), offset));
+                }
+
+                Ok(())
+            }
+            mode::IFDIR => {
+                let dir = Dir::from_fd(fd.into_raw_fd())?;
+                self.add_directory(encoder, dir, c_file_name, &metadata, stat)
+            }
+            mode::IFSOCK => {
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_socket(c_file_name)?;
+                }
+
+                Ok(encoder.add_socket(&metadata, file_name)?)
+            }
+            mode::IFIFO => {
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_fifo(c_file_name)?;
+                }
+
+                Ok(encoder.add_fifo(&metadata, file_name)?)
+            }
+            mode::IFLNK => {
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_symlink(c_file_name)?;
+                }
+
+                self.add_symlink(encoder, fd, file_name, &metadata)
+            }
+            mode::IFBLK => {
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_block_device(c_file_name)?;
+                }
+
+                self.add_device(encoder, file_name, &metadata, &stat)
+            }
+            mode::IFCHR => {
+                if let Some(ref mut catalog) = self.catalog {
+                    catalog.add_char_device(c_file_name)?;
+                }
+
+                self.add_device(encoder, file_name, &metadata, &stat)
+            }
+            other => bail!(
+                "encountered unknown file type: 0x{:x} (0o{:o})",
+                other,
+                other
+            ),
+        }
+    }
+
+    /// Recurse into a subdirectory. On crossing a device boundary the file
+    /// system magic/feature flags are re-detected; contents are skipped when
+    /// the new device is virtual or not in the configured device set. The
+    /// directory entry itself is always emitted.
+    fn add_directory(
+        &mut self,
+        encoder: &mut Encoder,
+        dir: Dir,
+        dir_name: &CStr,
+        metadata: &Metadata,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        let dir_name = OsStr::from_bytes(dir_name.to_bytes());
+
+        let mut encoder = encoder.create_directory(dir_name, &metadata)?;
+
+        // Save per-file-system state; restored after the recursion below.
+        let old_fs_magic = self.fs_magic;
+        let old_fs_feature_flags = self.fs_feature_flags;
+        let old_st_dev = self.current_st_dev;
+
+        let mut skip_contents = false;
+        if old_st_dev != stat.st_dev {
+            self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
+            self.fs_feature_flags = flags::feature_flags_from_magic(self.fs_magic);
+            self.current_st_dev = stat.st_dev;
+
+            if is_virtual_file_system(self.fs_magic) {
+                skip_contents = true;
+            } else if let Some(set) = &self.device_set {
+                skip_contents = !set.contains(&stat.st_dev);
+            }
+        }
+
+        let result = if skip_contents {
+            Ok(())
+        } else {
+            self.archive_dir_contents(&mut encoder, dir)
+        };
+
+        self.fs_magic = old_fs_magic;
+        self.fs_feature_flags = old_fs_feature_flags;
+        self.current_st_dev = old_st_dev;
+
+        // Close the directory entry even if archiving its contents failed.
+        encoder.finish()?;
+        result
+    }
+
+    /// Stream a regular file's payload into the encoder; returns the offset
+    /// used to reference this file from hard-link entries.
+    fn add_regular_file(
+        &mut self,
+        encoder: &mut Encoder,
+        fd: Fd,
+        file_name: &Path,
+        metadata: &Metadata,
+        file_size: u64,
+    ) -> Result<LinkOffset, Error> {
+        let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
+        let offset = encoder.add_file(metadata, file_name, file_size, &mut file)?;
+        Ok(offset)
+    }
+
+    /// Encode a symlink entry; the empty path makes readlinkat operate on
+    /// the O_PATH fd itself (Linux-specific behavior).
+    fn add_symlink(
+        &mut self,
+        encoder: &mut Encoder,
+        fd: Fd,
+        file_name: &Path,
+        metadata: &Metadata,
+    ) -> Result<(), Error> {
+        let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
+        encoder.add_symlink(metadata, file_name, dest)?;
+        Ok(())
+    }
+
+    /// Encode a block or character device entry from its st_rdev number.
+    fn add_device(
+        &mut self,
+        encoder: &mut Encoder,
+        file_name: &Path,
+        metadata: &Metadata,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        Ok(encoder.add_device(
+            metadata,
+            file_name,
+            pxar::format::Device::from_dev_t(stat.st_rdev),
+        )?)
+    }
+}
+
+/// Build the pxar `Metadata` for an open file: basic stat fields plus
+/// xattrs/fcaps/ACLs, chattr flags, FAT attributes and the quota project id,
+/// each gated by `flags`/`fs_magic` in the helpers below.
+fn get_metadata(fd: RawFd, stat: &FileStat, flags: u64, fs_magic: i64) -> Result<Metadata, Error> {
+    // required for some of these
+    let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
+
+    // mtime is stored as unsigned nanoseconds; files dated before the epoch
+    // are rejected here.
+    let mtime = u64::try_from(stat.st_mtime * 1_000_000_000 + stat.st_mtime_nsec)
+        .map_err(|_| format_err!("file with negative mtime"))?;
+
+    let mut meta = Metadata {
+        stat: pxar::Stat {
+            mode: u64::from(stat.st_mode),
+            flags: 0,
+            uid: stat.st_uid,
+            gid: stat.st_gid,
+            mtime,
+        },
+        ..Default::default()
+    };
+
+    get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags)?;
+    get_chattr(&mut meta, fd)?;
+    get_fat_attr(&mut meta, fd, fs_magic)?;
+    get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
+    Ok(meta)
+}
+
+/// Errnos that mean "this ioctl/feature is not supported here" and should be
+/// treated as "no such attribute" rather than a hard error.
+fn errno_is_unsupported(errno: Errno) -> bool {
+    match errno {
+        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
+        _ => false,
+    }
+}
+
+/// Read the `security.capability` xattr (file capabilities) into `meta.fcaps`.
+/// A no-op unless `WITH_FCAPS` is set; missing/unsupported attributes are
+/// silently ignored.
+fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: u64) -> Result<(), Error> {
+    if 0 == (flags & flags::WITH_FCAPS) {
+        return Ok(());
+    }
+
+    match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) {
+        Ok(data) => {
+            meta.fcaps = Some(pxar::format::FCaps { data });
+            Ok(())
+        }
+        Err(Errno::ENODATA) => Ok(()),
+        Err(Errno::EOPNOTSUPP) => Ok(()),
+        Err(Errno::EBADF) => Ok(()), // symlinks
+        Err(err) => bail!("failed to read file capabilities: {}", err),
+    }
+}
+
+/// Collect extended attributes into `meta.xattrs`. Attribute names that are
+/// really file capabilities or ACLs are delegated to `get_fcaps`/`get_acl`
+/// instead of being stored as plain xattrs. No-op unless `WITH_XATTRS` is set.
+fn get_xattr_fcaps_acl(
+    meta: &mut Metadata,
+    fd: RawFd,
+    proc_path: &Path,
+    flags: u64,
+) -> Result<(), Error> {
+    if 0 == (flags & flags::WITH_XATTRS) {
+        return Ok(());
+    }
+
+    let xattrs = match xattr::flistxattr(fd) {
+        Ok(names) => names,
+        Err(Errno::EOPNOTSUPP) => return Ok(()),
+        Err(Errno::EBADF) => return Ok(()), // symlinks
+        Err(err) => bail!("failed to read xattrs: {}", err),
+    };
+
+    for attr in &xattrs {
+        if xattr::is_security_capability(&attr) {
+            get_fcaps(meta, fd, flags)?;
+            continue;
+        }
+
+        if xattr::is_acl(&attr) {
+            get_acl(meta, proc_path, flags)?;
+            continue;
+        }
+
+        if !xattr::is_valid_xattr_name(&attr) {
+            continue;
+        }
+
+        match xattr::fgetxattr(fd, attr) {
+            Ok(data) => meta
+                .xattrs
+                .push(pxar::format::XAttr::new(attr.to_bytes(), data)),
+            Err(Errno::ENODATA) => (), // it got removed while we were iterating...
+            Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
+            Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
+            Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
+        }
+    }
+
+    Ok(())
+}
+
+/// Read the ext2-style file attributes (chattr/lsattr bits) via ioctl and
+/// merge them into `metadata.stat.flags`; unsupported file systems are
+/// silently skipped.
+fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
+    let mut attr: usize = 0;
+
+    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read file attributes: {}", err),
+    }
+
+    metadata.stat.flags |= flags::feature_flags_from_chattr(attr as u32);
+
+    Ok(())
+}
+
+/// Read FAT-style attributes (hidden/system/archive bits) via ioctl and merge
+/// them into `metadata.stat.flags`; only attempted on msdos/FUSE file systems.
+fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
+    use proxmox::sys::linux::magic::*;
+
+    if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
+        return Ok(());
+    }
+
+    let mut attr: u32 = 0;
+
+    match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read fat attributes: {}", err),
+    }
+
+    metadata.stat.flags |= flags::feature_flags_from_fat_attr(attr);
+
+    Ok(())
+}
+
+/// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
+///
+/// Only applies to directories and regular files with `WITH_QUOTA_PROJID`
+/// set; a project id of 0 (the default) is not recorded.
+fn get_quota_project_id(
+    metadata: &mut Metadata,
+    fd: RawFd,
+    flags: u64,
+    magic: i64,
+) -> Result<(), Error> {
+    if !(metadata.is_dir() || metadata.is_regular_file()) {
+        return Ok(());
+    }
+
+    if 0 == (flags & flags::WITH_QUOTA_PROJID) {
+        return Ok(());
+    }
+
+    use proxmox::sys::linux::magic::*;
+
+    match magic {
+        EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
+        _ => return Ok(()),
+    }
+
+    let mut fsxattr = fs::FSXAttr::default();
+    let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
+
+    // On some FUSE filesystems it can happen that ioctl is not supported.
+    // For these cases projid is set to 0 while the error is ignored.
+    if let Err(err) = res {
+        let errno = err
+            .as_errno()
+            .ok_or_else(|| format_err!("error while reading quota project id"))?;
+        if errno_is_unsupported(errno) {
+            return Ok(());
+        } else {
+            bail!("error while reading quota project id ({})", errno);
+        }
+    }
+
+    let projid = fsxattr.fsx_projid as u64;
+    if projid != 0 {
+        metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
+    }
+    Ok(())
+}
+
+/// Read POSIX ACLs for the entry: access ACL always, plus the default ACL
+/// for directories. Skipped for symlinks or when `WITH_ACL` is unset.
+fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: u64) -> Result<(), Error> {
+    if 0 == (flags & flags::WITH_ACL) {
+        return Ok(());
+    }
+
+    if metadata.is_symlink() {
+        return Ok(());
+    }
+
+    get_acl_do(metadata, proc_path, acl::ACL_TYPE_ACCESS)?;
+
+    if metadata.is_dir() {
+        get_acl_do(metadata, proc_path, acl::ACL_TYPE_DEFAULT)?;
+    }
+
+    Ok(())
+}
+
+/// Fetch one ACL of the given type via the /proc/self/fd path and feed it to
+/// `process_acl`. Endpoints that cannot carry ACLs are silently skipped.
+fn get_acl_do(
+    metadata: &mut Metadata,
+    proc_path: &Path,
+    acl_type: acl::ACLType,
+) -> Result<(), Error> {
+    // In order to be able to get ACLs with type ACL_TYPE_DEFAULT, we have
+    // to create a path for acl_get_file(). acl_get_fd() only allows to get
+    // ACL_TYPE_ACCESS attributes.
+    let acl = match acl::ACL::get_file(&proc_path, acl_type) {
+        Ok(acl) => acl,
+        // Don't bail if underlying endpoint does not support acls
+        Err(Errno::EOPNOTSUPP) => return Ok(()),
+        // Don't bail if the endpoint cannot carry acls
+        Err(Errno::EBADF) => return Ok(()),
+        // Don't bail if there is no data
+        Err(Errno::ENODATA) => return Ok(()),
+        Err(err) => bail!("error while reading ACL - {}", err),
+    };
+
+    process_acl(metadata, acl, acl_type)
+}
+
+/// Convert a system ACL into the pxar metadata representation: user/group
+/// entries are collected and sorted, while the *_OBJ/OTHER/MASK entries are
+/// folded into either the access group-object or the default-ACL record,
+/// depending on `acl_type`.
+fn process_acl(
+    metadata: &mut Metadata,
+    acl: acl::ACL,
+    acl_type: acl::ACLType,
+) -> Result<(), Error> {
+    use pxar::format::acl as pxar_acl;
+    use pxar::format::acl::{Group, GroupObject, Permissions, User};
+
+    let mut acl_user = Vec::new();
+    let mut acl_group = Vec::new();
+    let mut acl_group_obj = None;
+    let mut acl_default = None;
+    let mut user_obj_permissions = None;
+    let mut group_obj_permissions = None;
+    let mut other_permissions = None;
+    let mut mask_permissions = None;
+
+    for entry in &mut acl.entries() {
+        let tag = entry.get_tag_type()?;
+        let permissions = entry.get_permissions()?;
+        match tag {
+            acl::ACL_USER_OBJ => user_obj_permissions = Some(Permissions(permissions)),
+            acl::ACL_GROUP_OBJ => group_obj_permissions = Some(Permissions(permissions)),
+            acl::ACL_OTHER => other_permissions = Some(Permissions(permissions)),
+            acl::ACL_MASK => mask_permissions = Some(Permissions(permissions)),
+            acl::ACL_USER => {
+                acl_user.push(User {
+                    uid: entry.get_qualifier()?,
+                    permissions: Permissions(permissions),
+                });
+            }
+            acl::ACL_GROUP => {
+                acl_group.push(Group {
+                    gid: entry.get_qualifier()?,
+                    permissions: Permissions(permissions),
+                });
+            }
+            _ => bail!("Unexpected ACL tag encountered!"),
+        }
+    }
+
+    // The pxar format stores ACL entries sorted by qualifier.
+    acl_user.sort();
+    acl_group.sort();
+
+    match acl_type {
+        acl::ACL_TYPE_ACCESS => {
+            // The mask permissions are mapped to the stat group permissions
+            // in case that the ACL group permissions were set.
+            // Only in that case we need to store the group permissions,
+            // in the other cases they are identical to the stat group permissions.
+            if let (Some(gop), true) = (group_obj_permissions, mask_permissions.is_some()) {
+                acl_group_obj = Some(GroupObject { permissions: gop });
+            }
+
+            metadata.acl.users = acl_user;
+            metadata.acl.groups = acl_group;
+        }
+        acl::ACL_TYPE_DEFAULT => {
+            if user_obj_permissions != None
+                || group_obj_permissions != None
+                || other_permissions != None
+                || mask_permissions != None
+            {
+                acl_default = Some(pxar_acl::Default {
+                    // The value is set to UINT64_MAX as placeholder if one
+                    // of the permissions is not set
+                    user_obj_permissions: user_obj_permissions.unwrap_or(Permissions::NO_MASK),
+                    group_obj_permissions: group_obj_permissions.unwrap_or(Permissions::NO_MASK),
+                    other_permissions: other_permissions.unwrap_or(Permissions::NO_MASK),
+                    mask_permissions: mask_permissions.unwrap_or(Permissions::NO_MASK),
+                });
+            }
+
+            metadata.acl.default_users = acl_user;
+            metadata.acl.default_groups = acl_group;
+        }
+        _ => bail!("Unexpected ACL type encountered"),
+    }
+
+    metadata.acl.group_obj = acl_group_obj;
+    metadata.acl.default = acl_default;
+
+    Ok(())
+}
+++ /dev/null
-//! *pxar* format decoder for seekable files
-//!
-//! This module contain the code to decode *pxar* archive files.
-
-use std::convert::TryFrom;
-use std::ffi::{OsString, OsStr};
-use std::io::{Read, Seek, SeekFrom};
-use std::path::{Path, PathBuf};
-use std::os::unix::ffi::OsStrExt;
-
-use anyhow::{bail, format_err, Error};
-use libc;
-
-use super::binary_search_tree::search_binary_tree_by;
-use super::format_definition::*;
-use super::sequential_decoder::SequentialDecoder;
-use super::match_pattern::MatchPattern;
-
-use proxmox::tools::io::ReadExt;
-
-pub struct DirectoryEntry {
- /// Points to the `PxarEntry` of the directory
- start: u64,
- /// Points past the goodbye table tail
- end: u64,
- /// Filename of entry
- pub filename: OsString,
- /// Entry (mode, permissions)
- pub entry: PxarEntry,
- /// Extended attributes
- pub xattr: PxarAttributes,
- /// Payload size
- pub size: u64,
- /// Target path for symbolic links
- pub target: Option<PathBuf>,
- /// Start offset of the payload if present.
- pub payload_offset: Option<u64>,
-}
-
-/// Trait to create ReadSeek Decoder trait objects.
-trait ReadSeek: Read + Seek {}
-impl <R: Read + Seek> ReadSeek for R {}
-
-// This one needs Read+Seek
-pub struct Decoder {
- inner: SequentialDecoder<Box<dyn ReadSeek + Send>>,
- root_start: u64,
- root_end: u64,
-}
-
-const HEADER_SIZE: u64 = std::mem::size_of::<PxarHeader>() as u64;
-const GOODBYE_ITEM_SIZE: u64 = std::mem::size_of::<PxarGoodbyeItem>() as u64;
-
-impl Decoder {
- pub fn new<R: Read + Seek + Send + 'static>(mut reader: R) -> Result<Self, Error> {
- let root_end = reader.seek(SeekFrom::End(0))?;
- let boxed_reader: Box<dyn ReadSeek + 'static + Send> = Box::new(reader);
- let inner = SequentialDecoder::new(boxed_reader, super::flags::DEFAULT);
-
- Ok(Self { inner, root_start: 0, root_end })
- }
-
- pub fn set_callback<F: Fn(&Path) -> Result<(), Error> + Send + 'static>(&mut self, callback: F ) {
- self.inner.set_callback(callback);
- }
-
- pub fn root(&mut self) -> Result<DirectoryEntry, Error> {
- self.seek(SeekFrom::Start(0))?;
- let header: PxarHeader = self.inner.read_item()?;
- check_ca_header::<PxarEntry>(&header, PXAR_ENTRY)?;
- let entry: PxarEntry = self.inner.read_item()?;
- let (header, xattr) = self.inner.read_attributes()?;
- let (size, payload_offset) = match header.htype {
- PXAR_PAYLOAD => (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?)),
- _ => (0, None),
- };
-
- Ok(DirectoryEntry {
- start: self.root_start,
- end: self.root_end,
- filename: OsString::new(), // Empty
- entry,
- xattr,
- size,
- target: None,
- payload_offset,
- })
- }
-
- fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
- let pos = self.inner.get_reader_mut().seek(pos)?;
- Ok(pos)
- }
-
- pub(crate) fn root_end_offset(&self) -> u64 {
- self.root_end
- }
-
- /// Restore the subarchive starting at `dir` to the provided target `path`.
- ///
- /// Only restore the content matched by the MatchPattern `pattern`.
- /// An empty Vec `pattern` means restore all.
- pub fn restore(&mut self, dir: &DirectoryEntry, path: &Path, pattern: &Vec<MatchPattern>) -> Result<(), Error> {
- let start = dir.start;
- self.seek(SeekFrom::Start(start))?;
- self.inner.restore(path, pattern)?;
-
- Ok(())
- }
-
- pub(crate) fn read_directory_entry(
- &mut self,
- start: u64,
- end: u64,
- ) -> Result<DirectoryEntry, Error> {
- self.seek(SeekFrom::Start(start))?;
-
- let head: PxarHeader = self.inner.read_item()?;
-
- if head.htype != PXAR_FILENAME {
- bail!("wrong filename header type for object [{}..{}]", start, end);
- }
-
- let entry_start = start + head.size;
-
- let filename = self.inner.read_filename(head.size)?;
-
- let head: PxarHeader = self.inner.read_item()?;
- if head.htype == PXAR_FORMAT_HARDLINK {
- let (_, offset) = self.inner.read_hardlink(head.size)?;
- // TODO: Howto find correct end offset for hardlink target?
- // This is a bit tricky since we cannot find correct end in an efficient
- // way, on the other hand it doesn't really matter (for now) since target
- // is never a directory and end is not used in such cases.
- return self.read_directory_entry(start - offset, end);
- }
- check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
- let entry: PxarEntry = self.inner.read_item()?;
- let (header, xattr) = self.inner.read_attributes()?;
- let (size, payload_offset, target) = match header.htype {
- PXAR_PAYLOAD =>
- (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?), None),
- PXAR_SYMLINK =>
- (header.size - HEADER_SIZE, None, Some(self.inner.read_link(header.size)?)),
- _ => (0, None, None),
- };
-
- Ok(DirectoryEntry {
- start: entry_start,
- end,
- filename,
- entry,
- xattr,
- size,
- target,
- payload_offset,
- })
- }
-
- /// Return the goodbye table based on the provided end offset.
- ///
- /// Get the goodbye table entries and the start and end offsets of the
- /// items they reference.
- /// If the start offset is provided, we use that to check the consistency of
- /// the data, else the start offset calculated based on the goodbye tail is
- /// used.
- pub(crate) fn goodbye_table(
- &mut self,
- start: Option<u64>,
- end: u64,
- ) -> Result<Vec<(PxarGoodbyeItem, u64, u64)>, Error> {
- self.seek(SeekFrom::Start(end - GOODBYE_ITEM_SIZE))?;
-
- let tail: PxarGoodbyeItem = self.inner.read_item()?;
- if tail.hash != PXAR_GOODBYE_TAIL_MARKER {
- bail!("missing goodbye tail marker for object at offset {}", end);
- }
-
- // If the start offset was provided, we use and check based on that.
- // If not, we rely on the offset calculated from the goodbye table entry.
- let start = start.unwrap_or(end - tail.offset - tail.size);
- let goodbye_table_size = tail.size;
- if goodbye_table_size < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
- bail!("short goodbye table size for object [{}..{}]", start, end);
- }
-
- let goodbye_inner_size = goodbye_table_size - HEADER_SIZE - GOODBYE_ITEM_SIZE;
- if (goodbye_inner_size % GOODBYE_ITEM_SIZE) != 0 {
- bail!(
- "wrong goodbye inner table size for entry [{}..{}]",
- start,
- end
- );
- }
-
- let goodbye_start = end - goodbye_table_size;
- if tail.offset != (goodbye_start - start) {
- bail!(
- "wrong offset in goodbye tail marker for entry [{}..{}]",
- start,
- end
- );
- }
-
- self.seek(SeekFrom::Start(goodbye_start))?;
- let head: PxarHeader = self.inner.read_item()?;
- if head.htype != PXAR_GOODBYE {
- bail!(
- "wrong goodbye table header type for entry [{}..{}]",
- start,
- end
- );
- }
-
- if head.size != goodbye_table_size {
- bail!("wrong goodbye table size for entry [{}..{}]", start, end);
- }
-
- let mut gb_entries = Vec::new();
- for i in 0..goodbye_inner_size / GOODBYE_ITEM_SIZE {
- let item: PxarGoodbyeItem = self.inner.read_item()?;
- if item.offset > (goodbye_start - start) {
- bail!(
- "goodbye entry {} offset out of range [{}..{}] {} {} {}",
- i,
- start,
- end,
- item.offset,
- goodbye_start,
- start
- );
- }
- let item_start = goodbye_start - item.offset;
- let item_end = item_start + item.size;
- if item_end > goodbye_start {
- bail!("goodbye entry {} end out of range [{}..{}]", i, start, end);
- }
- gb_entries.push((item, item_start, item_end));
- }
-
- Ok(gb_entries)
- }
-
- pub fn list_dir(&mut self, dir: &DirectoryEntry) -> Result<Vec<DirectoryEntry>, Error> {
- let start = dir.start;
- let end = dir.end;
-
- //println!("list_dir1: {} {}", start, end);
-
- if (end - start) < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
- bail!("detected short object [{}..{}]", start, end);
- }
-
- let mut result = vec![];
- let goodbye_table = self.goodbye_table(Some(start), end)?;
- for (_, item_start, item_end) in goodbye_table {
- let entry = self.read_directory_entry(item_start, item_end)?;
- //println!("ENTRY: {} {} {:?}", item_start, item_end, entry.filename);
- result.push(entry);
- }
-
- Ok(result)
- }
-
- pub fn print_filenames<W: std::io::Write>(
- &mut self,
- output: &mut W,
- prefix: &mut PathBuf,
- dir: &DirectoryEntry,
- ) -> Result<(), Error> {
- let mut list = self.list_dir(dir)?;
-
- list.sort_unstable_by(|a, b| a.filename.cmp(&b.filename));
-
- for item in &list {
- prefix.push(item.filename.clone());
-
- let mode = item.entry.mode as u32;
-
- let ifmt = mode & libc::S_IFMT;
-
- writeln!(output, "{:?}", prefix)?;
-
- match ifmt {
- libc::S_IFDIR => self.print_filenames(output, prefix, item)?,
- libc::S_IFREG | libc::S_IFLNK | libc::S_IFBLK | libc::S_IFCHR => {}
- _ => bail!("unknown item mode/type for {:?}", prefix),
- }
-
- prefix.pop();
- }
-
- Ok(())
- }
-
- /// Lookup the item identified by `filename` in the provided `DirectoryEntry`.
- ///
- /// Calculates the hash of the filename and searches for matching entries in
- /// the goodbye table of the provided `DirectoryEntry`.
- /// If found, also the filename is compared to avoid hash collision.
- /// If the filename does not match, the search resumes with the next entry in
- /// the goodbye table.
- /// If there is no entry with matching `filename`, `Ok(None)` is returned.
- pub fn lookup(
- &mut self,
- dir: &DirectoryEntry,
- filename: &OsStr,
- ) -> Result<Option<DirectoryEntry>, Error> {
- let gbt = self.goodbye_table(Some(dir.start), dir.end)?;
- let hash = compute_goodbye_hash(filename.as_bytes());
-
- let mut start_idx = 0;
- let mut skip_multiple = 0;
- loop {
- // Search for the next goodbye entry with matching hash.
- let idx = search_binary_tree_by(
- start_idx,
- gbt.len(),
- skip_multiple,
- |idx| hash.cmp(&gbt[idx].0.hash),
- );
- let (_item, start, end) = match idx {
- Some(idx) => &gbt[idx],
- None => return Ok(None),
- };
-
- let entry = self.read_directory_entry(*start, *end)?;
-
- // Possible hash collision, need to check if the found entry is indeed
- // the filename to lookup.
- if entry.filename == filename {
- return Ok(Some(entry));
- }
- // Hash collision, check the next entry in the goodbye table by starting
- // from given index but skipping one more match (so hash at index itself).
- start_idx = idx.unwrap();
- skip_multiple = 1;
- }
- }
-
- /// Read the payload of the file given by `entry`.
- ///
- /// This will read a files payload as raw bytes starting from `offset` after
- /// the payload marker, reading `size` bytes.
- /// If the payload from `offset` to EOF is smaller than `size` bytes, the
- /// buffer with reduced size is returned.
- /// If `offset` is larger than the payload size of the `DirectoryEntry`, an
- /// empty buffer is returned.
- pub fn read(&mut self, entry: &DirectoryEntry, size: usize, offset: u64) -> Result<Vec<u8>, Error> {
- let start_offset = entry.payload_offset
- .ok_or_else(|| format_err!("entry has no payload offset"))?;
- if offset >= entry.size {
- return Ok(Vec::new());
- }
- let len = if u64::try_from(size)? > entry.size {
- usize::try_from(entry.size)?
- } else {
- size
- };
- self.seek(SeekFrom::Start(start_offset + offset))?;
- let data = self.inner.get_reader_mut().read_exact_allocated(len)?;
-
- Ok(data)
- }
-}
-use std::ffi::{OsStr, OsString};
+use std::ffi::OsString;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::PathBuf;
-use anyhow::{format_err, Error};
-use nix::errno::Errno;
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-use nix::NixPath;
+use nix::sys::stat::{mkdirat, Mode};
-use super::format_definition::{PxarAttributes, PxarEntry};
+use proxmox::sys::error::SysError;
+use pxar::Metadata;
-pub struct PxarDir {
- pub filename: OsString,
- pub entry: PxarEntry,
- pub attr: PxarAttributes,
- pub dir: Option<nix::dir::Dir>,
-}
+use crate::pxar::tools::{assert_relative_path, perms_from_metadata};
-pub struct PxarDirStack {
- root: RawFd,
- data: Vec<PxarDir>,
+pub struct PxarDir {
+ file_name: OsString,
+ metadata: Metadata,
+ dir: Option<Dir>,
}
impl PxarDir {
- pub fn new(filename: &OsStr, entry: PxarEntry, attr: PxarAttributes) -> Self {
+ pub fn new(file_name: OsString, metadata: Metadata) -> Self {
Self {
- filename: filename.to_os_string(),
- entry,
- attr,
+ file_name,
+ metadata,
dir: None,
}
}
- fn create_dir(&self, parent: RawFd, create_new: bool) -> Result<nix::dir::Dir, nix::Error> {
- let res = self
- .filename
- .with_nix_path(|cstr| unsafe { libc::mkdirat(parent, cstr.as_ptr(), libc::S_IRWXU) })?;
+ pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
+ Self {
+ file_name: OsString::from("."),
+ metadata,
+ dir: Some(dir),
+ }
+ }
- match Errno::result(res) {
- Ok(_) => {}
+ fn create_dir(&mut self, parent: RawFd, allow_existing_dirs: bool) -> Result<RawFd, Error> {
+ match mkdirat(
+ parent,
+ self.file_name.as_os_str(),
+ perms_from_metadata(&self.metadata)?,
+ ) {
+ Ok(()) => (),
Err(err) => {
- if err == nix::Error::Sys(nix::errno::Errno::EEXIST) {
- if create_new {
- return Err(err);
- }
- } else {
- return Err(err);
+ if !(allow_existing_dirs && err.already_exists()) {
+ return Err(err.into());
}
}
}
- let dir = nix::dir::Dir::openat(
+ self.open_dir(parent)
+ }
+
+ fn open_dir(&mut self, parent: RawFd) -> Result<RawFd, Error> {
+ let dir = Dir::openat(
parent,
- self.filename.as_os_str(),
+ self.file_name.as_os_str(),
OFlag::O_DIRECTORY,
Mode::empty(),
)?;
- Ok(dir)
+ let fd = dir.as_raw_fd();
+ self.dir = Some(dir);
+
+ Ok(fd)
}
+
+ pub fn try_as_raw_fd(&self) -> Option<RawFd> {
+ self.dir.as_ref().map(AsRawFd::as_raw_fd)
+ }
+
+ pub fn metadata(&self) -> &Metadata {
+ &self.metadata
+ }
+}
+
+pub struct PxarDirStack {
+ dirs: Vec<PxarDir>,
+ path: PathBuf,
+ created: usize,
}
impl PxarDirStack {
- pub fn new(parent: RawFd) -> Self {
+ pub fn new(root: Dir, metadata: Metadata) -> Self {
Self {
- root: parent,
- data: Vec::new(),
+ dirs: vec![PxarDir::with_dir(root, metadata)],
+ path: PathBuf::from("/"),
+ created: 1, // the root directory exists
}
}
- pub fn push(&mut self, dir: PxarDir) {
- self.data.push(dir);
+ pub fn is_empty(&self) -> bool {
+ self.dirs.is_empty()
}
- pub fn pop(&mut self) -> Option<PxarDir> {
- self.data.pop()
+ pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
+ assert_relative_path(&file_name)?;
+ self.path.push(&file_name);
+ self.dirs.push(PxarDir::new(file_name, metadata));
+ Ok(())
}
- pub fn as_path_buf(&self) -> PathBuf {
- let path: PathBuf = self.data.iter().map(|d| d.filename.clone()).collect();
- path
- }
-
- pub fn last(&self) -> Option<&PxarDir> {
- self.data.last()
+ pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
+ let out = self.dirs.pop();
+ if !self.path.pop() {
+ if self.path.as_os_str() == "/" {
+ // we just finished the root directory, make sure this can only happen once:
+ self.path = PathBuf::new();
+ } else {
+ bail!("lost track of path");
+ }
+ }
+ self.created = self.created.min(self.dirs.len());
+ Ok(out)
}
- pub fn last_mut(&mut self) -> Option<&mut PxarDir> {
- self.data.last_mut()
- }
+ pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<RawFd, Error> {
+ // should not be possible given the way we use it:
+ assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
- pub fn last_dir_fd(&self) -> Option<RawFd> {
- let last_dir = self.data.last()?;
- match &last_dir.dir {
- Some(d) => Some(d.as_raw_fd()),
- None => None,
+ let mut fd = self.dirs[self.created - 1]
+ .try_as_raw_fd()
+ .ok_or_else(|| format_err!("lost track of directory file descriptors"))?;
+ while self.created < self.dirs.len() {
+ fd = self.dirs[self.created].create_dir(fd, allow_existing_dirs)?;
+ self.created += 1;
}
+
+ Ok(fd)
}
- pub fn create_all_dirs(&mut self, create_new: bool) -> Result<RawFd, Error> {
- let mut current_fd = self.root;
- for d in &mut self.data {
- match &d.dir {
- Some(dir) => current_fd = dir.as_raw_fd(),
- None => {
- let dir = d
- .create_dir(current_fd, create_new)
- .map_err(|err| format_err!("create dir failed - {}", err))?;
- current_fd = dir.as_raw_fd();
- d.dir = Some(dir);
- }
- }
- }
+ pub fn root_dir_fd(&self) -> Result<RawFd, Error> {
+ // should not be possible given the way we use it:
+ assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
- Ok(current_fd)
+ self.dirs[0]
+ .try_as_raw_fd()
+ .ok_or_else(|| format_err!("lost track of directory file descriptors"))
}
}
+++ /dev/null
-//! *pxar* format encoder.
-//!
-//! This module contain the code to generate *pxar* archive files.
-use std::collections::{HashMap, HashSet};
-use std::ffi::{CStr, CString};
-use std::io::Write;
-use std::os::unix::ffi::OsStrExt;
-use std::os::unix::io::AsRawFd;
-use std::os::unix::io::RawFd;
-use std::path::{Path, PathBuf};
-
-use endian_trait::Endian;
-use anyhow::{bail, format_err, Error};
-use nix::errno::Errno;
-use nix::fcntl::OFlag;
-use nix::sys::stat::FileStat;
-use nix::sys::stat::Mode;
-use nix::NixPath;
-
-use proxmox::tools::vec;
-
-use super::binary_search_tree::*;
-use super::catalog::BackupCatalogWriter;
-use super::flags;
-use super::format_definition::*;
-use super::helper::*;
-use super::match_pattern::{MatchPattern, MatchPatternSlice, MatchType};
-use crate::tools::acl;
-use crate::tools::fs;
-use crate::tools::xattr;
-
-#[derive(Eq, PartialEq, Hash)]
-struct HardLinkInfo {
- st_dev: u64,
- st_ino: u64,
-}
-
-pub struct Encoder<'a, W: Write, C: BackupCatalogWriter> {
- base_path: PathBuf,
- relative_path: PathBuf,
- writer: &'a mut W,
- writer_pos: usize,
- catalog: Option<&'a mut C>,
- _size: usize,
- file_copy_buffer: Vec<u8>,
- device_set: Option<HashSet<u64>>,
- verbose: bool,
- // Flags set by the user
- feature_flags: u64,
- // Flags signaling features supported by the filesystem
- fs_feature_flags: u64,
- hardlinks: HashMap<HardLinkInfo, (PathBuf, u64)>,
- entry_counter: usize,
- entry_max: usize,
-}
-
-impl<'a, W: Write, C: BackupCatalogWriter> Encoder<'a, W, C> {
- // used for error reporting
- fn full_path(&self) -> PathBuf {
- self.base_path.join(&self.relative_path)
- }
-
- /// Create archive, write result data to ``writer``.
- ///
- /// The ``device_set`` can be use used to limit included mount points.
- ///
- /// - ``None``: include all mount points
- /// - ``Some(set)``: only include devices listed in this set (the
- /// root path device is automathically added to this list, so
- /// you can pass an empty set if you want to archive a single
- /// mount point.)
- pub fn encode(
- path: PathBuf,
- dir: &mut nix::dir::Dir,
- writer: &'a mut W,
- catalog: Option<&'a mut C>,
- device_set: Option<HashSet<u64>>,
- verbose: bool,
- skip_lost_and_found: bool, // fixme: should be a feature flag ??
- feature_flags: u64,
- mut excludes: Vec<MatchPattern>,
- entry_max: usize,
- ) -> Result<(), Error> {
- const FILE_COPY_BUFFER_SIZE: usize = 1024 * 1024;
-
- let mut file_copy_buffer = Vec::with_capacity(FILE_COPY_BUFFER_SIZE);
- unsafe {
- file_copy_buffer.set_len(FILE_COPY_BUFFER_SIZE);
- }
-
- // todo: use scandirat??
-
- let dir_fd = dir.as_raw_fd();
- let stat = nix::sys::stat::fstat(dir_fd)
- .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
-
- if !is_directory(&stat) {
- bail!("got unexpected file type {:?} (not a directory)", path);
- }
-
- let mut device_set = device_set.clone();
- if let Some(ref mut set) = device_set {
- set.insert(stat.st_dev);
- }
-
- let magic = detect_fs_type(dir_fd)?;
-
- if is_virtual_file_system(magic) {
- bail!("backup virtual file systems is disabled!");
- }
-
- let fs_feature_flags = flags::feature_flags_from_magic(magic);
-
- let mut me = Self {
- base_path: path,
- relative_path: PathBuf::new(),
- writer,
- writer_pos: 0,
- catalog,
- _size: 0,
- file_copy_buffer,
- device_set,
- verbose,
- feature_flags,
- fs_feature_flags,
- hardlinks: HashMap::new(),
- entry_counter: 0,
- entry_max,
- };
-
- if verbose {
- println!("{:?}", me.full_path());
- }
-
- if skip_lost_and_found {
- excludes.push(MatchPattern::from_line(b"**/lost+found").unwrap().unwrap());
- }
- let mut exclude_slices = Vec::new();
- for excl in &excludes {
- exclude_slices.push(excl.as_slice());
- }
-
- me.encode_dir(dir, &stat, magic, exclude_slices)?;
-
- Ok(())
- }
-
- fn write(&mut self, buf: &[u8]) -> Result<(), Error> {
- self.writer.write_all(buf)?;
- self.writer_pos += buf.len();
- Ok(())
- }
-
- fn write_item<T: Endian>(&mut self, item: T) -> Result<(), Error> {
- let data = item.to_le();
-
- let buffer = unsafe {
- std::slice::from_raw_parts(&data as *const T as *const u8, std::mem::size_of::<T>())
- };
-
- self.write(buffer)?;
-
- Ok(())
- }
-
- fn flush_copy_buffer(&mut self, size: usize) -> Result<(), Error> {
- self.writer.write_all(&self.file_copy_buffer[..size])?;
- self.writer_pos += size;
- Ok(())
- }
-
- fn write_header(&mut self, htype: u64, size: u64) -> Result<(), Error> {
- let size = size + (std::mem::size_of::<PxarHeader>() as u64);
- self.write_item(PxarHeader { size, htype })?;
-
- Ok(())
- }
-
- fn write_filename(&mut self, name: &CStr) -> Result<(), Error> {
- let buffer = name.to_bytes_with_nul();
- self.write_header(PXAR_FILENAME, buffer.len() as u64)?;
- self.write(buffer)?;
-
- Ok(())
- }
-
- fn create_entry(&self, stat: &FileStat) -> Result<PxarEntry, Error> {
- let mode = if is_symlink(&stat) {
- (libc::S_IFLNK | 0o777) as u64
- } else {
- (stat.st_mode & (libc::S_IFMT | 0o7777)) as u64
- };
-
- let mtime = stat.st_mtime * 1_000_000_000 + stat.st_mtime_nsec;
- if mtime < 0 {
- bail!("got strange mtime ({}) from fstat for {:?}.", mtime, self.full_path());
- }
-
- let entry = PxarEntry {
- mode,
- flags: 0,
- uid: stat.st_uid,
- gid: stat.st_gid,
- mtime: mtime as u64,
- };
-
- Ok(entry)
- }
-
- fn read_chattr(&self, fd: RawFd, entry: &mut PxarEntry) -> Result<(), Error> {
- let mut attr: usize = 0;
-
- let res = unsafe { fs::read_attr_fd(fd, &mut attr) };
- if let Err(err) = res {
- if let nix::Error::Sys(errno) = err {
- if errno_is_unsupported(errno) {
- return Ok(());
- };
- }
- bail!("read_attr_fd failed for {:?} - {}", self.full_path(), err);
- }
-
- let flags = flags::feature_flags_from_chattr(attr as u32);
- entry.flags |= flags;
-
- Ok(())
- }
-
- fn read_fat_attr(&self, fd: RawFd, magic: i64, entry: &mut PxarEntry) -> Result<(), Error> {
- use proxmox::sys::linux::magic::*;
-
- if magic != MSDOS_SUPER_MAGIC && magic != FUSE_SUPER_MAGIC {
- return Ok(());
- }
-
- let mut attr: u32 = 0;
-
- let res = unsafe { fs::read_fat_attr_fd(fd, &mut attr) };
- if let Err(err) = res {
- if let nix::Error::Sys(errno) = err {
- if errno_is_unsupported(errno) {
- return Ok(());
- };
- }
- bail!("read_fat_attr_fd failed for {:?} - {}", self.full_path(), err);
- }
-
- let flags = flags::feature_flags_from_fat_attr(attr);
- entry.flags |= flags;
-
- Ok(())
- }
-
- /// True if all of the given feature flags are set in the Encoder, false otherwise
- fn has_features(&self, feature_flags: u64) -> bool {
- (self.feature_flags & self.fs_feature_flags & feature_flags) == feature_flags
- }
-
- /// True if at least one of the given feature flags is set in the Encoder, false otherwise
- fn has_some_features(&self, feature_flags: u64) -> bool {
- (self.feature_flags & self.fs_feature_flags & feature_flags) != 0
- }
-
- fn read_xattrs(
- &self,
- fd: RawFd,
- stat: &FileStat,
- ) -> Result<(Vec<PxarXAttr>, Option<PxarFCaps>), Error> {
- let mut xattrs = Vec::new();
- let mut fcaps = None;
-
- let flags = flags::WITH_XATTRS | flags::WITH_FCAPS;
- if !self.has_some_features(flags) {
- return Ok((xattrs, fcaps));
- }
- // Should never be called on symlinks, just in case check anyway
- if is_symlink(&stat) {
- return Ok((xattrs, fcaps));
- }
-
- let xattr_names = match xattr::flistxattr(fd) {
- Ok(names) => names,
- // Do not bail if the underlying endpoint does not supports xattrs
- Err(Errno::EOPNOTSUPP) => return Ok((xattrs, fcaps)),
- // Do not bail if the endpoint cannot carry xattrs (such as symlinks)
- Err(Errno::EBADF) => return Ok((xattrs, fcaps)),
- Err(err) => bail!("read_xattrs failed for {:?} - {}", self.full_path(), err),
- };
-
- for name in &xattr_names {
- // Only extract the relevant extended attributes
- if !xattr::is_valid_xattr_name(&name) {
- continue;
- }
-
- let value = match xattr::fgetxattr(fd, name) {
- Ok(value) => value,
- // Vanished between flistattr and getxattr, this is ok, silently ignore
- Err(Errno::ENODATA) => continue,
- Err(err) => bail!("read_xattrs failed for {:?} - {}", self.full_path(), err),
- };
-
- if xattr::is_security_capability(&name) {
- if self.has_features(flags::WITH_FCAPS) {
- // fcaps are stored in own format within the archive
- fcaps = Some(PxarFCaps { data: value });
- }
- } else if self.has_features(flags::WITH_XATTRS) {
- xattrs.push(PxarXAttr {
- name: name.to_bytes().to_vec(),
- value,
- });
- }
- }
- xattrs.sort();
-
- Ok((xattrs, fcaps))
- }
-
- fn read_acl(
- &self,
- fd: RawFd,
- stat: &FileStat,
- acl_type: acl::ACLType,
- ) -> Result<PxarACL, Error> {
- let ret = PxarACL {
- users: Vec::new(),
- groups: Vec::new(),
- group_obj: None,
- default: None,
- };
-
- if !self.has_features(flags::WITH_ACL) {
- return Ok(ret);
- }
- if is_symlink(&stat) {
- return Ok(ret);
- }
- if acl_type == acl::ACL_TYPE_DEFAULT && !is_directory(&stat) {
- bail!("ACL_TYPE_DEFAULT only defined for directories.");
- }
-
- // In order to be able to get ACLs with type ACL_TYPE_DEFAULT, we have
- // to create a path for acl_get_file(). acl_get_fd() only allows to get
- // ACL_TYPE_ACCESS attributes.
- let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
- let acl = match acl::ACL::get_file(&proc_path, acl_type) {
- Ok(acl) => acl,
- // Don't bail if underlying endpoint does not support acls
- Err(Errno::EOPNOTSUPP) => return Ok(ret),
- // Don't bail if the endpoint cannot carry acls
- Err(Errno::EBADF) => return Ok(ret),
- // Don't bail if there is no data
- Err(Errno::ENODATA) => return Ok(ret),
- Err(err) => bail!("error while reading ACL - {}", err),
- };
-
- self.process_acl(acl, acl_type)
- }
-
- fn process_acl(&self, acl: acl::ACL, acl_type: acl::ACLType) -> Result<PxarACL, Error> {
- let mut acl_user = Vec::new();
- let mut acl_group = Vec::new();
- let mut acl_group_obj = None;
- let mut acl_default = None;
- let mut user_obj_permissions = None;
- let mut group_obj_permissions = None;
- let mut other_permissions = None;
- let mut mask_permissions = None;
-
- for entry in &mut acl.entries() {
- let tag = entry.get_tag_type()?;
- let permissions = entry.get_permissions()?;
- match tag {
- acl::ACL_USER_OBJ => user_obj_permissions = Some(permissions),
- acl::ACL_GROUP_OBJ => group_obj_permissions = Some(permissions),
- acl::ACL_OTHER => other_permissions = Some(permissions),
- acl::ACL_MASK => mask_permissions = Some(permissions),
- acl::ACL_USER => {
- acl_user.push(PxarACLUser {
- uid: entry.get_qualifier()?,
- permissions,
- });
- }
- acl::ACL_GROUP => {
- acl_group.push(PxarACLGroup {
- gid: entry.get_qualifier()?,
- permissions,
- });
- }
- _ => bail!("Unexpected ACL tag encountered!"),
- }
- }
-
- acl_user.sort();
- acl_group.sort();
-
- match acl_type {
- acl::ACL_TYPE_ACCESS => {
- // The mask permissions are mapped to the stat group permissions
- // in case that the ACL group permissions were set.
- // Only in that case we need to store the group permissions,
- // in the other cases they are identical to the stat group permissions.
- if let (Some(gop), Some(_)) = (group_obj_permissions, mask_permissions) {
- acl_group_obj = Some(PxarACLGroupObj { permissions: gop });
- }
- }
- acl::ACL_TYPE_DEFAULT => {
- if user_obj_permissions != None
- || group_obj_permissions != None
- || other_permissions != None
- || mask_permissions != None
- {
- acl_default = Some(PxarACLDefault {
- // The value is set to UINT64_MAX as placeholder if one
- // of the permissions is not set
- user_obj_permissions: user_obj_permissions.unwrap_or(std::u64::MAX),
- group_obj_permissions: group_obj_permissions.unwrap_or(std::u64::MAX),
- other_permissions: other_permissions.unwrap_or(std::u64::MAX),
- mask_permissions: mask_permissions.unwrap_or(std::u64::MAX),
- });
- }
- }
- _ => bail!("Unexpected ACL type encountered"),
- }
-
- Ok(PxarACL {
- users: acl_user,
- groups: acl_group,
- group_obj: acl_group_obj,
- default: acl_default,
- })
- }
-
- /// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
- fn read_quota_project_id(
- &self,
- fd: RawFd,
- magic: i64,
- stat: &FileStat,
- ) -> Result<Option<PxarQuotaProjID>, Error> {
- if !(is_directory(&stat) || is_reg_file(&stat)) {
- return Ok(None);
- }
- if !self.has_features(flags::WITH_QUOTA_PROJID) {
- return Ok(None);
- }
-
- use proxmox::sys::linux::magic::*;
-
- match magic {
- EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => {
- let mut fsxattr = fs::FSXAttr::default();
- let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
-
- // On some FUSE filesystems it can happen that ioctl is not supported.
- // For these cases projid is set to 0 while the error is ignored.
- if let Err(err) = res {
- let errno = err.as_errno().ok_or_else(|| {
- format_err!(
- "error while reading quota project id for {:#?}",
- self.full_path()
- )
- })?;
- if errno_is_unsupported(errno) {
- return Ok(None);
- } else {
- bail!(
- "error while reading quota project id for {:#?} - {}",
- self.full_path(),
- errno
- );
- }
- }
-
- let projid = fsxattr.fsx_projid as u64;
- if projid == 0 {
- Ok(None)
- } else {
- Ok(Some(PxarQuotaProjID { projid }))
- }
- }
- _ => Ok(None),
- }
- }
-
- fn write_entry(&mut self, entry: PxarEntry) -> Result<(), Error> {
- self.write_header(PXAR_ENTRY, std::mem::size_of::<PxarEntry>() as u64)?;
- self.write_item(entry)?;
-
- Ok(())
- }
-
- fn write_xattr(&mut self, xattr: PxarXAttr) -> Result<(), Error> {
- let size = xattr.name.len() + xattr.value.len() + 1; // +1 for '\0' separating name and value
- self.write_header(PXAR_XATTR, size as u64)?;
- self.write(xattr.name.as_slice())?;
- self.write(&[0])?;
- self.write(xattr.value.as_slice())?;
-
- Ok(())
- }
-
- fn write_fcaps(&mut self, fcaps: Option<PxarFCaps>) -> Result<(), Error> {
- if let Some(fcaps) = fcaps {
- let size = fcaps.data.len();
- self.write_header(PXAR_FCAPS, size as u64)?;
- self.write(fcaps.data.as_slice())?;
- }
-
- Ok(())
- }
-
- fn write_acl_user(&mut self, acl_user: PxarACLUser) -> Result<(), Error> {
- self.write_header(PXAR_ACL_USER, std::mem::size_of::<PxarACLUser>() as u64)?;
- self.write_item(acl_user)?;
-
- Ok(())
- }
-
- fn write_acl_group(&mut self, acl_group: PxarACLGroup) -> Result<(), Error> {
- self.write_header(PXAR_ACL_GROUP, std::mem::size_of::<PxarACLGroup>() as u64)?;
- self.write_item(acl_group)?;
-
- Ok(())
- }
-
- fn write_acl_group_obj(&mut self, acl_group_obj: PxarACLGroupObj) -> Result<(), Error> {
- self.write_header(
- PXAR_ACL_GROUP_OBJ,
- std::mem::size_of::<PxarACLGroupObj>() as u64,
- )?;
- self.write_item(acl_group_obj)?;
-
- Ok(())
- }
-
- fn write_acl_default(&mut self, acl_default: PxarACLDefault) -> Result<(), Error> {
- self.write_header(
- PXAR_ACL_DEFAULT,
- std::mem::size_of::<PxarACLDefault>() as u64,
- )?;
- self.write_item(acl_default)?;
-
- Ok(())
- }
-
- fn write_acl_default_user(&mut self, acl_default_user: PxarACLUser) -> Result<(), Error> {
- self.write_header(
- PXAR_ACL_DEFAULT_USER,
- std::mem::size_of::<PxarACLUser>() as u64,
- )?;
- self.write_item(acl_default_user)?;
-
- Ok(())
- }
-
- fn write_acl_default_group(&mut self, acl_default_group: PxarACLGroup) -> Result<(), Error> {
- self.write_header(
- PXAR_ACL_DEFAULT_GROUP,
- std::mem::size_of::<PxarACLGroup>() as u64,
- )?;
- self.write_item(acl_default_group)?;
-
- Ok(())
- }
-
- fn write_quota_project_id(&mut self, projid: PxarQuotaProjID) -> Result<(), Error> {
- self.write_header(
- PXAR_QUOTA_PROJID,
- std::mem::size_of::<PxarQuotaProjID>() as u64,
- )?;
- self.write_item(projid)?;
-
- Ok(())
- }
-
- fn write_goodbye_table(
- &mut self,
- goodbye_offset: usize,
- goodbye_items: &mut [PxarGoodbyeItem],
- ) -> Result<(), Error> {
- goodbye_items.sort_unstable_by(|a, b| a.hash.cmp(&b.hash));
-
- let item_count = goodbye_items.len();
-
- let goodbye_table_size = (item_count + 1) * std::mem::size_of::<PxarGoodbyeItem>();
-
- self.write_header(PXAR_GOODBYE, goodbye_table_size as u64)?;
-
- if self.file_copy_buffer.len() < goodbye_table_size {
- let need = goodbye_table_size - self.file_copy_buffer.len();
- self.file_copy_buffer.reserve(need);
- unsafe {
- self.file_copy_buffer
- .set_len(self.file_copy_buffer.capacity());
- }
- }
-
- let buffer = &mut self.file_copy_buffer;
-
- copy_binary_search_tree(item_count, |s, d| {
- let item = &goodbye_items[s];
- let offset = d * std::mem::size_of::<PxarGoodbyeItem>();
- let dest =
- crate::tools::map_struct_mut::<PxarGoodbyeItem>(&mut buffer[offset..]).unwrap();
- dest.offset = u64::to_le(item.offset);
- dest.size = u64::to_le(item.size);
- dest.hash = u64::to_le(item.hash);
- });
-
- // append PxarGoodbyeTail as last item
- let offset = item_count * std::mem::size_of::<PxarGoodbyeItem>();
- let dest = crate::tools::map_struct_mut::<PxarGoodbyeItem>(&mut buffer[offset..]).unwrap();
- dest.offset = u64::to_le(goodbye_offset as u64);
- dest.size = u64::to_le((goodbye_table_size + std::mem::size_of::<PxarHeader>()) as u64);
- dest.hash = u64::to_le(PXAR_GOODBYE_TAIL_MARKER);
-
- self.flush_copy_buffer(goodbye_table_size)?;
-
- Ok(())
- }
-
- fn encode_dir(
- &mut self,
- dir: &mut nix::dir::Dir,
- dir_stat: &FileStat,
- magic: i64,
- match_pattern: Vec<MatchPatternSlice>,
- ) -> Result<(), Error> {
- //println!("encode_dir: {:?} start {}", self.full_path(), self.writer_pos);
-
- let mut name_list = Vec::new();
-
- let rawfd = dir.as_raw_fd();
-
- let dir_start_pos = self.writer_pos;
-
- let is_root = dir_start_pos == 0;
-
- let mut dir_entry = self.create_entry(&dir_stat)?;
-
- self.read_chattr(rawfd, &mut dir_entry)?;
- self.read_fat_attr(rawfd, magic, &mut dir_entry)?;
-
- // for each node in the directory tree, the filesystem features are
- // checked based on the fs magic number.
- self.fs_feature_flags = flags::feature_flags_from_magic(magic);
-
- let (xattrs, fcaps) = self.read_xattrs(rawfd, &dir_stat)?;
- let acl_access = self.read_acl(rawfd, &dir_stat, acl::ACL_TYPE_ACCESS)?;
- let acl_default = self.read_acl(rawfd, &dir_stat, acl::ACL_TYPE_DEFAULT)?;
- let projid = self.read_quota_project_id(rawfd, magic, &dir_stat)?;
-
- self.write_entry(dir_entry)?;
- for xattr in xattrs {
- self.write_xattr(xattr)?;
- }
- self.write_fcaps(fcaps)?;
-
- for user in acl_access.users {
- self.write_acl_user(user)?;
- }
- for group in acl_access.groups {
- self.write_acl_group(group)?;
- }
- if let Some(group_obj) = acl_access.group_obj {
- self.write_acl_group_obj(group_obj)?;
- }
-
- for default_user in acl_default.users {
- self.write_acl_default_user(default_user)?;
- }
- for default_group in acl_default.groups {
- self.write_acl_default_group(default_group)?;
- }
- if let Some(default) = acl_default.default {
- self.write_acl_default(default)?;
- }
- if let Some(projid) = projid {
- self.write_quota_project_id(projid)?;
- }
-
- let include_children;
- if is_virtual_file_system(magic) {
- include_children = false;
- } else if let Some(set) = &self.device_set {
- include_children = set.contains(&dir_stat.st_dev);
- } else {
- include_children = true;
- }
-
- // Expand the exclude match pattern inherited from the parent by local entries, if present
- let mut local_match_pattern = match_pattern.clone();
- let (pxar_exclude, excludes) = match MatchPattern::from_file(rawfd, ".pxarexclude") {
- Ok(Some((excludes, buffer, stat))) => {
- (Some((buffer, stat)), excludes)
- }
- Ok(None) => (None, Vec::new()),
- Err(nix::Error::Sys(Errno::EACCES)) => {
- // No permission to read .pxarexclude, ignore its contents.
- eprintln!(
- "ignoring match patterns in {:?}: open file failed - EACCES",
- self.full_path().join(".pxarexclude"),
- );
- (None, Vec::new())
- }
- Err(err) => bail!("error while reading exclude file - {}", err),
- };
- for excl in &excludes {
- local_match_pattern.push(excl.as_slice());
- }
-
- if include_children {
- // Exclude patterns passed via the CLI are stored as '.pxarexclude-cli'
- // in the root directory of the archive.
- if is_root && !match_pattern.is_empty() {
- let filename = CString::new(".pxarexclude-cli")?;
- name_list.push((filename, *dir_stat, match_pattern.clone()));
- }
-
- for entry in dir.iter() {
- let entry = entry
- .map_err(|err| format_err!("readir {:?} failed - {}", self.full_path(), err))?;
- let filename = entry.file_name().to_owned();
-
- let name = filename.to_bytes_with_nul();
- if name == b".\0" || name == b"..\0" {
- continue;
- }
- // Do not store a ".pxarexclude-cli" file found in the archive root,
- // as this would confilict with new cli passed exclude patterns,
- // if present.
- if is_root && name == b".pxarexclude-cli\0" {
- eprintln!("skip existing '.pxarexclude-cli' in archive root.");
- continue;
- }
-
- let stat = match nix::sys::stat::fstatat(
- rawfd,
- filename.as_ref(),
- nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
- ) {
- Ok(stat) => stat,
- Err(nix::Error::Sys(Errno::ENOENT)) => {
- let filename_osstr = std::ffi::OsStr::from_bytes(filename.to_bytes());
- self.report_vanished_file(&self.full_path().join(filename_osstr))?;
- continue;
- }
- Err(err) => bail!("fstat {:?} failed - {}", self.full_path(), err),
- };
-
- match MatchPatternSlice::match_filename_exclude(
- &filename,
- is_directory(&stat),
- &local_match_pattern,
- )? {
- (MatchType::Positive, _) => {
- let filename_osstr = std::ffi::OsStr::from_bytes(filename.to_bytes());
- eprintln!(
- "matched by exclude pattern - skipping: {:?}",
- self.full_path().join(filename_osstr)
- );
- }
- (_, child_pattern) => {
- self.entry_counter += 1;
- name_list.push((filename, stat, child_pattern));
- }
- }
-
- if self.entry_counter > self.entry_max {
- bail!(
- "exceeded max number of entries (> {})",
- self.entry_max
- );
- }
- }
- } else {
- eprintln!("skip mount point: {:?}", self.full_path());
- }
-
- name_list.sort_unstable_by(|a, b| a.0.cmp(&b.0));
- let num_entries = name_list.len();
-
- let mut goodbye_items = Vec::with_capacity(num_entries);
-
- for (filename, stat, exclude_list) in name_list {
- let start_pos = self.writer_pos;
-
- if filename.as_bytes() == b".pxarexclude" {
- // pxar_exclude is none in case of error EACCES.
- if let Some((ref content, ref stat)) = pxar_exclude {
- let filefd = match nix::fcntl::openat(
- rawfd,
- filename.as_ref(),
- OFlag::O_NOFOLLOW,
- Mode::empty(),
- ) {
- Ok(filefd) => filefd,
- Err(nix::Error::Sys(Errno::ENOENT)) => {
- self.report_vanished_file(&self.full_path())?;
- continue;
- }
- Err(nix::Error::Sys(Errno::EACCES)) => {
- let filename_osstr = std::ffi::OsStr::from_bytes(filename.to_bytes());
- eprintln!(
- "skipping {:?}: open file failed - EACCES",
- self.full_path().join(filename_osstr),
- );
- continue;
- }
- Err(err) => {
- let filename_osstr = std::ffi::OsStr::from_bytes(filename.to_bytes());
- bail!(
- "open file {:?} failed - {}",
- self.full_path().join(filename_osstr),
- err
- );
- }
- };
-
- let child_magic = if dir_stat.st_dev != stat.st_dev {
- detect_fs_type(filefd)?
- } else {
- magic
- };
-
- self.write_filename(&filename)?;
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_file(&filename, stat.st_size as u64, stat.st_mtime as u64)?;
- }
- self.encode_pxar_exclude(filefd, stat, child_magic, content)?;
- }
- continue;
- }
-
- if is_root && filename.as_bytes() == b".pxarexclude-cli" {
- // '.pxarexclude-cli' is used to store the exclude MatchPatterns
- // passed via the cli in the root directory of the archive.
- self.write_filename(&filename)?;
- let content = MatchPatternSlice::to_bytes(&exclude_list);
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_file(&filename, content.len() as u64, 0)?;
- }
- self.encode_pxar_exclude_cli(stat.st_uid, stat.st_gid, 0, &content)?;
- continue;
- }
-
- self.relative_path
- .push(std::ffi::OsStr::from_bytes(filename.as_bytes()));
-
- if self.verbose {
- println!("{:?}", self.full_path());
- }
-
- if is_directory(&stat) {
- let mut dir = match nix::dir::Dir::openat(
- rawfd,
- filename.as_ref(),
- OFlag::O_DIRECTORY | OFlag::O_NOFOLLOW,
- Mode::empty(),
- ) {
- Ok(dir) => dir,
- Err(nix::Error::Sys(Errno::ENOENT)) => {
- self.report_vanished_file(&self.full_path())?;
- self.relative_path.pop();
- continue;
- }
- Err(nix::Error::Sys(Errno::EACCES)) => {
- eprintln!(
- "skipping {:?}: open dir failed - EACCES",
- self.full_path(),
- );
- self.relative_path.pop();
- continue;
- }
- Err(err) => bail!("open dir {:?} failed - {}", self.full_path(), err),
- };
-
- let child_magic = if dir_stat.st_dev != stat.st_dev {
- detect_fs_type(dir.as_raw_fd())?
- } else {
- magic
- };
-
- self.write_filename(&filename)?;
- if let Some(ref mut catalog) = self.catalog {
- catalog.start_directory(&filename)?;
- }
- self.encode_dir(&mut dir, &stat, child_magic, exclude_list)?;
- if let Some(ref mut catalog) = self.catalog {
- catalog.end_directory()?;
- }
- } else if is_reg_file(&stat) {
- let mut hardlink_target = None;
-
- if stat.st_nlink > 1 {
- let link_info = HardLinkInfo {
- st_dev: stat.st_dev,
- st_ino: stat.st_ino,
- };
- hardlink_target = self.hardlinks.get(&link_info).map(|(v, offset)| {
- let mut target = v.clone().into_os_string();
- target.push("\0"); // add Nul byte
- (target, (start_pos as u64) - offset)
- });
- if hardlink_target == None {
- self.hardlinks
- .insert(link_info, (self.relative_path.clone(), start_pos as u64));
- }
- }
-
- if let Some((target, offset)) = hardlink_target {
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_hardlink(&filename)?;
- }
- self.write_filename(&filename)?;
- self.encode_hardlink(target.as_bytes(), offset)?;
- } else {
- let filefd = match nix::fcntl::openat(
- rawfd,
- filename.as_ref(),
- OFlag::O_NOFOLLOW,
- Mode::empty(),
- ) {
- Ok(filefd) => filefd,
- Err(nix::Error::Sys(Errno::ENOENT)) => {
- self.report_vanished_file(&self.full_path())?;
- self.relative_path.pop();
- continue;
- }
- Err(nix::Error::Sys(Errno::EACCES)) => {
- eprintln!(
- "skipping {:?}: open file failed - EACCES",
- self.full_path(),
- );
- self.relative_path.pop();
- continue;
- }
- Err(err) => bail!("open file {:?} failed - {}", self.full_path(), err),
- };
-
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_file(&filename, stat.st_size as u64, stat.st_mtime as u64)?;
- }
- let child_magic = if dir_stat.st_dev != stat.st_dev {
- detect_fs_type(filefd)?
- } else {
- magic
- };
-
- self.write_filename(&filename)?;
- let res = self.encode_file(filefd, &stat, child_magic);
- let _ = nix::unistd::close(filefd); // ignore close errors
- res?;
- }
- } else if is_symlink(&stat) {
- let mut buffer = vec::undefined(libc::PATH_MAX as usize);
-
- let res = filename.with_nix_path(|cstr| unsafe {
- libc::readlinkat(
- rawfd,
- cstr.as_ptr(),
- buffer.as_mut_ptr() as *mut libc::c_char,
- buffer.len() - 1,
- )
- })?;
-
- match Errno::result(res) {
- Ok(len) => {
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_symlink(&filename)?;
- }
- buffer[len as usize] = 0u8; // add Nul byte
- self.write_filename(&filename)?;
- self.encode_symlink(&buffer[..((len + 1) as usize)], &stat)?
- }
- Err(nix::Error::Sys(Errno::ENOENT)) => {
- self.report_vanished_file(&self.full_path())?;
- self.relative_path.pop();
- continue;
- }
- Err(err) => bail!("readlink {:?} failed - {}", self.full_path(), err),
- }
- } else if is_block_dev(&stat) || is_char_dev(&stat) {
- if self.has_features(flags::WITH_DEVICE_NODES) {
- if let Some(ref mut catalog) = self.catalog {
- if is_block_dev(&stat) {
- catalog.add_block_device(&filename)?;
- } else {
- catalog.add_char_device(&filename)?;
- }
- }
- self.write_filename(&filename)?;
- self.encode_device(&stat)?;
- } else {
- eprintln!("skip device node: {:?}", self.full_path());
- self.relative_path.pop();
- continue;
- }
- } else if is_fifo(&stat) {
- if self.has_features(flags::WITH_FIFOS) {
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_fifo(&filename)?;
- }
- self.write_filename(&filename)?;
- self.encode_special(&stat)?;
- } else {
- eprintln!("skip fifo: {:?}", self.full_path());
- self.relative_path.pop();
- continue;
- }
- } else if is_socket(&stat) {
- if self.has_features(flags::WITH_SOCKETS) {
- if let Some(ref mut catalog) = self.catalog {
- catalog.add_socket(&filename)?;
- }
- self.write_filename(&filename)?;
- self.encode_special(&stat)?;
- } else {
- eprintln!("skip socket: {:?}", self.full_path());
- self.relative_path.pop();
- continue;
- }
- } else {
- bail!(
- "unsupported file type (mode {:o} {:?})",
- stat.st_mode,
- self.full_path()
- );
- }
-
- let end_pos = self.writer_pos;
-
- goodbye_items.push(PxarGoodbyeItem {
- offset: start_pos as u64,
- size: (end_pos - start_pos) as u64,
- hash: compute_goodbye_hash(filename.to_bytes()),
- });
-
- self.relative_path.pop();
- }
-
- //println!("encode_dir: {:?} end {}", self.full_path(), self.writer_pos);
-
- // fixup goodby item offsets
- let goodbye_start = self.writer_pos as u64;
- for item in &mut goodbye_items {
- item.offset = goodbye_start - item.offset;
- }
-
- let goodbye_offset = self.writer_pos - dir_start_pos;
-
- self.write_goodbye_table(goodbye_offset, &mut goodbye_items)?;
- self.entry_counter -= num_entries;
-
- //println!("encode_dir: {:?} end1 {}", self.full_path(), self.writer_pos);
- Ok(())
- }
-
- fn encode_file(&mut self, filefd: RawFd, stat: &FileStat, magic: i64) -> Result<(), Error> {
- //println!("encode_file: {:?}", self.full_path());
-
- let mut entry = self.create_entry(&stat)?;
-
- self.read_chattr(filefd, &mut entry)?;
- self.read_fat_attr(filefd, magic, &mut entry)?;
- let (xattrs, fcaps) = self.read_xattrs(filefd, &stat)?;
- let acl_access = self.read_acl(filefd, &stat, acl::ACL_TYPE_ACCESS)?;
- let projid = self.read_quota_project_id(filefd, magic, &stat)?;
-
- self.write_entry(entry)?;
- for xattr in xattrs {
- self.write_xattr(xattr)?;
- }
- self.write_fcaps(fcaps)?;
- for user in acl_access.users {
- self.write_acl_user(user)?;
- }
- for group in acl_access.groups {
- self.write_acl_group(group)?;
- }
- if let Some(group_obj) = acl_access.group_obj {
- self.write_acl_group_obj(group_obj)?;
- }
- if let Some(projid) = projid {
- self.write_quota_project_id(projid)?;
- }
-
- let include_payload;
- if is_virtual_file_system(magic) {
- include_payload = false;
- } else if let Some(ref set) = &self.device_set {
- include_payload = set.contains(&stat.st_dev);
- } else {
- include_payload = true;
- }
-
- if !include_payload {
- eprintln!("skip content: {:?}", self.full_path());
- self.write_header(PXAR_PAYLOAD, 0)?;
- return Ok(());
- }
-
- let size = stat.st_size as u64;
-
- self.write_header(PXAR_PAYLOAD, size)?;
-
- let mut pos: u64 = 0;
- loop {
- let n = match nix::unistd::read(filefd, &mut self.file_copy_buffer) {
- Ok(n) => n,
- Err(nix::Error::Sys(Errno::EINTR)) => continue, /* try again */
- Err(err) => bail!("read {:?} failed - {}", self.full_path(), err),
- };
- if n == 0 { // EOF
- if pos != size {
- // Note:: casync format cannot handle that
- bail!(
- "detected shrunk file {:?} ({} < {})",
- self.full_path(),
- pos,
- size
- );
- }
- break;
- }
-
- let mut next = pos + (n as u64);
-
- if next > size {
- next = size;
- }
-
- let count = (next - pos) as usize;
-
- self.flush_copy_buffer(count)?;
-
- pos = next;
-
- if pos >= size {
- break;
- }
- }
-
- Ok(())
- }
-
- fn encode_device(&mut self, stat: &FileStat) -> Result<(), Error> {
- let entry = self.create_entry(&stat)?;
-
- self.write_entry(entry)?;
-
- let major = unsafe { libc::major(stat.st_rdev) } as u64;
- let minor = unsafe { libc::minor(stat.st_rdev) } as u64;
-
- //println!("encode_device: {:?} {} {} {}", self.full_path(), stat.st_rdev, major, minor);
-
- self.write_header(PXAR_DEVICE, std::mem::size_of::<PxarDevice>() as u64)?;
- self.write_item(PxarDevice { major, minor })?;
-
- Ok(())
- }
-
- // FIFO or Socket
- fn encode_special(&mut self, stat: &FileStat) -> Result<(), Error> {
- let entry = self.create_entry(&stat)?;
-
- self.write_entry(entry)?;
-
- Ok(())
- }
-
- fn encode_symlink(&mut self, target: &[u8], stat: &FileStat) -> Result<(), Error> {
- //println!("encode_symlink: {:?} -> {:?}", self.full_path(), target);
-
- let entry = self.create_entry(&stat)?;
- self.write_entry(entry)?;
-
- self.write_header(PXAR_SYMLINK, target.len() as u64)?;
- self.write(target)?;
-
- Ok(())
- }
-
- fn encode_hardlink(&mut self, target: &[u8], offset: u64) -> Result<(), Error> {
- //println!("encode_hardlink: {:?} -> {:?}", self.full_path(), target);
-
- // Note: HARDLINK replaces an ENTRY.
- self.write_header(PXAR_FORMAT_HARDLINK, (target.len() as u64) + 8)?;
- self.write_item(offset)?;
- self.write(target)?;
-
- Ok(())
- }
-
- fn encode_pxar_exclude(
- &mut self,
- filefd: RawFd,
- stat: &FileStat,
- magic: i64,
- content: &[u8],
- ) -> Result<(), Error> {
- let mut entry = self.create_entry(&stat)?;
-
- self.read_chattr(filefd, &mut entry)?;
- self.read_fat_attr(filefd, magic, &mut entry)?;
- let (xattrs, fcaps) = self.read_xattrs(filefd, &stat)?;
- let acl_access = self.read_acl(filefd, &stat, acl::ACL_TYPE_ACCESS)?;
- let projid = self.read_quota_project_id(filefd, magic, &stat)?;
-
- self.write_entry(entry)?;
- for xattr in xattrs {
- self.write_xattr(xattr)?;
- }
- self.write_fcaps(fcaps)?;
- for user in acl_access.users {
- self.write_acl_user(user)?;
- }
- for group in acl_access.groups {
- self.write_acl_group(group)?;
- }
- if let Some(group_obj) = acl_access.group_obj {
- self.write_acl_group_obj(group_obj)?;
- }
- if let Some(projid) = projid {
- self.write_quota_project_id(projid)?;
- }
-
- let include_payload;
- if is_virtual_file_system(magic) {
- include_payload = false;
- } else if let Some(set) = &self.device_set {
- include_payload = set.contains(&stat.st_dev);
- } else {
- include_payload = true;
- }
-
- if !include_payload {
- eprintln!("skip content: {:?}", self.full_path());
- self.write_header(PXAR_PAYLOAD, 0)?;
- return Ok(());
- }
-
- let size = content.len();
- self.write_header(PXAR_PAYLOAD, size as u64)?;
- self.writer.write_all(content)?;
- self.writer_pos += size;
-
- Ok(())
- }
-
- /// Encodes the excude match patterns passed via cli as file in the archive.
- fn encode_pxar_exclude_cli(
- &mut self,
- uid: u32,
- gid: u32,
- mtime: u64,
- content: &[u8],
- ) -> Result<(), Error> {
- let entry = PxarEntry {
- mode: (libc::S_IFREG | 0o600) as u64,
- flags: 0,
- uid,
- gid,
- mtime,
- };
- self.write_entry(entry)?;
- let size = content.len();
- self.write_header(PXAR_PAYLOAD, size as u64)?;
- self.writer.write_all(content)?;
- self.writer_pos += size;
-
- Ok(())
- }
-
- // the report_XXX method may raise and error - depending on encoder configuration
-
- fn report_vanished_file(&self, path: &Path) -> Result<(), Error> {
- eprintln!("WARNING: detected vanished file {:?}", path);
-
- Ok(())
- }
-}
-
-fn errno_is_unsupported(errno: Errno) -> bool {
- match errno {
- Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
- _ => false,
- }
-}
-
-fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
- let mut fs_stat = std::mem::MaybeUninit::uninit();
- let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
- Errno::result(res)?;
- let fs_stat = unsafe { fs_stat.assume_init() };
-
- Ok(fs_stat.f_type)
-}
-
-#[inline(always)]
-pub fn is_temporary_file_system(magic: i64) -> bool {
- use proxmox::sys::linux::magic::*;
- magic == RAMFS_MAGIC || magic == TMPFS_MAGIC
-}
-
-pub fn is_virtual_file_system(magic: i64) -> bool {
- use proxmox::sys::linux::magic::*;
-
- match magic {
- BINFMTFS_MAGIC |
- CGROUP2_SUPER_MAGIC |
- CGROUP_SUPER_MAGIC |
- CONFIGFS_MAGIC |
- DEBUGFS_MAGIC |
- DEVPTS_SUPER_MAGIC |
- EFIVARFS_MAGIC |
- FUSE_CTL_SUPER_MAGIC |
- HUGETLBFS_MAGIC |
- MQUEUE_MAGIC |
- NFSD_MAGIC |
- PROC_SUPER_MAGIC |
- PSTOREFS_MAGIC |
- RPCAUTH_GSSMAGIC |
- SECURITYFS_MAGIC |
- SELINUX_MAGIC |
- SMACK_MAGIC |
- SYSFS_MAGIC => true,
- _ => false
- }
-}
--- /dev/null
+//! Code for extraction of pxar contents onto the file system.
+
+use std::convert::TryFrom;
+use std::ffi::{CStr, CString, OsStr};
+use std::io;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::Path;
+
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pathpatterns::{MatchEntry, MatchList, MatchType};
+use pxar::format::Device;
+use pxar::Metadata;
+
+use proxmox::c_result;
+use proxmox::tools::fs::{create_path, CreateOptions};
+
+use crate::pxar::dir_stack::PxarDirStack;
+use crate::pxar::flags;
+use crate::pxar::metadata;
+
+struct Extractor<'a> {
+ /// FIXME: use bitflags!() for feature_flags
+ feature_flags: u64,
+ allow_existing_dirs: bool,
+ callback: &'a mut dyn FnMut(&Path),
+ dir_stack: PxarDirStack,
+}
+
+impl<'a> Extractor<'a> {
+ fn with_flag(&self, flag: u64) -> bool {
+ flag == (self.feature_flags & flag)
+ }
+}
+
+pub fn extract_archive<T, F>(
+ mut decoder: pxar::decoder::Decoder<T>,
+ destination: &Path,
+ match_list: &[MatchEntry],
+ feature_flags: u64,
+ allow_existing_dirs: bool,
+ mut callback: F,
+) -> Result<(), Error>
+where
+ T: pxar::decoder::SeqRead,
+ F: FnMut(&Path),
+{
+ // we use this to keep track of our directory-traversal
+ decoder.enable_goodbye_entries(true);
+
+ let root = decoder
+ .next()
+ .ok_or_else(|| format_err!("found empty pxar archive"))?
+ .map_err(|err| format_err!("error reading pxar archive: {}", err))?;
+
+ if !root.is_dir() {
+ bail!("pxar archive does not start with a directory entry!");
+ }
+
+ create_path(
+ &destination,
+ None,
+ Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
+ )
+ .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
+
+ let dir = Dir::open(
+ destination,
+ OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
+ Mode::empty(),
+ )
+ .map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
+
+ let mut extractor = Extractor {
+ feature_flags,
+ allow_existing_dirs,
+ callback: &mut callback,
+ dir_stack: PxarDirStack::new(dir, root.metadata().clone()),
+ };
+
+ let mut match_stack = Vec::new();
+ let mut current_match = true;
+ while let Some(entry) = decoder.next() {
+ use pxar::EntryKind;
+
+ let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
+
+ let file_name_os = entry.file_name();
+
+ // safety check: a file entry in an archive must never contain slashes:
+ if file_name_os.as_bytes().contains(&b'/') {
+ bail!("archive file entry contains slashes, which is invalid and a security concern");
+ }
+
+ let file_name = CString::new(file_name_os.as_bytes())
+ .map_err(|_| format_err!("encountered file name with null-bytes"))?;
+
+ let metadata = entry.metadata();
+
+ let match_result = match_list.matches(
+ entry.path().as_os_str().as_bytes(),
+ Some(metadata.file_type() as u32),
+ );
+
+ let did_match = match match_result {
+ Some(MatchType::Include) => true,
+ Some(MatchType::Exclude) => false,
+ None => current_match,
+ };
+ match (did_match, entry.kind()) {
+ (_, EntryKind::Directory) => {
+ extractor.callback(entry.path());
+
+ extractor
+ .dir_stack
+ .push(file_name_os.to_owned(), metadata.clone())?;
+
+ if current_match && match_result != Some(MatchType::Exclude) {
+ // We're currently in a positive match and this directory does not match an
+ // exclude entry, so make sure it is created:
+ let _ = extractor
+ .dir_stack
+ .last_dir_fd(extractor.allow_existing_dirs)
+ .map_err(|err| {
+ format_err!("error creating entry {:?}: {}", file_name_os, err)
+ })?;
+ }
+
+ // We're starting a new directory, push our old matching state and replace it with
+ // our new one:
+ match_stack.push(current_match);
+ current_match = did_match;
+
+ Ok(())
+ }
+ (_, EntryKind::GoodbyeTable) => {
+ // go up a directory
+ let dir = extractor
+ .dir_stack
+ .pop()
+ .map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
+ .ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
+ // We left a directory, also get back our previous matching state. This is in sync
+ // with `dir_stack` so this should never be empty except for the final goodbye
+ // table, in which case we get back to the default of `true`.
+ current_match = match_stack.pop().unwrap_or(true);
+
+ if let Some(fd) = dir.try_as_raw_fd() {
+ metadata::apply(extractor.feature_flags, dir.metadata(), fd, &file_name)
+ } else {
+ Ok(())
+ }
+ }
+ (true, EntryKind::Symlink(link)) => {
+ extractor.callback(entry.path());
+ extractor.extract_symlink(&file_name, metadata, link.as_ref())
+ }
+ (true, EntryKind::Hardlink(link)) => {
+ extractor.callback(entry.path());
+ extractor.extract_hardlink(&file_name, metadata, link.as_os_str())
+ }
+ (true, EntryKind::Device(dev)) => {
+ if extractor.with_flag(flags::WITH_DEVICE_NODES) {
+ extractor.callback(entry.path());
+ extractor.extract_device(&file_name, metadata, dev)
+ } else {
+ Ok(())
+ }
+ }
+ (true, EntryKind::Fifo) => {
+ if extractor.with_flag(flags::WITH_FIFOS) {
+ extractor.callback(entry.path());
+ extractor.extract_special(&file_name, metadata, 0)
+ } else {
+ Ok(())
+ }
+ }
+ (true, EntryKind::Socket) => {
+ if extractor.with_flag(flags::WITH_SOCKETS) {
+ extractor.callback(entry.path());
+ extractor.extract_special(&file_name, metadata, 0)
+ } else {
+ Ok(())
+ }
+ }
+ (true, EntryKind::File { size, .. }) => extractor.extract_file(
+ &file_name,
+ metadata,
+ *size,
+ &mut decoder.contents().ok_or_else(|| {
+ format_err!("found regular file entry without contents in archive")
+ })?,
+ ),
+ (false, _) => Ok(()), // skip this
+ }
+ .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+ }
+
+ if !extractor.dir_stack.is_empty() {
+ bail!("unexpected eof while decoding pxar archive");
+ }
+
+ Ok(())
+}
+
+impl<'a> Extractor<'a> {
+ fn parent_fd(&mut self) -> Result<RawFd, Error> {
+ self.dir_stack.last_dir_fd(self.allow_existing_dirs)
+ }
+
+ fn callback(&mut self, path: &Path) {
+ (self.callback)(path)
+ }
+
+ fn extract_symlink(
+ &mut self,
+ file_name: &CStr,
+ metadata: &Metadata,
+ link: &OsStr,
+ ) -> Result<(), Error> {
+ let parent = self.parent_fd()?;
+ nix::unistd::symlinkat(link, Some(parent), file_name)?;
+ metadata::apply_at(self.feature_flags, metadata, parent, file_name)
+ }
+
+ fn extract_hardlink(
+ &mut self,
+ file_name: &CStr,
+ _metadata: &Metadata, // for now we don't use this because hardlinks don't need it...
+ link: &OsStr,
+ ) -> Result<(), Error> {
+ crate::pxar::tools::assert_relative_path(link)?;
+
+ let parent = self.parent_fd()?;
+ let root = self.dir_stack.root_dir_fd()?;
+ let target = CString::new(link.as_bytes())?;
+ nix::unistd::linkat(
+ Some(root),
+ target.as_c_str(),
+ Some(parent),
+ file_name,
+ nix::unistd::LinkatFlags::NoSymlinkFollow,
+ )?;
+
+ Ok(())
+ }
+
+ fn extract_device(
+ &mut self,
+ file_name: &CStr,
+ metadata: &Metadata,
+ device: &Device,
+ ) -> Result<(), Error> {
+ self.extract_special(file_name, metadata, device.to_dev_t())
+ }
+
+ fn extract_special(
+ &mut self,
+ file_name: &CStr,
+ metadata: &Metadata,
+ device: libc::dev_t,
+ ) -> Result<(), Error> {
+ let mode = metadata.stat.mode;
+ let mode = u32::try_from(mode).map_err(|_| {
+ format_err!(
+ "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
+ mode,
+ mode,
+ )
+ })?;
+ let parent = self.parent_fd()?;
+ unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
+ .map_err(|err| format_err!("failed to create device node: {}", err))?;
+
+ metadata::apply_at(self.feature_flags, metadata, parent, file_name)
+ }
+
+ fn extract_file(
+ &mut self,
+ file_name: &CStr,
+ metadata: &Metadata,
+ size: u64,
+ contents: &mut dyn io::Read,
+ ) -> Result<(), Error> {
+ let parent = self.parent_fd()?;
+ let mut file = unsafe {
+ std::fs::File::from_raw_fd(nix::fcntl::openat(
+ parent,
+ file_name,
+ OFlag::O_CREAT | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
+ Mode::from_bits(0o600).unwrap(),
+ )?)
+ };
+
+ let extracted = io::copy(&mut *contents, &mut file)?;
+ if size != extracted {
+ bail!("extracted {} bytes of a file of {} bytes", extracted, size);
+ }
+
+ metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
+ }
+}
//! Flags for known supported features for a given filesystem can be derived
//! from the superblocks magic number.
+// FIXME: use bitflags!() here!
+
/// FAT-style 2s time granularity
pub const WITH_2SEC_TIME: u64 = 0x40;
/// Preserve read only flag of files
+++ /dev/null
-//! *pxar* binary format definition
-//!
-//! Please note the all values are stored in little endian ordering.
-//!
-//! The Archive contains a list of items. Each item starts with a
-//! `PxarHeader`, followed by the item data.
-use std::cmp::Ordering;
-
-use endian_trait::Endian;
-use anyhow::{bail, Error};
-use siphasher::sip::SipHasher24;
-
-
-/// Header types identifying items stored in the archive
-pub const PXAR_ENTRY: u64 = 0x1396fabcea5bbb51;
-pub const PXAR_FILENAME: u64 = 0x6dbb6ebcb3161f0b;
-pub const PXAR_SYMLINK: u64 = 0x664a6fb6830e0d6c;
-pub const PXAR_DEVICE: u64 = 0xac3dace369dfe643;
-pub const PXAR_XATTR: u64 = 0xb8157091f80bc486;
-pub const PXAR_ACL_USER: u64 = 0x297dc88b2ef12faf;
-pub const PXAR_ACL_GROUP: u64 = 0x36f2acb56cb3dd0b;
-pub const PXAR_ACL_GROUP_OBJ: u64 = 0x23047110441f38f3;
-pub const PXAR_ACL_DEFAULT: u64 = 0xfe3eeda6823c8cd0;
-pub const PXAR_ACL_DEFAULT_USER: u64 = 0xbdf03df9bd010a91;
-pub const PXAR_ACL_DEFAULT_GROUP: u64 = 0xa0cb1168782d1f51;
-pub const PXAR_FCAPS: u64 = 0xf7267db0afed0629;
-pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
-
-/// Marks item as hardlink
-/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
-pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
-/// Marks the beginning of the payload (actual content) of regular files
-pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
-/// Marks item as entry of goodbye table
-pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
-/// The end marker used in the GOODBYE object
-pub const PXAR_GOODBYE_TAIL_MARKER: u64 = 0x57446fa533702943;
-
-#[derive(Debug, Endian)]
-#[repr(C)]
-pub struct PxarHeader {
- /// The item type (see `PXAR_` constants).
- pub htype: u64,
- /// The size of the item, including the size of `PxarHeader`.
- pub size: u64,
-}
-
-#[derive(Endian)]
-#[repr(C)]
-pub struct PxarEntry {
- pub mode: u64,
- pub flags: u64,
- pub uid: u32,
- pub gid: u32,
- pub mtime: u64,
-}
-
-#[derive(Endian)]
-#[repr(C)]
-pub struct PxarDevice {
- pub major: u64,
- pub minor: u64,
-}
-
-#[derive(Endian)]
-#[repr(C)]
-pub struct PxarGoodbyeItem {
- /// SipHash24 of the directory item name. The last GOODBYE item
- /// uses the special hash value `PXAR_GOODBYE_TAIL_MARKER`.
- pub hash: u64,
- /// The offset from the start of the GOODBYE object to the start
- /// of the matching directory item (point to a FILENAME). The last
- /// GOODBYE item points to the start of the matching ENTRY
- /// object.
- pub offset: u64,
- /// The overall size of the directory item. The last GOODBYE item
- /// repeats the size of the GOODBYE item.
- pub size: u64,
-}
-
-/// Helper function to extract file names from binary archive.
-pub fn read_os_string(buffer: &[u8]) -> std::ffi::OsString {
- let len = buffer.len();
-
- use std::os::unix::ffi::OsStrExt;
-
- let name = if len > 0 && buffer[len - 1] == 0 {
- std::ffi::OsStr::from_bytes(&buffer[0..len - 1])
- } else {
- std::ffi::OsStr::from_bytes(&buffer)
- };
-
- name.into()
-}
-
-#[derive(Debug, Eq)]
-#[repr(C)]
-pub struct PxarXAttr {
- pub name: Vec<u8>,
- pub value: Vec<u8>,
-}
-
-impl Ord for PxarXAttr {
- fn cmp(&self, other: &PxarXAttr) -> Ordering {
- self.name.cmp(&other.name)
- }
-}
-
-impl PartialOrd for PxarXAttr {
- fn partial_cmp(&self, other: &PxarXAttr) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl PartialEq for PxarXAttr {
- fn eq(&self, other: &PxarXAttr) -> bool {
- self.name == other.name
- }
-}
-
-#[derive(Debug)]
-#[repr(C)]
-pub struct PxarFCaps {
- pub data: Vec<u8>,
-}
-
-#[derive(Debug, Endian, Eq)]
-#[repr(C)]
-pub struct PxarACLUser {
- pub uid: u64,
- pub permissions: u64,
- //pub name: Vec<u64>, not impl for now
-}
-
-// TODO if also name is impl, sort by uid, then by name and last by permissions
-impl Ord for PxarACLUser {
- fn cmp(&self, other: &PxarACLUser) -> Ordering {
- match self.uid.cmp(&other.uid) {
- // uids are equal, entries ordered by permissions
- Ordering::Equal => self.permissions.cmp(&other.permissions),
- // uids are different, entries ordered by uid
- uid_order => uid_order,
- }
- }
-}
-
-impl PartialOrd for PxarACLUser {
- fn partial_cmp(&self, other: &PxarACLUser) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl PartialEq for PxarACLUser {
- fn eq(&self, other: &PxarACLUser) -> bool {
- self.uid == other.uid && self.permissions == other.permissions
- }
-}
-
-#[derive(Debug, Endian, Eq)]
-#[repr(C)]
-pub struct PxarACLGroup {
- pub gid: u64,
- pub permissions: u64,
- //pub name: Vec<u64>, not impl for now
-}
-
-// TODO if also name is impl, sort by gid, then by name and last by permissions
-impl Ord for PxarACLGroup {
- fn cmp(&self, other: &PxarACLGroup) -> Ordering {
- match self.gid.cmp(&other.gid) {
- // gids are equal, entries are ordered by permissions
- Ordering::Equal => self.permissions.cmp(&other.permissions),
- // gids are different, entries ordered by gid
- gid_ordering => gid_ordering,
- }
- }
-}
-
-impl PartialOrd for PxarACLGroup {
- fn partial_cmp(&self, other: &PxarACLGroup) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl PartialEq for PxarACLGroup {
- fn eq(&self, other: &PxarACLGroup) -> bool {
- self.gid == other.gid && self.permissions == other.permissions
- }
-}
-
-#[derive(Debug, Endian)]
-#[repr(C)]
-pub struct PxarACLGroupObj {
- pub permissions: u64,
-}
-
-#[derive(Debug, Endian)]
-#[repr(C)]
-pub struct PxarACLDefault {
- pub user_obj_permissions: u64,
- pub group_obj_permissions: u64,
- pub other_permissions: u64,
- pub mask_permissions: u64,
-}
-
-pub(crate) struct PxarACL {
- pub users: Vec<PxarACLUser>,
- pub groups: Vec<PxarACLGroup>,
- pub group_obj: Option<PxarACLGroupObj>,
- pub default: Option<PxarACLDefault>,
-}
-
-pub const PXAR_ACL_PERMISSION_READ: u64 = 4;
-pub const PXAR_ACL_PERMISSION_WRITE: u64 = 2;
-pub const PXAR_ACL_PERMISSION_EXECUTE: u64 = 1;
-
-#[derive(Debug, Endian)]
-#[repr(C)]
-pub struct PxarQuotaProjID {
- pub projid: u64,
-}
-
-#[derive(Debug, Default)]
-pub struct PxarAttributes {
- pub xattrs: Vec<PxarXAttr>,
- pub fcaps: Option<PxarFCaps>,
- pub quota_projid: Option<PxarQuotaProjID>,
- pub acl_user: Vec<PxarACLUser>,
- pub acl_group: Vec<PxarACLGroup>,
- pub acl_group_obj: Option<PxarACLGroupObj>,
- pub acl_default: Option<PxarACLDefault>,
- pub acl_default_user: Vec<PxarACLUser>,
- pub acl_default_group: Vec<PxarACLGroup>,
-}
-
-/// Create SipHash values for goodby tables.
-//pub fn compute_goodbye_hash(name: &std::ffi::CStr) -> u64 {
-pub fn compute_goodbye_hash(name: &[u8]) -> u64 {
- use std::hash::Hasher;
- let mut hasher = SipHasher24::new_with_keys(0x8574442b0f1d84b3, 0x2736ed30d1c22ec1);
- hasher.write(name);
- hasher.finish()
-}
-
-pub fn check_ca_header<T>(head: &PxarHeader, htype: u64) -> Result<(), Error> {
- if head.htype != htype {
- bail!(
- "got wrong header type ({:016x} != {:016x})",
- head.htype,
- htype
- );
- }
- if head.size != (std::mem::size_of::<T>() + std::mem::size_of::<PxarHeader>()) as u64 {
- bail!("got wrong header size for type {:016x}", htype);
- }
-
- Ok(())
-}
-
-/// The format requires to build sorted directory lookup tables in
-/// memory, so we restrict the number of allowed entries to limit
-/// maximum memory usage.
-pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
-//! Low level FUSE implementation for pxar.
-//!
-//! Allows to mount the archive as read-only filesystem to inspect its contents.
+//! Asynchronous fuse implementation.
-use std::collections::HashMap;
+use std::collections::BTreeMap;
use std::convert::TryFrom;
-use std::ffi::{CStr, CString, OsStr};
-use std::fs::File;
-use std::io::BufReader;
+use std::ffi::{OsStr, OsString};
+use std::future::Future;
+use std::io;
+use std::mem;
+use std::ops::Range;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
-use std::sync::Mutex;
-
-use anyhow::{bail, format_err, Error};
-use libc;
-use libc::{c_char, c_int, c_void, size_t};
-
-use crate::tools::lru_cache::{Cacher, LruCache};
-use crate::tools::acl;
-use super::binary_search_tree::search_binary_tree_by;
-use super::decoder::{Decoder, DirectoryEntry};
-use super::format_definition::PxarGoodbyeItem;
-
-/// Node ID of the root i-node
-///
-/// Offsets in the archive are used as i-node for the fuse implementation, as
-/// they are unique and enough to reference each item in the pxar archive.
-/// The only exception to this is the `FUSE_ROOT_ID`, which is defined as 1 by
-/// the fuse library.
-/// This is okay since offset 1 is part of the root directory entry header and
-/// will therefore not occur again, but remapping to the correct offset of 0 is
-/// required.
-const FUSE_ROOT_ID: u64 = 1;
-
-/// FFI types for easier readability
-type Request = *mut c_void;
-type MutPtr = *mut c_void;
-type ConstPtr = *const c_void;
-type StrPtr = *const c_char;
-type MutStrPtr = *mut c_char;
-
-#[rustfmt::skip]
-#[link(name = "fuse3")]
-extern "C" {
- fn fuse_session_new(args: Option<&FuseArgs>, oprs: Option<&Operations>, size: size_t, op: ConstPtr) -> MutPtr;
- fn fuse_set_signal_handlers(session: ConstPtr) -> c_int;
- fn fuse_remove_signal_handlers(session: ConstPtr);
- fn fuse_daemonize(foreground: c_int) -> c_int;
- fn fuse_session_mount(session: ConstPtr, mountpoint: StrPtr) -> c_int;
- fn fuse_session_unmount(session: ConstPtr);
- fn fuse_session_loop(session: ConstPtr) -> c_int;
- fn fuse_session_loop_mt_31(session: ConstPtr, clone_fd: c_int) -> c_int;
- fn fuse_session_destroy(session: ConstPtr);
- fn fuse_reply_attr(req: Request, attr: Option<&libc::stat>, timeout: f64) -> c_int;
- fn fuse_reply_err(req: Request, errno: c_int) -> c_int;
- fn fuse_reply_buf(req: Request, buf: MutStrPtr, size: size_t) -> c_int;
- fn fuse_reply_entry(req: Request, entry: Option<&EntryParam>) -> c_int;
- fn fuse_reply_xattr(req: Request, size: size_t) -> c_int;
- fn fuse_reply_readlink(req: Request, link: StrPtr) -> c_int;
- fn fuse_req_userdata(req: Request) -> MutPtr;
- fn fuse_add_direntry_plus(req: Request, buf: MutStrPtr, bufsize: size_t, name: StrPtr, stbuf: Option<&EntryParam>, off: c_int) -> c_int;
-}
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, RwLock};
+use std::task::{Context, Poll};
-/// Command line arguments passed to fuse.
-#[repr(C)]
-#[derive(Debug)]
-struct FuseArgs {
- argc: c_int,
- argv: *const StrPtr,
- allocated: c_int,
-}
+use anyhow::{format_err, Error};
+use futures::channel::mpsc::UnboundedSender;
+use futures::select;
+use futures::sink::SinkExt;
+use futures::stream::{StreamExt, TryStreamExt};
-/// `Context` for callback functions providing the decoder, caches and the
-/// offset within the archive for the i-node given by the caller.
-struct Context {
- decoder: Decoder,
- /// The start of each DirectoryEntry is used as inode, used as key for this
- /// hashmap.
- ///
- /// This map stores the corresponding end offset, needed to read the
- /// DirectoryEntry via the Decoder as well as the parent, in order
- /// to be able to include the parent directory on readdirplus calls.
- start_end_parent: HashMap<u64, (u64, u64)>,
- gbt_cache: LruCache<u64, Vec<(PxarGoodbyeItem, u64, u64)>>,
- entry_cache: LruCache<u64, DirectoryEntry>,
+use proxmox::tools::vec;
+use pxar::accessor::{self, EntryRangeInfo, ReadAt};
+
+use proxmox_fuse::requests::{self, FuseRequest};
+use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
+
+/// We mark inodes for regular files this way so we know how to access them.
+const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
+
+#[inline]
+fn is_dir_inode(inode: u64) -> bool {
+ 0 == (inode & NON_DIRECTORY_INODE)
}
-/// Cacher for the goodbye table.
-///
-/// Provides the feching of the goodbye table via the decoder on cache misses.
-struct GbtCacher<'a> {
- decoder: &'a mut Decoder,
- map: &'a HashMap<u64, (u64, u64)>,
+/// Our reader type instance used for accessors.
+pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;
+
+/// Our Accessor type instance.
+pub type Accessor = accessor::aio::Accessor<Reader>;
+
+/// Our Directory type instance.
+pub type Directory = accessor::aio::Directory<Reader>;
+
+/// Our FileEntry type instance.
+pub type FileEntry = accessor::aio::FileEntry<Reader>;
+
+/// Our FileContents type instance.
+pub type FileContents = accessor::aio::FileContents<Reader>;
+
+pub struct Session {
+ fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
}
-impl<'a> Cacher<u64, Vec<(PxarGoodbyeItem, u64, u64)>> for GbtCacher<'a> {
- fn fetch(&mut self, key: u64) -> Result<Option<Vec<(PxarGoodbyeItem, u64, u64)>>, Error> {
- let (end, _) = *self.map.get(&key).unwrap();
- let gbt = self.decoder.goodbye_table(None, end)?;
- Ok(Some(gbt))
+impl Session {
+ /// Create a fuse session for an archive.
+ pub async fn mount_path(
+ archive_path: &Path,
+ options: &OsStr,
+ verbose: bool,
+ mountpoint: &Path,
+ ) -> Result<Self, Error> {
+ // FIXME: Add a buffered ReadAt layer!
+ let file = std::fs::File::open(archive_path)?;
+ let file_size = file.metadata()?.len();
+ let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
+ let accessor = Accessor::new(reader, file_size).await?;
+ Self::mount(accessor, options, verbose, mountpoint)
}
-}
-/// Cacher for the directory entries.
-///
-/// Provides the feching of directory entries via the decoder on cache misses.
-struct EntryCacher<'a> {
- decoder: &'a mut Decoder,
- map: &'a HashMap<u64, (u64, u64)>,
+ /// Create a new fuse session for the given pxar `Accessor`.
+ pub fn mount(
+ accessor: Accessor,
+ options: &OsStr,
+ verbose: bool,
+ path: &Path,
+ ) -> Result<Self, Error> {
+ let fuse = Fuse::builder("pxar-mount")?
+ .debug()
+ .options_os(options)?
+ .enable_readdirplus()
+ .enable_read()
+ .enable_readlink()
+ .enable_read_xattr()
+ .build()?
+ .mount(path)?;
+
+ let session = SessionImpl::new(accessor, verbose);
+
+ Ok(Self {
+ fut: Box::pin(session.main(fuse)),
+ })
+ }
}
-impl<'a> Cacher<u64, DirectoryEntry> for EntryCacher<'a> {
- fn fetch(&mut self, key: u64) -> Result<Option<DirectoryEntry>, Error> {
- let entry = match key {
- 0 => self.decoder.root()?,
- _ => {
- let (end, _) = *self.map.get(&key).unwrap();
- self.decoder.read_directory_entry(key, end)?
- }
- };
- Ok(Some(entry))
+impl Future for Session {
+ type Output = Result<(), Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+ Pin::new(&mut self.fut).poll(cx)
}
}
-impl Context {
- /// Provides mutable references to the `Context` members.
- /// This is needed to avoid borrow conflicts.
- fn as_mut_refs(&mut self) -> (
- &mut Decoder,
- &mut HashMap<u64, (u64, u64)>,
- &mut LruCache<u64, Vec<(PxarGoodbyeItem, u64, u64)>>,
- &mut LruCache<u64, DirectoryEntry>
- ) {
- ( &mut self.decoder, &mut self.start_end_parent, &mut self.gbt_cache, &mut self.entry_cache )
- }
+/// We use this to return an errno value back to the kernel.
+macro_rules! io_return {
+ ($errno:expr) => {
+ return Err(::std::io::Error::from_raw_os_error($errno).into());
+ };
}
-/// `Session` stores a pointer to the session context and is used to mount the
-/// archive to the given mountpoint.
-pub struct Session {
- ptr: MutPtr,
- verbose: bool,
+/// Format an "other" error, see `io_bail` below for details.
+macro_rules! io_format_err {
+ ($($fmt:tt)*) => {
+ ::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
+ }
}
-/// `Operations` defines the callback function table of supported operations.
-#[repr(C)]
-#[derive(Default)]
-#[rustfmt::skip]
-struct Operations {
- // The order in which the functions are listed matters, as the offset in the
- // struct defines what function the fuse driver uses.
- // It should therefore not be altered!
- init: Option<extern fn(userdata: MutPtr)>,
- destroy: Option<extern fn(userdata: MutPtr)>,
- lookup: Option<extern fn(req: Request, parent: u64, name: StrPtr)>,
- forget: Option<extern fn(req: Request, inode: u64, nlookup: u64)>,
- getattr: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr)>,
- setattr: Option<extern fn(req: Request, inode: u64, attr: MutPtr, to_set: c_int, fileinfo: MutPtr)>,
- readlink: Option<extern fn(req: Request, inode: u64)>,
- mknod: Option<extern fn(req: Request, parent: u64, name: StrPtr, mode: c_int, rdev: c_int)>,
- mkdir: Option<extern fn(req: Request, parent: u64, name: StrPtr, mode: c_int)>,
- unlink: Option<extern fn(req: Request, parent: u64, name: StrPtr)>,
- rmdir: Option<extern fn(req: Request, parent: u64, name: StrPtr)>,
- symlink: Option<extern fn(req: Request, link: StrPtr, parent: u64, name: StrPtr)>,
- rename: Option<extern fn(req: Request, parent: u64, name: StrPtr, newparent: u64, newname: StrPtr, flags: c_int)>,
- link: Option<extern fn(req: Request, inode: u64, newparent: u64, newname: StrPtr)>,
- open: Option<extern fn(req: Request, indoe: u64, fileinfo: MutPtr)>,
- read: Option<extern fn(req: Request, inode: u64, size: size_t, offset: c_int, fileinfo: MutPtr)>,
- write: Option<extern fn(req: Request, inode: u64, buffer: StrPtr, size: size_t, offset: c_void, fileinfo: MutPtr)>,
- flush: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr)>,
- release: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr)>,
- fsync: Option<extern fn(req: Request, inode: u64, datasync: c_int, fileinfo: MutPtr)>,
- opendir: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr)>,
- readdir: Option<extern fn(req: Request, inode: u64, size: size_t, offset: c_int, fileinfo: MutPtr)>,
- releasedir: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr)>,
- fsyncdir: Option<extern fn(req: Request, inode: u64, datasync: c_int, fileinfo: MutPtr)>,
- statfs: Option<extern fn(req: Request, inode: u64)>,
- setxattr: Option<extern fn(req: Request, inode: u64, name: StrPtr, value: StrPtr, size: size_t, flags: c_int)>,
- getxattr: Option<extern fn(req: Request, inode: u64, name: StrPtr, size: size_t)>,
- listxattr: Option<extern fn(req: Request, inode: u64, size: size_t)>,
- removexattr: Option<extern fn(req: Request, inode: u64, name: StrPtr)>,
- access: Option<extern fn(req: Request, inode: u64, mask: i32)>,
- create: Option<extern fn(req: Request, parent: u64, name: StrPtr, mode: c_int, fileinfo: MutPtr)>,
- getlk: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr, lock: MutPtr)>,
- setlk: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr, lock: MutPtr, sleep: c_int)>,
- bmap: Option<extern fn(req: Request, inode: u64, blocksize: size_t, idx: u64)>,
- ioctl: Option<extern fn(req: Request, inode: u64, cmd: c_int, arg: MutPtr, fileinfo: MutPtr, flags: c_int, in_buf: ConstPtr, in_bufsz: size_t, out_bufsz: size_t)>,
- poll: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr, pollhandle: MutPtr)>,
- write_buf: Option<extern fn(req: Request, inode: u64, bufv: MutPtr, offset: c_int, fileinfo: MutPtr)>,
- retrieve_reply: Option<extern fn(req: Request, cookie: ConstPtr, inode: u64, offset: c_int, bufv: MutPtr)>,
- forget_multi: Option<extern fn(req: Request, count: size_t, forgets: MutPtr)>,
- flock: Option<extern fn(req: Request, inode: u64, fileinfo: MutPtr, op: c_int)>,
- fallocate: Option<extern fn(req: Request, inode: u64, mode: c_int, offset: c_int, length: c_int, fileinfo: MutPtr)>,
- readdirplus: Option<extern fn(req: Request, inode: u64, size: size_t, offset: c_int, fileinfo: MutPtr)>,
- copy_file_range: Option<extern fn(req: Request, ino_in: u64, off_in: c_int, fi_in: MutPtr, ino_out: u64, off_out: c_int, fi_out: MutPtr, len: size_t, flags: c_int)>,
+/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
+/// request to be answered with a generic `EIO` error code. The error message contained in here
+/// will be printed to stdout if the verbose flag is used, otherwise silently dropped.
+macro_rules! io_bail {
+ ($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
}
+/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
+/// accessed.
+struct Lookup {
+ refs: AtomicUsize,
-impl Session {
+ inode: u64,
+ parent: u64,
+ entry_range_info: EntryRangeInfo,
+ content_range: Option<Range<u64>>,
+}
- /// Create a new low level fuse session.
- ///
- /// `Session` is created using the provided mount options and sets the
- /// default signal handlers.
- /// Options have to be provided as comma separated OsStr, e.g.
- /// ("ro,default_permissions").
- pub fn from_path(archive_path: &Path, options: &OsStr, verbose: bool) -> Result<Self, Error> {
- let file = File::open(archive_path)?;
- let reader = BufReader::new(file);
- let decoder = Decoder::new(reader)?;
- Self::new(decoder, options, verbose)
+impl Lookup {
+ fn new(
+ inode: u64,
+ parent: u64,
+ entry_range_info: EntryRangeInfo,
+ content_range: Option<Range<u64>>,
+ ) -> Box<Lookup> {
+ Box::new(Self {
+ refs: AtomicUsize::new(1),
+ inode,
+ parent,
+ entry_range_info,
+ content_range,
+ })
}
- /// Create a new low level fuse session using the given `Decoder`.
- ///
- /// `Session` is created using the provided mount options and sets the
- /// default signal handlers.
- /// Options have to be provided as comma separated OsStr, e.g.
- /// ("ro,default_permissions").
- pub fn new(decoder: Decoder, options: &OsStr, verbose: bool) -> Result<Self, Error> {
- let args = Self::setup_args(options, verbose)?;
- let oprs = Self::setup_callbacks();
- let mut map = HashMap::new();
- // Insert entry for the root directory, with itself as parent.
- map.insert(0, (decoder.root_end_offset(), 0));
-
- let ctx = Context {
- decoder,
- start_end_parent: map,
- entry_cache: LruCache::new(1024),
- gbt_cache: LruCache::new(1024),
- };
-
- let session_ctx = Box::new(Mutex::new(ctx));
- let arg_ptrs: Vec<_> = args.iter().map(|opt| opt.as_ptr()).collect();
- let fuse_args = FuseArgs {
- argc: arg_ptrs.len() as i32,
- argv: arg_ptrs.as_ptr(),
- allocated: 0,
- };
- let session_ptr = unsafe {
- fuse_session_new(
- Some(&fuse_args),
- Some(&oprs),
- std::mem::size_of::<Operations>(),
- // Ownership of session_ctx is passed to the session here.
- // It has to be reclaimed before dropping the session to free
- // the `Context` and close the underlying file. This is done inside
- // the destroy callback function.
- Box::into_raw(session_ctx) as ConstPtr,
- )
- };
-
- if session_ptr.is_null() {
- bail!("error while creating new fuse session");
+ /// Decrease the reference count by `count`. Note that this must not include the reference held
+ /// by `self` itself, so this must not decrease the count below 2.
+ fn forget(&self, count: usize) -> Result<(), Error> {
+ loop {
+ let old = self.refs.load(Ordering::Acquire);
+ if count >= old {
+ io_bail!("reference count underflow");
+ }
+ let new = old - count;
+ match self
+ .refs
+ .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
+ {
+ Ok(_) => break Ok(()),
+ Err(_) => continue,
+ }
}
+ }
- if unsafe { fuse_set_signal_handlers(session_ptr) } != 0 {
- bail!("error while setting signal handlers");
+ fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
+ if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
+ panic!("atomic refcount increased from 0 to 1");
}
- Ok(Self { ptr: session_ptr, verbose })
+ LookupRef {
+ session,
+ lookup: self as *const Lookup,
+ }
}
+}
- fn setup_args(options: &OsStr, verbose: bool) -> Result<Vec<CString>, Error> {
- // First argument should be the executable name
- let mut arguments = vec![
- CString::new("pxar-mount").unwrap(),
- CString::new("-o").unwrap(),
- CString::new(options.as_bytes())?,
- ];
- if verbose {
- arguments.push(CString::new("--debug").unwrap());
- }
+struct LookupRef<'a> {
+ session: &'a SessionImpl,
+ lookup: *const Lookup,
+}
+
+unsafe impl<'a> Send for LookupRef<'a> {}
+unsafe impl<'a> Sync for LookupRef<'a> {}
- Ok(arguments)
+impl<'a> Clone for LookupRef<'a> {
+ fn clone(&self) -> Self {
+ self.get_ref(self.session)
}
+}
+
+impl<'a> std::ops::Deref for LookupRef<'a> {
+ type Target = Lookup;
- fn setup_callbacks() -> Operations {
- // Register the callback functions for the session
- let mut oprs = Operations::default();
- oprs.init = Some(Self::init);
- oprs.destroy = Some(Self::destroy);
- oprs.lookup = Some(Self::lookup);
- oprs.getattr = Some(Self::getattr);
- oprs.readlink = Some(Self::readlink);
- oprs.read = Some(Self::read);
- oprs.getxattr = Some(Self::getxattr);
- oprs.listxattr = Some(Self::listxattr);
- oprs.readdirplus = Some(Self::readdirplus);
- oprs
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.lookup }
}
+}
- /// Mount the filesystem on the given mountpoint.
- ///
- /// Actually mount the filesystem for this session on the provided mountpoint
- /// and daemonize process.
- pub fn mount(&mut self, mountpoint: &Path, deamonize: bool) -> Result<(), Error> {
- if self.verbose {
- println!("Mounting archive to {:#?}", mountpoint);
- }
- let mountpoint = mountpoint.canonicalize()?;
- let path_cstr = CString::new(mountpoint.as_os_str().as_bytes())
- .map_err(|err| format_err!("invalid mountpoint - {}", err))?;
- if unsafe { fuse_session_mount(self.ptr, path_cstr.as_ptr()) } != 0 {
- bail!("mounting on {:#?} failed", mountpoint);
+impl<'a> Drop for LookupRef<'a> {
+ fn drop(&mut self) {
+ if self.lookup.is_null() {
+ return;
}
- // Send process to background if deamonize is set
- if deamonize && unsafe { fuse_daemonize(0) } != 0 {
- bail!("could not send process to background");
+ if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
+ let inode = self.inode;
+ drop(self.session.lookups.write().unwrap().remove(&inode));
}
+ }
+}
- Ok(())
+impl<'a> LookupRef<'a> {
+ fn leak(mut self) -> &'a Lookup {
+ unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
}
+}
- /// Execute session loop which handles requests from kernel.
- ///
- /// The multi_threaded flag controls if the session loop runs in
- /// single-threaded or multi-threaded mode.
- /// Single-threaded mode is intended for debugging only.
- pub fn run_loop(&mut self, multi_threaded: bool) -> Result<(), Error> {
- if self.verbose {
- println!("Executing fuse session loop");
- }
- let result = match multi_threaded {
- true => unsafe { fuse_session_loop_mt_31(self.ptr, 1) },
- false => unsafe { fuse_session_loop(self.ptr) },
- };
- if result < 0 {
- bail!("fuse session loop exited with - {}", result);
- }
- if result > 0 {
- eprintln!("fuse session loop received signal - {}", result);
- }
+struct SessionImpl {
+ accessor: Accessor,
+ verbose: bool,
+ lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
+}
- Ok(())
+impl SessionImpl {
+ fn new(accessor: Accessor, verbose: bool) -> Self {
+ let root = Lookup::new(
+ ROOT_ID,
+ ROOT_ID,
+ EntryRangeInfo::toplevel(0..accessor.size()),
+ None,
+ );
+
+ let mut tree = BTreeMap::new();
+ tree.insert(ROOT_ID, root);
+
+ Self {
+ accessor,
+ verbose,
+ lookups: RwLock::new(tree),
+ }
}
- /// Creates a context providing exclusive mutable references to the members of
- /// `Context`.
+ /// Here's how we deal with errors:
///
- /// Same as run_in_context except it provides ref mut to the individual members
- /// of `Context` in order to avoid borrow conflicts.
- fn run_with_context_refs<F>(req: Request, inode: u64, code: F)
- where
- F: FnOnce(
- &mut Decoder,
- &mut HashMap<u64, (u64, u64)>,
- &mut LruCache<u64, Vec<(PxarGoodbyeItem, u64, u64)>>,
- &mut LruCache<u64, DirectoryEntry>,
- u64,
- ) -> Result<(), i32>,
- {
- let boxed_ctx = unsafe {
- let ptr = fuse_req_userdata(req) as *mut Mutex<Context>;
- Box::from_raw(ptr)
- };
- let result = boxed_ctx
- .lock()
- .map(|mut ctx| {
- let ino_offset = match inode {
- FUSE_ROOT_ID => 0,
- _ => inode,
- };
- let (decoder, map, gbt_cache, entry_cache) = ctx.as_mut_refs();
- code(decoder, map, gbt_cache, entry_cache, ino_offset)
- })
- .unwrap_or(Err(libc::EIO));
+ /// Any error will be printed if the verbose flag was set, otherwise the message will be
+ /// silently dropped.
+ ///
+ /// Opaque errors will cause the fuse main loop to bail out with that error.
+ ///
+ /// `io::Error`s will cause the fuse request to be responded to with the given `io::Error`. An
+ /// `io::ErrorKind::Other` translates to a generic `EIO`.
+ async fn handle_err(
+ &self,
+ request: impl FuseRequest,
+ err: Error,
+ mut sender: UnboundedSender<Error>,
+ ) {
+ let final_result = match err.downcast::<io::Error>() {
+ Ok(err) => {
+ if err.kind() == io::ErrorKind::Other {
+ if self.verbose {
+ eprintln!("an IO error occurred: {}", err);
+ }
+ }
- if let Err(err) = result {
- unsafe {
- let _res = fuse_reply_err(req, err);
+ // fail the request
+ request.io_fail(err).map_err(Error::from)
+ }
+ Err(err) => {
+ // `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
+ if self.verbose {
+ eprintln!("internal error: {}, bailing out", err);
+ }
+ Err(err)
}
+ };
+ if let Err(err) = final_result {
+ // either we failed to send the error code to fuse, or the above was not an
+ // `io::Error`, so in this case notify the main loop:
+ sender
+ .send(err)
+ .await
+ .expect("failed to propagate error to main loop");
}
-
- // Release ownership of boxed context, do not drop it.
- let _ = Box::into_raw(boxed_ctx);
}
- /// Callback functions for fuse kernel driver.
- extern "C" fn init(_decoder: MutPtr) {
- // Notting to do here for now
+ async fn main(self, fuse: Fuse) -> Result<(), Error> {
+ Arc::new(self).main_do(fuse).await
}
- /// Cleanup the userdata created while creating the session, which is the `Context`
- extern "C" fn destroy(ctx: MutPtr) {
- // Get ownership of the `Context` and drop it when Box goes out of scope.
- unsafe { Box::from_raw(ctx) };
+ async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
+ let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
+ let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
+ loop {
+ select! {
+ request = fuse.try_next() => match request? {
+ Some(request) => {
+ tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
+ }
+ None => break,
+ },
+ err = err_recv.next() => match err {
+ Some(err) => if self.verbose {
+ eprintln!("cancelling fuse main loop due to error: {}", err);
+ return Err(err);
+ },
+ None => panic!("error channel was closed unexpectedly"),
+ },
+ }
+ }
+ Ok(())
}
- /// Lookup `name` in the directory referenced by `parent` i-node.
- ///
- /// Inserts also the child and parent file offset in the hashmap to quickly
- /// obtain the parent offset based on the child offset.
- /// Caches goodbye table of parent and attributes of child, if found.
- extern "C" fn lookup(req: Request, parent: u64, name: StrPtr) {
- let filename = unsafe { CStr::from_ptr(name) };
- let hash = super::format_definition::compute_goodbye_hash(filename.to_bytes());
-
- Self::run_with_context_refs(req, parent, |decoder, map, gbt_cache, entry_cache, ino_offset| {
- let gbt = gbt_cache.access(ino_offset, &mut GbtCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let mut start_idx = 0;
- let mut skip_multiple = 0;
- loop {
- // Search for the next goodbye entry with matching hash.
- let idx = search_binary_tree_by(
- start_idx,
- gbt.len(),
- skip_multiple,
- |idx| hash.cmp(&gbt[idx].0.hash),
- ).ok_or_else(|| libc::ENOENT)?;
-
- let (_item, start, end) = &gbt[idx];
- map.insert(*start, (*end, ino_offset));
-
- let entry = entry_cache.access(*start, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::ENOENT)?;
-
- // Possible hash collision, need to check if the found entry is indeed
- // the filename to lookup.
- if entry.filename.as_bytes() == filename.to_bytes() {
- let e = EntryParam {
- inode: *start,
- generation: 1,
- attr: stat(*start, &entry)?,
- attr_timeout: std::f64::MAX,
- entry_timeout: std::f64::MAX,
- };
-
- let _res = unsafe { fuse_reply_entry(req, Some(&e)) };
- return Ok(())
+ async fn handle_request(
+ self: Arc<Self>,
+ request: Request,
+ mut err_sender: UnboundedSender<Error>,
+ ) {
+ let result: Result<(), Error> = match request {
+ Request::Lookup(request) => {
+ match self.lookup(request.parent, &request.file_name).await {
+ Ok((entry, lookup)) => match request.reply(&entry) {
+ Ok(()) => {
+ lookup.leak();
+ Ok(())
+ }
+ Err(err) => Err(Error::from(err)),
+ },
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ }
+ }
+ Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
+ Ok(()) => {
+ request.reply();
+ Ok(())
+ }
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::Getattr(request) => match self.getattr(request.inode).await {
+ Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
+ Ok(lookups) => match request.reply() {
+ Ok(()) => {
+ for i in lookups {
+ i.leak();
+ }
+ Ok(())
+ }
+ Err(err) => Err(Error::from(err)),
+ },
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::Read(request) => {
+ match self.read(request.inode, request.size, request.offset).await {
+ Ok(data) => request.reply(&data).map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
}
- // Hash collision, check the next entry in the goodbye table by starting
- // from given index but skipping one more match (so hash at index itself).
- start_idx = idx;
- skip_multiple = 1;
}
- });
+ Request::Readlink(request) => match self.readlink(request.inode).await {
+ Ok(data) => request.reply(&data).map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
+ Ok(data) => request
+ .reply(
+ data.into_iter()
+ .fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
+ )
+ .map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
+ Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
+ Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ },
+ Request::GetXAttrSize(request) => {
+ match self.getxattr(request.inode, &request.attr_name).await {
+ Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ }
+ }
+ Request::GetXAttr(request) => {
+ match self.getxattr(request.inode, &request.attr_name).await {
+ Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
+ Err(err) => return self.handle_err(request, err, err_sender).await,
+ }
+ }
+ other => {
+ if self.verbose {
+ eprintln!("Received unexpected fuse request");
+ }
+ other.fail(libc::ENOSYS).map_err(Error::from)
+ }
+ };
+
+ if let Err(err) = result {
+ err_sender
+ .send(err)
+ .await
+ .expect("failed to propagate error to main loop");
+ }
}
- extern "C" fn getattr(req: Request, inode: u64, _fileinfo: MutPtr) {
- Self::run_with_context_refs(req, inode, |decoder, map, _, entry_cache, ino_offset| {
- let entry = entry_cache.access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let attr = stat(inode, &entry)?;
- let _res = unsafe {
- // Since fs is read-only, the timeout can be max.
- let timeout = std::f64::MAX;
- fuse_reply_attr(req, Some(&attr), timeout)
- };
-
- Ok(())
- });
+ fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
+ let lookups = self.lookups.read().unwrap();
+ if let Some(lookup) = lookups.get(&inode) {
+ return Ok(lookup.get_ref(self));
+ }
+ io_return!(libc::ENOENT);
}
- extern "C" fn readlink(req: Request, inode: u64) {
- Self::run_with_context_refs(req, inode, |decoder, map, _, entry_cache, ino_offset| {
- let entry = entry_cache
- .access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let target = entry.target.as_ref().ok_or_else(|| libc::EIO)?;
- let link = CString::new(target.as_os_str().as_bytes()).map_err(|_| libc::EIO)?;
- let _ret = unsafe { fuse_reply_readlink(req, link.as_ptr()) };
-
- Ok(())
- });
+ async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
+ if inode == ROOT_ID {
+ Ok(self.accessor.open_root().await?)
+ } else if !is_dir_inode(inode) {
+ io_return!(libc::ENOTDIR);
+ } else {
+ Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
+ }
}
- extern "C" fn read(req: Request, inode: u64, size: size_t, offset: c_int, _fileinfo: MutPtr) {
- Self::run_with_context_refs(req, inode, |decoder, map, _gbt_cache, entry_cache, ino_offset| {
- let entry = entry_cache.access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let mut data = decoder.read(&entry, size, offset as u64).map_err(|_| libc::EIO)?;
-
- let _res = unsafe {
- let len = data.len();
- let dptr = data.as_mut_ptr() as *mut c_char;
- fuse_reply_buf(req, dptr, len)
- };
-
- Ok(())
- });
+ async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
+ unsafe {
+ self.accessor
+ .open_file_at_range(&lookup.entry_range_info)
+ .await
+ }
}
- /// Read and return the entries of the directory referenced by i-node.
- ///
- /// Replies to the request with the entries fitting into a buffer of length
- /// `size`, as requested by the caller.
- /// `offset` identifies the start index of entries to return. This is used on
- /// repeated calls, occurring if not all entries fitted into the buffer.
- extern "C" fn readdirplus(req: Request, inode: u64, size: size_t, offset: c_int, _fileinfo: MutPtr) {
- let offset = offset as usize;
-
- Self::run_with_context_refs(req, inode, |decoder, map, gbt_cache, entry_cache, ino_offset| {
- let gbt = gbt_cache.access(ino_offset, &mut GbtCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::ENOENT)?;
- let n_entries = gbt.len();
- let mut buf = ReplyBuf::new(req, size, offset);
-
- if offset < n_entries {
- for e in gbt[offset..gbt.len()].iter() {
- map.insert(e.1, (e.2, ino_offset));
- let entry = entry_cache.access(e.1, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let name = CString::new(entry.filename.as_bytes())
- .map_err(|_| libc::EIO)?;
- let attr = EntryParam {
- inode: e.1,
- generation: 1,
- attr: stat(e.1, &entry).map_err(|_| libc::EIO)?,
- attr_timeout: std::f64::MAX,
- entry_timeout: std::f64::MAX,
- };
- match buf.fill(&name, &attr) {
- Ok(ReplyBufState::Okay) => {}
- Ok(ReplyBufState::Overfull) => return buf.reply_filled(),
- Err(_) => return Err(libc::EIO),
- }
- }
- }
+ fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
+ if is_dir_inode(lookup.inode) {
+ io_return!(libc::EISDIR);
+ }
- // Add current directory entry "."
- if offset <= n_entries {
- let entry = entry_cache.access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let name = CString::new(".").unwrap();
- let attr = EntryParam {
- inode: inode,
- generation: 1,
- attr: stat(inode, &entry).map_err(|_| libc::EIO)?,
- attr_timeout: std::f64::MAX,
- entry_timeout: std::f64::MAX,
- };
- match buf.fill(&name, &attr) {
- Ok(ReplyBufState::Okay) => {}
- Ok(ReplyBufState::Overfull) => return buf.reply_filled(),
- Err(_) => return Err(libc::EIO),
- }
- }
+ match lookup.content_range.clone() {
+ Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
+ None => io_return!(libc::EBADF),
+ }
+ }
- // Add parent directory entry ".."
- if offset <= n_entries + 1 {
- let (_, parent) = *map.get(&ino_offset).unwrap();
- let entry = entry_cache.access(parent, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let inode = if parent == 0 { FUSE_ROOT_ID } else { parent };
- let name = CString::new("..").unwrap();
- let attr = EntryParam {
- inode: inode,
- generation: 1,
- attr: stat(inode, &entry).map_err(|_| libc::EIO)?,
- attr_timeout: std::f64::MAX,
- entry_timeout: std::f64::MAX,
- };
- match buf.fill(&name, &attr) {
- Ok(ReplyBufState::Okay) => {}
- Ok(ReplyBufState::Overfull) => return buf.reply_filled(),
- Err(_) => return Err(libc::EIO),
- }
- }
+ fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
+ let lookups = self.lookups.read().unwrap();
+ if let Some(lookup) = lookups.get(&inode) {
+ return Ok(lookup.get_ref(self));
+ }
+ drop(lookups);
+
+ let entry = Lookup::new(
+ inode,
+ parent,
+ entry.entry_range_info().clone(),
+ entry.content_range()?,
+ );
+ let reference = entry.get_ref(self);
+ entry.refs.store(1, Ordering::Release);
+
+ let mut lookups = self.lookups.write().unwrap();
+ if let Some(lookup) = lookups.get(&inode) {
+ return Ok(lookup.get_ref(self));
+ }
- buf.reply_filled()
- });
+ lookups.insert(inode, entry);
+ drop(lookups);
+ Ok(reference)
}
- /// Get the value of the extended attribute of `inode` identified by `name`.
- extern "C" fn getxattr(req: Request, inode: u64, name: StrPtr, size: size_t) {
- let name = unsafe { CStr::from_ptr(name) };
-
- Self::run_with_context_refs(req, inode, |decoder, map, _, entry_cache, ino_offset| {
- let entry = entry_cache.access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
-
- // Some of the extended attributes are stored separately in the archive,
- // so check if requested name matches one of those.
- match name.to_bytes() {
- b"security.capability" => {
- match &mut entry.xattr.fcaps {
- None => return Err(libc::ENODATA),
- Some(fcaps) => return Self::xattr_reply_value(req, &mut fcaps.data, size),
- }
- }
- b"system.posix_acl_access" => {
- // Make sure to return if there are no matching extended attributes in the archive
- if entry.xattr.acl_group_obj.is_none()
- && entry.xattr.acl_user.is_empty()
- && entry.xattr.acl_group.is_empty() {
- return Err(libc::ENODATA);
- }
- let mut buffer = acl::ACLXAttrBuffer::new(acl::ACL_EA_VERSION);
-
- buffer.add_entry(acl::ACL_USER_OBJ, None, acl::mode_user_to_acl_permissions(entry.entry.mode));
- match &entry.xattr.acl_group_obj {
- Some(group_obj) => {
- buffer.add_entry(acl::ACL_MASK, None, acl::mode_group_to_acl_permissions(entry.entry.mode));
- buffer.add_entry(acl::ACL_GROUP_OBJ, None, group_obj.permissions);
- }
- None => {
- buffer.add_entry(acl::ACL_GROUP_OBJ, None, acl::mode_group_to_acl_permissions(entry.entry.mode));
- }
- }
- buffer.add_entry(acl::ACL_OTHER, None, acl::mode_other_to_acl_permissions(entry.entry.mode));
+ fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
+ let node = self.get_lookup(inode)?;
+ node.forget(count)?;
+ Ok(())
+ }
- for user in &mut entry.xattr.acl_user {
- buffer.add_entry(acl::ACL_USER, Some(user.uid), user.permissions);
- }
- for group in &mut entry.xattr.acl_group {
- buffer.add_entry(acl::ACL_GROUP, Some(group.gid), group.permissions);
- }
- return Self::xattr_reply_value(req, buffer.as_mut_slice(), size);
- }
- b"system.posix_acl_default" => {
- if let Some(default) = &entry.xattr.acl_default {
- let mut buffer = acl::ACLXAttrBuffer::new(acl::ACL_EA_VERSION);
+ async fn lookup<'a>(
+ &'a self,
+ parent: u64,
+ file_name: &OsStr,
+ ) -> Result<(EntryParam, LookupRef<'a>), Error> {
+ let dir = self.open_dir(parent).await?;
- buffer.add_entry(acl::ACL_USER_OBJ, None, default.user_obj_permissions);
- buffer.add_entry(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions);
- buffer.add_entry(acl::ACL_OTHER, None, default.other_permissions);
+ let entry = match { dir }.lookup(file_name).await? {
+ Some(entry) => entry,
+ None => io_return!(libc::ENOENT),
+ };
- if default.mask_permissions != std::u64::MAX {
- buffer.add_entry(acl::ACL_MASK, None, default.mask_permissions);
- }
+ let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
+ // we don't know the file's end-offset, so we'll just allow the decoder to decode the
+ // entire rest of the archive until we figure out something better...
+ let entry = self.accessor.follow_hardlink(&entry).await?;
- for user in &mut entry.xattr.acl_default_user {
- buffer.add_entry(acl::ACL_USER, Some(user.uid), user.permissions);
- }
- for group in &mut entry.xattr.acl_default_group {
- buffer.add_entry(acl::ACL_GROUP, Some(group.gid), group.permissions);
- }
- if buffer.len() > 0 {
- return Self::xattr_reply_value(req, buffer.as_mut_slice(), size);
- }
- }
- }
- name => {
- for xattr in &mut entry.xattr.xattrs {
- if name == xattr.name.as_slice() {
- return Self::xattr_reply_value(req, &mut xattr.value, size);
- }
- }
- }
+ if let pxar::EntryKind::Hardlink(_) = entry.kind() {
+ // hardlinks must not point to other hardlinks...
+ io_return!(libc::ELOOP);
}
+ entry
+ } else {
+ entry
+ };
- Err(libc::ENODATA)
- });
+ let response = to_entry(&entry)?;
+ let inode = response.inode;
+ Ok((response, self.make_lookup(parent, inode, &entry)?))
}
- /// Get a list of the extended attribute of `inode`.
- extern "C" fn listxattr(req: Request, inode: u64, size: size_t) {
- Self::run_with_context_refs(req, inode, |decoder, map, _, entry_cache, ino_offset| {
- let entry = entry_cache.access(ino_offset, &mut EntryCacher { decoder, map })
- .map_err(|_| libc::EIO)?
- .ok_or_else(|| libc::EIO)?;
- let mut buffer = Vec::new();
- if entry.xattr.fcaps.is_some() {
- buffer.extend_from_slice(b"security.capability\0");
- }
- if entry.xattr.acl_default.is_some() {
- buffer.extend_from_slice(b"system.posix_acl_default\0");
+ async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
+ let entry = unsafe {
+ self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
+ };
+ to_stat(inode, &entry)
+ }
+
+ async fn readdirplus<'a>(
+ &'a self,
+ request: &mut requests::ReaddirPlus,
+ ) -> Result<Vec<LookupRef<'a>>, Error> {
+ let mut lookups = Vec::new();
+ let offset = usize::try_from(request.offset)
+ .map_err(|_| io_format_err!("directory offset out of range"))?;
+
+ let dir = self.open_dir(request.inode).await?;
+ let dir_lookup = self.get_lookup(request.inode)?;
+
+ let entry_count = dir.read_dir().count() as isize;
+
+ let mut next = offset as isize;
+ let mut iter = dir.read_dir().skip(offset);
+ while let Some(file) = iter.next().await {
+ next += 1;
+ let file = file?.decode_entry().await?;
+ let stat = to_stat(to_inode(&file), &file)?;
+ let name = file.file_name();
+ match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+ ReplyBufState::Ok => (),
+ ReplyBufState::Full => return Ok(lookups),
}
- if entry.xattr.acl_group_obj.is_some()
- || !entry.xattr.acl_user.is_empty()
- || !entry.xattr.acl_group.is_empty() {
- buffer.extend_from_slice(b"system.posix_acl_user\0");
+ lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
+ }
+
+ if next == entry_count {
+ next += 1;
+ let file = dir.lookup_self().await?;
+ let stat = to_stat(to_inode(&file), &file)?;
+ let name = OsStr::new(".");
+ match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+ ReplyBufState::Ok => (),
+ ReplyBufState::Full => return Ok(lookups),
}
- for xattr in &mut entry.xattr.xattrs {
- buffer.append(&mut xattr.name);
- buffer.push(b'\0');
+ lookups.push(LookupRef::clone(&dir_lookup));
+ }
+
+ if next == entry_count + 1 {
+ next += 1;
+ let lookup = self.get_lookup(dir_lookup.parent)?;
+ let parent_dir = self.open_dir(lookup.inode).await?;
+ let file = parent_dir.lookup_self().await?;
+ let stat = to_stat(to_inode(&file), &file)?;
+ let name = OsStr::new("..");
+ match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+ ReplyBufState::Ok => (),
+ ReplyBufState::Full => return Ok(lookups),
}
+ lookups.push(lookup);
+ }
- Self::xattr_reply_value(req, &mut buffer, size)
- });
+ Ok(lookups)
}
- /// Helper function used to respond to get- and listxattr calls in order to
- /// de-duplicate code.
- fn xattr_reply_value(req: Request, value: &mut [u8], size: size_t) -> Result<(), i32> {
- let len = value.len();
-
- if size == 0 {
- // reply the needed buffer size to fit value
- let _res = unsafe { fuse_reply_xattr(req, len) };
- } else if size < len {
- // value does not fit into requested buffer size
- return Err(libc::ERANGE);
- } else {
- // value fits into requested buffer size, send value
- let _res = unsafe {
- let vptr = value.as_mut_ptr() as *mut c_char;
- fuse_reply_buf(req, vptr, len)
- };
+ async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
+ let file = self.get_lookup(inode)?;
+ let content = self.open_content(&file)?;
+ let mut buf = vec::undefined(len);
+ let got = content.read_at(&mut buf, offset).await?;
+ buf.truncate(got);
+ Ok(buf)
+ }
+
+ async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
+ let lookup = self.get_lookup(inode)?;
+ let file = self.open_entry(&lookup).await?;
+ match file.get_symlink() {
+ None => io_return!(libc::EINVAL),
+ Some(link) => Ok(link.to_owned()),
}
+ }
- Ok(())
+ async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
+ // FIXME: Special cases:
+ // b"security.capability"
+ // b"system.posix_acl_access"
+ // b"system.posix_acl_default"
+
+ let lookup = self.get_lookup(inode)?;
+ Ok(self
+ .open_entry(&lookup)
+ .await?
+ .into_entry()
+ .into_metadata()
+ .xattrs)
}
-}
-impl Drop for Session {
- fn drop(&mut self) {
- unsafe {
- fuse_session_unmount(self.ptr);
- fuse_remove_signal_handlers(self.ptr);
- fuse_session_destroy(self.ptr);
+ async fn listxattrs_into(
+ &self,
+ request: &mut requests::ListXAttr,
+ ) -> Result<ReplyBufState, Error> {
+ let xattrs = self.listxattrs(request.inode).await?;
+
+ for entry in xattrs {
+ match request.add_c_string(entry.name()) {
+ ReplyBufState::Ok => (),
+ ReplyBufState::Full => return Ok(ReplyBufState::Full),
+ }
}
+
+ Ok(ReplyBufState::Ok)
}
-}
-/// FUSE entry for fuse_reply_entry in lookup callback
-#[repr(C)]
-struct EntryParam {
- inode: u64,
- generation: u64,
- attr: libc::stat,
- attr_timeout: f64,
- entry_timeout: f64,
-}
+ async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
+ // TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
+ // xattr for an entry...
-/// Create a `libc::stat` with the provided i-node and entry
-fn stat(inode: u64, entry: &DirectoryEntry) -> Result<libc::stat, i32> {
- let nlink = match (entry.entry.mode as u32) & libc::S_IFMT {
- libc::S_IFDIR => 2,
- _ => 1,
- };
- let time = i64::try_from(entry.entry.mtime).map_err(|_| libc::EIO)?;
- let sec = time / 1_000_000_000;
- let nsec = time % 1_000_000_000;
+ // FIXME: Special cases:
+ // b"security.capability"
+ // b"system.posix_acl_access"
+ // b"system.posix_acl_default"
- let mut attr: libc::stat = unsafe { std::mem::zeroed() };
- attr.st_ino = inode;
- attr.st_nlink = nlink;
- attr.st_mode = u32::try_from(entry.entry.mode).map_err(|_| libc::EIO)?;
- attr.st_size = i64::try_from(entry.size).map_err(|_| libc::EIO)?;
- attr.st_uid = entry.entry.uid;
- attr.st_gid = entry.entry.gid;
- attr.st_atime = sec;
- attr.st_atime_nsec = nsec;
- attr.st_mtime = sec;
- attr.st_mtime_nsec = nsec;
- attr.st_ctime = sec;
- attr.st_ctime_nsec = nsec;
-
- Ok(attr)
+ let xattrs = self.listxattrs(inode).await?;
+ for entry in xattrs {
+ if entry.name().to_bytes() == xattr.as_bytes() {
+ return Ok(entry);
+ }
+ }
+ io_return!(libc::ENODATA);
+ }
}
-/// State of ReplyBuf after last add_entry call
-enum ReplyBufState {
- /// Entry was successfully added to ReplyBuf
- Okay,
- /// Entry did not fit into ReplyBuf, was not added
- Overfull,
+#[inline]
+fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
+ to_entry_param(to_inode(&entry), &entry)
}
-/// Used to correctly fill and reply the buffer for the readdirplus callback
-struct ReplyBuf {
- /// internal buffer holding the binary data
- buffer: Vec<u8>,
- /// offset up to which the buffer is filled already
- filled: usize,
- /// fuse request the buffer is used to reply to
- req: Request,
- /// index of the next item, telling from were to start on the next readdirplus
- /// callback in case not everything fitted in the buffer on the first reply.
- next: usize,
+#[inline]
+fn to_inode(entry: &FileEntry) -> u64 {
+ if entry.is_dir() {
+ entry.entry_range_info().entry_range.end
+ } else {
+ entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
+ }
}
-impl ReplyBuf {
- /// Create a new empty `ReplyBuf` of `size` with element counting index at `next`.
- fn new(req: Request, size: usize, next: usize) -> Self {
- Self {
- buffer: vec![0; size],
- filled: 0,
- req,
- next,
- }
- }
+fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
+ Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
+}
- /// Reply to the `Request` with the filled buffer
- fn reply_filled(&mut self) -> Result<(), i32> {
- let _res = unsafe {
- let ptr = self.buffer.as_mut_ptr() as *mut c_char;
- fuse_reply_buf(self.req, ptr, self.filled)
- };
+fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
+ let nlink = if entry.is_dir() { 2 } else { 1 };
- Ok(())
- }
+ let metadata = entry.metadata();
- /// Fill the buffer for the fuse reply with the next dir entry by invoking the
- /// fuse_add_direntry_plus helper function for the readdirplus callback.
- /// The attr type T is has to be `libc::stat` or `EntryParam` accordingly.
- fn fill(&mut self, name: &CString, attr: &EntryParam) -> Result<ReplyBufState, Error> {
- self.next += 1;
- let size = self.buffer.len();
- let bytes = unsafe {
- let bptr = self.buffer.as_mut_ptr() as *mut c_char;
- let nptr = name.as_ptr();
- fuse_add_direntry_plus(
- self.req,
- bptr.offset(self.filled as isize),
- size - self.filled,
- nptr,
- Some(&attr),
- i32::try_from(self.next)?,
- ) as usize
- };
- self.filled += bytes;
- // Never exceed the max size requested in the callback (=buffer.len())
- if self.filled > size {
- // Entry did not fit, so go back to previous state
- self.filled -= bytes;
- self.next -= 1;
- return Ok(ReplyBufState::Overfull);
- }
+ let time = i64::try_from(metadata.stat.mtime)
+ .map_err(|_| format_err!("mtime does not fit into a signed 64 bit integer"))?;
+ let sec = time / 1_000_000_000;
+ let nsec = time % 1_000_000_000;
- Ok(ReplyBufState::Okay)
- }
+ let mut stat: libc::stat = unsafe { mem::zeroed() };
+ stat.st_ino = inode;
+ stat.st_nlink = nlink;
+ stat.st_mode = u32::try_from(metadata.stat.mode)
+ .map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
+ stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
+ .map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
+ stat.st_uid = metadata.stat.uid;
+ stat.st_gid = metadata.stat.gid;
+ stat.st_atime = sec;
+ stat.st_atime_nsec = nsec;
+ stat.st_mtime = sec;
+ stat.st_mtime_nsec = nsec;
+ stat.st_ctime = sec;
+ stat.st_ctime_nsec = nsec;
+ Ok(stat)
}
+++ /dev/null
-use libc;
-use nix::sys::stat::FileStat;
-
-#[inline(always)]
-pub fn is_directory(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR
-}
-
-#[inline(always)]
-pub fn is_symlink(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFLNK
-}
-
-#[inline(always)]
-pub fn is_reg_file(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFREG
-}
-
-#[inline(always)]
-pub fn is_block_dev(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFBLK
-}
-
-#[inline(always)]
-pub fn is_char_dev(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFCHR
-}
-
-#[inline(always)]
-pub fn is_fifo(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFIFO
-}
-#[inline(always)]
-pub fn is_socket(stat: &FileStat) -> bool {
- (stat.st_mode & libc::S_IFMT) == libc::S_IFSOCK
-}
+++ /dev/null
-//! `MatchPattern` defines a match pattern used to match filenames encountered
-//! during encoding or decoding of a `pxar` archive.
-//! `fnmatch` is used internally to match filenames against the patterns.
-//! Shell wildcard pattern can be used to match multiple filenames, see manpage
-//! `glob(7)`.
-//! `**` is treated special, as it matches multiple directories in a path.
-
-use std::ffi::{CStr, CString};
-use std::fs::File;
-use std::io::Read;
-use std::os::unix::io::{FromRawFd, RawFd};
-
-use anyhow::{bail, Error};
-use libc::{c_char, c_int};
-use nix::errno::Errno;
-use nix::fcntl;
-use nix::fcntl::{AtFlags, OFlag};
-use nix::sys::stat;
-use nix::sys::stat::{FileStat, Mode};
-use nix::NixPath;
-
-pub const FNM_NOMATCH: c_int = 1;
-
-extern "C" {
- fn fnmatch(pattern: *const c_char, string: *const c_char, flags: c_int) -> c_int;
-}
-
-#[derive(Debug, PartialEq, Clone, Copy)]
-pub enum MatchType {
- None,
- Positive,
- Negative,
- PartialPositive,
- PartialNegative,
-}
-
-/// `MatchPattern` provides functionality for filename glob pattern matching
-/// based on glibc's `fnmatch`.
-/// Positive matches return `MatchType::PartialPositive` or `MatchType::Positive`.
-/// Patterns starting with `!` are interpreted as negation, meaning they will
-/// return `MatchType::PartialNegative` or `MatchType::Negative`.
-/// No matches result in `MatchType::None`.
-/// # Examples:
-/// ```
-/// # use std::ffi::CString;
-/// # use self::proxmox_backup::pxar::{MatchPattern, MatchType};
-/// # fn main() -> Result<(), anyhow::Error> {
-/// let filename = CString::new("some.conf")?;
-/// let is_dir = false;
-///
-/// /// Positive match of any file ending in `.conf` in any subdirectory
-/// let positive = MatchPattern::from_line(b"**/*.conf")?.unwrap();
-/// let m_positive = positive.as_slice().matches_filename(&filename, is_dir)?;
-/// assert!(m_positive == MatchType::Positive);
-///
-/// /// Negative match of filenames starting with `s`
-/// let negative = MatchPattern::from_line(b"![s]*")?.unwrap();
-/// let m_negative = negative.as_slice().matches_filename(&filename, is_dir)?;
-/// assert!(m_negative == MatchType::Negative);
-/// # Ok(())
-/// # }
-/// ```
-#[derive(Clone, Eq, PartialOrd)]
-pub struct MatchPattern {
- pattern: Vec<u8>,
- match_positive: bool,
- match_dir_only: bool,
-}
-
-impl std::cmp::PartialEq for MatchPattern {
- fn eq(&self, other: &Self) -> bool {
- self.pattern == other.pattern
- && self.match_positive == other.match_positive
- && self.match_dir_only == other.match_dir_only
- }
-}
-
-impl std::cmp::Ord for MatchPattern {
- fn cmp(&self, other: &Self) -> std::cmp::Ordering {
- (&self.pattern, &self.match_positive, &self.match_dir_only)
- .cmp(&(&other.pattern, &other.match_positive, &other.match_dir_only))
- }
-}
-
-impl MatchPattern {
- /// Read a list of `MatchPattern` from file.
- /// The file is read line by line (lines terminated by newline character),
- /// each line may only contain one pattern.
- /// Leading `/` are ignored and lines starting with `#` are interpreted as
- /// comments and not included in the resulting list.
- /// Patterns ending in `/` will match only directories.
- ///
- /// On success, a list of match pattern is returned as well as the raw file
- /// byte buffer together with the files stats.
- /// This is done in order to avoid reading the file more than once during
- /// encoding of the archive.
- pub fn from_file<P: ?Sized + NixPath>(
- parent_fd: RawFd,
- filename: &P,
- ) -> Result<Option<(Vec<MatchPattern>, Vec<u8>, FileStat)>, nix::Error> {
- let stat = match stat::fstatat(parent_fd, filename, AtFlags::AT_SYMLINK_NOFOLLOW) {
- Ok(stat) => stat,
- Err(nix::Error::Sys(Errno::ENOENT)) => return Ok(None),
- Err(err) => return Err(err),
- };
-
- let filefd = fcntl::openat(parent_fd, filename, OFlag::O_NOFOLLOW, Mode::empty())?;
- let mut file = unsafe { File::from_raw_fd(filefd) };
-
- let mut content_buffer = Vec::new();
- let _bytes = file.read_to_end(&mut content_buffer)
- .map_err(|_| Errno::EIO)?;
-
- let mut match_pattern = Vec::new();
- for line in content_buffer.split(|&c| c == b'\n') {
- if line.is_empty() {
- continue;
- }
- if let Some(pattern) = Self::from_line(line)? {
- match_pattern.push(pattern);
- }
- }
-
- Ok(Some((match_pattern, content_buffer, stat)))
- }
-
- /// Interpret a byte buffer as a sinlge line containing a valid
- /// `MatchPattern`.
- /// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`.
- /// Pattern starting with '!' are interpreted as negative match pattern.
- /// Pattern with trailing `/` match only against directories.
- /// `.` as well as `..` and any pattern containing `\0` are invalid and will
- /// result in an error with Errno::EINVAL.
- pub fn from_line(line: &[u8]) -> Result<Option<MatchPattern>, nix::Error> {
- let mut input = line;
-
- if input.starts_with(b"#") {
- return Ok(None);
- }
-
- let match_positive = if input.starts_with(b"!") {
- // Reduce slice view to exclude "!"
- input = &input[1..];
- false
- } else {
- true
- };
-
- // Paths ending in / match only directory names (no filenames)
- let match_dir_only = if input.ends_with(b"/") {
- let len = input.len();
- input = &input[..len - 1];
- true
- } else {
- false
- };
-
- // Ignore initial slash
- if input.starts_with(b"/") {
- input = &input[1..];
- }
-
- if input.is_empty() || input == b"." || input == b".." || input.contains(&b'\0') {
- return Err(nix::Error::Sys(Errno::EINVAL));
- }
-
- Ok(Some(MatchPattern {
- pattern: input.to_vec(),
- match_positive,
- match_dir_only,
- }))
- }
-
-
- /// Create a `MatchPatternSlice` of the `MatchPattern` to give a view of the
- /// `MatchPattern` without copying its content.
- pub fn as_slice<'a>(&'a self) -> MatchPatternSlice<'a> {
- MatchPatternSlice {
- pattern: self.pattern.as_slice(),
- match_positive: self.match_positive,
- match_dir_only: self.match_dir_only,
- }
- }
-
- /// Dump the content of the `MatchPattern` to stdout.
- /// Intended for debugging purposes only.
- pub fn dump(&self) {
- match (self.match_positive, self.match_dir_only) {
- (true, true) => println!("{:#?}/", self.pattern),
- (true, false) => println!("{:#?}", self.pattern),
- (false, true) => println!("!{:#?}/", self.pattern),
- (false, false) => println!("!{:#?}", self.pattern),
- }
- }
-
- /// Convert a list of MatchPattern to bytes in order to write them to e.g.
- /// a file.
- pub fn to_bytes(patterns: &[MatchPattern]) -> Vec<u8> {
- let mut slices = Vec::new();
- for pattern in patterns {
- slices.push(pattern.as_slice());
- }
-
- MatchPatternSlice::to_bytes(&slices)
- }
-
- /// Invert the match type for this MatchPattern.
- pub fn invert(&mut self) {
- self.match_positive = !self.match_positive;
- }
-}
-
-#[derive(Clone)]
-pub struct MatchPatternSlice<'a> {
- pattern: &'a [u8],
- match_positive: bool,
- match_dir_only: bool,
-}
-
-impl<'a> MatchPatternSlice<'a> {
- /// Returns the pattern before the first `/` encountered as `MatchPatternSlice`.
- /// If no slash is encountered, the `MatchPatternSlice` will be a copy of the
- /// original pattern.
- /// ```
- /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
- /// # fn main() -> Result<(), anyhow::Error> {
- /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
- /// let slice = pattern.as_slice();
- /// let front = slice.get_front_pattern();
- /// /// ... will be the same as ...
- /// let front_pattern = MatchPattern::from_line(b"some")?.unwrap();
- /// let front_slice = front_pattern.as_slice();
- /// # Ok(())
- /// # }
- /// ```
- pub fn get_front_pattern(&'a self) -> MatchPatternSlice<'a> {
- let (front, _) = self.split_at_slash();
- MatchPatternSlice {
- pattern: front,
- match_positive: self.match_positive,
- match_dir_only: self.match_dir_only,
- }
- }
-
- /// Returns the pattern after the first encountered `/` as `MatchPatternSlice`.
- /// If no slash is encountered, the `MatchPatternSlice` will be empty.
- /// ```
- /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
- /// # fn main() -> Result<(), anyhow::Error> {
- /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
- /// let slice = pattern.as_slice();
- /// let rest = slice.get_rest_pattern();
- /// /// ... will be the same as ...
- /// let rest_pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
- /// let rest_slice = rest_pattern.as_slice();
- /// # Ok(())
- /// # }
- /// ```
- pub fn get_rest_pattern(&'a self) -> MatchPatternSlice<'a> {
- let (_, rest) = self.split_at_slash();
- MatchPatternSlice {
- pattern: rest,
- match_positive: self.match_positive,
- match_dir_only: self.match_dir_only,
- }
- }
-
- /// Splits the `MatchPatternSlice` at the first slash encountered and returns the
- /// content before (front pattern) and after the slash (rest pattern),
- /// omitting the slash itself.
- /// Slices starting with `**/` are an exception to this, as the corresponding
- /// `MatchPattern` is intended to match multiple directories.
- /// These pattern slices therefore return a `*` as front pattern and the original
- /// pattern itself as rest pattern.
- fn split_at_slash(&'a self) -> (&'a [u8], &'a [u8]) {
- let pattern = if self.pattern.starts_with(b"./") {
- &self.pattern[2..]
- } else {
- self.pattern
- };
-
- let (mut front, mut rest) = match pattern.iter().position(|&c| c == b'/') {
- Some(ind) => {
- let (front, rest) = pattern.split_at(ind);
- (front, &rest[1..])
- }
- None => (pattern, &pattern[0..0]),
- };
- // '**' is treated such that it maches any directory
- if front == b"**" {
- front = b"*";
- rest = pattern;
- }
-
- (front, rest)
- }
-
- /// Convert a list of `MatchPatternSlice`s to bytes in order to write them to e.g.
- /// a file.
- pub fn to_bytes(patterns: &[MatchPatternSlice]) -> Vec<u8> {
- let mut buffer = Vec::new();
- for pattern in patterns {
- if !pattern.match_positive { buffer.push(b'!'); }
- buffer.extend_from_slice(&pattern.pattern);
- if pattern.match_dir_only { buffer.push(b'/'); }
- buffer.push(b'\n');
- }
- buffer
- }
-
- /// Match the given filename against this `MatchPatternSlice`.
- /// If the filename matches the pattern completely, `MatchType::Positive` or
- /// `MatchType::Negative` is returned, depending if the match pattern is was
- /// declared as positive (no `!` prefix) or negative (`!` prefix).
- /// If the pattern matched only up to the first slash of the pattern,
- /// `MatchType::PartialPositive` or `MatchType::PartialNegatie` is returned.
- /// If the pattern was postfixed by a trailing `/` a match is only valid if
- /// the parameter `is_dir` equals `true`.
- /// No match results in `MatchType::None`.
- pub fn matches_filename(&self, filename: &CStr, is_dir: bool) -> Result<MatchType, Error> {
- let mut res = MatchType::None;
- let (front, _) = self.split_at_slash();
-
- let front = CString::new(front).unwrap();
- let fnmatch_res = unsafe {
- let front_ptr = front.as_ptr() as *const libc::c_char;
- let filename_ptr = filename.as_ptr() as *const libc::c_char;
- fnmatch(front_ptr, filename_ptr, 0)
- };
- if fnmatch_res < 0 {
- bail!("error in fnmatch inside of MatchPattern");
- }
- if fnmatch_res == 0 {
- res = if self.match_positive {
- MatchType::PartialPositive
- } else {
- MatchType::PartialNegative
- };
- }
-
- let full = if self.pattern.starts_with(b"**/") {
- CString::new(&self.pattern[3..]).unwrap()
- } else {
- CString::new(&self.pattern[..]).unwrap()
- };
- let fnmatch_res = unsafe {
- let full_ptr = full.as_ptr() as *const libc::c_char;
- let filename_ptr = filename.as_ptr() as *const libc::c_char;
- fnmatch(full_ptr, filename_ptr, 0)
- };
- if fnmatch_res < 0 {
- bail!("error in fnmatch inside of MatchPattern");
- }
- if fnmatch_res == 0 {
- res = if self.match_positive {
- MatchType::Positive
- } else {
- MatchType::Negative
- };
- }
-
- if !is_dir && self.match_dir_only {
- res = MatchType::None;
- }
-
- if !is_dir && (res == MatchType::PartialPositive || res == MatchType::PartialNegative) {
- res = MatchType::None;
- }
-
- Ok(res)
- }
-
- /// Match the given filename against the set of `MatchPatternSlice`s.
- ///
- /// A positive match is intended to includes the full subtree (unless another
- /// negative match excludes entries later).
- /// The `MatchType` together with an updated `MatchPatternSlice` list for passing
- /// to the matched child is returned.
- /// ```
- /// # use std::ffi::CString;
- /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
- /// # fn main() -> Result<(), anyhow::Error> {
- /// let patterns = vec![
- /// MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
- /// MatchPattern::from_line(b"to_match/")?.unwrap()
- /// ];
- /// let mut slices = Vec::new();
- /// for pattern in &patterns {
- /// slices.push(pattern.as_slice());
- /// }
- /// let filename = CString::new("some")?;
- /// let is_dir = true;
- /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
- /// &filename,
- /// is_dir,
- /// &slices
- /// )?;
- /// assert_eq!(match_type, MatchType::PartialPositive);
- /// /// child pattern will be the same as ...
- /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
- /// let slice = pattern.as_slice();
- ///
- /// let filename = CString::new("to_match")?;
- /// let is_dir = true;
- /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
- /// &filename,
- /// is_dir,
- /// &slices
- /// )?;
- /// assert_eq!(match_type, MatchType::Positive);
- /// /// child pattern will be the same as ...
- /// let pattern = MatchPattern::from_line(b"**/*")?.unwrap();
- /// let slice = pattern.as_slice();
- /// # Ok(())
- /// # }
- /// ```
- pub fn match_filename_include(
- filename: &CStr,
- is_dir: bool,
- match_pattern: &'a [MatchPatternSlice<'a>],
- ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
- let mut child_pattern = Vec::new();
- let mut match_state = MatchType::None;
-
- for pattern in match_pattern {
- match pattern.matches_filename(filename, is_dir)? {
- MatchType::None => continue,
- MatchType::Positive => match_state = MatchType::Positive,
- MatchType::Negative => match_state = MatchType::Negative,
- MatchType::PartialPositive => {
- if match_state != MatchType::Negative && match_state != MatchType::Positive {
- match_state = MatchType::PartialPositive;
- }
- child_pattern.push(pattern.get_rest_pattern());
- }
- MatchType::PartialNegative => {
- if match_state == MatchType::PartialPositive {
- match_state = MatchType::PartialNegative;
- }
- child_pattern.push(pattern.get_rest_pattern());
- }
- }
- }
-
- Ok((match_state, child_pattern))
- }
-
- /// Match the given filename against the set of `MatchPatternSlice`s.
- ///
- /// A positive match is intended to exclude the full subtree, independent of
- /// matches deeper down the tree.
- /// The `MatchType` together with an updated `MatchPattern` list for passing
- /// to the matched child is returned.
- /// ```
- /// # use std::ffi::CString;
- /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
- /// # fn main() -> Result<(), anyhow::Error> {
- /// let patterns = vec![
- /// MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
- /// MatchPattern::from_line(b"to_match/")?.unwrap()
- /// ];
- /// let mut slices = Vec::new();
- /// for pattern in &patterns {
- /// slices.push(pattern.as_slice());
- /// }
- /// let filename = CString::new("some")?;
- /// let is_dir = true;
- /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
- /// &filename,
- /// is_dir,
- /// &slices,
- /// )?;
- /// assert_eq!(match_type, MatchType::PartialPositive);
- /// /// child pattern will be the same as ...
- /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
- /// let slice = pattern.as_slice();
- ///
- /// let filename = CString::new("to_match")?;
- /// let is_dir = true;
- /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
- /// &filename,
- /// is_dir,
- /// &slices,
- /// )?;
- /// assert_eq!(match_type, MatchType::Positive);
- /// /// child pattern will be empty
- /// # Ok(())
- /// # }
- /// ```
- pub fn match_filename_exclude(
- filename: &CStr,
- is_dir: bool,
- match_pattern: &'a [MatchPatternSlice<'a>],
- ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
- let mut child_pattern = Vec::new();
- let mut match_state = MatchType::None;
-
- for pattern in match_pattern {
- match pattern.matches_filename(filename, is_dir)? {
- MatchType::None => {}
- MatchType::Positive => match_state = MatchType::Positive,
- MatchType::Negative => match_state = MatchType::Negative,
- match_type => {
- if match_state != MatchType::Positive && match_state != MatchType::Negative {
- match_state = match_type;
- }
- child_pattern.push(pattern.get_rest_pattern());
- }
- }
- }
-
- Ok((match_state, child_pattern))
- }
-}
--- /dev/null
+use std::ffi::{CStr, CString};
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::Path;
+
+use anyhow::{bail, format_err, Error};
+use nix::errno::Errno;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pxar::Metadata;
+
+use proxmox::sys::error::SysError;
+use proxmox::tools::fd::RawFdNum;
+use proxmox::{c_result, c_try};
+
+use crate::pxar::flags;
+use crate::pxar::tools::perms_from_metadata;
+use crate::tools::{acl, fs, xattr};
+
+//
+// utility functions
+//
+
+fn flags_contain(flags: u64, test_flag: u64) -> bool {
+ 0 != (flags & test_flag)
+}
+
+fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
+ if err.is_errno(Errno::EOPNOTSUPP) {
+ Ok(())
+ } else {
+ Err(err)
+ }
+}
+
+fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
+ if err.is_errno(Errno::EOPNOTSUPP) {
+ *not_supp = true;
+ Ok(())
+ } else {
+ Err(err)
+ }
+}
+
+fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
+ // restore mtime
+ const UTIME_OMIT: i64 = (1 << 30) - 2;
+ const NANOS_PER_SEC: i64 = 1_000_000_000;
+
+ let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
+ let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;
+
+ let times: [libc::timespec; 2] = [
+ libc::timespec {
+ tv_sec: 0,
+ tv_nsec: UTIME_OMIT,
+ },
+ libc::timespec {
+ tv_sec: sec,
+ tv_nsec: nsec,
+ },
+ ];
+
+ times
+}
+
+//
+// metadata application:
+//
+
+pub fn apply_at(
+ flags: u64,
+ metadata: &Metadata,
+ parent: RawFd,
+ file_name: &CStr,
+) -> Result<(), Error> {
+ let fd = proxmox::tools::fd::Fd::openat(
+ &unsafe { RawFdNum::from_raw_fd(parent) },
+ file_name,
+ OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
+ Mode::empty(),
+ )?;
+
+ apply(flags, metadata, fd.as_raw_fd(), file_name)
+}
+
+pub fn apply_with_path<T: AsRef<Path>>(
+ flags: u64,
+ metadata: &Metadata,
+ fd: RawFd,
+ file_name: T,
+) -> Result<(), Error> {
+ apply(
+ flags,
+ metadata,
+ fd,
+ &CString::new(file_name.as_ref().as_os_str().as_bytes())?,
+ )
+}
+
+pub fn apply(flags: u64, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
+ let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
+ let c_proc_path = c_proc_path.as_ptr();
+
+ if metadata.stat.flags != 0 {
+ todo!("apply flags!");
+ }
+
+ unsafe {
+ // UID and GID first, as this fails if we lose access anyway.
+ c_result!(libc::chown(
+ c_proc_path,
+ metadata.stat.uid,
+ metadata.stat.gid
+ ))
+ .map(drop)
+ .or_else(allow_notsupp)?;
+ }
+
+ let mut skip_xattrs = false;
+ apply_xattrs(flags, c_proc_path, metadata, &mut skip_xattrs)?;
+ add_fcaps(flags, c_proc_path, metadata, &mut skip_xattrs)?;
+ apply_acls(flags, c_proc_path, metadata)?;
+ apply_quota_project_id(flags, fd, metadata)?;
+
+    // Finally mode and time. We may lose access with mode, but changing the mode also
+ // affects times.
+ if !metadata.is_symlink() {
+ c_result!(unsafe { libc::chmod(c_proc_path, perms_from_metadata(metadata)?.bits()) })
+ .map(drop)
+ .or_else(allow_notsupp)?;
+ }
+
+ let res = c_result!(unsafe {
+ libc::utimensat(
+ libc::AT_FDCWD,
+ c_proc_path,
+ nsec_to_update_timespec(metadata.stat.mtime).as_ptr(),
+ 0,
+ )
+ });
+ match res {
+ Ok(_) => (),
+ Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
+ Err(ref err) if err.is_errno(Errno::EPERM) => {
+ println!(
+ "failed to restore mtime attribute on {:?}: {}",
+ file_name, err
+ );
+ }
+ Err(err) => return Err(err.into()),
+ }
+
+ Ok(())
+}
+
+fn add_fcaps(
+ flags: u64,
+ c_proc_path: *const libc::c_char,
+ metadata: &Metadata,
+ skip_xattrs: &mut bool,
+) -> Result<(), Error> {
+ if *skip_xattrs || !flags_contain(flags, flags::WITH_FCAPS) {
+ return Ok(());
+ }
+ let fcaps = match metadata.fcaps.as_ref() {
+ Some(fcaps) => fcaps,
+ None => return Ok(()),
+ };
+
+ c_result!(unsafe {
+ libc::setxattr(
+ c_proc_path,
+ xattr::xattr_name_fcaps().as_ptr(),
+ fcaps.data.as_ptr() as *const libc::c_void,
+ fcaps.data.len(),
+ 0,
+ )
+ })
+ .map(drop)
+ .or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;
+
+ Ok(())
+}
+
+fn apply_xattrs(
+ flags: u64,
+ c_proc_path: *const libc::c_char,
+ metadata: &Metadata,
+ skip_xattrs: &mut bool,
+) -> Result<(), Error> {
+ if *skip_xattrs || !flags_contain(flags, flags::WITH_XATTRS) {
+ return Ok(());
+ }
+
+ for xattr in &metadata.xattrs {
+ if *skip_xattrs {
+ return Ok(());
+ }
+
+ if !xattr::is_valid_xattr_name(xattr.name()) {
+ println!("skipping invalid xattr named {:?}", xattr.name());
+ continue;
+ }
+
+ c_result!(unsafe {
+ libc::setxattr(
+ c_proc_path,
+ xattr.name().as_ptr() as *const libc::c_char,
+ xattr.value().as_ptr() as *const libc::c_void,
+ xattr.value().len(),
+ 0,
+ )
+ })
+ .map(drop)
+ .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
+ }
+
+ Ok(())
+}
+
+fn apply_acls(
+ flags: u64,
+ c_proc_path: *const libc::c_char,
+ metadata: &Metadata,
+) -> Result<(), Error> {
+ if !flags_contain(flags, flags::WITH_ACL) || metadata.acl.is_empty() {
+ return Ok(());
+ }
+
+ let mut acl = acl::ACL::init(5)?;
+
+ // acl type access:
+ acl.add_entry_full(
+ acl::ACL_USER_OBJ,
+ None,
+ acl::mode_user_to_acl_permissions(metadata.stat.mode),
+ )?;
+
+ acl.add_entry_full(
+ acl::ACL_OTHER,
+ None,
+ acl::mode_other_to_acl_permissions(metadata.stat.mode),
+ )?;
+
+ match metadata.acl.group_obj.as_ref() {
+ Some(group_obj) => {
+ acl.add_entry_full(
+ acl::ACL_MASK,
+ None,
+ acl::mode_group_to_acl_permissions(metadata.stat.mode),
+ )?;
+ acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
+ }
+ None => {
+ acl.add_entry_full(
+ acl::ACL_GROUP_OBJ,
+ None,
+ acl::mode_group_to_acl_permissions(metadata.stat.mode),
+ )?;
+ }
+ }
+
+ for user in &metadata.acl.users {
+ acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
+ }
+
+ for group in &metadata.acl.groups {
+ acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
+ }
+
+ if !acl.is_valid() {
+ bail!("Error while restoring ACL - ACL invalid");
+ }
+
+ c_try!(unsafe { acl::acl_set_file(c_proc_path, acl::ACL_TYPE_ACCESS, acl.ptr,) });
+ drop(acl);
+
+ // acl type default:
+ if let Some(default) = metadata.acl.default.as_ref() {
+ let mut acl = acl::ACL::init(5)?;
+
+ acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
+
+ acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
+
+ acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
+
+ if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
+ acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
+ }
+
+ for user in &metadata.acl.default_users {
+ acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
+ }
+
+ for group in &metadata.acl.default_groups {
+ acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
+ }
+
+ if !acl.is_valid() {
+ bail!("Error while restoring ACL - ACL invalid");
+ }
+
+ c_try!(unsafe { acl::acl_set_file(c_proc_path, acl::ACL_TYPE_DEFAULT, acl.ptr,) });
+ }
+
+ Ok(())
+}
+
+fn apply_quota_project_id(flags: u64, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
+ if !flags_contain(flags, flags::WITH_QUOTA_PROJID) {
+ return Ok(());
+ }
+
+ let projid = match metadata.quota_project_id {
+ Some(projid) => projid,
+ None => return Ok(()),
+ };
+
+ let mut fsxattr = fs::FSXAttr::default();
+ unsafe {
+ fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
+ format_err!(
+ "error while getting fsxattr to restore quota project id - {}",
+ err
+ )
+ })?;
+
+ fsxattr.fsx_projid = projid.projid as u32;
+
+ fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
+ format_err!(
+ "error while setting fsxattr to restore quota project id - {}",
+ err
+ )
+ })?;
+ }
+
+ Ok(())
+}
+++ /dev/null
-//! *pxar* format decoder.
-//!
-//! This module contain the code to decode *pxar* archive files.
-use std::ffi::{CStr, CString};
-use std::ffi::{OsStr, OsString};
-use std::io::{Read, Write};
-use std::os::unix::ffi::{OsStrExt, OsStringExt};
-use std::os::unix::io::AsRawFd;
-use std::os::unix::io::FromRawFd;
-use std::os::unix::io::RawFd;
-use std::path::{Path, PathBuf};
-
-use endian_trait::Endian;
-use anyhow::{bail, format_err, Error};
-use nix::errno::Errno;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-use nix::NixPath;
-
-use proxmox::tools::io::ReadExt;
-use proxmox::tools::vec;
-
-use super::dir_stack::{PxarDir, PxarDirStack};
-use super::flags;
-use super::format_definition::*;
-use super::match_pattern::{MatchPattern, MatchPatternSlice, MatchType};
-
-use crate::tools::acl;
-use crate::tools::fs;
-use crate::tools::xattr;
-
-// This one need Read, but works without Seek
-pub struct SequentialDecoder<R: Read> {
- reader: R,
- feature_flags: u64,
- allow_existing_dirs: bool,
- skip_buffer: Vec<u8>,
- callback: Option<Box<dyn Fn(&Path) -> Result<(), Error> + Send>>,
-}
-
-const HEADER_SIZE: u64 = std::mem::size_of::<PxarHeader>() as u64;
-
-impl<R: Read> SequentialDecoder<R> {
-
- pub fn new(
- reader: R,
- feature_flags: u64,
- ) -> Self {
- let skip_buffer = vec::undefined(64 * 1024);
-
- Self {
- reader,
- feature_flags,
- allow_existing_dirs: false,
- skip_buffer,
- callback: None,
- }
- }
-
- pub fn set_callback<F: Fn(&Path) -> Result<(), Error> + Send + 'static>(&mut self, callback: F ) {
- self.callback = Some(Box::new(callback));
- }
-
- pub fn set_allow_existing_dirs(&mut self, allow: bool) {
- self.allow_existing_dirs = allow;
- }
-
- pub(crate) fn get_reader_mut(&mut self) -> &mut R {
- &mut self.reader
- }
-
- pub(crate) fn read_item<T: Endian>(&mut self) -> Result<T, Error> {
- let mut result = std::mem::MaybeUninit::<T>::uninit();
-
- let buffer = unsafe {
- std::slice::from_raw_parts_mut(result.as_mut_ptr() as *mut u8, std::mem::size_of::<T>())
- };
-
- self.reader.read_exact(buffer)?;
- let result = unsafe { result.assume_init() };
-
- Ok(result.from_le())
- }
-
- pub(crate) fn read_link(&mut self, size: u64) -> Result<PathBuf, Error> {
- if size < (HEADER_SIZE + 2) {
- bail!("detected short link target.");
- }
- let target_len = size - HEADER_SIZE;
-
- if target_len > (libc::PATH_MAX as u64) {
- bail!("link target too long ({}).", target_len);
- }
-
- let mut buffer = self.reader.read_exact_allocated(target_len as usize)?;
-
- let last_byte = buffer.pop().unwrap();
- if last_byte != 0u8 {
- bail!("link target not nul terminated.");
- }
-
- Ok(PathBuf::from(std::ffi::OsString::from_vec(buffer)))
- }
-
- pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> {
- if size < (HEADER_SIZE + 8 + 2) {
- bail!("detected short hardlink header.");
- }
- let offset: u64 = self.read_item()?;
- let target = self.read_link(size - 8)?;
-
- for c in target.components() {
- match c {
- std::path::Component::Normal(_) => { /* OK */ }
- _ => bail!("hardlink target contains invalid component {:?}", c),
- }
- }
-
- Ok((target, offset))
- }
-
- pub(crate) fn read_filename(&mut self, size: u64) -> Result<OsString, Error> {
- if size < (HEADER_SIZE + 2) {
- bail!("detected short filename");
- }
- let name_len = size - HEADER_SIZE;
-
- if name_len > ((libc::FILENAME_MAX as u64) + 1) {
- bail!("filename too long ({}).", name_len);
- }
-
- let mut buffer = self.reader.read_exact_allocated(name_len as usize)?;
-
- let last_byte = buffer.pop().unwrap();
- if last_byte != 0u8 {
- bail!("filename entry not nul terminated.");
- }
-
- if buffer == b"." || buffer == b".." {
- bail!("found invalid filename '.' or '..'.");
- }
-
- if buffer.iter().any(|b| (*b == b'/' || *b == b'\0')) {
- bail!("found invalid filename with slashes or nul bytes.");
- }
-
- let name = std::ffi::OsString::from_vec(buffer);
- if name.is_empty() {
- bail!("found empty filename.");
- }
-
- Ok(name)
- }
-
- fn has_features(&self, feature_flags: u64) -> bool {
- (self.feature_flags & feature_flags) == feature_flags
- }
-
- fn read_xattr(&mut self, size: usize) -> Result<PxarXAttr, Error> {
- let buffer = self.reader.read_exact_allocated(size)?;
-
- let separator = buffer
- .iter()
- .position(|c| *c == b'\0')
- .ok_or_else(|| format_err!("no value found in xattr"))?;
-
- let (name, value) = buffer.split_at(separator + 1);
- let c_name = unsafe { CStr::from_bytes_with_nul_unchecked(name) };
- if !xattr::is_valid_xattr_name(c_name) || xattr::is_security_capability(c_name) {
- bail!("incorrect xattr name - {:?}.", c_name);
- }
-
- Ok(PxarXAttr {
- name: name.to_vec(),
- value: value[1..].to_vec(),
- })
- }
-
- fn read_fcaps(&mut self, size: usize) -> Result<PxarFCaps, Error> {
- let buffer = self.reader.read_exact_allocated(size)?;
-
- Ok(PxarFCaps { data: buffer })
- }
-
- pub(crate) fn read_attributes(&mut self) -> Result<(PxarHeader, PxarAttributes), Error> {
- let mut attr = PxarAttributes::default();
- let mut head: PxarHeader = self.read_item()?;
- let mut size = (head.size - HEADER_SIZE) as usize;
- loop {
- match head.htype {
- PXAR_XATTR => {
- if self.has_features(flags::WITH_XATTRS) {
- attr.xattrs.push(self.read_xattr(size)?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_FCAPS => {
- if self.has_features(flags::WITH_FCAPS) {
- attr.fcaps = Some(self.read_fcaps(size)?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_USER => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_user.push(self.read_item::<PxarACLUser>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_GROUP => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_group.push(self.read_item::<PxarACLGroup>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_GROUP_OBJ => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_group_obj = Some(self.read_item::<PxarACLGroupObj>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_DEFAULT => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_default = Some(self.read_item::<PxarACLDefault>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_DEFAULT_USER => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_default_user.push(self.read_item::<PxarACLUser>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_ACL_DEFAULT_GROUP => {
- if self.has_features(flags::WITH_ACL) {
- attr.acl_default_group
- .push(self.read_item::<PxarACLGroup>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- PXAR_QUOTA_PROJID => {
- if self.has_features(flags::WITH_QUOTA_PROJID) {
- attr.quota_projid = Some(self.read_item::<PxarQuotaProjID>()?);
- } else {
- self.skip_bytes(size)?;
- }
- }
- _ => break,
- }
- head = self.read_item()?;
- size = (head.size - HEADER_SIZE) as usize;
- }
-
- Ok((head, attr))
- }
-
- fn restore_attributes(
- &mut self,
- fd: RawFd,
- attr: &PxarAttributes,
- entry: &PxarEntry,
- ) -> Result<(), Error> {
- self.restore_xattrs_fcaps_fd(fd, &attr.xattrs, &attr.fcaps)?;
-
- let mut acl = acl::ACL::init(5)?;
- acl.add_entry_full(
- acl::ACL_USER_OBJ,
- None,
- acl::mode_user_to_acl_permissions(entry.mode),
- )?;
- acl.add_entry_full(
- acl::ACL_OTHER,
- None,
- acl::mode_other_to_acl_permissions(entry.mode),
- )?;
- match &attr.acl_group_obj {
- Some(group_obj) => {
- acl.add_entry_full(
- acl::ACL_MASK,
- None,
- acl::mode_group_to_acl_permissions(entry.mode),
- )?;
- acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions)?;
- }
- None => {
- acl.add_entry_full(
- acl::ACL_GROUP_OBJ,
- None,
- acl::mode_group_to_acl_permissions(entry.mode),
- )?;
- }
- }
- for user in &attr.acl_user {
- acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions)?;
- }
- for group in &attr.acl_group {
- acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions)?;
- }
- let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
- if !acl.is_valid() {
- bail!("Error while restoring ACL - ACL invalid");
- }
- acl.set_file(&proc_path, acl::ACL_TYPE_ACCESS)?;
-
- if let Some(default) = &attr.acl_default {
- let mut acl = acl::ACL::init(5)?;
- acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions)?;
- acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions)?;
- acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions)?;
- if default.mask_permissions != std::u64::MAX {
- acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions)?;
- }
- for user in &attr.acl_default_user {
- acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions)?;
- }
- for group in &attr.acl_default_group {
- acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions)?;
- }
- if !acl.is_valid() {
- bail!("Error while restoring ACL - ACL invalid");
- }
- acl.set_file(&proc_path, acl::ACL_TYPE_DEFAULT)?;
- }
- self.restore_quota_projid(fd, &attr.quota_projid)?;
-
- Ok(())
- }
-
- // Restore xattrs and fcaps to the given RawFd.
- fn restore_xattrs_fcaps_fd(
- &mut self,
- fd: RawFd,
- xattrs: &[PxarXAttr],
- fcaps: &Option<PxarFCaps>,
- ) -> Result<(), Error> {
- for xattr in xattrs {
- let name = CString::new(&xattr.name[..])
- .map_err(|_| format_err!("invalid xattr name with zeroes"))?;
- if let Err(err) = xattr::fsetxattr(fd, &name, &xattr.value) {
- bail!("fsetxattr failed with error: {}", err);
- }
- }
- if let Some(fcaps) = fcaps {
- if let Err(err) = xattr::fsetxattr_fcaps(fd, &fcaps.data) {
- bail!("fsetxattr_fcaps failed with error: {}", err);
- }
- }
-
- Ok(())
- }
-
- fn restore_quota_projid(
- &mut self,
- fd: RawFd,
- projid: &Option<PxarQuotaProjID>,
- ) -> Result<(), Error> {
- if let Some(projid) = projid {
- let mut fsxattr = fs::FSXAttr::default();
- unsafe {
- fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
- format_err!(
- "error while getting fsxattr to restore quota project id - {}",
- err
- )
- })?;
- }
- fsxattr.fsx_projid = projid.projid as u32;
- unsafe {
- fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
- format_err!(
- "error while setting fsxattr to restore quota project id - {}",
- err
- )
- })?;
- }
- }
-
- Ok(())
- }
-
- fn restore_mode(&mut self, entry: &PxarEntry, fd: RawFd) -> Result<(), Error> {
- let mode = Mode::from_bits_truncate((entry.mode as u32) & 0o7777);
-
- nix::sys::stat::fchmod(fd, mode)?;
-
- Ok(())
- }
-
- fn restore_mode_at(
- &mut self,
- entry: &PxarEntry,
- dirfd: RawFd,
- filename: &OsStr,
- ) -> Result<(), Error> {
- let mode = Mode::from_bits_truncate((entry.mode as u32) & 0o7777);
-
- // NOTE: we want :FchmodatFlags::NoFollowSymlink, but fchmodat does not support that
- // on linux (see man fchmodat). Fortunately, we can simply avoid calling this on symlinks.
- nix::sys::stat::fchmodat(
- Some(dirfd),
- filename,
- mode,
- nix::sys::stat::FchmodatFlags::FollowSymlink,
- )?;
-
- Ok(())
- }
-
- fn restore_ugid(&mut self, entry: &PxarEntry, fd: RawFd) -> Result<(), Error> {
- let uid = entry.uid;
- let gid = entry.gid;
-
- let res = unsafe { libc::fchown(fd, uid, gid) };
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_ugid_at(
- &mut self,
- entry: &PxarEntry,
- dirfd: RawFd,
- filename: &OsStr,
- ) -> Result<(), Error> {
- let uid = entry.uid;
- let gid = entry.gid;
-
- let res = filename.with_nix_path(|cstr| unsafe {
- libc::fchownat(dirfd, cstr.as_ptr(), uid, gid, libc::AT_SYMLINK_NOFOLLOW)
- })?;
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_mtime(&mut self, entry: &PxarEntry, fd: RawFd) -> Result<(), Error> {
- let times = nsec_to_update_timespec(entry.mtime);
-
- let res = unsafe { libc::futimens(fd, ×[0]) };
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_mtime_at(
- &mut self,
- entry: &PxarEntry,
- dirfd: RawFd,
- filename: &OsStr,
- ) -> Result<(), Error> {
- let times = nsec_to_update_timespec(entry.mtime);
-
- let res = filename.with_nix_path(|cstr| unsafe {
- libc::utimensat(dirfd, cstr.as_ptr(), ×[0], libc::AT_SYMLINK_NOFOLLOW)
- })?;
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_device_at(
- &mut self,
- entry: &PxarEntry,
- dirfd: RawFd,
- filename: &OsStr,
- device: &PxarDevice,
- ) -> Result<(), Error> {
- let rdev = nix::sys::stat::makedev(device.major, device.minor);
- let mode = ((entry.mode as u32) & libc::S_IFMT) | 0o0600;
- let res = filename
- .with_nix_path(|cstr| unsafe { libc::mknodat(dirfd, cstr.as_ptr(), mode, rdev) })?;
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_socket_at(&mut self, dirfd: RawFd, filename: &OsStr) -> Result<(), Error> {
- let mode = libc::S_IFSOCK | 0o0600;
- let res = filename
- .with_nix_path(|cstr| unsafe { libc::mknodat(dirfd, cstr.as_ptr(), mode, 0) })?;
- Errno::result(res)?;
-
- Ok(())
- }
-
- fn restore_fifo_at(&mut self, dirfd: RawFd, filename: &OsStr) -> Result<(), Error> {
- let mode = libc::S_IFIFO | 0o0600;
- let res =
- filename.with_nix_path(|cstr| unsafe { libc::mkfifoat(dirfd, cstr.as_ptr(), mode) })?;
- Errno::result(res)?;
-
- Ok(())
- }
-
- pub(crate) fn skip_bytes(&mut self, count: usize) -> Result<(), Error> {
- let mut done = 0;
- while done < count {
- let todo = count - done;
- let n = if todo > self.skip_buffer.len() {
- self.skip_buffer.len()
- } else {
- todo
- };
- let data = &mut self.skip_buffer[..n];
- self.reader.read_exact(data)?;
- done += n;
- }
- Ok(())
- }
-
- fn restore_symlink(
- &mut self,
- parent_fd: Option<RawFd>,
- full_path: &PathBuf,
- entry: &PxarEntry,
- filename: &OsStr,
- ) -> Result<(), Error> {
- //fixme: create symlink
- //fixme: restore permission, acls, xattr, ...
-
- let head: PxarHeader = self.read_item()?;
- match head.htype {
- PXAR_SYMLINK => {
- let target = self.read_link(head.size)?;
- //println!("TARGET: {:?}", target);
- if let Some(fd) = parent_fd {
- if let Err(err) = symlinkat(&target, fd, filename) {
- bail!("create symlink {:?} failed - {}", full_path, err);
- }
- }
- }
- _ => bail!(
- "got unknown header type inside symlink entry {:016x}",
- head.htype
- ),
- }
-
- if let Some(fd) = parent_fd {
- // self.restore_mode_at(&entry, fd, filename)?; //not supported on symlinks
- self.restore_ugid_at(&entry, fd, filename)?;
- self.restore_mtime_at(&entry, fd, filename)?;
- }
-
- Ok(())
- }
-
- fn restore_socket(
- &mut self,
- parent_fd: Option<RawFd>,
- entry: &PxarEntry,
- filename: &OsStr,
- ) -> Result<(), Error> {
- if !self.has_features(flags::WITH_SOCKETS) {
- return Ok(());
- }
- if let Some(fd) = parent_fd {
- self.restore_socket_at(fd, filename)?;
- self.restore_mode_at(&entry, fd, filename)?;
- self.restore_ugid_at(&entry, fd, filename)?;
- self.restore_mtime_at(&entry, fd, filename)?;
- }
-
- Ok(())
- }
-
- fn restore_fifo(
- &mut self,
- parent_fd: Option<RawFd>,
- entry: &PxarEntry,
- filename: &OsStr,
- ) -> Result<(), Error> {
- if !self.has_features(flags::WITH_FIFOS) {
- return Ok(());
- }
- if let Some(fd) = parent_fd {
- self.restore_fifo_at(fd, filename)?;
- self.restore_mode_at(&entry, fd, filename)?;
- self.restore_ugid_at(&entry, fd, filename)?;
- self.restore_mtime_at(&entry, fd, filename)?;
- }
-
- Ok(())
- }
-
- fn restore_device(
- &mut self,
- parent_fd: Option<RawFd>,
- entry: &PxarEntry,
- filename: &OsStr,
- ) -> Result<(), Error> {
- let head: PxarHeader = self.read_item()?;
- if head.htype != PXAR_DEVICE {
- bail!(
- "got unknown header type inside device entry {:016x}",
- head.htype
- );
- }
- let device: PxarDevice = self.read_item()?;
- if !self.has_features(flags::WITH_DEVICE_NODES) {
- return Ok(());
- }
- if let Some(fd) = parent_fd {
- self.restore_device_at(&entry, fd, filename, &device)?;
- self.restore_mode_at(&entry, fd, filename)?;
- self.restore_ugid_at(&entry, fd, filename)?;
- self.restore_mtime_at(&entry, fd, filename)?;
- }
-
- Ok(())
- }
-
- /// Restores a regular file with its content and associated attributes to the
- /// folder provided by the raw filedescriptor.
- /// If None is passed instead of a filedescriptor, the file is not restored but
- /// the archive reader is skipping over it instead.
- fn restore_regular_file(
- &mut self,
- parent_fd: Option<RawFd>,
- full_path: &PathBuf,
- entry: &PxarEntry,
- filename: &OsStr,
- ) -> Result<(), Error> {
- let (head, attr) = self
- .read_attributes()
- .map_err(|err| format_err!("Reading of file attributes failed - {}", err))?;
-
- if let Some(fd) = parent_fd {
- let flags = OFlag::O_CREAT | OFlag::O_WRONLY | OFlag::O_EXCL;
- let open_mode = Mode::from_bits_truncate(0o0600 | entry.mode as u32); //fixme: upper 32bits of entry.mode?
- let mut file = file_openat(fd, filename, flags, open_mode)
- .map_err(|err| format_err!("open file {:?} failed - {}", full_path, err))?;
-
- if head.htype != PXAR_PAYLOAD {
- bail!("got unknown header type for file entry {:016x}", head.htype);
- }
-
- if head.size < HEADER_SIZE {
- bail!("detected short payload");
- }
- let need = (head.size - HEADER_SIZE) as usize;
-
- let mut read_buffer = unsafe { vec::uninitialized(64 * 1024) };
- let mut done = 0;
- while done < need {
- let todo = need - done;
- let n = if todo > read_buffer.len() {
- read_buffer.len()
- } else {
- todo
- };
- let data = &mut read_buffer[..n];
- self.reader.read_exact(data)?;
- file.write_all(data)?;
- done += n;
- }
-
- self.restore_ugid(&entry, file.as_raw_fd())?;
- // fcaps have to be restored after restore_ugid as chown clears security.capability xattr, see CVE-2015-1350
- self.restore_attributes(file.as_raw_fd(), &attr, &entry)?;
- self.restore_mode(&entry, file.as_raw_fd())?;
- self.restore_mtime(&entry, file.as_raw_fd())?;
- } else {
- if head.htype != PXAR_PAYLOAD {
- bail!("got unknown header type for file entry {:016x}", head.htype);
- }
- if head.size < HEADER_SIZE {
- bail!("detected short payload");
- }
- self.skip_bytes((head.size - HEADER_SIZE) as usize)?;
- }
-
- Ok(())
- }
-
- fn restore_dir(
- &mut self,
- base_path: &Path,
- dirs: &mut PxarDirStack,
- entry: PxarEntry,
- filename: &OsStr,
- matched: MatchType,
- match_pattern: &[MatchPatternSlice],
- ) -> Result<(), Error> {
- let (mut head, attr) = self
- .read_attributes()
- .map_err(|err| format_err!("Reading of directory attributes failed - {}", err))?;
-
- let dir = PxarDir::new(filename, entry, attr);
- dirs.push(dir);
- if matched == MatchType::Positive {
- dirs.create_all_dirs(!self.allow_existing_dirs)?;
- }
-
- while head.htype == PXAR_FILENAME {
- let name = self.read_filename(head.size)?;
- self.restore_dir_entry(base_path, dirs, &name, matched, match_pattern)?;
- head = self.read_item()?;
- }
-
- if head.htype != PXAR_GOODBYE {
- bail!(
- "got unknown header type inside directory entry {:016x}",
- head.htype
- );
- }
-
- if head.size < HEADER_SIZE {
- bail!("detected short goodbye table");
- }
- self.skip_bytes((head.size - HEADER_SIZE) as usize)?;
-
- let last = dirs
- .pop()
- .ok_or_else(|| format_err!("Tried to pop beyond dir root - this should not happen!"))?;
- if let Some(d) = last.dir {
- let fd = d.as_raw_fd();
- self.restore_ugid(&last.entry, fd)?;
- // fcaps have to be restored after restore_ugid as chown clears security.capability xattr, see CVE-2015-1350
- self.restore_attributes(fd, &last.attr, &last.entry)?;
- self.restore_mode(&last.entry, fd)?;
- self.restore_mtime(&last.entry, fd)?;
- }
-
- Ok(())
- }
-
- /// Restore an archive into the specified directory.
- ///
- /// The directory is created if it does not exist.
- pub fn restore(&mut self, path: &Path, match_pattern: &[MatchPattern]) -> Result<(), Error> {
- let mut slices = Vec::new();
- for pattern in match_pattern {
- slices.push(pattern.as_slice());
- }
- std::fs::create_dir_all(path)
- .map_err(|err| format_err!("error while creating directory {:?} - {}", path, err))?;
-
- let dir = nix::dir::Dir::open(
- path,
- nix::fcntl::OFlag::O_DIRECTORY,
- nix::sys::stat::Mode::empty(),
- )
- .map_err(|err| format_err!("unable to open target directory {:?} - {}", path, err))?;
- let fd = dir.as_raw_fd();
- let mut dirs = PxarDirStack::new(fd);
- // An empty match pattern list indicates to restore the full archive.
- let matched = if slices.is_empty() {
- MatchType::Positive
- } else {
- MatchType::None
- };
-
- let header: PxarHeader = self.read_item()?;
- check_ca_header::<PxarEntry>(&header, PXAR_ENTRY)?;
- let entry: PxarEntry = self.read_item()?;
-
- let (mut head, attr) = self
- .read_attributes()
- .map_err(|err| format_err!("Reading of directory attributes failed - {}", err))?;
-
- while head.htype == PXAR_FILENAME {
- let name = self.read_filename(head.size)?;
- self.restore_dir_entry(path, &mut dirs, &name, matched, &slices)?;
- head = self.read_item()?;
- }
-
- if head.htype != PXAR_GOODBYE {
- bail!(
- "got unknown header type inside directory entry {:016x}",
- head.htype
- );
- }
-
- if head.size < HEADER_SIZE {
- bail!("detected short goodbye table");
- }
- self.skip_bytes((head.size - HEADER_SIZE) as usize)?;
-
- self.restore_ugid(&entry, fd)?;
- // fcaps have to be restored after restore_ugid as chown clears security.capability xattr, see CVE-2015-1350
- self.restore_attributes(fd, &attr, &entry)?;
- self.restore_mode(&entry, fd)?;
- self.restore_mtime(&entry, fd)?;
-
- Ok(())
- }
-
- fn restore_dir_entry(
- &mut self,
- base_path: &Path,
- dirs: &mut PxarDirStack,
- filename: &OsStr,
- parent_matched: MatchType,
- match_pattern: &[MatchPatternSlice],
- ) -> Result<(), Error> {
- let relative_path = dirs.as_path_buf();
- let full_path = base_path.join(&relative_path).join(filename);
-
- let head: PxarHeader = self.read_item()?;
- if head.htype == PXAR_FORMAT_HARDLINK {
- let (target, _offset) = self.read_hardlink(head.size)?;
- let target_path = base_path.join(&target);
- if dirs.last_dir_fd().is_some() {
- if let Some(ref callback) = self.callback {
- (callback)(&full_path)?;
- }
- hardlink(&target_path, &full_path)?;
- }
- return Ok(());
- }
-
- check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
- let entry: PxarEntry = self.read_item()?;
- let ifmt = entry.mode as u32 & libc::S_IFMT;
-
- let mut child_pattern = Vec::new();
- // If parent was a match, then children should be assumed to match too
- // This is especially the case when the full archive is restored and
- // there are no match pattern.
- let mut matched = parent_matched;
- if !match_pattern.is_empty() {
- match MatchPatternSlice::match_filename_include(
- &CString::new(filename.as_bytes())?,
- ifmt == libc::S_IFDIR,
- match_pattern,
- )? {
- (MatchType::None, _) => matched = MatchType::None,
- (MatchType::Negative, _) => matched = MatchType::Negative,
- (MatchType::Positive, _) => matched = MatchType::Positive,
- (match_type, pattern) => {
- matched = match_type;
- child_pattern = pattern;
- }
- }
- }
-
- let fd = if matched == MatchType::Positive {
- Some(dirs.create_all_dirs(!self.allow_existing_dirs)?)
- } else {
- None
- };
-
- if fd.is_some() {
- if let Some(ref callback) = self.callback {
- (callback)(&full_path)?;
- }
- }
-
- match ifmt {
- libc::S_IFDIR => {
- self.restore_dir(base_path, dirs, entry, &filename, matched, &child_pattern)
- }
- libc::S_IFLNK => self.restore_symlink(fd, &full_path, &entry, &filename),
- libc::S_IFSOCK => self.restore_socket(fd, &entry, &filename),
- libc::S_IFIFO => self.restore_fifo(fd, &entry, &filename),
- libc::S_IFBLK | libc::S_IFCHR => self.restore_device(fd, &entry, &filename),
- libc::S_IFREG => self.restore_regular_file(fd, &full_path, &entry, &filename),
- _ => Ok(()),
- }
- }
-
- /// List/Dump archive content.
- ///
- /// Simply print the list of contained files. This dumps archive
- /// format details when the verbose flag is set (useful for debug).
- pub fn dump_entry<W: std::io::Write>(
- &mut self,
- path: &mut PathBuf,
- verbose: bool,
- output: &mut W,
- ) -> Result<(), Error> {
- let print_head = |head: &PxarHeader| {
- println!("Type: {:016x}", head.htype);
- println!("Size: {}", head.size);
- };
-
- let head: PxarHeader = self.read_item()?;
- if verbose {
- println!("Path: {:?}", path);
- print_head(&head);
- } else {
- println!("{:?}", path);
- }
-
- if head.htype == PXAR_FORMAT_HARDLINK {
- let (target, offset) = self.read_hardlink(head.size)?;
- if verbose {
- println!("Hardlink: {} {:?}", offset, target);
- }
- return Ok(());
- }
-
- check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
- let entry: PxarEntry = self.read_item()?;
-
- if verbose {
- println!(
- "Mode: {:08x} {:08x}",
- entry.mode,
- (entry.mode as u32) & libc::S_IFDIR
- );
- }
-
- let ifmt = (entry.mode as u32) & libc::S_IFMT;
-
- if ifmt == libc::S_IFDIR {
- let mut entry_count = 0;
-
- loop {
- let head: PxarHeader = self.read_item()?;
- if verbose {
- print_head(&head);
- }
-
- // This call covers all the cases of the match statement
- // regarding extended attributes. These calls will never
- // break on the loop and can therefore be handled separately.
- // If the header was matched, true is returned and we can continue
- if self.dump_if_attribute(&head, verbose)? {
- continue;
- }
-
- match head.htype {
- PXAR_FILENAME => {
- let name = self.read_filename(head.size)?;
- if verbose {
- println!("Name: {:?}", name);
- }
- entry_count += 1;
- path.push(&name);
- self.dump_entry(path, verbose, output)?;
- path.pop();
- }
- PXAR_GOODBYE => {
- let table_size = (head.size - HEADER_SIZE) as usize;
- if verbose {
- println!("Goodbye: {:?}", path);
- self.dump_goodby_entries(entry_count, table_size)?;
- } else {
- self.skip_bytes(table_size)?;
- }
- break;
- }
- _ => panic!("got unexpected header type inside directory"),
- }
- }
- } else if (ifmt == libc::S_IFBLK)
- || (ifmt == libc::S_IFCHR)
- || (ifmt == libc::S_IFLNK)
- || (ifmt == libc::S_IFREG)
- {
- loop {
- let head: PxarHeader = self.read_item()?;
- if verbose {
- print_head(&head);
- }
-
- // This call covers all the cases of the match statement
- // regarding extended attributes. These calls will never
- // break on the loop and can therefore be handled separately.
- // If the header was matched, true is returned and we can continue
- if self.dump_if_attribute(&head, verbose)? {
- continue;
- }
-
- match head.htype {
- PXAR_SYMLINK => {
- let target = self.read_link(head.size)?;
- if verbose {
- println!("Symlink: {:?}", target);
- }
- break;
- }
- PXAR_DEVICE => {
- let device: PxarDevice = self.read_item()?;
- if verbose {
- println!("Device: {}, {}", device.major, device.minor);
- }
- break;
- }
- PXAR_PAYLOAD => {
- let payload_size = (head.size - HEADER_SIZE) as usize;
- if verbose {
- println!("Payload: {}", payload_size);
- }
- self.skip_bytes(payload_size)?;
- break;
- }
- _ => {
- panic!("got unexpected header type inside non-directory");
- }
- }
- }
- } else if ifmt == libc::S_IFIFO {
- if verbose {
- println!("Fifo:");
- }
- } else if ifmt == libc::S_IFSOCK {
- if verbose {
- println!("Socket:");
- }
- } else {
- panic!("unknown st_mode");
- }
- Ok(())
- }
-
- fn dump_if_attribute(&mut self, header: &PxarHeader, verbose: bool) -> Result<bool, Error> {
- match header.htype {
- PXAR_XATTR => {
- let xattr = self.read_xattr((header.size - HEADER_SIZE) as usize)?;
- if verbose && self.has_features(flags::WITH_XATTRS) {
- println!("XAttr: {:?}", xattr);
- }
- }
- PXAR_FCAPS => {
- let fcaps = self.read_fcaps((header.size - HEADER_SIZE) as usize)?;
- if verbose && self.has_features(flags::WITH_FCAPS) {
- println!("FCaps: {:?}", fcaps);
- }
- }
- PXAR_ACL_USER => {
- let user = self.read_item::<PxarACLUser>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLUser: {:?}", user);
- }
- }
- PXAR_ACL_GROUP => {
- let group = self.read_item::<PxarACLGroup>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLGroup: {:?}", group);
- }
- }
- PXAR_ACL_GROUP_OBJ => {
- let group_obj = self.read_item::<PxarACLGroupObj>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLGroupObj: {:?}", group_obj);
- }
- }
- PXAR_ACL_DEFAULT => {
- let default = self.read_item::<PxarACLDefault>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLDefault: {:?}", default);
- }
- }
- PXAR_ACL_DEFAULT_USER => {
- let default_user = self.read_item::<PxarACLUser>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLDefaultUser: {:?}", default_user);
- }
- }
- PXAR_ACL_DEFAULT_GROUP => {
- let default_group = self.read_item::<PxarACLGroup>()?;
- if verbose && self.has_features(flags::WITH_ACL) {
- println!("ACLDefaultGroup: {:?}", default_group);
- }
- }
- PXAR_QUOTA_PROJID => {
- let quota_projid = self.read_item::<PxarQuotaProjID>()?;
- if verbose && self.has_features(flags::WITH_QUOTA_PROJID) {
- println!("Quota project id: {:?}", quota_projid);
- }
- }
- _ => return Ok(false),
- }
-
- Ok(true)
- }
-
- fn dump_goodby_entries(&mut self, entry_count: usize, table_size: usize) -> Result<(), Error> {
- const GOODBYE_ITEM_SIZE: usize = std::mem::size_of::<PxarGoodbyeItem>();
-
- if table_size < GOODBYE_ITEM_SIZE {
- bail!(
- "Goodbye table to small ({} < {})",
- table_size,
- GOODBYE_ITEM_SIZE
- );
- }
- if (table_size % GOODBYE_ITEM_SIZE) != 0 {
- bail!("Goodbye table with strange size ({})", table_size);
- }
-
- let entries = table_size / GOODBYE_ITEM_SIZE;
-
- if entry_count != (entries - 1) {
- bail!(
- "Goodbye table with wrong entry count ({} != {})",
- entry_count,
- entries - 1
- );
- }
-
- let mut count = 0;
-
- loop {
- let item: PxarGoodbyeItem = self.read_item()?;
- count += 1;
- if item.hash == PXAR_GOODBYE_TAIL_MARKER {
- if count != entries {
- bail!("unexpected goodbye tail marker");
- }
- println!("Goodby tail mark.");
- break;
- }
- println!(
- "Goodby item: offset {}, size {}, hash {:016x}",
- item.offset, item.size, item.hash
- );
- if count >= entries {
- bail!("too many goodbye items (no tail marker)");
- }
- }
-
- Ok(())
- }
-}
-
-fn file_openat(
- parent: RawFd,
- filename: &OsStr,
- flags: OFlag,
- mode: Mode,
-) -> Result<std::fs::File, Error> {
- let fd =
- filename.with_nix_path(|cstr| nix::fcntl::openat(parent, cstr, flags, mode))??;
-
- let file = unsafe { std::fs::File::from_raw_fd(fd) };
-
- Ok(file)
-}
-
-fn hardlink(oldpath: &Path, newpath: &Path) -> Result<(), Error> {
- oldpath.with_nix_path(|oldpath| {
- newpath.with_nix_path(|newpath| {
- let res = unsafe { libc::link(oldpath.as_ptr(), newpath.as_ptr()) };
- Errno::result(res)?;
- Ok(())
- })?
- })?
-}
-
-fn symlinkat(target: &Path, parent: RawFd, linkname: &OsStr) -> Result<(), Error> {
- target.with_nix_path(|target| {
- linkname.with_nix_path(|linkname| {
- let res = unsafe { libc::symlinkat(target.as_ptr(), parent, linkname.as_ptr()) };
- Errno::result(res)?;
- Ok(())
- })?
- })?
-}
-
-fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
- // restore mtime
- const UTIME_OMIT: i64 = (1 << 30) - 2;
- const NANOS_PER_SEC: i64 = 1_000_000_000;
-
- let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
- let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;
-
- let times: [libc::timespec; 2] = [
- libc::timespec {
- tv_sec: 0,
- tv_nsec: UTIME_OMIT,
- },
- libc::timespec {
- tv_sec: sec,
- tv_nsec: nsec,
- },
- ];
-
- times
-}
--- /dev/null
+//! Some common methods used within the pxar code.
+
+use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::path::Path;
+
+use anyhow::{bail, format_err, Error};
+use nix::sys::stat::Mode;
+
+use pxar::{mode, Entry, EntryKind, Metadata};
+
+/// Get the file permissions as `nix::Mode`
+///
+/// Fails if the stored mode contains bits outside the set that
+/// `Mode::from_bits` recognizes as permission bits.
+pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
+    let mode = meta.stat.get_permission_bits();
+    // The stat mode is stored as a wide integer; narrow to u32 first, then
+    // reject any bits which are not valid permission bits.
+    u32::try_from(mode)
+        .map_err(drop)
+        .and_then(|mode| Mode::from_bits(mode).ok_or(()))
+        .map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
+}
+
+/// Make sure path is relative and not '.' or '..'.
+pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
+    assert_relative_path_do(Path::new(path))
+}
+
+// Helper: checks that `path` is relative and consists of exactly one
+// "normal" component (rejects absolute paths, "", ".", ".." and "a/b").
+fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
+    if !path.is_relative() {
+        bail!("bad absolute file name in archive: {:?}", path);
+    }
+
+    // The first component must be a plain file name; CurDir ("."),
+    // ParentDir ("..") and an empty path all fail this match.
+    let mut components = path.components();
+    match components.next() {
+        Some(std::path::Component::Normal(_)) => (),
+        _ => bail!("invalid path component in archive: {:?}", path),
+    }
+
+    // A single archive entry name must not span multiple path components.
+    if components.next().is_some() {
+        bail!(
+            "invalid path with multiple components in archive: {:?}",
+            path
+        );
+    }
+
+    Ok(())
+}
+
+/// Render one permission triplet as three ASCII bytes (`rwx` style).
+///
+/// `c` carries the three permission bits; `special` is the matching
+/// setuid/setgid/sticky flag, which is shown in the 'x' slot as
+/// `special_x` when executable or `special_no_x` when not.
+#[rustfmt::skip]
+fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
+    [
+        if 0 != c & 4 { b'r' } else { b'-' },
+        if 0 != c & 2 { b'w' } else { b'-' },
+        match (c & 1, special) {
+            (0, false) => b'-',
+            (0, true) => special_no_x,
+            (_, false) => b'x',
+            (_, true) => special_x,
+        }
+    ]
+}
+
+/// Build the `ls -l` style mode column (type char, permission bits,
+/// ACL/xattr markers) for an archive entry.
+fn mode_string(entry: &Entry) -> String {
+    // https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
+    // additionally we use:
+    //     file type capital 'L' hard links
+    //     a second '+' after the mode to show non-acl xattr presence
+    //
+    //     Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
+
+    let meta = entry.metadata();
+    let mode = meta.stat.mode;
+    let type_char = if entry.is_hardlink() {
+        'L'
+    } else {
+        match mode & mode::IFMT {
+            mode::IFREG => '-',
+            mode::IFBLK => 'b',
+            mode::IFCHR => 'c',
+            mode::IFDIR => 'd',
+            mode::IFLNK => 'l',
+            mode::IFIFO => 'p',
+            mode::IFSOCK => 's',
+            _ => '?',
+        }
+    };
+
+    // user/group/other permission triplets:
+    let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
+    let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
+    // FIX: "other" must use the low three bits; `(mode >> 3) & 7` would
+    // wrongly repeat the group permissions in the last triplet.
+    let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');
+
+    let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
+
+    let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
+
+    format!(
+        "{}{}{}{}{}{}",
+        type_char,
+        // SAFETY: symbolic_mode() only ever produces ASCII bytes.
+        unsafe { std::str::from_utf8_unchecked(&fmt_u) },
+        unsafe { std::str::from_utf8_unchecked(&fmt_g) },
+        unsafe { std::str::from_utf8_unchecked(&fmt_o) },
+        has_acls,
+        has_xattrs,
+    )
+}
+
+/// Render a one-line, `ls -l` style description of an archive entry.
+pub fn format_single_line_entry(entry: &Entry) -> String {
+    use chrono::offset::TimeZone;
+
+    let mode_string = mode_string(entry);
+
+    let meta = entry.metadata();
+    // mtime is stored as a duration since the epoch; render in local time.
+    let mtime = meta.mtime_as_duration();
+    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
+
+    // Size column plus an optional " -> target" suffix for links.
+    let (size, link) = match entry.kind() {
+        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
+        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
+        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
+        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
+        _ => ("0".to_string(), String::new()),
+    };
+
+    format!(
+        "{} {:<13} {} {:>8} {:?}{}",
+        mode_string,
+        format!("{}/{}", meta.stat.uid, meta.stat.gid),
+        mtime.format("%Y-%m-%d %H:%M:%S"),
+        size,
+        entry.path(),
+        link,
+    )
+}
+
+/// Render a multi-line, `stat(1)`-like description of an archive entry.
+pub fn format_multi_line_entry(entry: &Entry) -> String {
+    use chrono::offset::TimeZone;
+
+    let mode_string = mode_string(entry);
+
+    let meta = entry.metadata();
+    // mtime is stored as a duration since the epoch; render in local time.
+    let mtime = meta.mtime_as_duration();
+    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
+
+    let (size, link, type_name) = match entry.kind() {
+        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
+        EntryKind::Symlink(link) => (
+            "0".to_string(),
+            format!(" -> {:?}", link.as_os_str()),
+            "symlink",
+        ),
+        EntryKind::Hardlink(link) => (
+            "0".to_string(),
+            format!(" -> {:?}", link.as_os_str()),
+            // FIX: this entry is a hard link, not a symlink
+            "hardlink",
+        ),
+        EntryKind::Device(dev) => (
+            format!("{},{}", dev.major, dev.minor),
+            String::new(),
+            if meta.stat.is_chardev() {
+                // FIX: typo, was "characters pecial file"
+                "character special file"
+            } else if meta.stat.is_blockdev() {
+                "block special file"
+            } else {
+                "device"
+            },
+        ),
+        EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
+        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
+        EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
+        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
+    };
+
+    // Fall back to the Debug representation for non-UTF-8 file names.
+    let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
+        Ok(name) => std::borrow::Cow::Borrowed(name),
+        Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
+    };
+
+    format!(
+        " File: {}{}\n \
+        Size: {:<13} Type: {}\n\
+        Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
+        Modify: {}\n",
+        file_name,
+        link,
+        size,
+        type_name,
+        meta.file_mode(),
+        mode_string,
+        meta.stat.uid,
+        meta.stat.gid,
+        mtime.format("%Y-%m-%d %H:%M:%S"),
+    )
+}
#[link(name = "acl")]
extern "C" {
fn acl_get_file(path: *const c_char, acl_type: ACLType) -> *mut c_void;
- fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
+ // FIXME: remove 'pub' after the cleanup
+ pub(crate) fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
fn acl_get_fd(fd: RawFd) -> *mut c_void;
fn acl_get_entry(acl: *const c_void, entry_id: c_int, entry: *mut *mut c_void) -> c_int;
fn acl_create_entry(acl: *mut *mut c_void, entry: *mut *mut c_void) -> c_int;
#[derive(Debug)]
pub struct ACL {
- ptr: *mut c_void,
+ // FIXME: remove 'pub' after the cleanup
+ pub(crate) ptr: *mut c_void,
}
impl Drop for ACL {
c_str!("security.capability")
}
+/// `"system.posix_acl_access"` as a CStr to avoid typos.
+///
+/// This cannot be `const` until `const_cstr_unchecked` is stable.
+#[inline]
+pub fn xattr_acl_access() -> &'static CStr {
+ c_str!("system.posix_acl_access")
+}
+
+/// `"system.posix_acl_default"` as a CStr to avoid typos.
+///
+/// This cannot be `const` until `const_cstr_unchecked` is stable.
+#[inline]
+pub fn xattr_acl_default() -> &'static CStr {
+ c_str!("system.posix_acl_default")
+}
+
/// Result of `flistxattr`, allows iterating over the attributes as a list of `&CStr`s.
///
/// Listing xattrs produces a list separated by zeroes, inherently making them available as `&CStr`
name.to_bytes() == xattr_name_fcaps().to_bytes()
}
+pub fn is_acl(name: &CStr) -> bool {
+ name.to_bytes() == xattr_acl_access().to_bytes()
+ || name.to_bytes() == xattr_acl_default().to_bytes()
+}
+
/// Check if the passed name buffer starts with a valid xattr namespace prefix
/// and is within the length limit of 255 bytes
pub fn is_valid_xattr_name(c_name: &CStr) -> bool {
.status()
.expect("failed to execute casync");
- let mut writer = std::fs::OpenOptions::new()
+ let writer = std::fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open("test-proxmox.catar")?;
+ let writer = pxar::encoder::sync::StandardWriter::new(writer);
- let mut dir = nix::dir::Dir::open(
+ let dir = nix::dir::Dir::open(
dir_name, nix::fcntl::OFlag::O_NOFOLLOW,
nix::sys::stat::Mode::empty())?;
- let path = std::path::PathBuf::from(dir_name);
-
- let catalog = None::<&mut catalog::DummyCatalogWriter>;
- Encoder::encode(
- path,
- &mut dir,
- &mut writer,
- catalog,
+ create_archive(
+ dir,
+ writer,
+ Vec::new(),
+ flags::DEFAULT,
None,
false,
- false,
- flags::DEFAULT,
- Vec::new(),
+ |_| Ok(()),
ENCODER_MAX_ENTRIES,
+ None,
)?;
Command::new("cmp")