From: Wolfgang Bumiller Date: Tue, 6 Jul 2021 11:26:35 +0000 (+0200) Subject: add pbs-tools subcrate X-Git-Tag: v2.0.10~295 X-Git-Url: https://git.proxmox.com/?p=proxmox-backup.git;a=commitdiff_plain;h=770a36e53a14482564fa52e2ef2e9f62d9c7ff75 add pbs-tools subcrate Signed-off-by: Wolfgang Bumiller --- diff --git a/Cargo.toml b/Cargo.toml index b9c31550..e70a37cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"] members = [ "pbs-buildcfg", "pbs-runtime", + "pbs-tools", ] [lib] @@ -94,6 +95,7 @@ proxmox-openid = "0.6.0" pbs-buildcfg = { path = "pbs-buildcfg" } pbs-runtime = { path = "pbs-runtime" } +pbs-tools = { path = "pbs-tools" } [features] default = [] diff --git a/Makefile b/Makefile index f6915c5f..684d1f6e 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,8 @@ RESTORE_BIN := \ SUBCRATES := \ pbs-buildcfg \ - pbs-runtime + pbs-runtime \ + pbs-tools ifeq ($(BUILD_MODE), release) CARGO_BUILD_ARGS += --release diff --git a/pbs-tools/Cargo.toml b/pbs-tools/Cargo.toml new file mode 100644 index 00000000..8087aed5 --- /dev/null +++ b/pbs-tools/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "pbs-tools" +version = "0.1.0" +authors = ["Proxmox Support Team "] +edition = "2018" +description = "common tools used throughout pbs" + +# This must not depend on any subcrates more closely related to pbs itself. +[dependencies] +anyhow = "1.0" +libc = "0.2" +nix = "0.19.1" +regex = "1.2" +serde = "1.0" +serde_json = "1.0" + +proxmox = { version = "0.11.5", default-features = false, features = [] } diff --git a/pbs-tools/src/borrow.rs b/pbs-tools/src/borrow.rs new file mode 100644 index 00000000..66b68ada --- /dev/null +++ b/pbs-tools/src/borrow.rs @@ -0,0 +1,59 @@ +/// This ties two values T and U together, such that T does not move and cannot be used as long as +/// there's an U. This essentially replaces the borrow checker's job for dependent values which +/// need to be stored together in a struct {}, and is similar to what the 'rental' crate produces. +pub struct Tied(Option>, Option>); + +impl Drop for Tied { + fn drop(&mut self) { + // let's be explicit about order here! + std::mem::drop(self.1.take()); + } +} + +impl Tied { + /// Takes an owner and a function producing the depending value. The owner will be inaccessible + /// until the tied value is resolved. The dependent value is only accessible by reference. 
+ pub fn new(owner: T, producer: F) -> Self + where + F: FnOnce(*mut T) -> Box, + { + let mut owner = Box::new(owner); + let dep = producer(&mut *owner); + Tied(Some(owner), Some(dep)) + } + + pub fn into_boxed_inner(mut self) -> Box { + self.1 = None; + self.0.take().unwrap() + } + + pub fn into_inner(self) -> T { + *self.into_boxed_inner() + } +} + +impl AsRef for Tied { + fn as_ref(&self) -> &U { + self.1.as_ref().unwrap() + } +} + +impl AsMut for Tied { + fn as_mut(&mut self) -> &mut U { + self.1.as_mut().unwrap() + } +} + +impl std::ops::Deref for Tied { + type Target = U; + + fn deref(&self) -> &U { + self.as_ref() + } +} + +impl std::ops::DerefMut for Tied { + fn deref_mut(&mut self) -> &mut U { + self.as_mut() + } +} diff --git a/pbs-tools/src/format.rs b/pbs-tools/src/format.rs new file mode 100644 index 00000000..a69f5a17 --- /dev/null +++ b/pbs-tools/src/format.rs @@ -0,0 +1,148 @@ +use anyhow::{Error}; +use serde_json::Value; + +pub fn strip_server_file_extension(name: &str) -> String { + if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { + name[..name.len()-5].to_owned() + } else { + name.to_owned() // should not happen + } +} + +pub fn render_backup_file_list(files: &[String]) -> String { + let mut files: Vec = files.iter() + .map(|v| strip_server_file_extension(&v)) + .collect(); + + files.sort(); + + crate::str::join(&files, ' ') +} + +pub fn render_epoch(value: &Value, _record: &Value) -> Result { + if value.is_null() { return Ok(String::new()); } + let text = match value.as_i64() { + Some(epoch) => { + if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) { + epoch_string + } else { + epoch.to_string() + } + }, + None => { + value.to_string() + } + }; + Ok(text) +} + +pub fn render_task_status(value: &Value, record: &Value) -> Result { + if record["endtime"].is_null() { + Ok(value.as_str().unwrap_or("running").to_string()) + } else { + Ok(value.as_str().unwrap_or("unknown").to_string()) + } +} + +pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result { + let value = value.as_bool().unwrap_or(true); + Ok((if value { "1" } else { "0" }).to_string()) +} + +pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result { + if value.is_null() { return Ok(String::new()); } + let text = match value.as_u64() { + Some(bytes) => { + HumanByte::from(bytes).to_string() + } + None => { + value.to_string() + } + }; + Ok(text) +} + +pub struct HumanByte { + b: usize, +} +impl std::fmt::Display for HumanByte { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.b < 1024 { + return write!(f, "{} B", self.b); + } + let kb: f64 = self.b as f64 / 1024.0; + if kb < 1024.0 { + return write!(f, "{:.2} KiB", kb); + } + let mb: f64 = kb / 1024.0; + if mb < 1024.0 { + return write!(f, "{:.2} MiB", mb); + } + let gb: f64 = mb / 1024.0; + if gb < 1024.0 { + return write!(f, "{:.2} GiB", gb); + } + let tb: f64 = gb / 1024.0; + if tb < 1024.0 { + return write!(f, "{:.2} TiB", tb); + } + let pb: f64 = tb / 1024.0; + return write!(f, "{:.2} PiB", pb); + } +} +impl From for HumanByte { + fn from(v: usize) -> Self { + HumanByte { b: v } + } +} +impl From for HumanByte { + fn from(v: u64) -> Self { + HumanByte { b: v as usize } + } +} + +pub fn as_fingerprint(bytes: &[u8]) -> String { + proxmox::tools::digest_to_hex(bytes) + .as_bytes() + .chunks(2) + .map(|v| std::str::from_utf8(v).unwrap()) + .collect::>().join(":") +} + +pub mod bytes_as_fingerprint { + use serde::{Deserialize, 
Serializer, Deserializer}; + + pub fn serialize( + bytes: &[u8; 32], + serializer: S, + ) -> Result + where + S: Serializer, + { + let s = super::as_fingerprint(bytes); + serializer.serialize_str(&s) + } + + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result<[u8; 32], D::Error> + where + D: Deserializer<'de>, + { + let mut s = String::deserialize(deserializer)?; + s.retain(|c| c != ':'); + proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom) + } +} + +#[test] +fn correct_byte_convert() { + fn convert(b: usize) -> String { + HumanByte::from(b).to_string() + } + assert_eq!(convert(1023), "1023 B"); + assert_eq!(convert(1<<10), "1.00 KiB"); + assert_eq!(convert(1<<20), "1.00 MiB"); + assert_eq!(convert((1<<30) + 103 * (1<<20)), "1.10 GiB"); + assert_eq!(convert((2<<50) + 500 * (1<<40)), "2.49 PiB"); +} diff --git a/pbs-tools/src/fs.rs b/pbs-tools/src/fs.rs new file mode 100644 index 00000000..9f8325e1 --- /dev/null +++ b/pbs-tools/src/fs.rs @@ -0,0 +1,346 @@ +//! File system helper utilities. + +use std::borrow::{Borrow, BorrowMut}; +use std::ops::{Deref, DerefMut}; +use std::os::unix::io::{AsRawFd, RawFd}; + +use anyhow::{bail, format_err, Error}; +use nix::dir; +use nix::dir::Dir; +use nix::fcntl::OFlag; +use nix::sys::stat::Mode; + +use regex::Regex; + +use proxmox::sys::error::SysError; + +use crate::borrow::Tied; + +pub type DirLockGuard = Dir; + +/// This wraps nix::dir::Entry with the parent directory's file descriptor. +pub struct ReadDirEntry { + entry: dir::Entry, + parent_fd: RawFd, +} + +impl Into for ReadDirEntry { + fn into(self) -> dir::Entry { + self.entry + } +} + +impl Deref for ReadDirEntry { + type Target = dir::Entry; + + fn deref(&self) -> &Self::Target { + &self.entry + } +} + +impl DerefMut for ReadDirEntry { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.entry + } +} + +impl AsRef for ReadDirEntry { + fn as_ref(&self) -> &dir::Entry { + &self.entry + } +} + +impl AsMut for ReadDirEntry { + fn as_mut(&mut self) -> &mut dir::Entry { + &mut self.entry + } +} + +impl Borrow for ReadDirEntry { + fn borrow(&self) -> &dir::Entry { + &self.entry + } +} + +impl BorrowMut for ReadDirEntry { + fn borrow_mut(&mut self) -> &mut dir::Entry { + &mut self.entry + } +} + +impl ReadDirEntry { + #[inline] + pub fn parent_fd(&self) -> RawFd { + self.parent_fd + } + + pub unsafe fn file_name_utf8_unchecked(&self) -> &str { + std::str::from_utf8_unchecked(self.file_name().to_bytes()) + } +} + +// Since Tied implements Deref to U, a Tied already implements Iterator. +// This is simply a wrapper with a shorter type name mapping nix::Error to anyhow::Error. +/// Wrapper over a pair of `nix::dir::Dir` and `nix::dir::Iter`, returned by `read_subdir()`. +pub struct ReadDir { + iter: Tied> + Send>, + dir_fd: RawFd, +} + +impl Iterator for ReadDir { + type Item = Result; + + fn next(&mut self) -> Option { + self.iter.next().map(|res| { + res.map(|entry| ReadDirEntry { entry, parent_fd: self.dir_fd }) + .map_err(Error::from) + }) + } +} + +/// Create an iterator over sub directory entries. +/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path. +pub fn read_subdir(dirfd: RawFd, path: &P) -> nix::Result { + let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?; + let fd = dir.as_raw_fd(); + let iter = Tied::new(dir, |dir| { + Box::new(unsafe { (*dir).iter() }) + as Box> + Send> + }); + Ok(ReadDir { iter, dir_fd: fd }) +} + +/// Scan through a directory with a regular expression. 
This is simply a shortcut filtering the +/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored. +pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>( + dirfd: RawFd, + path: &P, + regex: &'a regex::Regex, +) -> Result> + 'a, nix::Error> { + Ok(read_subdir(dirfd, path)?.filter_file_name_regex(regex)) +} + +/// Scan directory for matching file names with a callback. +/// +/// Scan through all directory entries and call `callback()` function +/// if the entry name matches the regular expression. This function +/// used unix `openat()`, so you can pass absolute or relative file +/// names. This function simply skips non-UTF8 encoded names. +pub fn scandir( + dirfd: RawFd, + path: &P, + regex: ®ex::Regex, + mut callback: F, +) -> Result<(), Error> +where + F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>, + P: ?Sized + nix::NixPath, +{ + for entry in scan_subdir(dirfd, path, regex)? { + let entry = entry?; + let file_type = match entry.file_type() { + Some(file_type) => file_type, + None => bail!("unable to detect file type"), + }; + + callback( + entry.parent_fd(), + unsafe { entry.file_name_utf8_unchecked() }, + file_type, + )?; + } + Ok(()) +} + + +/// Helper trait to provide a combinators for directory entry iterators. +pub trait FileIterOps +where + Self: Sized + Iterator>, + T: Borrow, + E: Into + Send + Sync, +{ + /// Filter by file type. This is more convenient than using the `filter` method alone as this + /// also includes error handling and handling of files without a type (via an error). + fn filter_file_type(self, ty: dir::Type) -> FileTypeFilter { + FileTypeFilter { inner: self, ty } + } + + /// Filter by file name. Note that file names which aren't valid utf-8 will be treated as if + /// they do not match the pattern. + fn filter_file_name_regex(self, regex: &Regex) -> FileNameRegexFilter { + FileNameRegexFilter { inner: self, regex } + } +} + +impl FileIterOps for I +where + I: Iterator>, + T: Borrow, + E: Into + Send + Sync, +{ +} + +/// This filters files from its inner iterator by a file type. Files with no type produce an error. +pub struct FileTypeFilter +where + I: Iterator>, + T: Borrow, + E: Into + Send + Sync, +{ + inner: I, + ty: nix::dir::Type, +} + +impl Iterator for FileTypeFilter +where + I: Iterator>, + T: Borrow, + E: Into + Send + Sync, +{ + type Item = Result; + + fn next(&mut self) -> Option { + loop { + let item = self.inner.next()?.map_err(|e| e.into()); + match item { + Ok(ref entry) => match entry.borrow().file_type() { + Some(ty) => { + if ty == self.ty { + return Some(item); + } else { + continue; + } + } + None => return Some(Err(format_err!("unable to detect file type"))), + }, + Err(_) => return Some(item), + } + } + } +} + +/// This filters files by name via a Regex. Files whose file name aren't valid utf-8 are skipped +/// silently. 
+pub struct FileNameRegexFilter<'a, I, T, E> +where + I: Iterator>, + T: Borrow, +{ + inner: I, + regex: &'a Regex, +} + +impl Iterator for FileNameRegexFilter<'_, I, T, E> +where + I: Iterator>, + T: Borrow, +{ + type Item = Result; + + fn next(&mut self) -> Option { + loop { + let item = self.inner.next()?; + match item { + Ok(ref entry) => { + if let Ok(name) = entry.borrow().file_name().to_str() { + if self.regex.is_match(name) { + return Some(item); + } + } + // file did not match regex or isn't valid utf-8 + continue; + }, + Err(_) => return Some(item), + } + } + } +} + +// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long) +// read Linux file system attributes (see man chattr) +nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long); +nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long); + +// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) +// read FAT file system attributes +nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32); +nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32); + +// From /usr/include/linux/fs.h +// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr) +// #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) +nix::ioctl_read!(fs_ioc_fsgetxattr, b'X', 31, FSXAttr); +nix::ioctl_write_ptr!(fs_ioc_fssetxattr, b'X', 32, FSXAttr); + +#[repr(C)] +#[derive(Debug)] +pub struct FSXAttr { + pub fsx_xflags: u32, + pub fsx_extsize: u32, + pub fsx_nextents: u32, + pub fsx_projid: u32, + pub fsx_cowextsize: u32, + pub fsx_pad: [u8; 8], +} + +impl Default for FSXAttr { + fn default() -> Self { + FSXAttr { + fsx_xflags: 0u32, + fsx_extsize: 0u32, + fsx_nextents: 0u32, + fsx_projid: 0u32, + fsx_cowextsize: 0u32, + fsx_pad: [0u8; 8], + } + } +} + +/// Attempt to acquire a shared flock on the given path, 'what' and +/// 'would_block_message' are used for error formatting. +pub fn lock_dir_noblock_shared( + path: &std::path::Path, + what: &str, + would_block_msg: &str, +) -> Result { + do_lock_dir_noblock(path, what, would_block_msg, false) +} + +/// Attempt to acquire an exclusive flock on the given path, 'what' and +/// 'would_block_message' are used for error formatting. +pub fn lock_dir_noblock( + path: &std::path::Path, + what: &str, + would_block_msg: &str, +) -> Result { + do_lock_dir_noblock(path, what, would_block_msg, true) +} + +fn do_lock_dir_noblock( + path: &std::path::Path, + what: &str, + would_block_msg: &str, + exclusive: bool, +) -> Result { + let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty()) + .map_err(|err| { + format_err!("unable to open {} directory {:?} for locking - {}", what, path, err) + })?; + + // acquire in non-blocking mode, no point in waiting here since other + // backups could still take a very long time + proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0))) + .map_err(|err| { + format_err!( + "unable to acquire lock on {} directory {:?} - {}", what, path, + if err.would_block() { + String::from(would_block_msg) + } else { + err.to_string() + } + ) + })?; + + Ok(handle) +} diff --git a/pbs-tools/src/lib.rs b/pbs-tools/src/lib.rs new file mode 100644 index 00000000..c4221f0d --- /dev/null +++ b/pbs-tools/src/lib.rs @@ -0,0 +1,4 @@ +pub mod borrow; +pub mod format; +pub mod fs; +pub mod str; diff --git a/pbs-tools/src/str.rs b/pbs-tools/src/str.rs new file mode 100644 index 00000000..9b2d66ef --- /dev/null +++ b/pbs-tools/src/str.rs @@ -0,0 +1,17 @@ +//! String related utilities. 
+ +use std::borrow::Borrow; + +pub fn join>(data: &[S], sep: char) -> String { + let mut list = String::new(); + + for item in data { + if !list.is_empty() { + list.push(sep); + } + list.push_str(item.borrow()); + } + + list +} + diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index 36fdc1b2..d302b2f7 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -12,13 +12,14 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm use proxmox::api::router::SubdirMap; use proxmox::api::schema::*; +use pbs_tools::fs::lock_dir_noblock_shared; + use crate::tools; use crate::server::{WorkerTask, H2Service}; use crate::backup::*; use crate::api2::types::*; use crate::config::acl::PRIV_DATASTORE_BACKUP; use crate::config::cached_user_info::CachedUserInfo; -use crate::tools::fs::lock_dir_noblock_shared; mod environment; use environment::*; diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 6f292177..ae8a3974 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -73,7 +73,7 @@ pub fn list_datastore_mounts() -> Result, Error> { let mut list = Vec::new(); let basedir = "/etc/systemd/system"; - for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? { + for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? { let item = item?; let name = item.file_name().to_string_lossy().to_string(); diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index d0081fad..81d92bf1 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -27,6 +27,8 @@ use proxmox::{ }, }; +use pbs_tools::fs::lock_dir_noblock_shared; + use crate::{ api2::{ helpers, @@ -50,10 +52,7 @@ use crate::{ WorkerTask, H2Service, }, - tools::{ - self, - fs::lock_dir_noblock_shared, - }, + tools, config::{ acl::{ PRIV_DATASTORE_READ, diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs index 9cf36b37..0e4a539f 100644 --- a/src/api2/tape/drive.rs +++ b/src/api2/tape/drive.rs @@ -719,7 +719,7 @@ pub async fn read_label( flat.encryption_key_fingerprint = set .encryption_key_fingerprint .as_ref() - .map(|fp| crate::tools::format::as_fingerprint(fp.bytes())); + .map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())); let encrypt_fingerprint = set.encryption_key_fingerprint.clone() .map(|fp| (fp, set.uuid.clone())); diff --git a/src/backup/backup_info.rs b/src/backup/backup_info.rs index 47e2db72..34bf26a3 100644 --- a/src/backup/backup_info.rs +++ b/src/backup/backup_info.rs @@ -1,10 +1,8 @@ -use crate::tools; - -use anyhow::{bail, format_err, Error}; use std::os::unix::io::RawFd; - use std::path::{Path, PathBuf}; +use anyhow::{bail, format_err, Error}; + use crate::api2::types::{ BACKUP_ID_REGEX, BACKUP_TYPE_REGEX, @@ -81,7 +79,7 @@ impl BackupGroup { let mut path = base_path.to_owned(); path.push(self.group_path()); - tools::scandir( + pbs_tools::fs::scandir( libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, @@ -108,7 +106,7 @@ impl BackupGroup { let mut path = base_path.to_owned(); path.push(self.group_path()); - tools::scandir( + pbs_tools::fs::scandir( libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, @@ -342,7 +340,7 @@ impl BackupInfo { pub fn list_backup_groups(base_path: &Path) -> Result, Error> { let mut list = Vec::new(); - tools::scandir( + pbs_tools::fs::scandir( libc::AT_FDCWD, base_path, &BACKUP_TYPE_REGEX, @@ -350,7 +348,7 @@ impl BackupInfo { if file_type != nix::dir::Type::Directory { return Ok(()); } - tools::scandir( + pbs_tools::fs::scandir( 
l0_fd, backup_type, &BACKUP_ID_REGEX, @@ -384,7 +382,7 @@ fn list_backup_files( ) -> Result, Error> { let mut files = vec![]; - tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| { + pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| { if file_type != nix::dir::Type::File { return Ok(()); } diff --git a/src/backup/checksum_reader.rs b/src/backup/checksum_reader.rs index a4fbce59..dd6c9d29 100644 --- a/src/backup/checksum_reader.rs +++ b/src/backup/checksum_reader.rs @@ -2,8 +2,9 @@ use anyhow::{Error}; use std::sync::Arc; use std::io::Read; +use pbs_tools::borrow::Tied; + use super::CryptConfig; -use crate::tools::borrow::Tied; pub struct ChecksumReader { reader: R, diff --git a/src/backup/checksum_writer.rs b/src/backup/checksum_writer.rs index 5aac6511..14a75503 100644 --- a/src/backup/checksum_writer.rs +++ b/src/backup/checksum_writer.rs @@ -3,8 +3,9 @@ use std::io::Write; use anyhow::{Error}; +use pbs_tools::borrow::Tied; + use super::CryptConfig; -use crate::tools::borrow::Tied; pub struct ChecksumWriter { writer: W, diff --git a/src/backup/chunk_store.rs b/src/backup/chunk_store.rs index e9cc3897..1ae85d64 100644 --- a/src/backup/chunk_store.rs +++ b/src/backup/chunk_store.rs @@ -190,7 +190,7 @@ impl ChunkStore { pub fn get_chunk_iterator( &self, ) -> Result< - impl Iterator, usize, bool)> + std::iter::FusedIterator, + impl Iterator, usize, bool)> + std::iter::FusedIterator, Error > { use nix::dir::Dir; @@ -208,7 +208,7 @@ impl ChunkStore { })?; let mut done = false; - let mut inner: Option = None; + let mut inner: Option = None; let mut at = 0; let mut percentage = 0; Ok(std::iter::from_fn(move || { @@ -252,7 +252,7 @@ impl ChunkStore { let subdir: &str = &format!("{:04x}", at); percentage = (at * 100) / 0x10000; at += 1; - match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) { + match pbs_tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) { Ok(dir) => { inner = Some(dir); // start reading: diff --git a/src/backup/crypt_config.rs b/src/backup/crypt_config.rs index 53dc1e41..e8d69e82 100644 --- a/src/backup/crypt_config.rs +++ b/src/backup/crypt_config.rs @@ -17,10 +17,10 @@ use openssl::pkcs5::pbkdf2_hmac; use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode}; use serde::{Deserialize, Serialize}; -use crate::tools::format::{as_fingerprint, bytes_as_fingerprint}; - use proxmox::api::api; +use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint}; + // openssl::sha::sha256(b"Proxmox Backup Encryption Key Fingerprint") /// This constant is used to compute fingerprints. 
const FINGERPRINT_INPUT: [u8; 32] = [ diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs index 116d2441..55458de3 100644 --- a/src/backup/datastore.rs +++ b/src/backup/datastore.rs @@ -12,6 +12,9 @@ use lazy_static::lazy_static; use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked}; +use pbs_tools::format::HumanByte; +use pbs_tools::fs::{lock_dir_noblock, DirLockGuard}; + use super::backup_info::{BackupGroup, BackupDir}; use super::chunk_store::ChunkStore; use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; @@ -22,8 +25,6 @@ use super::{DataBlob, ArchiveType, archive_type}; use crate::config::datastore::{self, DataStoreConfig}; use crate::task::TaskState; use crate::tools; -use crate::tools::format::HumanByte; -use crate::tools::fs::{lock_dir_noblock, DirLockGuard}; use crate::api2::types::{Authid, GarbageCollectionStatus}; use crate::server::UPID; @@ -110,7 +111,7 @@ impl DataStore { pub fn get_chunk_iterator( &self, ) -> Result< - impl Iterator, usize, bool)>, + impl Iterator, usize, bool)>, Error > { self.chunk_store.get_chunk_iterator() @@ -215,7 +216,7 @@ impl DataStore { wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string()); manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); }); - for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? { + for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? { if let Ok(item) = item { if let Some(file_type) = item.file_type() { if file_type != nix::dir::Type::File { continue; } @@ -254,7 +255,7 @@ impl DataStore { let full_path = self.group_path(backup_group); - let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?; + let _guard = pbs_tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?; log::info!("removing backup group {:?}", full_path); diff --git a/src/backup/key_derivation.rs b/src/backup/key_derivation.rs index 5b46a70c..7276e4e2 100644 --- a/src/backup/key_derivation.rs +++ b/src/backup/key_derivation.rs @@ -100,7 +100,7 @@ impl From<&KeyConfig> for KeyInfo { fingerprint: key_config .fingerprint .as_ref() - .map(|fp| crate::tools::format::as_fingerprint(fp.bytes())), + .map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())), hint: key_config.hint.clone(), } } diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 503a404b..59aa25d0 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -6,6 +6,8 @@ use std::time::Instant; use anyhow::{bail, format_err, Error}; +use pbs_tools::fs::lock_dir_noblock_shared; + use crate::{ api2::types::*, backup::{ @@ -25,7 +27,6 @@ use crate::{ server::UPID, task::TaskState, task_log, - tools::fs::lock_dir_noblock_shared, tools::ParallelHandler, }; @@ -577,4 +578,4 @@ pub fn verify_filter( } } } -} \ No newline at end of file +} diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs index bd52abbc..6ec0a805 100644 --- a/src/bin/proxmox-backup-client.rs +++ b/src/bin/proxmox-backup-client.rs @@ -280,7 +280,7 @@ async fn list_backup_groups(param: Value) -> Result { let render_files = |_v: &Value, record: &Value| -> Result { let item: GroupListItem = serde_json::from_value(record.to_owned())?; - Ok(tools::format::render_backup_file_list(&item.files)) + Ok(pbs_tools::format::render_backup_file_list(&item.files)) }; let options = default_table_format_options() @@ -1300,7 +1300,7 @@ async fn prune_async(mut param: Value) -> Result { .sortby("backup-id", false) 
.sortby("backup-time", false) .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot")) - .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date")) + .column(ColumnConfig::new("backup-time").renderer(pbs_tools::format::render_epoch).header("date")) .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action")) ; diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index a7e7eecf..79f80513 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -139,11 +139,12 @@ async fn task_list(param: Value) -> Result { let mut data = result["data"].take(); let return_type = &api2::node::tasks::API_METHOD_LIST_TASKS.returns; + use pbs_tools::format::{render_epoch, render_task_status}; let options = default_table_format_options() - .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch)) - .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch)) + .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch)) + .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch)) .column(ColumnConfig::new("upid")) - .column(ColumnConfig::new("status").renderer(tools::format::render_task_status)); + .column(ColumnConfig::new("status").renderer(render_task_status)); format_and_print_result_full(&mut data, return_type, &output_format, &options); diff --git a/src/bin/proxmox-tape.rs b/src/bin/proxmox-tape.rs index c095fe43..e2e88781 100644 --- a/src/bin/proxmox-tape.rs +++ b/src/bin/proxmox-tape.rs @@ -14,12 +14,13 @@ use proxmox::{ }, }; +use pbs_tools::format::{ + HumanByte, + render_epoch, + render_bytes_human_readable, +}; + use proxmox_backup::{ - tools::format::{ - HumanByte, - render_epoch, - render_bytes_human_readable, - }, client::{ connect_to_localhost, view_task_result, diff --git a/src/bin/proxmox_backup_client/key.rs b/src/bin/proxmox_backup_client/key.rs index c442fad9..39ff44d2 100644 --- a/src/bin/proxmox_backup_client/key.rs +++ b/src/bin/proxmox_backup_client/key.rs @@ -272,8 +272,8 @@ fn show_key(path: Option, param: Value) -> Result<(), Error> { let options = proxmox::api::cli::default_table_format_options() .column(ColumnConfig::new("path")) .column(ColumnConfig::new("kdf")) - .column(ColumnConfig::new("created").renderer(tools::format::render_epoch)) - .column(ColumnConfig::new("modified").renderer(tools::format::render_epoch)) + .column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch)) + .column(ColumnConfig::new("modified").renderer(pbs_tools::format::render_epoch)) .column(ColumnConfig::new("fingerprint")) .column(ColumnConfig::new("hint")); diff --git a/src/bin/proxmox_backup_client/snapshot.rs b/src/bin/proxmox_backup_client/snapshot.rs index a98b1ca2..7deb6647 100644 --- a/src/bin/proxmox_backup_client/snapshot.rs +++ b/src/bin/proxmox_backup_client/snapshot.rs @@ -87,7 +87,7 @@ async fn list_snapshots(param: Value) -> Result { for file in &item.files { filenames.push(file.filename.to_string()); } - Ok(tools::format::render_backup_file_list(&filenames[..])) + Ok(pbs_tools::format::render_backup_file_list(&filenames[..])) }; let options = default_table_format_options() @@ -95,7 +95,7 @@ async fn list_snapshots(param: Value) -> Result { .sortby("backup-id", false) .sortby("backup-time", false) .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot")) - 
.column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable)) + .column(ColumnConfig::new("size").renderer(pbs_tools::format::render_bytes_human_readable)) .column(ColumnConfig::new("files").renderer(render_files)) ; diff --git a/src/bin/proxmox_backup_client/task.rs b/src/bin/proxmox_backup_client/task.rs index dc71ab27..e7683932 100644 --- a/src/bin/proxmox_backup_client/task.rs +++ b/src/bin/proxmox_backup_client/task.rs @@ -66,11 +66,12 @@ async fn task_list(param: Value) -> Result { let return_type = &proxmox_backup::api2::node::tasks::API_METHOD_LIST_TASKS.returns; + use pbs_tools::format::{render_epoch, render_task_status}; let options = default_table_format_options() - .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch)) - .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch)) + .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch)) + .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch)) .column(ColumnConfig::new("upid")) - .column(ColumnConfig::new("status").renderer(tools::format::render_task_status)); + .column(ColumnConfig::new("status").renderer(render_task_status)); format_and_print_result_full(&mut data, return_type, &output_format, &options); diff --git a/src/bin/proxmox_backup_manager/user.rs b/src/bin/proxmox_backup_manager/user.rs index 6603db1b..b7935214 100644 --- a/src/bin/proxmox_backup_manager/user.rs +++ b/src/bin/proxmox_backup_manager/user.rs @@ -6,7 +6,6 @@ use std::collections::HashMap; use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler}; use proxmox_backup::config; -use proxmox_backup::tools; use proxmox_backup::api2; use proxmox_backup::api2::types::{ACL_PATH_SCHEMA, Authid, Userid}; @@ -52,7 +51,7 @@ fn list_users(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result Result) -> Ve pub fn complete_archive_name(arg: &str, param: &HashMap) -> Vec { complete_server_file_name(arg, param) .iter() - .map(|v| tools::format::strip_server_file_extension(&v)) + .map(|v| pbs_tools::format::strip_server_file_extension(&v)) .collect() } @@ -243,7 +243,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap) -> .iter() .filter_map(|name| { if name.ends_with(".pxar.didx") { - Some(tools::format::strip_server_file_extension(name)) + Some(pbs_tools::format::strip_server_file_extension(name)) } else { None } @@ -256,7 +256,7 @@ pub fn complete_img_archive_name(arg: &str, param: &HashMap) -> .iter() .filter_map(|name| { if name.ends_with(".img.fidx") { - Some(tools::format::strip_server_file_extension(name)) + Some(pbs_tools::format::strip_server_file_extension(name)) } else { None } diff --git a/src/bin/proxmox_restore_daemon/api.rs b/src/bin/proxmox_restore_daemon/api.rs index d7355370..b3721160 100644 --- a/src/bin/proxmox_restore_daemon/api.rs +++ b/src/bin/proxmox_restore_daemon/api.rs @@ -1,4 +1,9 @@ ///! 
File-restore API running inside the restore VM +use std::ffi::OsStr; +use std::fs; +use std::os::unix::ffi::OsStrExt; +use std::path::{Path, PathBuf}; + use anyhow::{bail, Error}; use futures::FutureExt; use hyper::http::request::Parts; @@ -8,21 +13,18 @@ use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern}; use serde_json::Value; use tokio::sync::Semaphore; -use std::ffi::OsStr; -use std::fs; -use std::os::unix::ffi::OsStrExt; -use std::path::{Path, PathBuf}; - use proxmox::api::{ api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap, }; use proxmox::{identity, list_subdirs_api_method, sortable}; +use pbs_tools::fs::read_subdir; + use proxmox_backup::api2::types::*; use proxmox_backup::backup::DirEntryAttribute; use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES}; -use proxmox_backup::tools::{self, fs::read_subdir, zip::zip_directory}; +use proxmox_backup::tools::{self, zip::zip_directory}; use pxar::encoder::aio::TokioWriter; diff --git a/src/bin/proxmox_restore_daemon/disk.rs b/src/bin/proxmox_restore_daemon/disk.rs index 42b8d496..1bcfc798 100644 --- a/src/bin/proxmox_restore_daemon/disk.rs +++ b/src/bin/proxmox_restore_daemon/disk.rs @@ -366,7 +366,7 @@ impl DiskState { // create mapping for virtio drives and .fidx files (via serial description) // note: disks::DiskManager relies on udev, which we don't have - for entry in proxmox_backup::tools::fs::scan_subdir( + for entry in pbs_tools::fs::scan_subdir( libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX, @@ -411,7 +411,7 @@ impl DiskState { } let mut parts = Vec::new(); - for entry in proxmox_backup::tools::fs::scan_subdir( + for entry in pbs_tools::fs::scan_subdir( libc::AT_FDCWD, sys_path, &VIRTIO_PART_REGEX, diff --git a/src/bin/proxmox_tape/encryption_key.rs b/src/bin/proxmox_tape/encryption_key.rs index 907d2d63..6fcd2877 100644 --- a/src/bin/proxmox_tape/encryption_key.rs +++ b/src/bin/proxmox_tape/encryption_key.rs @@ -13,7 +13,6 @@ use proxmox::{ use proxmox_backup::{ tools::{ - self, paperkey::{ PaperkeyFormat, generate_paper_key, @@ -144,8 +143,8 @@ fn show_key( let options = proxmox::api::cli::default_table_format_options() .column(ColumnConfig::new("kdf")) - .column(ColumnConfig::new("created").renderer(tools::format::render_epoch)) - .column(ColumnConfig::new("modified").renderer(tools::format::render_epoch)) + .column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch)) + .column(ColumnConfig::new("modified").renderer(pbs_tools::format::render_epoch)) .column(ColumnConfig::new("fingerprint")) .column(ColumnConfig::new("hint")); diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs index 1e54d39d..7ef90793 100644 --- a/src/client/backup_writer.rs +++ b/src/client/backup_writer.rs @@ -14,9 +14,10 @@ use tokio_stream::wrappers::ReceiverStream; use proxmox::tools::digest_to_hex; +use pbs_tools::format::HumanByte; + use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo}; use crate::backup::*; -use crate::tools::format::HumanByte; use super::{H2Client, HttpClient}; @@ -333,7 +334,7 @@ impl BackupWriter { let archive = if self.verbose { archive_name.to_string() } else { - crate::tools::format::strip_server_file_extension(archive_name) + pbs_tools::format::strip_server_file_extension(archive_name) }; if archive_name != CATALOG_NAME { let speed: HumanByte = diff --git a/src/config/acme/mod.rs b/src/config/acme/mod.rs index 2534471a..2c5af756 100644 --- a/src/config/acme/mod.rs +++ 
b/src/config/acme/mod.rs @@ -69,7 +69,7 @@ pub fn foreach_acme_account(mut func: F) -> Result<(), Error> where F: FnMut(AcmeAccountName) -> ControlFlow>, { - match crate::tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) { + match pbs_tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) { Ok(files) => { for file in files { let file = file?; diff --git a/src/config/tape_encryption_keys.rs b/src/config/tape_encryption_keys.rs index 42c3184d..e3aab1d8 100644 --- a/src/config/tape_encryption_keys.rs +++ b/src/config/tape_encryption_keys.rs @@ -225,5 +225,5 @@ pub fn complete_key_fingerprint(_arg: &str, _param: &HashMap) -> Err(_) => return Vec::new(), }; - data.keys().map(|fp| crate::tools::format::as_fingerprint(fp.bytes())).collect() + data.keys().map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())).collect() } diff --git a/src/pxar/create.rs b/src/pxar/create.rs index 71bf2999..011e2d8e 100644 --- a/src/pxar/create.rs +++ b/src/pxar/create.rs @@ -24,11 +24,13 @@ use proxmox::sys::error::SysError; use proxmox::tools::fd::RawFdNum; use proxmox::tools::vec; +use pbs_tools::fs; + use crate::pxar::catalog::BackupCatalogWriter; use crate::pxar::metadata::errno_is_unsupported; use crate::pxar::Flags; use crate::pxar::tools::assert_single_path_component; -use crate::tools::{acl, fs, xattr, Fd}; +use crate::tools::{acl, xattr, Fd}; /// Pxar options for creating a pxar archive/stream #[derive(Default, Clone)] diff --git a/src/pxar/metadata.rs b/src/pxar/metadata.rs index 666af70e..e399c63c 100644 --- a/src/pxar/metadata.rs +++ b/src/pxar/metadata.rs @@ -13,9 +13,11 @@ use proxmox::c_result; use proxmox::sys::error::SysError; use proxmox::tools::fd::RawFdNum; +use pbs_tools::fs; + use crate::pxar::tools::perms_from_metadata; use crate::pxar::Flags; -use crate::tools::{acl, fs, xattr}; +use crate::tools::{acl, xattr}; // // utility functions diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs index 8c72250e..2362b673 100644 --- a/src/server/email_notifications.rs +++ b/src/server/email_notifications.rs @@ -7,6 +7,8 @@ use proxmox::tools::email::sendmail; use proxmox::api::schema::parse_property_string; use proxmox::try_block; +use pbs_tools::format::HumanByte; + use crate::{ config::datastore::DataStoreConfig, config::verify::VerificationJobConfig, @@ -19,7 +21,6 @@ use crate::{ Notify, DatastoreNotify, }, - tools::format::HumanByte, }; const GC_OK_TEMPLATE: &str = r###" diff --git a/src/tape/drive/lto/mod.rs b/src/tape/drive/lto/mod.rs index 8c319b07..d919d536 100644 --- a/src/tape/drive/lto/mod.rs +++ b/src/tape/drive/lto/mod.rs @@ -409,7 +409,7 @@ impl TapeDriver for LtoTapeHandle { } let output = if let Some((fingerprint, uuid)) = key_fingerprint { - let fingerprint = crate::tools::format::as_fingerprint(fingerprint.bytes()); + let fingerprint = pbs_tools::format::as_fingerprint(fingerprint.bytes()); run_sg_tape_cmd("encryption", &[ "--fingerprint", &fingerprint, "--uuid", &uuid.to_string(), diff --git a/src/tape/helpers/snapshot_reader.rs b/src/tape/helpers/snapshot_reader.rs index 416c88c1..78043953 100644 --- a/src/tape/helpers/snapshot_reader.rs +++ b/src/tape/helpers/snapshot_reader.rs @@ -6,8 +6,9 @@ use std::fs::File; use anyhow::{bail, Error}; use nix::dir::Dir; +use pbs_tools::fs::lock_dir_noblock_shared; + use crate::{ - tools::fs::lock_dir_noblock_shared, backup::{ DataStore, BackupDir, diff --git a/src/tape/linux_list_drives.rs b/src/tape/linux_list_drives.rs index 78ee6e42..a6458b97 100644 --- 
a/src/tape/linux_list_drives.rs +++ b/src/tape/linux_list_drives.rs @@ -3,13 +3,14 @@ use std::collections::HashMap; use anyhow::{bail, Error}; +use pbs_tools::fs::scan_subdir; + use crate::{ api2::types::{ DeviceKind, OptionalDeviceIdentification, TapeDeviceInfo, }, - tools::fs::scan_subdir, }; lazy_static::lazy_static!{ diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs index 8be97a36..65b52a42 100644 --- a/src/tape/media_catalog.rs +++ b/src/tape/media_catalog.rs @@ -8,6 +8,8 @@ use std::collections::{HashSet, HashMap}; use anyhow::{bail, format_err, Error}; use endian_trait::Endian; +use pbs_tools::fs::read_subdir; + use proxmox::tools::{ Uuid, fs::{ @@ -22,7 +24,6 @@ use proxmox::tools::{ }; use crate::{ - tools::fs::read_subdir, backup::BackupDir, tape::{ MediaId, diff --git a/src/tools/borrow.rs b/src/tools/borrow.rs deleted file mode 100644 index 66b68ada..00000000 --- a/src/tools/borrow.rs +++ /dev/null @@ -1,59 +0,0 @@ -/// This ties two values T and U together, such that T does not move and cannot be used as long as -/// there's an U. This essentially replaces the borrow checker's job for dependent values which -/// need to be stored together in a struct {}, and is similar to what the 'rental' crate produces. -pub struct Tied(Option>, Option>); - -impl Drop for Tied { - fn drop(&mut self) { - // let's be explicit about order here! - std::mem::drop(self.1.take()); - } -} - -impl Tied { - /// Takes an owner and a function producing the depending value. The owner will be inaccessible - /// until the tied value is resolved. The dependent value is only accessible by reference. - pub fn new(owner: T, producer: F) -> Self - where - F: FnOnce(*mut T) -> Box, - { - let mut owner = Box::new(owner); - let dep = producer(&mut *owner); - Tied(Some(owner), Some(dep)) - } - - pub fn into_boxed_inner(mut self) -> Box { - self.1 = None; - self.0.take().unwrap() - } - - pub fn into_inner(self) -> T { - *self.into_boxed_inner() - } -} - -impl AsRef for Tied { - fn as_ref(&self) -> &U { - self.1.as_ref().unwrap() - } -} - -impl AsMut for Tied { - fn as_mut(&mut self) -> &mut U { - self.1.as_mut().unwrap() - } -} - -impl std::ops::Deref for Tied { - type Target = U; - - fn deref(&self) -> &U { - self.as_ref() - } -} - -impl std::ops::DerefMut for Tied { - fn deref_mut(&mut self) -> &mut U { - self.as_mut() - } -} diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 8573695d..1a72a6c8 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -485,7 +485,7 @@ impl Disk { let mut map = HashMap::new(); - for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? { + for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? { let item = item?; let name = match item.file_name().to_str() { Ok(name) => name, @@ -661,7 +661,7 @@ fn scan_partitions( let mut found_dm = false; let mut found_partitions = false; - for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? { + for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? { let item = item?; let name = match item.file_name().to_str() { Ok(name) => name, @@ -749,7 +749,7 @@ pub fn get_disks( let mut result = HashMap::new(); - for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? { + for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? 
{ let item = item?; let name = item.file_name().to_str().unwrap().to_string(); @@ -959,7 +959,7 @@ pub fn create_file_system(disk: &Disk, fs_type: FileSystemType) -> Result<(), Er pub fn complete_disk_name(_arg: &str, _param: &HashMap) -> Vec { let mut list = Vec::new(); - let dir = match crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) { + let dir = match pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) { Ok(dir) => dir, Err(_) => return list, }; diff --git a/src/tools/format.rs b/src/tools/format.rs deleted file mode 100644 index 70d0490b..00000000 --- a/src/tools/format.rs +++ /dev/null @@ -1,149 +0,0 @@ -use anyhow::{Error}; -use serde_json::Value; - -pub fn strip_server_file_extension(name: &str) -> String { - - if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { - name[..name.len()-5].to_owned() - } else { - name.to_owned() // should not happen - } -} - -pub fn render_backup_file_list(files: &[String]) -> String { - let mut files: Vec = files.iter() - .map(|v| strip_server_file_extension(&v)) - .collect(); - - files.sort(); - - super::join(&files, ' ') -} - -pub fn render_epoch(value: &Value, _record: &Value) -> Result { - if value.is_null() { return Ok(String::new()); } - let text = match value.as_i64() { - Some(epoch) => { - if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) { - epoch_string - } else { - epoch.to_string() - } - }, - None => { - value.to_string() - } - }; - Ok(text) -} - -pub fn render_task_status(value: &Value, record: &Value) -> Result { - if record["endtime"].is_null() { - Ok(value.as_str().unwrap_or("running").to_string()) - } else { - Ok(value.as_str().unwrap_or("unknown").to_string()) - } -} - -pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result { - let value = value.as_bool().unwrap_or(true); - Ok((if value { "1" } else { "0" }).to_string()) -} - -pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result { - if value.is_null() { return Ok(String::new()); } - let text = match value.as_u64() { - Some(bytes) => { - HumanByte::from(bytes).to_string() - } - None => { - value.to_string() - } - }; - Ok(text) -} - -pub struct HumanByte { - b: usize, -} -impl std::fmt::Display for HumanByte { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.b < 1024 { - return write!(f, "{} B", self.b); - } - let kb: f64 = self.b as f64 / 1024.0; - if kb < 1024.0 { - return write!(f, "{:.2} KiB", kb); - } - let mb: f64 = kb / 1024.0; - if mb < 1024.0 { - return write!(f, "{:.2} MiB", mb); - } - let gb: f64 = mb / 1024.0; - if gb < 1024.0 { - return write!(f, "{:.2} GiB", gb); - } - let tb: f64 = gb / 1024.0; - if tb < 1024.0 { - return write!(f, "{:.2} TiB", tb); - } - let pb: f64 = tb / 1024.0; - return write!(f, "{:.2} PiB", pb); - } -} -impl From for HumanByte { - fn from(v: usize) -> Self { - HumanByte { b: v } - } -} -impl From for HumanByte { - fn from(v: u64) -> Self { - HumanByte { b: v as usize } - } -} - -pub fn as_fingerprint(bytes: &[u8]) -> String { - proxmox::tools::digest_to_hex(bytes) - .as_bytes() - .chunks(2) - .map(|v| std::str::from_utf8(v).unwrap()) - .collect::>().join(":") -} - -pub mod bytes_as_fingerprint { - use serde::{Deserialize, Serializer, Deserializer}; - - pub fn serialize( - bytes: &[u8; 32], - serializer: S, - ) -> Result - where - S: Serializer, - { - let s = crate::tools::format::as_fingerprint(bytes); - serializer.serialize_str(&s) - } - - 
pub fn deserialize<'de, D>( - deserializer: D, - ) -> Result<[u8; 32], D::Error> - where - D: Deserializer<'de>, - { - let mut s = String::deserialize(deserializer)?; - s.retain(|c| c != ':'); - proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom) - } -} - -#[test] -fn correct_byte_convert() { - fn convert(b: usize) -> String { - HumanByte::from(b).to_string() - } - assert_eq!(convert(1023), "1023 B"); - assert_eq!(convert(1<<10), "1.00 KiB"); - assert_eq!(convert(1<<20), "1.00 MiB"); - assert_eq!(convert((1<<30) + 103 * (1<<20)), "1.10 GiB"); - assert_eq!(convert((2<<50) + 500 * (1<<40)), "2.49 PiB"); -} diff --git a/src/tools/fs.rs b/src/tools/fs.rs deleted file mode 100644 index 6e0b1271..00000000 --- a/src/tools/fs.rs +++ /dev/null @@ -1,314 +0,0 @@ -//! File system helper utilities. - -use std::borrow::{Borrow, BorrowMut}; -use std::ops::{Deref, DerefMut}; -use std::os::unix::io::{AsRawFd, RawFd}; - -use anyhow::{format_err, Error}; -use nix::dir; -use nix::dir::Dir; -use nix::fcntl::OFlag; -use nix::sys::stat::Mode; - -use regex::Regex; - -use proxmox::sys::error::SysError; - - -use crate::tools::borrow::Tied; - -pub type DirLockGuard = Dir; - -/// This wraps nix::dir::Entry with the parent directory's file descriptor. -pub struct ReadDirEntry { - entry: dir::Entry, - parent_fd: RawFd, -} - -impl Into for ReadDirEntry { - fn into(self) -> dir::Entry { - self.entry - } -} - -impl Deref for ReadDirEntry { - type Target = dir::Entry; - - fn deref(&self) -> &Self::Target { - &self.entry - } -} - -impl DerefMut for ReadDirEntry { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.entry - } -} - -impl AsRef for ReadDirEntry { - fn as_ref(&self) -> &dir::Entry { - &self.entry - } -} - -impl AsMut for ReadDirEntry { - fn as_mut(&mut self) -> &mut dir::Entry { - &mut self.entry - } -} - -impl Borrow for ReadDirEntry { - fn borrow(&self) -> &dir::Entry { - &self.entry - } -} - -impl BorrowMut for ReadDirEntry { - fn borrow_mut(&mut self) -> &mut dir::Entry { - &mut self.entry - } -} - -impl ReadDirEntry { - #[inline] - pub fn parent_fd(&self) -> RawFd { - self.parent_fd - } - - pub unsafe fn file_name_utf8_unchecked(&self) -> &str { - std::str::from_utf8_unchecked(self.file_name().to_bytes()) - } -} - -// Since Tied implements Deref to U, a Tied already implements Iterator. -// This is simply a wrapper with a shorter type name mapping nix::Error to anyhow::Error. -/// Wrapper over a pair of `nix::dir::Dir` and `nix::dir::Iter`, returned by `read_subdir()`. -pub struct ReadDir { - iter: Tied> + Send>, - dir_fd: RawFd, -} - -impl Iterator for ReadDir { - type Item = Result; - - fn next(&mut self) -> Option { - self.iter.next().map(|res| { - res.map(|entry| ReadDirEntry { entry, parent_fd: self.dir_fd }) - .map_err(Error::from) - }) - } -} - -/// Create an iterator over sub directory entries. -/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path. -pub fn read_subdir(dirfd: RawFd, path: &P) -> nix::Result { - let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?; - let fd = dir.as_raw_fd(); - let iter = Tied::new(dir, |dir| { - Box::new(unsafe { (*dir).iter() }) - as Box> + Send> - }); - Ok(ReadDir { iter, dir_fd: fd }) -} - -/// Scan through a directory with a regular expression. This is simply a shortcut filtering the -/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored. 
-pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>( - dirfd: RawFd, - path: &P, - regex: &'a regex::Regex, -) -> Result> + 'a, nix::Error> { - Ok(read_subdir(dirfd, path)?.filter_file_name_regex(regex)) -} - -/// Helper trait to provide a combinators for directory entry iterators. -pub trait FileIterOps -where - Self: Sized + Iterator>, - T: Borrow, - E: Into + Send + Sync, -{ - /// Filter by file type. This is more convenient than using the `filter` method alone as this - /// also includes error handling and handling of files without a type (via an error). - fn filter_file_type(self, ty: dir::Type) -> FileTypeFilter { - FileTypeFilter { inner: self, ty } - } - - /// Filter by file name. Note that file names which aren't valid utf-8 will be treated as if - /// they do not match the pattern. - fn filter_file_name_regex(self, regex: &Regex) -> FileNameRegexFilter { - FileNameRegexFilter { inner: self, regex } - } -} - -impl FileIterOps for I -where - I: Iterator>, - T: Borrow, - E: Into + Send + Sync, -{ -} - -/// This filters files from its inner iterator by a file type. Files with no type produce an error. -pub struct FileTypeFilter -where - I: Iterator>, - T: Borrow, - E: Into + Send + Sync, -{ - inner: I, - ty: nix::dir::Type, -} - -impl Iterator for FileTypeFilter -where - I: Iterator>, - T: Borrow, - E: Into + Send + Sync, -{ - type Item = Result; - - fn next(&mut self) -> Option { - loop { - let item = self.inner.next()?.map_err(|e| e.into()); - match item { - Ok(ref entry) => match entry.borrow().file_type() { - Some(ty) => { - if ty == self.ty { - return Some(item); - } else { - continue; - } - } - None => return Some(Err(format_err!("unable to detect file type"))), - }, - Err(_) => return Some(item), - } - } - } -} - -/// This filters files by name via a Regex. Files whose file name aren't valid utf-8 are skipped -/// silently. 
-pub struct FileNameRegexFilter<'a, I, T, E> -where - I: Iterator>, - T: Borrow, -{ - inner: I, - regex: &'a Regex, -} - -impl Iterator for FileNameRegexFilter<'_, I, T, E> -where - I: Iterator>, - T: Borrow, -{ - type Item = Result; - - fn next(&mut self) -> Option { - loop { - let item = self.inner.next()?; - match item { - Ok(ref entry) => { - if let Ok(name) = entry.borrow().file_name().to_str() { - if self.regex.is_match(name) { - return Some(item); - } - } - // file did not match regex or isn't valid utf-8 - continue; - }, - Err(_) => return Some(item), - } - } - } -} - -// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long) -// read Linux file system attributes (see man chattr) -nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long); -nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long); - -// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) -// read FAT file system attributes -nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32); -nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32); - -// From /usr/include/linux/fs.h -// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr) -// #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) -nix::ioctl_read!(fs_ioc_fsgetxattr, b'X', 31, FSXAttr); -nix::ioctl_write_ptr!(fs_ioc_fssetxattr, b'X', 32, FSXAttr); - -#[repr(C)] -#[derive(Debug)] -pub struct FSXAttr { - pub fsx_xflags: u32, - pub fsx_extsize: u32, - pub fsx_nextents: u32, - pub fsx_projid: u32, - pub fsx_cowextsize: u32, - pub fsx_pad: [u8; 8], -} - -impl Default for FSXAttr { - fn default() -> Self { - FSXAttr { - fsx_xflags: 0u32, - fsx_extsize: 0u32, - fsx_nextents: 0u32, - fsx_projid: 0u32, - fsx_cowextsize: 0u32, - fsx_pad: [0u8; 8], - } - } -} - -/// Attempt to acquire a shared flock on the given path, 'what' and -/// 'would_block_message' are used for error formatting. -pub fn lock_dir_noblock_shared( - path: &std::path::Path, - what: &str, - would_block_msg: &str, -) -> Result { - do_lock_dir_noblock(path, what, would_block_msg, false) -} - -/// Attempt to acquire an exclusive flock on the given path, 'what' and -/// 'would_block_message' are used for error formatting. 
-pub fn lock_dir_noblock( - path: &std::path::Path, - what: &str, - would_block_msg: &str, -) -> Result { - do_lock_dir_noblock(path, what, would_block_msg, true) -} - -fn do_lock_dir_noblock( - path: &std::path::Path, - what: &str, - would_block_msg: &str, - exclusive: bool, -) -> Result { - let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty()) - .map_err(|err| { - format_err!("unable to open {} directory {:?} for locking - {}", what, path, err) - })?; - - // acquire in non-blocking mode, no point in waiting here since other - // backups could still take a very long time - proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0))) - .map_err(|err| { - format_err!( - "unable to acquire lock on {} directory {:?} - {}", what, path, - if err.would_block() { - String::from(would_block_msg) - } else { - err.to_string() - } - ) - })?; - - Ok(handle) -} diff --git a/src/tools/fuse_loop.rs b/src/tools/fuse_loop.rs index b93381a5..68d8b0a9 100644 --- a/src/tools/fuse_loop.rs +++ b/src/tools/fuse_loop.rs @@ -19,7 +19,6 @@ use proxmox::const_regex; use proxmox::tools::time; use proxmox_fuse::{*, requests::FuseRequest}; use super::loopdev; -use super::fs; const RUN_DIR: &str = "/run/pbs-loopdev"; @@ -356,7 +355,7 @@ fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(), pub fn find_all_mappings() -> Result)>, Error> { // get map of all /dev/loop mappings belonging to us let mut loopmap = HashMap::new(); - for ent in fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? { + for ent in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? { if let Ok(ent) = ent { let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy()); if let Ok(file) = get_backing_file(&loopdev) { @@ -366,7 +365,7 @@ pub fn find_all_mappings() -> Result { diff --git a/src/tools/mod.rs b/src/tools/mod.rs index 23943990..d092b95a 100644 --- a/src/tools/mod.rs +++ b/src/tools/mod.rs @@ -2,7 +2,6 @@ //! //! This is a collection of small and useful tools. use std::any::Any; -use std::borrow::Borrow; use std::collections::HashMap; use std::hash::BuildHasher; use std::fs::File; @@ -27,15 +26,12 @@ use proxmox_http::{ pub mod acl; pub mod apt; pub mod async_io; -pub mod borrow; pub mod cert; pub mod compression; pub mod config; pub mod cpio; pub mod daemon; pub mod disks; -pub mod format; -pub mod fs; pub mod fuse_loop; mod memcom; @@ -235,38 +231,6 @@ where result } -/// Scan directory for matching file names. -/// -/// Scan through all directory entries and call `callback()` function -/// if the entry name matches the regular expression. This function -/// used unix `openat()`, so you can pass absolute or relative file -/// names. This function simply skips non-UTF8 encoded names. -pub fn scandir( - dirfd: RawFd, - path: &P, - regex: ®ex::Regex, - mut callback: F, -) -> Result<(), Error> -where - F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>, - P: ?Sized + nix::NixPath, -{ - for entry in self::fs::scan_subdir(dirfd, path, regex)? { - let entry = entry?; - let file_type = match entry.file_type() { - Some(file_type) => file_type, - None => bail!("unable to detect file type"), - }; - - callback( - entry.parent_fd(), - unsafe { entry.file_name_utf8_unchecked() }, - file_type, - )?; - } - Ok(()) -} - /// Shortcut for md5 sums. 
 pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
     hash(MessageDigest::md5(), data).map_err(Error::from)
@@ -317,19 +281,6 @@ pub fn percent_encode_component(comp: &str) -> String {
     utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
 }

-pub fn join<S: Borrow<str>>(data: &[S], sep: char) -> String {
-    let mut list = String::new();
-
-    for item in data {
-        if !list.is_empty() {
-            list.push(sep);
-        }
-        list.push_str(item.borrow());
-    }
-
-    list
-}
-
 /// Detect modified configuration files
 ///
 /// This function fails with a reasonable error message if checksums do not match.
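
For reference, a few usage sketches of the relocated helpers follow. None of this code is part of the commit itself; it assumes a hypothetical consumer crate that depends on `pbs-tools` by path, plus `anyhow`, `libc` and `regex` where noted.

`pbs_tools::borrow::Tied` boxes an owner T together with a dependent value U produced from a raw pointer to that owner, and drops the dependent value first. `read_subdir()` above uses it to hand out a `Dir` and its `Iter` as one object; the hypothetical `owned_lines()` below does the same for a `String` and an iterator over its lines:

    use pbs_tools::borrow::Tied;

    /// Tie a String to an iterator over its lines so both can be returned together
    /// (illustrative only, mirroring what `read_subdir()` does with Dir/Iter).
    fn owned_lines(text: String) -> Tied<String, dyn Iterator<Item = String>> {
        Tied::new(text, |text: *mut String| {
            // SAFETY: Tied keeps the owner boxed (stable address) and drops the
            // dependent iterator first, so this unbounded borrow never dangles.
            let text: &String = unsafe { &*text };
            Box::new(text.lines().map(str::to_owned)) as Box<dyn Iterator<Item = String>>
        })
    }

    fn main() {
        // Tied derefs to U, so the result is usable directly as an iterator.
        let mut lines = owned_lines("foo\nbar".to_string());
        assert_eq!(lines.next().as_deref(), Some("foo"));
        assert_eq!(lines.next().as_deref(), Some("bar"));
        assert_eq!(lines.next(), None);
    }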
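
The format and string helpers keep their signatures; only the import path changes from `crate::tools::format` and `crate::tools::join` to `pbs_tools::format` and `pbs_tools::str::join`:

    use pbs_tools::format::{as_fingerprint, strip_server_file_extension, HumanByte};
    use pbs_tools::str::join;

    fn main() {
        // HumanByte renders byte counts with binary (IEC) suffixes.
        assert_eq!(HumanByte::from(1u64 << 20).to_string(), "1.00 MiB");

        // strip_server_file_extension() drops the .didx/.fidx/.blob suffix;
        // join() replaces the helper removed from src/tools/mod.rs.
        let names: Vec<String> = ["root.pxar.didx", "catalog.pcat1.didx"]
            .iter()
            .map(|n| strip_server_file_extension(n))
            .collect();
        assert_eq!(join(&names, ' '), "root.pxar catalog.pcat1");

        // as_fingerprint() hex-encodes a digest as colon-separated byte pairs.
        let fp = as_fingerprint(&[0xab_u8; 32]);
        assert!(fp.starts_with("ab:ab:"));
    }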
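
The directory helpers move the same way: `scandir()` leaves `src/tools/mod.rs` and now lives in `pbs_tools::fs` next to `read_subdir()`, `scan_subdir()` and the non-blocking flock helpers. A sketch with a hypothetical caller:

    use anyhow::Error;
    use regex::Regex;

    use pbs_tools::fs::{lock_dir_noblock, scandir, DirLockGuard};

    fn list_locked_dir(path: &std::path::Path) -> Result<(), Error> {
        // Exclusive, non-blocking flock: fails immediately with the given
        // message if another task (e.g. a running backup) holds the lock.
        let _guard: DirLockGuard =
            lock_dir_noblock(path, "backup group", "possible running backup")?;

        // scandir() walks the directory via openat() and calls the closure for
        // every entry whose (UTF-8) name matches the regex; other names are skipped.
        let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
        scandir(libc::AT_FDCWD, path, &re, |_parent_fd, name, file_type| {
            println!("{:?}: {}", file_type, name);
            Ok(())
        })?;

        Ok(())
    }

    fn main() -> Result<(), Error> {
        list_locked_dir(std::path::Path::new("/tmp"))
    }

The "This must not depend on any subcrates more closely related to pbs itself" note in `pbs-tools/Cargo.toml` is meant to keep the crate at the bottom of the workspace's dependency graph, so that client, server and restore-daemon code can all share these helpers.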