members = [
"pbs-buildcfg",
"pbs-runtime",
+ "pbs-tools",
]
[lib]
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-runtime = { path = "pbs-runtime" }
+pbs-tools = { path = "pbs-tools" }
[features]
default = []
SUBCRATES := \
pbs-buildcfg \
- pbs-runtime
+ pbs-runtime \
+ pbs-tools
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release
--- /dev/null
+[package]
+name = "pbs-tools"
+version = "0.1.0"
+authors = ["Proxmox Support Team <support@proxmox.com>"]
+edition = "2018"
+description = "common tools used throughout pbs"
+
+# This must not depend on any subcrates more closely related to pbs itself.
+[dependencies]
+anyhow = "1.0"
+libc = "0.2"
+nix = "0.19.1"
+regex = "1.2"
+serde = "1.0"
+serde_json = "1.0"
+
+proxmox = { version = "0.11.5", default-features = false, features = [] }
--- /dev/null
+/// This ties two values T and U together, such that T does not move and cannot be used as long as
+/// there's an U. This essentially replaces the borrow checker's job for dependent values which
+/// need to be stored together in a struct {}, and is similar to what the 'rental' crate produces.
+pub struct Tied<T, U: ?Sized>(Option<Box<T>>, Option<Box<U>>);
+
+impl<T, U: ?Sized> Drop for Tied<T, U> {
+ fn drop(&mut self) {
+ // let's be explicit about order here!
+ std::mem::drop(self.1.take());
+ }
+}
+
+impl<T, U: ?Sized> Tied<T, U> {
+ /// Takes an owner and a function producing the depending value. The owner will be inaccessible
+ /// until the tied value is resolved. The dependent value is only accessible by reference.
+ pub fn new<F>(owner: T, producer: F) -> Self
+ where
+ F: FnOnce(*mut T) -> Box<U>,
+ {
+ let mut owner = Box::new(owner);
+ let dep = producer(&mut *owner);
+ Tied(Some(owner), Some(dep))
+ }
+
+ pub fn into_boxed_inner(mut self) -> Box<T> {
+ self.1 = None;
+ self.0.take().unwrap()
+ }
+
+ pub fn into_inner(self) -> T {
+ *self.into_boxed_inner()
+ }
+}
+
+impl<T, U: ?Sized> AsRef<U> for Tied<T, U> {
+ fn as_ref(&self) -> &U {
+ self.1.as_ref().unwrap()
+ }
+}
+
+impl<T, U: ?Sized> AsMut<U> for Tied<T, U> {
+ fn as_mut(&mut self) -> &mut U {
+ self.1.as_mut().unwrap()
+ }
+}
+
+impl<T, U: ?Sized> std::ops::Deref for Tied<T, U> {
+ type Target = U;
+
+ fn deref(&self) -> &U {
+ self.as_ref()
+ }
+}
+
+impl<T, U: ?Sized> std::ops::DerefMut for Tied<T, U> {
+ fn deref_mut(&mut self) -> &mut U {
+ self.as_mut()
+ }
+}
--- /dev/null
+use anyhow::{Error};
+use serde_json::Value;
+
+pub fn strip_server_file_extension(name: &str) -> String {
+ if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
+ name[..name.len()-5].to_owned()
+ } else {
+ name.to_owned() // should not happen
+ }
+}
+
+pub fn render_backup_file_list(files: &[String]) -> String {
+ let mut files: Vec<String> = files.iter()
+ .map(|v| strip_server_file_extension(&v))
+ .collect();
+
+ files.sort();
+
+ crate::str::join(&files, ' ')
+}
+
+pub fn render_epoch(value: &Value, _record: &Value) -> Result<String, Error> {
+ if value.is_null() { return Ok(String::new()); }
+ let text = match value.as_i64() {
+ Some(epoch) => {
+ if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) {
+ epoch_string
+ } else {
+ epoch.to_string()
+ }
+ },
+ None => {
+ value.to_string()
+ }
+ };
+ Ok(text)
+}
+
+pub fn render_task_status(value: &Value, record: &Value) -> Result<String, Error> {
+ if record["endtime"].is_null() {
+ Ok(value.as_str().unwrap_or("running").to_string())
+ } else {
+ Ok(value.as_str().unwrap_or("unknown").to_string())
+ }
+}
+
+pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result<String, Error> {
+ let value = value.as_bool().unwrap_or(true);
+ Ok((if value { "1" } else { "0" }).to_string())
+}
+
+pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result<String, Error> {
+ if value.is_null() { return Ok(String::new()); }
+ let text = match value.as_u64() {
+ Some(bytes) => {
+ HumanByte::from(bytes).to_string()
+ }
+ None => {
+ value.to_string()
+ }
+ };
+ Ok(text)
+}
+
+pub struct HumanByte {
+ b: usize,
+}
+impl std::fmt::Display for HumanByte {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if self.b < 1024 {
+ return write!(f, "{} B", self.b);
+ }
+ let kb: f64 = self.b as f64 / 1024.0;
+ if kb < 1024.0 {
+ return write!(f, "{:.2} KiB", kb);
+ }
+ let mb: f64 = kb / 1024.0;
+ if mb < 1024.0 {
+ return write!(f, "{:.2} MiB", mb);
+ }
+ let gb: f64 = mb / 1024.0;
+ if gb < 1024.0 {
+ return write!(f, "{:.2} GiB", gb);
+ }
+ let tb: f64 = gb / 1024.0;
+ if tb < 1024.0 {
+ return write!(f, "{:.2} TiB", tb);
+ }
+ let pb: f64 = tb / 1024.0;
+ return write!(f, "{:.2} PiB", pb);
+ }
+}
+impl From<usize> for HumanByte {
+ fn from(v: usize) -> Self {
+ HumanByte { b: v }
+ }
+}
+impl From<u64> for HumanByte {
+ fn from(v: u64) -> Self {
+ HumanByte { b: v as usize }
+ }
+}
+
+pub fn as_fingerprint(bytes: &[u8]) -> String {
+ proxmox::tools::digest_to_hex(bytes)
+ .as_bytes()
+ .chunks(2)
+ .map(|v| std::str::from_utf8(v).unwrap())
+ .collect::<Vec<&str>>().join(":")
+}
+
+pub mod bytes_as_fingerprint {
+ use serde::{Deserialize, Serializer, Deserializer};
+
+ pub fn serialize<S>(
+ bytes: &[u8; 32],
+ serializer: S,
+ ) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let s = super::as_fingerprint(bytes);
+ serializer.serialize_str(&s)
+ }
+
+ pub fn deserialize<'de, D>(
+ deserializer: D,
+ ) -> Result<[u8; 32], D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let mut s = String::deserialize(deserializer)?;
+ s.retain(|c| c != ':');
+ proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+ }
+}
+
+#[test]
+fn correct_byte_convert() {
+ fn convert(b: usize) -> String {
+ HumanByte::from(b).to_string()
+ }
+ assert_eq!(convert(1023), "1023 B");
+ assert_eq!(convert(1<<10), "1.00 KiB");
+ assert_eq!(convert(1<<20), "1.00 MiB");
+ assert_eq!(convert((1<<30) + 103 * (1<<20)), "1.10 GiB");
+ assert_eq!(convert((2<<50) + 500 * (1<<40)), "2.49 PiB");
+}
--- /dev/null
+//! File system helper utilities.
+
+use std::borrow::{Borrow, BorrowMut};
+use std::ops::{Deref, DerefMut};
+use std::os::unix::io::{AsRawFd, RawFd};
+
+use anyhow::{bail, format_err, Error};
+use nix::dir;
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use regex::Regex;
+
+use proxmox::sys::error::SysError;
+
+use crate::borrow::Tied;
+
+pub type DirLockGuard = Dir;
+
+/// This wraps nix::dir::Entry with the parent directory's file descriptor.
+pub struct ReadDirEntry {
+ entry: dir::Entry,
+ parent_fd: RawFd,
+}
+
+impl Into<dir::Entry> for ReadDirEntry {
+ fn into(self) -> dir::Entry {
+ self.entry
+ }
+}
+
+impl Deref for ReadDirEntry {
+ type Target = dir::Entry;
+
+ fn deref(&self) -> &Self::Target {
+ &self.entry
+ }
+}
+
+impl DerefMut for ReadDirEntry {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.entry
+ }
+}
+
+impl AsRef<dir::Entry> for ReadDirEntry {
+ fn as_ref(&self) -> &dir::Entry {
+ &self.entry
+ }
+}
+
+impl AsMut<dir::Entry> for ReadDirEntry {
+ fn as_mut(&mut self) -> &mut dir::Entry {
+ &mut self.entry
+ }
+}
+
+impl Borrow<dir::Entry> for ReadDirEntry {
+ fn borrow(&self) -> &dir::Entry {
+ &self.entry
+ }
+}
+
+impl BorrowMut<dir::Entry> for ReadDirEntry {
+ fn borrow_mut(&mut self) -> &mut dir::Entry {
+ &mut self.entry
+ }
+}
+
+impl ReadDirEntry {
+ #[inline]
+ pub fn parent_fd(&self) -> RawFd {
+ self.parent_fd
+ }
+
+ pub unsafe fn file_name_utf8_unchecked(&self) -> &str {
+ std::str::from_utf8_unchecked(self.file_name().to_bytes())
+ }
+}
+
+// Since Tied<T, U> implements Deref to U, a Tied<Dir, Iterator> already implements Iterator.
+// This is simply a wrapper with a shorter type name mapping nix::Error to anyhow::Error.
+/// Wrapper over a pair of `nix::dir::Dir` and `nix::dir::Iter`, returned by `read_subdir()`.
+pub struct ReadDir {
+ iter: Tied<Dir, dyn Iterator<Item = nix::Result<dir::Entry>> + Send>,
+ dir_fd: RawFd,
+}
+
+impl Iterator for ReadDir {
+ type Item = Result<ReadDirEntry, Error>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next().map(|res| {
+ res.map(|entry| ReadDirEntry { entry, parent_fd: self.dir_fd })
+ .map_err(Error::from)
+ })
+ }
+}
+
+/// Create an iterator over sub directory entries.
+/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path.
+pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Result<ReadDir> {
+ let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?;
+ let fd = dir.as_raw_fd();
+ let iter = Tied::new(dir, |dir| {
+ Box::new(unsafe { (*dir).iter() })
+ as Box<dyn Iterator<Item = nix::Result<dir::Entry>> + Send>
+ });
+ Ok(ReadDir { iter, dir_fd: fd })
+}
+
+/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
+/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
+pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
+ dirfd: RawFd,
+ path: &P,
+ regex: &'a regex::Regex,
+) -> Result<impl Iterator<Item = Result<ReadDirEntry, Error>> + 'a, nix::Error> {
+ Ok(read_subdir(dirfd, path)?.filter_file_name_regex(regex))
+}
+
+/// Scan directory for matching file names with a callback.
+///
+/// Scan through all directory entries and call `callback()` function
+/// if the entry name matches the regular expression. This function
+/// used unix `openat()`, so you can pass absolute or relative file
+/// names. This function simply skips non-UTF8 encoded names.
+pub fn scandir<P, F>(
+ dirfd: RawFd,
+ path: &P,
+ regex: &regex::Regex,
+ mut callback: F,
+) -> Result<(), Error>
+where
+ F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
+ P: ?Sized + nix::NixPath,
+{
+ for entry in scan_subdir(dirfd, path, regex)? {
+ let entry = entry?;
+ let file_type = match entry.file_type() {
+ Some(file_type) => file_type,
+ None => bail!("unable to detect file type"),
+ };
+
+ callback(
+ entry.parent_fd(),
+ unsafe { entry.file_name_utf8_unchecked() },
+ file_type,
+ )?;
+ }
+ Ok(())
+}
+
+
+/// Helper trait to provide a combinators for directory entry iterators.
+pub trait FileIterOps<T, E>
+where
+ Self: Sized + Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+ E: Into<Error> + Send + Sync,
+{
+ /// Filter by file type. This is more convenient than using the `filter` method alone as this
+ /// also includes error handling and handling of files without a type (via an error).
+ fn filter_file_type(self, ty: dir::Type) -> FileTypeFilter<Self, T, E> {
+ FileTypeFilter { inner: self, ty }
+ }
+
+ /// Filter by file name. Note that file names which aren't valid utf-8 will be treated as if
+ /// they do not match the pattern.
+ fn filter_file_name_regex(self, regex: &Regex) -> FileNameRegexFilter<Self, T, E> {
+ FileNameRegexFilter { inner: self, regex }
+ }
+}
+
+impl<I, T, E> FileIterOps<T, E> for I
+where
+ I: Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+ E: Into<Error> + Send + Sync,
+{
+}
+
+/// This filters files from its inner iterator by a file type. Files with no type produce an error.
+pub struct FileTypeFilter<I, T, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+ E: Into<Error> + Send + Sync,
+{
+ inner: I,
+ ty: nix::dir::Type,
+}
+
+impl<I, T, E> Iterator for FileTypeFilter<I, T, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+ E: Into<Error> + Send + Sync,
+{
+ type Item = Result<T, Error>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ let item = self.inner.next()?.map_err(|e| e.into());
+ match item {
+ Ok(ref entry) => match entry.borrow().file_type() {
+ Some(ty) => {
+ if ty == self.ty {
+ return Some(item);
+ } else {
+ continue;
+ }
+ }
+ None => return Some(Err(format_err!("unable to detect file type"))),
+ },
+ Err(_) => return Some(item),
+ }
+ }
+ }
+}
+
+/// This filters files by name via a Regex. Files whose file name aren't valid utf-8 are skipped
+/// silently.
+pub struct FileNameRegexFilter<'a, I, T, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+{
+ inner: I,
+ regex: &'a Regex,
+}
+
+impl<I, T, E> Iterator for FileNameRegexFilter<'_, I, T, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+ T: Borrow<dir::Entry>,
+{
+ type Item = Result<T, E>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ let item = self.inner.next()?;
+ match item {
+ Ok(ref entry) => {
+ if let Ok(name) = entry.borrow().file_name().to_str() {
+ if self.regex.is_match(name) {
+ return Some(item);
+ }
+ }
+ // file did not match regex or isn't valid utf-8
+ continue;
+ },
+ Err(_) => return Some(item),
+ }
+ }
+ }
+}
+
+// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long)
+// read Linux file system attributes (see man chattr)
+nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long);
+nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long);
+
+// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
+// read FAT file system attributes
+nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32);
+nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32);
+
+// From /usr/include/linux/fs.h
+// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
+// #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
+nix::ioctl_read!(fs_ioc_fsgetxattr, b'X', 31, FSXAttr);
+nix::ioctl_write_ptr!(fs_ioc_fssetxattr, b'X', 32, FSXAttr);
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct FSXAttr {
+ pub fsx_xflags: u32,
+ pub fsx_extsize: u32,
+ pub fsx_nextents: u32,
+ pub fsx_projid: u32,
+ pub fsx_cowextsize: u32,
+ pub fsx_pad: [u8; 8],
+}
+
+impl Default for FSXAttr {
+ fn default() -> Self {
+ FSXAttr {
+ fsx_xflags: 0u32,
+ fsx_extsize: 0u32,
+ fsx_nextents: 0u32,
+ fsx_projid: 0u32,
+ fsx_cowextsize: 0u32,
+ fsx_pad: [0u8; 8],
+ }
+ }
+}
+
+/// Attempt to acquire a shared flock on the given path, 'what' and
+/// 'would_block_message' are used for error formatting.
+pub fn lock_dir_noblock_shared(
+ path: &std::path::Path,
+ what: &str,
+ would_block_msg: &str,
+) -> Result<DirLockGuard, Error> {
+ do_lock_dir_noblock(path, what, would_block_msg, false)
+}
+
+/// Attempt to acquire an exclusive flock on the given path, 'what' and
+/// 'would_block_message' are used for error formatting.
+pub fn lock_dir_noblock(
+ path: &std::path::Path,
+ what: &str,
+ would_block_msg: &str,
+) -> Result<DirLockGuard, Error> {
+ do_lock_dir_noblock(path, what, would_block_msg, true)
+}
+
+fn do_lock_dir_noblock(
+ path: &std::path::Path,
+ what: &str,
+ would_block_msg: &str,
+ exclusive: bool,
+) -> Result<DirLockGuard, Error> {
+ let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
+ .map_err(|err| {
+ format_err!("unable to open {} directory {:?} for locking - {}", what, path, err)
+ })?;
+
+ // acquire in non-blocking mode, no point in waiting here since other
+ // backups could still take a very long time
+ proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0)))
+ .map_err(|err| {
+ format_err!(
+ "unable to acquire lock on {} directory {:?} - {}", what, path,
+ if err.would_block() {
+ String::from(would_block_msg)
+ } else {
+ err.to_string()
+ }
+ )
+ })?;
+
+ Ok(handle)
+}
--- /dev/null
+pub mod borrow;
+pub mod format;
+pub mod fs;
+pub mod str;
--- /dev/null
+//! String related utilities.
+
+use std::borrow::Borrow;
+
+pub fn join<S: Borrow<str>>(data: &[S], sep: char) -> String {
+ let mut list = String::new();
+
+ for item in data {
+ if !list.is_empty() {
+ list.push(sep);
+ }
+ list.push_str(item.borrow());
+ }
+
+ list
+}
+
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
+use pbs_tools::fs::lock_dir_noblock_shared;
+
use crate::tools;
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;
-use crate::tools::fs::lock_dir_noblock_shared;
mod environment;
use environment::*;
let mut list = Vec::new();
let basedir = "/etc/systemd/system";
- for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
+ for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
let item = item?;
let name = item.file_name().to_string_lossy().to_string();
},
};
+use pbs_tools::fs::lock_dir_noblock_shared;
+
use crate::{
api2::{
helpers,
WorkerTask,
H2Service,
},
- tools::{
- self,
- fs::lock_dir_noblock_shared,
- },
+ tools,
config::{
acl::{
PRIV_DATASTORE_READ,
flat.encryption_key_fingerprint = set
.encryption_key_fingerprint
.as_ref()
- .map(|fp| crate::tools::format::as_fingerprint(fp.bytes()));
+ .map(|fp| pbs_tools::format::as_fingerprint(fp.bytes()));
let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
.map(|fp| (fp, set.uuid.clone()));
-use crate::tools;
-
-use anyhow::{bail, format_err, Error};
use std::os::unix::io::RawFd;
-
use std::path::{Path, PathBuf};
+use anyhow::{bail, format_err, Error};
+
use crate::api2::types::{
BACKUP_ID_REGEX,
BACKUP_TYPE_REGEX,
let mut path = base_path.to_owned();
path.push(self.group_path());
- tools::scandir(
+ pbs_tools::fs::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
let mut path = base_path.to_owned();
path.push(self.group_path());
- tools::scandir(
+ pbs_tools::fs::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
let mut list = Vec::new();
- tools::scandir(
+ pbs_tools::fs::scandir(
libc::AT_FDCWD,
base_path,
&BACKUP_TYPE_REGEX,
if file_type != nix::dir::Type::Directory {
return Ok(());
}
- tools::scandir(
+ pbs_tools::fs::scandir(
l0_fd,
backup_type,
&BACKUP_ID_REGEX,
) -> Result<Vec<String>, Error> {
let mut files = vec![];
- tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
+ pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
if file_type != nix::dir::Type::File {
return Ok(());
}
use std::sync::Arc;
use std::io::Read;
+use pbs_tools::borrow::Tied;
+
use super::CryptConfig;
-use crate::tools::borrow::Tied;
pub struct ChecksumReader<R> {
reader: R,
use anyhow::{Error};
+use pbs_tools::borrow::Tied;
+
use super::CryptConfig;
-use crate::tools::borrow::Tied;
pub struct ChecksumWriter<W> {
writer: W,
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
+ impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
Error
> {
use nix::dir::Dir;
})?;
let mut done = false;
- let mut inner: Option<tools::fs::ReadDir> = None;
+ let mut inner: Option<pbs_tools::fs::ReadDir> = None;
let mut at = 0;
let mut percentage = 0;
Ok(std::iter::from_fn(move || {
let subdir: &str = &format!("{:04x}", at);
percentage = (at * 100) / 0x10000;
at += 1;
- match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
+ match pbs_tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
Ok(dir) => {
inner = Some(dir);
// start reading:
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
use serde::{Deserialize, Serialize};
-use crate::tools::format::{as_fingerprint, bytes_as_fingerprint};
-
use proxmox::api::api;
+use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
+
// openssl::sha::sha256(b"Proxmox Backup Encryption Key Fingerprint")
/// This constant is used to compute fingerprints.
const FINGERPRINT_INPUT: [u8; 32] = [
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};
+use pbs_tools::format::HumanByte;
+use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
+
use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::config::datastore::{self, DataStoreConfig};
use crate::task::TaskState;
use crate::tools;
-use crate::tools::format::HumanByte;
-use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
+ impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)>,
Error
> {
self.chunk_store.get_chunk_iterator()
wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
- for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
+ for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
if let Ok(item) = item {
if let Some(file_type) = item.file_type() {
if file_type != nix::dir::Type::File { continue; }
let full_path = self.group_path(backup_group);
- let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
+ let _guard = pbs_tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", full_path);
fingerprint: key_config
.fingerprint
.as_ref()
- .map(|fp| crate::tools::format::as_fingerprint(fp.bytes())),
+ .map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())),
hint: key_config.hint.clone(),
}
}
use anyhow::{bail, format_err, Error};
+use pbs_tools::fs::lock_dir_noblock_shared;
+
use crate::{
api2::types::*,
backup::{
server::UPID,
task::TaskState,
task_log,
- tools::fs::lock_dir_noblock_shared,
tools::ParallelHandler,
};
}
}
}
-}
\ No newline at end of file
+}
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- Ok(tools::format::render_backup_file_list(&item.files))
+ Ok(pbs_tools::format::render_backup_file_list(&item.files))
};
let options = default_table_format_options()
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
- .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
+ .column(ColumnConfig::new("backup-time").renderer(pbs_tools::format::render_epoch).header("date"))
.column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
;
let mut data = result["data"].take();
let return_type = &api2::node::tasks::API_METHOD_LIST_TASKS.returns;
+ use pbs_tools::format::{render_epoch, render_task_status};
let options = default_table_format_options()
- .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
- .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch))
+ .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch))
.column(ColumnConfig::new("upid"))
- .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
+ .column(ColumnConfig::new("status").renderer(render_task_status));
format_and_print_result_full(&mut data, return_type, &output_format, &options);
},
};
+use pbs_tools::format::{
+ HumanByte,
+ render_epoch,
+ render_bytes_human_readable,
+};
+
use proxmox_backup::{
- tools::format::{
- HumanByte,
- render_epoch,
- render_bytes_human_readable,
- },
client::{
connect_to_localhost,
view_task_result,
let options = proxmox::api::cli::default_table_format_options()
.column(ColumnConfig::new("path"))
.column(ColumnConfig::new("kdf"))
- .column(ColumnConfig::new("created").renderer(tools::format::render_epoch))
- .column(ColumnConfig::new("modified").renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch))
+ .column(ColumnConfig::new("modified").renderer(pbs_tools::format::render_epoch))
.column(ColumnConfig::new("fingerprint"))
.column(ColumnConfig::new("hint"));
for file in &item.files {
filenames.push(file.filename.to_string());
}
- Ok(tools::format::render_backup_file_list(&filenames[..]))
+ Ok(pbs_tools::format::render_backup_file_list(&filenames[..]))
};
let options = default_table_format_options()
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
- .column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
+ .column(ColumnConfig::new("size").renderer(pbs_tools::format::render_bytes_human_readable))
.column(ColumnConfig::new("files").renderer(render_files))
;
let return_type = &proxmox_backup::api2::node::tasks::API_METHOD_LIST_TASKS.returns;
+ use pbs_tools::format::{render_epoch, render_task_status};
let options = default_table_format_options()
- .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
- .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch))
+ .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch))
.column(ColumnConfig::new("upid"))
- .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
+ .column(ColumnConfig::new("status").renderer(render_task_status));
format_and_print_result_full(&mut data, return_type, &output_format, &options);
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
-use proxmox_backup::tools;
use proxmox_backup::api2;
use proxmox_backup::api2::types::{ACL_PATH_SCHEMA, Authid, Userid};
.column(ColumnConfig::new("userid"))
.column(
ColumnConfig::new("enable")
- .renderer(tools::format::render_bool_with_default_true)
+ .renderer(pbs_tools::format::render_bool_with_default_true)
)
.column(
ColumnConfig::new("expire")
.column(ColumnConfig::new("tokenid"))
.column(
ColumnConfig::new("enable")
- .renderer(tools::format::render_bool_with_default_true)
+ .renderer(pbs_tools::format::render_bool_with_default_true)
)
.column(
ColumnConfig::new("expire")
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .map(|v| tools::format::strip_server_file_extension(&v))
+ .map(|v| pbs_tools::format::strip_server_file_extension(&v))
.collect()
}
.iter()
.filter_map(|name| {
if name.ends_with(".pxar.didx") {
- Some(tools::format::strip_server_file_extension(name))
+ Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
.iter()
.filter_map(|name| {
if name.ends_with(".img.fidx") {
- Some(tools::format::strip_server_file_extension(name))
+ Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
///! File-restore API running inside the restore VM
+use std::ffi::OsStr;
+use std::fs;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Path, PathBuf};
+
use anyhow::{bail, Error};
use futures::FutureExt;
use hyper::http::request::Parts;
use serde_json::Value;
use tokio::sync::Semaphore;
-use std::ffi::OsStr;
-use std::fs;
-use std::os::unix::ffi::OsStrExt;
-use std::path::{Path, PathBuf};
-
use proxmox::api::{
api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment,
SubdirMap,
};
use proxmox::{identity, list_subdirs_api_method, sortable};
+use pbs_tools::fs::read_subdir;
+
use proxmox_backup::api2::types::*;
use proxmox_backup::backup::DirEntryAttribute;
use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
-use proxmox_backup::tools::{self, fs::read_subdir, zip::zip_directory};
+use proxmox_backup::tools::{self, zip::zip_directory};
use pxar::encoder::aio::TokioWriter;
// create mapping for virtio drives and .fidx files (via serial description)
// note: disks::DiskManager relies on udev, which we don't have
- for entry in proxmox_backup::tools::fs::scan_subdir(
+ for entry in pbs_tools::fs::scan_subdir(
libc::AT_FDCWD,
"/sys/block",
&BLOCKDEVICE_NAME_REGEX,
}
let mut parts = Vec::new();
- for entry in proxmox_backup::tools::fs::scan_subdir(
+ for entry in pbs_tools::fs::scan_subdir(
libc::AT_FDCWD,
sys_path,
&VIRTIO_PART_REGEX,
use proxmox_backup::{
tools::{
- self,
paperkey::{
PaperkeyFormat,
generate_paper_key,
let options = proxmox::api::cli::default_table_format_options()
.column(ColumnConfig::new("kdf"))
- .column(ColumnConfig::new("created").renderer(tools::format::render_epoch))
- .column(ColumnConfig::new("modified").renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch))
+ .column(ColumnConfig::new("modified").renderer(pbs_tools::format::render_epoch))
.column(ColumnConfig::new("fingerprint"))
.column(ColumnConfig::new("hint"));
use proxmox::tools::digest_to_hex;
+use pbs_tools::format::HumanByte;
+
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use crate::backup::*;
-use crate::tools::format::HumanByte;
use super::{H2Client, HttpClient};
let archive = if self.verbose {
archive_name.to_string()
} else {
- crate::tools::format::strip_server_file_extension(archive_name)
+ pbs_tools::format::strip_server_file_extension(archive_name)
};
if archive_name != CATALOG_NAME {
let speed: HumanByte =
where
F: FnMut(AcmeAccountName) -> ControlFlow<Result<(), Error>>,
{
- match crate::tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) {
+ match pbs_tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) {
Ok(files) => {
for file in files {
let file = file?;
Err(_) => return Vec::new(),
};
- data.keys().map(|fp| crate::tools::format::as_fingerprint(fp.bytes())).collect()
+ data.keys().map(|fp| pbs_tools::format::as_fingerprint(fp.bytes())).collect()
}
use proxmox::tools::fd::RawFdNum;
use proxmox::tools::vec;
+use pbs_tools::fs;
+
use crate::pxar::catalog::BackupCatalogWriter;
use crate::pxar::metadata::errno_is_unsupported;
use crate::pxar::Flags;
use crate::pxar::tools::assert_single_path_component;
-use crate::tools::{acl, fs, xattr, Fd};
+use crate::tools::{acl, xattr, Fd};
/// Pxar options for creating a pxar archive/stream
#[derive(Default, Clone)]
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;
+use pbs_tools::fs;
+
use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
-use crate::tools::{acl, fs, xattr};
+use crate::tools::{acl, xattr};
//
// utility functions
use proxmox::api::schema::parse_property_string;
use proxmox::try_block;
+use pbs_tools::format::HumanByte;
+
use crate::{
config::datastore::DataStoreConfig,
config::verify::VerificationJobConfig,
Notify,
DatastoreNotify,
},
- tools::format::HumanByte,
};
const GC_OK_TEMPLATE: &str = r###"
}
let output = if let Some((fingerprint, uuid)) = key_fingerprint {
- let fingerprint = crate::tools::format::as_fingerprint(fingerprint.bytes());
+ let fingerprint = pbs_tools::format::as_fingerprint(fingerprint.bytes());
run_sg_tape_cmd("encryption", &[
"--fingerprint", &fingerprint,
"--uuid", &uuid.to_string(),
use anyhow::{bail, Error};
use nix::dir::Dir;
+use pbs_tools::fs::lock_dir_noblock_shared;
+
use crate::{
- tools::fs::lock_dir_noblock_shared,
backup::{
DataStore,
BackupDir,
use anyhow::{bail, Error};
+use pbs_tools::fs::scan_subdir;
+
use crate::{
api2::types::{
DeviceKind,
OptionalDeviceIdentification,
TapeDeviceInfo,
},
- tools::fs::scan_subdir,
};
lazy_static::lazy_static!{
use anyhow::{bail, format_err, Error};
use endian_trait::Endian;
+use pbs_tools::fs::read_subdir;
+
use proxmox::tools::{
Uuid,
fs::{
};
use crate::{
- tools::fs::read_subdir,
backup::BackupDir,
tape::{
MediaId,
+++ /dev/null
-/// This ties two values T and U together, such that T does not move and cannot be used as long as
-/// there's an U. This essentially replaces the borrow checker's job for dependent values which
-/// need to be stored together in a struct {}, and is similar to what the 'rental' crate produces.
-pub struct Tied<T, U: ?Sized>(Option<Box<T>>, Option<Box<U>>);
-
-impl<T, U: ?Sized> Drop for Tied<T, U> {
- fn drop(&mut self) {
- // let's be explicit about order here!
- std::mem::drop(self.1.take());
- }
-}
-
-impl<T, U: ?Sized> Tied<T, U> {
- /// Takes an owner and a function producing the depending value. The owner will be inaccessible
- /// until the tied value is resolved. The dependent value is only accessible by reference.
- pub fn new<F>(owner: T, producer: F) -> Self
- where
- F: FnOnce(*mut T) -> Box<U>,
- {
- let mut owner = Box::new(owner);
- let dep = producer(&mut *owner);
- Tied(Some(owner), Some(dep))
- }
-
- pub fn into_boxed_inner(mut self) -> Box<T> {
- self.1 = None;
- self.0.take().unwrap()
- }
-
- pub fn into_inner(self) -> T {
- *self.into_boxed_inner()
- }
-}
-
-impl<T, U: ?Sized> AsRef<U> for Tied<T, U> {
- fn as_ref(&self) -> &U {
- self.1.as_ref().unwrap()
- }
-}
-
-impl<T, U: ?Sized> AsMut<U> for Tied<T, U> {
- fn as_mut(&mut self) -> &mut U {
- self.1.as_mut().unwrap()
- }
-}
-
-impl<T, U: ?Sized> std::ops::Deref for Tied<T, U> {
- type Target = U;
-
- fn deref(&self) -> &U {
- self.as_ref()
- }
-}
-
-impl<T, U: ?Sized> std::ops::DerefMut for Tied<T, U> {
- fn deref_mut(&mut self) -> &mut U {
- self.as_mut()
- }
-}
let mut map = HashMap::new();
- for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
+ for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
let item = item?;
let name = match item.file_name().to_str() {
Ok(name) => name,
let mut found_dm = false;
let mut found_partitions = false;
- for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
+ for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
let item = item?;
let name = match item.file_name().to_str() {
Ok(name) => name,
let mut result = HashMap::new();
- for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
+ for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
let item = item?;
let name = item.file_name().to_str().unwrap().to_string();
pub fn complete_disk_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut list = Vec::new();
- let dir = match crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
+ let dir = match pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
Ok(dir) => dir,
Err(_) => return list,
};
+++ /dev/null
-use anyhow::{Error};
-use serde_json::Value;
-
-pub fn strip_server_file_extension(name: &str) -> String {
-
- if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
- name[..name.len()-5].to_owned()
- } else {
- name.to_owned() // should not happen
- }
-}
-
-pub fn render_backup_file_list(files: &[String]) -> String {
- let mut files: Vec<String> = files.iter()
- .map(|v| strip_server_file_extension(&v))
- .collect();
-
- files.sort();
-
- super::join(&files, ' ')
-}
-
-pub fn render_epoch(value: &Value, _record: &Value) -> Result<String, Error> {
- if value.is_null() { return Ok(String::new()); }
- let text = match value.as_i64() {
- Some(epoch) => {
- if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) {
- epoch_string
- } else {
- epoch.to_string()
- }
- },
- None => {
- value.to_string()
- }
- };
- Ok(text)
-}
-
-pub fn render_task_status(value: &Value, record: &Value) -> Result<String, Error> {
- if record["endtime"].is_null() {
- Ok(value.as_str().unwrap_or("running").to_string())
- } else {
- Ok(value.as_str().unwrap_or("unknown").to_string())
- }
-}
-
-pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result<String, Error> {
- let value = value.as_bool().unwrap_or(true);
- Ok((if value { "1" } else { "0" }).to_string())
-}
-
-pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result<String, Error> {
- if value.is_null() { return Ok(String::new()); }
- let text = match value.as_u64() {
- Some(bytes) => {
- HumanByte::from(bytes).to_string()
- }
- None => {
- value.to_string()
- }
- };
- Ok(text)
-}
-
-pub struct HumanByte {
- b: usize,
-}
-impl std::fmt::Display for HumanByte {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- if self.b < 1024 {
- return write!(f, "{} B", self.b);
- }
- let kb: f64 = self.b as f64 / 1024.0;
- if kb < 1024.0 {
- return write!(f, "{:.2} KiB", kb);
- }
- let mb: f64 = kb / 1024.0;
- if mb < 1024.0 {
- return write!(f, "{:.2} MiB", mb);
- }
- let gb: f64 = mb / 1024.0;
- if gb < 1024.0 {
- return write!(f, "{:.2} GiB", gb);
- }
- let tb: f64 = gb / 1024.0;
- if tb < 1024.0 {
- return write!(f, "{:.2} TiB", tb);
- }
- let pb: f64 = tb / 1024.0;
- return write!(f, "{:.2} PiB", pb);
- }
-}
-impl From<usize> for HumanByte {
- fn from(v: usize) -> Self {
- HumanByte { b: v }
- }
-}
-impl From<u64> for HumanByte {
- fn from(v: u64) -> Self {
- HumanByte { b: v as usize }
- }
-}
-
-pub fn as_fingerprint(bytes: &[u8]) -> String {
- proxmox::tools::digest_to_hex(bytes)
- .as_bytes()
- .chunks(2)
- .map(|v| std::str::from_utf8(v).unwrap())
- .collect::<Vec<&str>>().join(":")
-}
-
-pub mod bytes_as_fingerprint {
- use serde::{Deserialize, Serializer, Deserializer};
-
- pub fn serialize<S>(
- bytes: &[u8; 32],
- serializer: S,
- ) -> Result<S::Ok, S::Error>
- where
- S: Serializer,
- {
- let s = crate::tools::format::as_fingerprint(bytes);
- serializer.serialize_str(&s)
- }
-
- pub fn deserialize<'de, D>(
- deserializer: D,
- ) -> Result<[u8; 32], D::Error>
- where
- D: Deserializer<'de>,
- {
- let mut s = String::deserialize(deserializer)?;
- s.retain(|c| c != ':');
- proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
- }
-}
-
-#[test]
-fn correct_byte_convert() {
- fn convert(b: usize) -> String {
- HumanByte::from(b).to_string()
- }
- assert_eq!(convert(1023), "1023 B");
- assert_eq!(convert(1<<10), "1.00 KiB");
- assert_eq!(convert(1<<20), "1.00 MiB");
- assert_eq!(convert((1<<30) + 103 * (1<<20)), "1.10 GiB");
- assert_eq!(convert((2<<50) + 500 * (1<<40)), "2.49 PiB");
-}
+++ /dev/null
-//! File system helper utilities.
-
-use std::borrow::{Borrow, BorrowMut};
-use std::ops::{Deref, DerefMut};
-use std::os::unix::io::{AsRawFd, RawFd};
-
-use anyhow::{format_err, Error};
-use nix::dir;
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use regex::Regex;
-
-use proxmox::sys::error::SysError;
-
-
-use crate::tools::borrow::Tied;
-
-pub type DirLockGuard = Dir;
-
-/// This wraps nix::dir::Entry with the parent directory's file descriptor.
-pub struct ReadDirEntry {
- entry: dir::Entry,
- parent_fd: RawFd,
-}
-
-impl Into<dir::Entry> for ReadDirEntry {
- fn into(self) -> dir::Entry {
- self.entry
- }
-}
-
-impl Deref for ReadDirEntry {
- type Target = dir::Entry;
-
- fn deref(&self) -> &Self::Target {
- &self.entry
- }
-}
-
-impl DerefMut for ReadDirEntry {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.entry
- }
-}
-
-impl AsRef<dir::Entry> for ReadDirEntry {
- fn as_ref(&self) -> &dir::Entry {
- &self.entry
- }
-}
-
-impl AsMut<dir::Entry> for ReadDirEntry {
- fn as_mut(&mut self) -> &mut dir::Entry {
- &mut self.entry
- }
-}
-
-impl Borrow<dir::Entry> for ReadDirEntry {
- fn borrow(&self) -> &dir::Entry {
- &self.entry
- }
-}
-
-impl BorrowMut<dir::Entry> for ReadDirEntry {
- fn borrow_mut(&mut self) -> &mut dir::Entry {
- &mut self.entry
- }
-}
-
-impl ReadDirEntry {
- #[inline]
- pub fn parent_fd(&self) -> RawFd {
- self.parent_fd
- }
-
- pub unsafe fn file_name_utf8_unchecked(&self) -> &str {
- std::str::from_utf8_unchecked(self.file_name().to_bytes())
- }
-}
-
-// Since Tied<T, U> implements Deref to U, a Tied<Dir, Iterator> already implements Iterator.
-// This is simply a wrapper with a shorter type name mapping nix::Error to anyhow::Error.
-/// Wrapper over a pair of `nix::dir::Dir` and `nix::dir::Iter`, returned by `read_subdir()`.
-pub struct ReadDir {
- iter: Tied<Dir, dyn Iterator<Item = nix::Result<dir::Entry>> + Send>,
- dir_fd: RawFd,
-}
-
-impl Iterator for ReadDir {
- type Item = Result<ReadDirEntry, Error>;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.iter.next().map(|res| {
- res.map(|entry| ReadDirEntry { entry, parent_fd: self.dir_fd })
- .map_err(Error::from)
- })
- }
-}
-
-/// Create an iterator over sub directory entries.
-/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path.
-pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Result<ReadDir> {
- let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?;
- let fd = dir.as_raw_fd();
- let iter = Tied::new(dir, |dir| {
- Box::new(unsafe { (*dir).iter() })
- as Box<dyn Iterator<Item = nix::Result<dir::Entry>> + Send>
- });
- Ok(ReadDir { iter, dir_fd: fd })
-}
-
-/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
-/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
-pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
- dirfd: RawFd,
- path: &P,
- regex: &'a regex::Regex,
-) -> Result<impl Iterator<Item = Result<ReadDirEntry, Error>> + 'a, nix::Error> {
- Ok(read_subdir(dirfd, path)?.filter_file_name_regex(regex))
-}
-
-/// Helper trait to provide a combinators for directory entry iterators.
-pub trait FileIterOps<T, E>
-where
- Self: Sized + Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- /// Filter by file type. This is more convenient than using the `filter` method alone as this
- /// also includes error handling and handling of files without a type (via an error).
- fn filter_file_type(self, ty: dir::Type) -> FileTypeFilter<Self, T, E> {
- FileTypeFilter { inner: self, ty }
- }
-
- /// Filter by file name. Note that file names which aren't valid utf-8 will be treated as if
- /// they do not match the pattern.
- fn filter_file_name_regex(self, regex: &Regex) -> FileNameRegexFilter<Self, T, E> {
- FileNameRegexFilter { inner: self, regex }
- }
-}
-
-impl<I, T, E> FileIterOps<T, E> for I
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
-}
-
-/// This filters files from its inner iterator by a file type. Files with no type produce an error.
-pub struct FileTypeFilter<I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- inner: I,
- ty: nix::dir::Type,
-}
-
-impl<I, T, E> Iterator for FileTypeFilter<I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- type Item = Result<T, Error>;
-
- fn next(&mut self) -> Option<Self::Item> {
- loop {
- let item = self.inner.next()?.map_err(|e| e.into());
- match item {
- Ok(ref entry) => match entry.borrow().file_type() {
- Some(ty) => {
- if ty == self.ty {
- return Some(item);
- } else {
- continue;
- }
- }
- None => return Some(Err(format_err!("unable to detect file type"))),
- },
- Err(_) => return Some(item),
- }
- }
- }
-}
-
-/// This filters files by name via a Regex. Files whose file name aren't valid utf-8 are skipped
-/// silently.
-pub struct FileNameRegexFilter<'a, I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
-{
- inner: I,
- regex: &'a Regex,
-}
-
-impl<I, T, E> Iterator for FileNameRegexFilter<'_, I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
-{
- type Item = Result<T, E>;
-
- fn next(&mut self) -> Option<Self::Item> {
- loop {
- let item = self.inner.next()?;
- match item {
- Ok(ref entry) => {
- if let Ok(name) = entry.borrow().file_name().to_str() {
- if self.regex.is_match(name) {
- return Some(item);
- }
- }
- // file did not match regex or isn't valid utf-8
- continue;
- },
- Err(_) => return Some(item),
- }
- }
- }
-}
-
-// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long)
-// read Linux file system attributes (see man chattr)
-nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long);
-nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long);
-
-// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
-// read FAT file system attributes
-nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32);
-nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32);
-
-// From /usr/include/linux/fs.h
-// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
-// #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
-nix::ioctl_read!(fs_ioc_fsgetxattr, b'X', 31, FSXAttr);
-nix::ioctl_write_ptr!(fs_ioc_fssetxattr, b'X', 32, FSXAttr);
-
-#[repr(C)]
-#[derive(Debug)]
-pub struct FSXAttr {
- pub fsx_xflags: u32,
- pub fsx_extsize: u32,
- pub fsx_nextents: u32,
- pub fsx_projid: u32,
- pub fsx_cowextsize: u32,
- pub fsx_pad: [u8; 8],
-}
-
-impl Default for FSXAttr {
- fn default() -> Self {
- FSXAttr {
- fsx_xflags: 0u32,
- fsx_extsize: 0u32,
- fsx_nextents: 0u32,
- fsx_projid: 0u32,
- fsx_cowextsize: 0u32,
- fsx_pad: [0u8; 8],
- }
- }
-}
-
-/// Attempt to acquire a shared flock on the given path, 'what' and
-/// 'would_block_message' are used for error formatting.
-pub fn lock_dir_noblock_shared(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
-) -> Result<DirLockGuard, Error> {
- do_lock_dir_noblock(path, what, would_block_msg, false)
-}
-
-/// Attempt to acquire an exclusive flock on the given path, 'what' and
-/// 'would_block_message' are used for error formatting.
-pub fn lock_dir_noblock(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
-) -> Result<DirLockGuard, Error> {
- do_lock_dir_noblock(path, what, would_block_msg, true)
-}
-
-fn do_lock_dir_noblock(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
- exclusive: bool,
-) -> Result<DirLockGuard, Error> {
- let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
- .map_err(|err| {
- format_err!("unable to open {} directory {:?} for locking - {}", what, path, err)
- })?;
-
- // acquire in non-blocking mode, no point in waiting here since other
- // backups could still take a very long time
- proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0)))
- .map_err(|err| {
- format_err!(
- "unable to acquire lock on {} directory {:?} - {}", what, path,
- if err.would_block() {
- String::from(would_block_msg)
- } else {
- err.to_string()
- }
- )
- })?;
-
- Ok(handle)
-}
use proxmox::tools::time;
use proxmox_fuse::{*, requests::FuseRequest};
use super::loopdev;
-use super::fs;
const RUN_DIR: &str = "/run/pbs-loopdev";
pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String>)>, Error> {
// get map of all /dev/loop mappings belonging to us
let mut loopmap = HashMap::new();
- for ent in fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
+ for ent in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
if let Ok(ent) = ent {
let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
if let Ok(file) = get_backing_file(&loopdev) {
}
}
- Ok(fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?
+ Ok(pbs_tools::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?
.filter_map(move |ent| {
match ent {
Ok(ent) => {
//!
//! This is a collection of small and useful tools.
use std::any::Any;
-use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::fs::File;
pub mod acl;
pub mod apt;
pub mod async_io;
-pub mod borrow;
pub mod cert;
pub mod compression;
pub mod config;
pub mod cpio;
pub mod daemon;
pub mod disks;
-pub mod format;
-pub mod fs;
pub mod fuse_loop;
mod memcom;
result
}
-/// Scan directory for matching file names.
-///
-/// Scan through all directory entries and call `callback()` function
-/// if the entry name matches the regular expression. This function
-/// used unix `openat()`, so you can pass absolute or relative file
-/// names. This function simply skips non-UTF8 encoded names.
-pub fn scandir<P, F>(
- dirfd: RawFd,
- path: &P,
- regex: ®ex::Regex,
- mut callback: F,
-) -> Result<(), Error>
-where
- F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
- P: ?Sized + nix::NixPath,
-{
- for entry in self::fs::scan_subdir(dirfd, path, regex)? {
- let entry = entry?;
- let file_type = match entry.file_type() {
- Some(file_type) => file_type,
- None => bail!("unable to detect file type"),
- };
-
- callback(
- entry.parent_fd(),
- unsafe { entry.file_name_utf8_unchecked() },
- file_type,
- )?;
- }
- Ok(())
-}
-
/// Shortcut for md5 sums.
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
hash(MessageDigest::md5(), data).map_err(Error::from)
utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
-pub fn join<S: Borrow<str>>(data: &[S], sep: char) -> String {
- let mut list = String::new();
-
- for item in data {
- if !list.is_empty() {
- list.push(sep);
- }
- list.push_str(item.borrow());
- }
-
- list
-}
-
/// Detect modified configuration files
///
/// This function fails with a reasonable error message if checksums do not match.