//!
//! This is a collection of small and useful tools.
use std::any::Any;
+use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::BuildHasher;
-use std::fs::{File, OpenOptions};
-use std::io::ErrorKind;
-use std::io::Read;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::fs::File;
+use std::io::{self, BufRead, Read, Seek, SeekFrom};
+use std::os::unix::io::RawFd;
use std::path::Path;
-use std::time::Duration;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use openssl::hash::{hash, DigestBytes, MessageDigest};
-use percent_encoding::AsciiSet;
-
-use proxmox::tools::vec;
+use percent_encoding::{utf8_percent_encode, AsciiSet};
pub use proxmox::tools::fd::Fd;
pub mod acl;
+pub mod apt;
pub mod async_io;
pub mod borrow;
+pub mod cert;
pub mod daemon;
pub mod disks;
-pub mod fs;
pub mod format;
+pub mod fs;
+pub mod fuse_loop;
+pub mod http;
+pub mod json;
+pub mod logrotate;
+pub mod loopdev;
pub mod lru_cache;
+pub mod nom;
pub mod runtime;
-pub mod ticket;
-pub mod timer;
+pub mod serde_filter;
+pub mod socket;
+pub mod statistics;
+pub mod subscription;
pub mod systemd;
+pub mod ticket;
+pub mod xattr;
+pub mod zip;
+pub mod sgutils2;
+pub mod paperkey;
+
+pub mod parallel_handler;
+pub use parallel_handler::ParallelHandler;
mod wrapped_reader_stream;
-pub use wrapped_reader_stream::*;
+pub use wrapped_reader_stream::{AsyncReaderStream, StdChannelStream, WrappedReaderStream};
+
+mod async_channel_writer;
+pub use async_channel_writer::AsyncChannelWriter;
mod std_channel_writer;
-pub use std_channel_writer::*;
+pub use std_channel_writer::StdChannelWriter;
-pub mod xattr;
+mod tokio_writer_adapter;
+pub use tokio_writer_adapter::TokioWriterAdapter;
mod process_locker;
-pub use process_locker::*;
+pub use process_locker::{ProcessLocker, ProcessLockExclusiveGuard, ProcessLockSharedGuard};
mod file_logger;
-pub use file_logger::*;
+pub use file_logger::{FileLogger, FileLogOptions};
mod broadcast_future;
-pub use broadcast_future::*;
+pub use broadcast_future::{BroadcastData, BroadcastFuture};
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}
-/// Directly map a type into a binary buffer. This is mostly useful
-/// for reading structured data from a byte stream (file). You need to
-/// make sure that the buffer location does not change, so please
-/// avoid vec resize while you use such map.
-///
-/// This function panics if the buffer is not large enough.
-pub fn map_struct<T>(buffer: &[u8]) -> Result<&T, Error> {
- if buffer.len() < ::std::mem::size_of::<T>() {
- bail!("unable to map struct - buffer too small");
- }
- Ok(unsafe { &*(buffer.as_ptr() as *const T) })
-}
-
-/// Directly map a type into a mutable binary buffer. This is mostly
-/// useful for writing structured data into a byte stream (file). You
-/// need to make sure that the buffer location does not change, so
-/// please avoid vec resize while you use such map.
-///
-/// This function panics if the buffer is not large enough.
-pub fn map_struct_mut<T>(buffer: &mut [u8]) -> Result<&mut T, Error> {
- if buffer.len() < ::std::mem::size_of::<T>() {
- bail!("unable to map struct - buffer too small");
- }
- Ok(unsafe { &mut *(buffer.as_ptr() as *mut T) })
-}
-
-/// Create a file lock using fntl. This function allows you to specify
-/// a timeout if you want to avoid infinite blocking.
-pub fn lock_file<F: AsRawFd>(
- file: &mut F,
- exclusive: bool,
- timeout: Option<Duration>,
-) -> Result<(), Error> {
- let lockarg = if exclusive {
- nix::fcntl::FlockArg::LockExclusive
- } else {
- nix::fcntl::FlockArg::LockShared
- };
-
- let timeout = match timeout {
- None => {
- nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
- return Ok(());
- }
- Some(t) => t,
- };
-
- // unblock the timeout signal temporarily
- let _sigblock_guard = timer::unblock_timeout_signal();
-
- // setup a timeout timer
- let mut timer = timer::Timer::create(
- timer::Clock::Realtime,
- timer::TimerEvent::ThisThreadSignal(timer::SIGTIMEOUT),
- )?;
-
- timer.arm(
- timer::TimerSpec::new()
- .value(Some(timeout))
- .interval(Some(Duration::from_millis(10))),
- )?;
-
- nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
- Ok(())
-}
-
-/// Open or create a lock file (append mode). Then try to
-/// aquire a lock using `lock_file()`.
-pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
- let path = path.as_ref();
- let mut file = match OpenOptions::new().create(true).append(true).open(path) {
- Ok(file) => file,
- Err(err) => bail!("Unable to open lock {:?} - {}", path, err),
- };
- match lock_file(&mut file, true, Some(timeout)) {
- Ok(_) => Ok(file),
- Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err),
- }
-}
-
-/// Split a file into equal sized chunks. The last chunk may be
-/// smaller. Note: We cannot implement an `Iterator`, because iterators
-/// cannot return a borrowed buffer ref (we want zero-copy)
-pub fn file_chunker<C, R>(mut file: R, chunk_size: usize, mut chunk_cb: C) -> Result<(), Error>
-where
- C: FnMut(usize, &[u8]) -> Result<bool, Error>,
- R: Read,
-{
- const READ_BUFFER_SIZE: usize = 4 * 1024 * 1024; // 4M
-
- if chunk_size > READ_BUFFER_SIZE {
- bail!("chunk size too large!");
- }
-
- let mut buf = vec::undefined(READ_BUFFER_SIZE);
-
- let mut pos = 0;
- let mut file_pos = 0;
- loop {
- let mut eof = false;
- let mut tmp = &mut buf[..];
- // try to read large portions, at least chunk_size
- while pos < chunk_size {
- match file.read(tmp) {
- Ok(0) => {
- eof = true;
- break;
- }
- Ok(n) => {
- pos += n;
- if pos > chunk_size {
- break;
- }
- tmp = &mut tmp[n..];
- }
- Err(ref e) if e.kind() == ErrorKind::Interrupted => { /* try again */ }
- Err(e) => bail!("read chunk failed - {}", e.to_string()),
- }
- }
- let mut start = 0;
- while start + chunk_size <= pos {
- if !(chunk_cb)(file_pos, &buf[start..start + chunk_size])? {
- break;
- }
- file_pos += chunk_size;
- start += chunk_size;
- }
- if eof {
- if start < pos {
- (chunk_cb)(file_pos, &buf[start..pos])?;
- //file_pos += pos - start;
- }
- break;
- } else {
- let rest = pos - start;
- if rest > 0 {
- let ptr = buf.as_mut_ptr();
- unsafe {
- std::ptr::copy_nonoverlapping(ptr.add(start), ptr, rest);
- }
- pos = rest;
- } else {
- pos = 0;
- }
- }
- }
-
- Ok(())
-}
-
pub fn json_object_to_query(data: Value) -> Result<String, Error> {
let mut query = url::form_urlencoded::Serializer::new(String::new());
}
}
-pub fn required_integer_param<'a>(param: &'a Value, name: &str) -> Result<i64, Error> {
+pub fn required_integer_param(param: &Value, name: &str) -> Result<i64, Error> {
match param[name].as_i64() {
Some(s) => Ok(s),
None => bail!("missing parameter '{}'", name),
}
}
-pub fn required_integer_property<'a>(param: &'a Value, name: &str) -> Result<i64, Error> {
+pub fn required_integer_property(param: &Value, name: &str) -> Result<i64, Error> {
match param[name].as_i64() {
Some(s) => Ok(s),
None => bail!("missing property '{}'", name),
}
}
-pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<Vec<Value>, Error> {
+pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
match param[name].as_array() {
- Some(s) => Ok(s.to_vec()),
+ Some(s) => Ok(&s),
None => bail!("missing parameter '{}'", name),
}
}
-pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<Vec<Value>, Error> {
+pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
match param[name].as_array() {
- Some(s) => Ok(s.to_vec()),
+ Some(s) => Ok(&s),
None => bail!("missing property '{}'", name),
}
}
-pub fn complete_file_name<S: BuildHasher>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String> {
+pub fn complete_file_name<S>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String>
+where
+ S: BuildHasher,
+{
let mut result = vec![];
use nix::fcntl::AtFlags;
pub fn get_hardware_address() -> Result<String, Error> {
static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";
- let contents = proxmox::tools::fs::file_get_contents(FILENAME)?;
- let digest = md5sum(&contents)?;
+ let contents = proxmox::tools::fs::file_get_contents(FILENAME)
+ .map_err(|e| format_err!("Error getting host key - {}", e))?;
+ let digest = md5sum(&contents)
+ .map_err(|e| format_err!("Error digesting host key - {}", e))?;
- Ok(proxmox::tools::bin_to_hex(&digest))
+ Ok(proxmox::tools::bin_to_hex(&digest).to_uppercase())
}
pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
Ok(())
}
-/// Extract authentication cookie from cookie header.
+/// Extract a specific cookie from cookie header.
/// We assume cookie_name is already url encoded.
-pub fn extract_auth_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
+pub fn extract_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
for pair in cookie.split(';') {
let (name, value) = match pair.find('=') {
Some(i) => (pair[..i].trim(), pair[(i + 1)..].trim()),
None
}
-pub fn join(data: &Vec<String>, sep: char) -> String {
+/// percent encode a url component
+pub fn percent_encode_component(comp: &str) -> String {
+ utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
+}
+
+pub fn join<S: Borrow<str>>(data: &[S], sep: char) -> String {
let mut list = String::new();
for item in data {
if !list.is_empty() {
list.push(sep);
}
- list.push_str(item);
+ list.push_str(item.borrow());
}
list
/// Detect modified configuration files
///
-/// This function fails with a resonable error message if checksums do not match.
+/// This function fails with a reasonable error message if checksums do not match.
pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
if digest1 != digest2 {
- bail!("detected modified configuration - file changed by other user? Try again.");
+ bail!("detected modified configuration - file changed by other user? Try again.");
}
Ok(())
}
Ok((path, components))
}
+/// Helper to check result from std::process::Command output
+///
+/// The exit_code_check() function should return true if the exit code
+/// is considered successful.
+pub fn command_output(
+ output: std::process::Output,
+ exit_code_check: Option<fn(i32) -> bool>,
+) -> Result<Vec<u8>, Error> {
+
+ if !output.status.success() {
+ match output.status.code() {
+ Some(code) => {
+ let is_ok = match exit_code_check {
+ Some(check_fn) => check_fn(code),
+ None => code == 0,
+ };
+ if !is_ok {
+ let msg = String::from_utf8(output.stderr)
+ .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+ .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
+
+ bail!("status code: {} - {}", code, msg);
+ }
+ }
+ None => bail!("terminated by signal"),
+ }
+ }
+
+ Ok(output.stdout)
+}
+
+/// Helper to check result from std::process::Command output, returns String.
+///
+/// The exit_code_check() function should return true if the exit code
+/// is considered successful.
+pub fn command_output_as_string(
+ output: std::process::Output,
+ exit_code_check: Option<fn(i32) -> bool>,
+) -> Result<String, Error> {
+ let output = command_output(output, exit_code_check)?;
+ let output = String::from_utf8(output)?;
+ Ok(output)
+}
+
+pub fn run_command(
+ mut command: std::process::Command,
+ exit_code_check: Option<fn(i32) -> bool>,
+) -> Result<String, Error> {
+
+ let output = command.output()
+ .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
+
+ let output = command_output_as_string(output, exit_code_check)
+ .map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
+
+ Ok(output)
+}
+
pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
let mut flags = FdFlag::from_bits(fcntl(fd, F_GETFD)?)
Ok(())
}
-// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
+/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
+/// descriptors.
pub fn pipe() -> Result<(Fd, Fd), Error> {
let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
Ok((Fd(pin), Fd(pout)))
}
+/// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
+/// descriptors.
+pub fn socketpair() -> Result<(Fd, Fd), Error> {
+ use nix::sys::socket;
+ let (pa, pb) = socket::socketpair(
+ socket::AddressFamily::Unix,
+ socket::SockType::Stream,
+ None,
+ socket::SockFlag::SOCK_CLOEXEC,
+ )?;
+ Ok((Fd(pa), Fd(pb)))
+}
+
+
/// An easy way to convert types to Any
///
/// Mostly useful to downcast trait objects (see RpcEnvironment).
.add(b'?')
.add(b'{')
.add(b'}');
+
+/// Get an iterator over lines of a file, skipping empty lines and comments (lines starting with a
+/// `#`).
+pub fn file_get_non_comment_lines<P: AsRef<Path>>(
+ path: P,
+) -> Result<impl Iterator<Item = io::Result<String>>, Error> {
+ let path = path.as_ref();
+
+ Ok(io::BufReader::new(
+ File::open(path).map_err(|err| format_err!("error opening {:?}: {}", path, err))?,
+ )
+ .lines()
+ .filter_map(|line| match line {
+ Ok(line) => {
+ let line = line.trim();
+ if line.is_empty() || line.starts_with('#') {
+ None
+ } else {
+ Some(Ok(line.to_string()))
+ }
+ }
+ Err(err) => Some(Err(err)),
+ }))
+}
+
+pub fn setup_safe_path_env() {
+ std::env::set_var("PATH", "/sbin:/bin:/usr/sbin:/usr/bin");
+ // Make %ENV safer - as suggested by https://perldoc.perl.org/perlsec.html
+ for name in &["IFS", "CDPATH", "ENV", "BASH_ENV"] {
+ std::env::remove_var(name);
+ }
+}
+
+pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
+ let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
+ Some(n) => &line[n..],
+ None => return &[],
+ };
+ match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
+ Some(n) => &line[..(line.len() - n)],
+ None => &[],
+ }
+}
+
+/// Seeks to start of file and computes the SHA256 hash
+pub fn compute_file_csum(file: &mut File) -> Result<([u8; 32], u64), Error> {
+
+ file.seek(SeekFrom::Start(0))?;
+
+ let mut hasher = openssl::sha::Sha256::new();
+ let mut buffer = proxmox::tools::vec::undefined(256*1024);
+ let mut size: u64 = 0;
+
+ loop {
+ let count = match file.read(&mut buffer) {
+ Ok(0) => break,
+ Ok(count) => count,
+ Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => {
+ continue;
+ }
+ Err(err) => return Err(err.into()),
+ };
+ size += count as u64;
+ hasher.update(&buffer[..count]);
+ }
+
+ let csum = hasher.finish();
+
+ Ok((csum, size))
+}
+
+/// Create the base run-directory.
+///
+/// This exists to fixate the permissions for the run *base* directory while allowing intermediate
+/// directories after it to have different permissions.
+pub fn create_run_dir() -> Result<(), Error> {
+ let _: bool = proxmox::tools::fs::create_path(PROXMOX_BACKUP_RUN_DIR_M!(), None, None)?;
+ Ok(())
+}