pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
-proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
+proxmox-http = { version = "0.6", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = "1"
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
proxmox-time = "1"
proxmox-uuid = "1"
-proxmox-shared-memory = "0.1.1"
-proxmox-sys = "0.1.2"
+proxmox-serde = "0.1"
+proxmox-shared-memory = "0.2"
+proxmox-sys = { version = "0.2", features = [ "sortable-macro" ] }
+
proxmox-acme-rs = "0.3"
proxmox-apt = "0.8.0"
-proxmox-async = "0.2"
+proxmox-async = "0.3"
proxmox-openid = "0.9.0"
pbs-api-types = { path = "pbs-api-types" }
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
- librust-proxmox-0.15+default-dev (>= 0.15.3-~~),
- librust-proxmox-0.15+sortable-macro-dev (>= 0.15.3-~~),
- librust-proxmox-0.15+tokio-dev (>= 0.15.3-~~),
librust-proxmox-acme-rs-0.3+default-dev,
librust-proxmox-apt-0.8+default-dev,
- librust-proxmox-async-0.2+default-dev,
+ librust-proxmox-async-0.3+default-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
- librust-proxmox-http-0.5+client-dev (>= 0.5.4-~~),
- librust-proxmox-http-0.5+default-dev (>= 0.5.4-~~),
- librust-proxmox-http-0.5+http-helpers-dev (>= 0.5.4-~~),
- librust-proxmox-http-0.5+websocket-dev (>= 0.5.4-~~),
+ librust-proxmox-http-0.6+client-dev,
+ librust-proxmox-http-0.6+default-dev,
+ librust-proxmox-http-0.6+http-helpers-dev,
+ librust-proxmox-http-0.6+websocket-dev,
librust-proxmox-io-1+default-dev,
librust-proxmox-io-1+tokio-dev,
librust-proxmox-lang-1+default-dev,
librust-proxmox-schema-1+default-dev (>= 1.0.1-~~),
librust-proxmox-schema-1+upid-api-impl-dev (>= 1.0.1-~~),
librust-proxmox-section-config-1+default-dev,
- librust-proxmox-shared-memory-0.1+default-dev (>= 0.1.1-~~),
- librust-proxmox-sys-0.1+default-dev (>= 0.1.2-~~),
+ librust-proxmox-shared-memory-0.2+default-dev,
+ librust-proxmox-sys-0.2+default-dev,
+ librust-proxmox-sys-0.2+sortable-macro-dev,
librust-proxmox-tfa-1+api-dev (>= 1.3-~~),
librust-proxmox-tfa-1+api-types-dev (>= 1.3-~~),
librust-proxmox-tfa-1+default-dev (>= 1.3-~~),
fn main() -> Result<(), Error> {
- let input = proxmox::sys::linux::random_data(1024*1024)?;
+ let input = proxmox_sys::linux::random_data(1024*1024)?;
rate_test("crc32", &|| {
let mut crchasher = crc32fast::Hasher::new();
input.len()
});
- let key = proxmox::sys::linux::random_data(32)?;
+ let key = proxmox_sys::linux::random_data(32)?;
- let iv = proxmox::sys::linux::random_data(16)?;
+ let iv = proxmox_sys::linux::random_data(16)?;
let cipher = openssl::symm::Cipher::aes_256_gcm();
regex = "1.2"
serde = { version = "1.0", features = ["derive"] }
-proxmox = "0.15.3"
proxmox-lang = "1.0.0"
proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] }
+proxmox-serde = "0.1"
proxmox-time = "1.1"
proxmox-uuid = { version = "1.0.0", features = [ "serde" ] }
+
+proxmox-sys = "0.2" # only needed for nodename() — TODO: confirm whether this dependency can be dropped
\ No newline at end of file
--- /dev/null
+//! Predefined Regular Expressions
+//!
+//! This is a collection of useful regular expressions
+
+use lazy_static::lazy_static;
+use regex::Regex;
+
+/// Returns the regular expression string to match a single IPv4 octet (0-255)
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
+/// Returns the regular expression string for one 16-bit group ("h16", 1-4 hex digits) of an IPv6 address
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
+/// Returns the regular expression string for the least-significant 32 bits of an IPv6 address:
+/// either an embedded IPv4 address or two h16 groups separated by a colon
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
+
+/// Returns the regular expression string to match IPv4 addresses
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
+
+/// Returns the regular expression string to match IPv6 addresses
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPV6RE { () => (concat!(r"(?:",
+    r"(?:(?:",                                               r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
+    r"(?:(?:",                                             r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
+    r"(?:(?:(?:",                            IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::",                     ")", IPV6LS32!(), r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::",                     ")", IPV6H16!(),  r")|",
+    r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::",                     ")))"))
+}
+
+/// Returns the regular expression string to match IP addresses (v4 or v6)
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
+
+/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
+/// them, while for IPv4 they are forbidden.
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! IPRE_BRACKET { () => (
+    concat!(r"(?:",
+        IPV4RE!(),
+        r"|\[(?:",
+            IPV6RE!(),
+        r")\]",
+    r")"))
+}
+
+lazy_static! {
+    /// Anchored regex matching exactly one IP address (v4 or v6, no brackets)
+    pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
+    /// Anchored regex matching one IP address; IPv6 addresses must be enclosed in brackets
+    pub static ref IP_BRACKET_REGEX: Regex =
+        Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
+    /// Anchored regex matching a lowercase hex SHA-256 digest (exactly 64 hex characters)
+    pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
+    /// Anchored regex matching a date or date-time of the form "YYYY-MM-DD[ hh:mm[:ss]]"
+    pub static ref SYSTEMD_DATETIME_REGEX: Regex =
+        Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
+}
+
+#[test]
+fn test_regexes() {
+    assert!(IP_REGEX.is_match("127.0.0.1"));
+    assert!(IP_REGEX.is_match("::1"));
+    assert!(IP_REGEX.is_match("2014:b3a::27"));
+    assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
+    assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
+
+    assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
+    assert!(IP_BRACKET_REGEX.is_match("[::1]"));
+    assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
+    assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
+    assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
+}
fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string();
tmp.retain(|c| c != ':');
- let bytes = proxmox::tools::hex_to_digest(&tmp)?;
+ let mut bytes = [0u8; 32];
+ hex::decode_to_slice(&tmp, &mut bytes)?;
Ok(Fingerprint::new(bytes))
}
}
}
}
-proxmox::forward_deserialize_to_from_str!(HumanByte);
-proxmox::forward_serialize_to_display!(HumanByte);
+proxmox_serde::forward_deserialize_to_from_str!(HumanByte);
+proxmox_serde::forward_serialize_to_display!(HumanByte);
#[test]
fn test_human_byte_parser() -> Result<(), Error> {
}
}
-proxmox::forward_deserialize_to_from_str!(GroupFilter);
-proxmox::forward_serialize_to_display!(GroupFilter);
+proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
+proxmox_serde::forward_serialize_to_display!(GroupFilter);
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
use serde::{Deserialize, Serialize};
use anyhow::bail;
+pub mod common_regex;
+
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType,
};
-use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_time::parse_daily_duration;
#[rustfmt::skip]
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
.format(&ApiStringFormat::VerifyFn(|node| {
- if node == "localhost" || node == proxmox::tools::nodename() {
+ if node == "localhost" || node == proxmox_sys::nodename() {
Ok(())
} else {
bail!("no such node '{}'", node);
pub name: String,
// Note: The stored password is base64 encoded
#[serde(skip_serializing_if="String::is_empty")]
- #[serde(with = "proxmox::tools::serde::string_as_base64")]
+ #[serde(with = "proxmox_serde::string_as_base64")]
pub password: String,
#[serde(flatten)]
pub config: RemoteConfig,
Vault(String),
}
-proxmox::forward_deserialize_to_from_str!(MediaLocation);
-proxmox::forward_serialize_to_display!(MediaLocation);
+proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
+proxmox_serde::forward_serialize_to_display!(MediaLocation);
impl proxmox_schema::ApiType for MediaLocation {
const API_SCHEMA: Schema = StringSchema::new(
assert_eq!(auth_id.to_string(), "test@pam!bar".to_string());
}
-proxmox::forward_deserialize_to_from_str!(Userid);
-proxmox::forward_serialize_to_display!(Userid);
+proxmox_serde::forward_deserialize_to_from_str!(Userid);
+proxmox_serde::forward_serialize_to_display!(Userid);
-proxmox::forward_deserialize_to_from_str!(Authid);
-proxmox::forward_serialize_to_display!(Authid);
+proxmox_serde::forward_deserialize_to_from_str!(Authid);
+proxmox_serde::forward_serialize_to_display!(Authid);
bitflags = "1.2.1"
bytes = "1.0"
futures = "0.3"
+hex = "0.4.3"
h2 = { version = "0.3", features = [ "stream" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
xdg = "2.2"
pathpatterns = "0.1.2"
-proxmox = "0.15.3"
-proxmox-async = "0.2"
+
+proxmox-async = "0.3"
proxmox-fuse = "0.1.1"
-proxmox-http = { version = "0.5.4", features = [ "client", "http-helpers", "websocket" ] }
+proxmox-http = { version = "0.6", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = "1"
proxmox-time = "1"
+proxmox-sys = "0.2"
+
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-api-types = { path = "../pbs-api-types" }
use futures::future::AbortHandle;
use serde_json::{json, Value};
-use proxmox::tools::digest_to_hex;
-
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::sha::sha256;
use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, BackupManifest};
output: W,
) -> Result<(), Error> {
let path = "chunk";
- let param = json!({ "digest": digest_to_hex(digest) });
+ let param = json!({ "digest": hex::encode(digest) });
self.h2.download(path, Some(param), output).await
}
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
-use proxmox::tools::digest_to_hex;
-
use pbs_api_types::HumanByte;
use pbs_tools::crypt_config::CryptConfig;
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
"wid": wid ,
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
- "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
+ "csum": hex::encode(&upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(BackupStats {
let mut digest_list = vec![];
let mut offset_list = vec![];
for (offset, digest) in chunk_list {
- digest_list.push(digest_to_hex(&digest));
+ digest_list.push(hex::encode(&digest));
offset_list.push(offset);
}
if verbose { println!("append chunks list len ({})", digest_list.len()); }
if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
let offset = chunk_info.offset;
let digest = chunk_info.digest;
- let digest_str = digest_to_hex(&digest);
+ let digest_str = hex::encode(&digest);
/* too verbose, needs finer verbosity setting granularity
if verbose {
use nix::sys::stat::Mode;
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
use proxmox_schema::api;
use pxar::{EntryKind, Metadata};
use percent_encoding::percent_encode;
use xdg::BaseDirectories;
-use proxmox::{
- sys::linux::tty,
- tools::fs::{file_get_json, replace_file, CreateOptions},
-};
+use proxmox_sys::linux::tty;
+use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
use proxmox_router::HttpError;
use proxmox_http::client::{HttpsConnector, RateLimiter};
Ok(fp) => fp,
Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
};
- let fp_string = proxmox::tools::digest_to_hex(&fp);
+ let fp_string = hex::encode(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");
use pxar::Metadata;
use pxar::encoder::{SeqWrite, LinkOffset};
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::RawFdNum;
-use proxmox::tools::fd::Fd;
+use proxmox_sys::error::SysError;
+use proxmox_sys::fd::RawFdNum;
+use proxmox_sys::fd::Fd;
+use proxmox_sys::fs::{self, acl, xattr};
use proxmox_io::vec;
use proxmox_lang::c_str;
use pbs_datastore::catalog::BackupCatalogWriter;
-use pbs_tools::{acl, fs, xattr};
use pbs_tools::str::strip_ascii_whitespace;
use crate::pxar::metadata::errno_is_unsupported;
#[rustfmt::skip]
pub fn is_virtual_file_system(magic: i64) -> bool {
- use proxmox::sys::linux::magic::*;
+ use proxmox_sys::linux::magic::*;
matches!(magic, BINFMTFS_MAGIC |
CGROUP2_SUPER_MAGIC |
}
fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
- use proxmox::sys::linux::magic::*;
+ use proxmox_sys::linux::magic::*;
if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
return Ok(());
return Ok(());
}
- use proxmox::sys::linux::magic::*;
+ use proxmox_sys::linux::magic::*;
match magic {
EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
use nix::fcntl::OFlag;
use nix::sys::stat::{mkdirat, Mode};
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::BorrowedFd;
+use proxmox_sys::error::SysError;
+use proxmox_sys::fd::BorrowedFd;
use pxar::Metadata;
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
use pxar::format::Device;
use pxar::{Entry, EntryKind, Metadata};
-use proxmox::c_result;
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::c_result;
+use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_io::{sparse_copy, sparse_copy_async};
use proxmox_async::zip::{ZipEncoder, ZipEntry};
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
pub fn from_magic(magic: i64) -> Flags {
- use proxmox::sys::linux::magic::*;
+ use proxmox_sys::linux::magic::*;
match magic {
MSDOS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
use proxmox_fuse::requests::{self, FuseRequest};
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
-
-use pbs_tools::xattr;
+use proxmox_sys::fs::xattr;
/// We mark inodes for regular files this way so we know how to access them.
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
use pxar::Metadata;
-use proxmox::c_result;
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::RawFdNum;
-
-use pbs_tools::{acl, fs, xattr};
+use proxmox_sys::c_result;
+use proxmox_sys::error::SysError;
+use proxmox_sys::fd::RawFdNum;
+use proxmox_sys::fs::{self, acl, xattr};
use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
path_info: &Path,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
- let fd = proxmox::tools::fd::Fd::openat(
+ let fd = proxmox_sys::fd::Fd::openat(
&unsafe { RawFdNum::from_raw_fd(parent) },
file_name,
OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::sys::linux::tty;
-use proxmox::tools::fs::file_get_contents;
+use proxmox_sys::linux::tty;
+use proxmox_sys::fs::file_get_contents;
use proxmox_schema::*;
use pbs_api_types::CryptMode;
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
use serde_json::json;
- use proxmox::tools::fs::{replace_file, CreateOptions};
+ use proxmox_sys::fs::{replace_file, CreateOptions};
let some_key = vec![1;1];
let default_key = vec![2;1];
use proxmox_schema::*;
use proxmox_router::cli::{complete_file_name, shellword_split};
-use proxmox::tools::fs::file_get_json;
+use proxmox_sys::fs::file_get_json;
use pbs_api_types::{BACKUP_REPO_URL, Authid, RateLimitConfig, UserWithTokens};
use pbs_datastore::BackupDir;
let args = shellword_split(command)?;
let mut command = Command::new(&args[0]);
command.args(&args[1..]);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
return Ok(Some(firstline(output)));
}
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
-proxmox = "0.15.3"
proxmox-lang = "1"
proxmox-router = { version = "1.1", default-features = false }
proxmox-schema = "1"
proxmox-section-config = "1"
proxmox-time = "1"
-proxmox-shared-memory = "0.1.1"
-proxmox-sys = "0.1.2"
+proxmox-serde = "0.1"
+proxmox-shared-memory = "0.2"
+proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
use once_cell::sync::OnceCell;
use nix::sys::stat::Mode;
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
// openssl::sha::sha256(b"Proxmox Backup ConfigVersionCache v1.0")[0..8];
pub const PROXMOX_BACKUP_CONFIG_VERSION_CACHE_MAGIC_1_0: [u8; 8] = [25, 198, 168, 230, 154, 132, 143, 131];
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
use anyhow::{bail, format_err, Context, Error};
use serde::{Deserialize, Serialize};
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_lang::try_block;
use pbs_api_types::{Kdf, KeyInfo, Fingerprint};
n: u64,
r: u64,
p: u64,
- #[serde(with = "proxmox::tools::serde::bytes_as_base64")]
+ #[serde(with = "proxmox_serde::bytes_as_base64")]
salt: Vec<u8>,
},
PBKDF2 {
iter: usize,
- #[serde(with = "proxmox::tools::serde::bytes_as_base64")]
+ #[serde(with = "proxmox_serde::bytes_as_base64")]
salt: Vec<u8>,
},
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct KeyConfig {
pub kdf: Option<KeyDerivationConfig>,
- #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
+ #[serde(with = "proxmox_serde::epoch_as_rfc3339")]
pub created: i64,
- #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
+ #[serde(with = "proxmox_serde::epoch_as_rfc3339")]
pub modified: i64,
- #[serde(with = "proxmox::tools::serde::bytes_as_base64")]
+ #[serde(with = "proxmox_serde::bytes_as_base64")]
pub data: Vec<u8>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
/// Creates a new key using random data, protected by passphrase.
pub fn new(passphrase: &[u8], kdf: Kdf) -> Result<([u8;32], Self), Error> {
let mut key = [0u8; 32];
- proxmox::sys::linux::fill_with_random_data(&mut key)?;
+ proxmox_sys::linux::fill_with_random_data(&mut key)?;
let key_config = Self::with_key(&key, passphrase, kdf)?;
Ok((key, key_config))
}
bail!("got strange key length ({} != 32)", raw_key.len())
}
- let salt = proxmox::sys::linux::random_data(32)?;
+ let salt = proxmox_sys::linux::random_data(32)?;
let kdf = match kdf {
Kdf::Scrypt => KeyDerivationConfig::Scrypt {
let cipher = openssl::symm::Cipher::aes_256_gcm();
- let iv = proxmox::sys::linux::random_data(16)?;
+ let iv = proxmox_sys::linux::random_data(16)?;
let mut tag = [0u8; 16];
let encrypted_key = openssl::symm::encrypt_aead(
exclusive: bool,
) -> Result<BackupLockGuard, Error> {
let user = backup_user()?;
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.perm(nix::sys::stat::Mode::from_bits_truncate(0o660))
.owner(user.uid)
.group(user.gid);
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
- let file = proxmox::tools::fs::open_file_locked(&path, timeout, exclusive, options)?;
+ let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;
Ok(BackupLockGuard(Some(file)))
}
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r)= backup
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(backup_user.gid);
- proxmox::tools::fs::replace_file(path, data, options, true)?;
+ proxmox_sys::fs::replace_file(path, data, options, true)?;
Ok(())
}
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
// set the correct owner/group/permissions while saving file
// owner(rw) = root, group(r)= root
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
- proxmox::tools::fs::replace_file(path, data, options, true)?;
+ proxmox_sys::fs::replace_file(path, data, options, true)?;
Ok(())
}
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
use nix::sys::socket::{socket, AddressFamily, SockType, SockFlag};
use regex::Regex;
-use proxmox::*; // for IP macros
-use proxmox::tools::fd::Fd;
+use pbs_api_types::*; // for IP macros
+
+use proxmox_sys::fd::Fd;
pub static IPV4_REVERSE_MASK: &[&str] = &[
"0.0.0.0",
.output()
.map_err(|err| format_err!("failed to execute diff - {}", err))?;
- let diff = pbs_tools::command_output_as_string(output, Some(|c| c == 0 || c == 1))
+ let diff = proxmox_sys::command::command_output_as_string(output, Some(|c| c == 0 || c == 1))
.map_err(|err| format_err!("diff failed: {}", err))?;
Ok(diff)
.output()
.map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;
- pbs_tools::command_output(output, None)
+ proxmox_sys::command::command_output(output, None)
.map_err(|err| format_err!("ifreload failed: {}", err))?;
use lazy_static::lazy_static;
use regex::Regex;
-use proxmox::tools::{fs::replace_file, fs::CreateOptions};
+use proxmox_sys::{fs::replace_file, fs::CreateOptions};
mod helper;
pub use helper::*;
pub fn config() -> Result<(NetworkConfig, [u8;32]), Error> {
- let content = match proxmox::tools::fs::file_get_optional_contents(NETWORK_INTERFACES_NEW_FILENAME)? {
+ let content = match proxmox_sys::fs::file_get_optional_contents(NETWORK_INTERFACES_NEW_FILENAME)? {
Some(content) => content,
None => {
- let content = proxmox::tools::fs::file_get_optional_contents(NETWORK_INTERFACES_FILENAME)?;
+ let content = proxmox_sys::fs::file_get_optional_contents(NETWORK_INTERFACES_FILENAME)?;
content.unwrap_or_default()
}
};
self.eat(Token::Gateway)?;
let gateway = self.next_text()?;
- if proxmox::tools::common_regex::IP_REGEX.is_match(&gateway) {
+ if pbs_api_types::common_regex::IP_REGEX.is_match(&gateway) {
if gateway.contains(':') {
set_gateway_v6(interface, gateway)?;
} else {
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
-use proxmox::tools::fs::file_read_optional_string;
+use proxmox_sys::fs::file_read_optional_string;
use pbs_api_types::Fingerprint;
use crate::key_config::KeyConfig;
mod hex_key {
use serde::{self, Deserialize, Serializer, Deserializer};
-
+ use hex::FromHex;
+
pub fn serialize<S>(
csum: &[u8; 32],
serializer: S,
where
S: Serializer,
{
- let s = proxmox::tools::digest_to_hex(csum);
+ let s = hex::encode(csum);
serializer.serialize_str(&s)
}
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
- proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+ <[u8; 32]>::from_hex(&s).map_err(serde::de::Error::custom)
}
}
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
use serde::{Serialize, Deserialize};
use serde_json::{from_value, Value};
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use pbs_api_types::Authid;
//use crate::auth;
}
fn read_file() -> Result<HashMap<Authid, String>, Error> {
- let json = proxmox::tools::fs::file_get_json(CONF_FILE, Some(Value::Null))?;
+ let json = proxmox_sys::fs::file_get_json(CONF_FILE, Some(Value::Null))?;
if json == Value::Null {
Ok(HashMap::new())
.group(backup_user.gid);
let json = serde_json::to_vec(&data)?;
- proxmox::tools::fs::replace_file(CONF_FILE, &json, options, true)
+ proxmox_sys::fs::replace_file(CONF_FILE, &json, options, true)
}
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(TRAFFIC_CONTROL_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(TRAFFIC_CONTROL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(USER_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(USER_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(VERIFICATION_CFG_FILENAME)?;
+ let content = proxmox_sys::fs::file_read_optional_string(VERIFICATION_CFG_FILENAME)?;
let content = content.unwrap_or_else(String::new);
let digest = openssl::sha::sha256(content.as_bytes());
crc32fast = "1"
endian_trait = { version = "0.6", features = [ "arrays" ] }
futures = "0.3"
+hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
log = "0.4"
pathpatterns = "0.1.2"
pxar = "0.10.1"
-proxmox = "0.15.3"
proxmox-borrow = "1"
proxmox-io = "1"
proxmox-lang = "1"
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-uuid = "1"
-proxmox-sys = "0.1.2"
+proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }
let mut path = base_path.to_owned();
path.push(self.group_path());
- pbs_tools::fs::scandir(
+ proxmox_sys::fs::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
let mut path = base_path.to_owned();
path.push(self.group_path());
- pbs_tools::fs::scandir(
+ proxmox_sys::fs::scandir(
libc::AT_FDCWD,
&path,
&BACKUP_DATE_REGEX,
pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
let mut list = Vec::new();
- pbs_tools::fs::scandir(
+ proxmox_sys::fs::scandir(
libc::AT_FDCWD,
base_path,
&BACKUP_TYPE_REGEX,
if file_type != nix::dir::Type::Directory {
return Ok(());
}
- pbs_tools::fs::scandir(
+ proxmox_sys::fs::scandir(
l0_fd,
backup_type,
&BACKUP_ID_REGEX,
) -> Result<Vec<String>, Error> {
let mut files = vec![];
- pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
+ proxmox_sys::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
if file_type != nix::dir::Type::File {
return Ok(());
}
use futures::ready;
use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};
-use proxmox::io_format_err;
-use proxmox::sys::error::io_err_other;
+use proxmox_sys::io_format_err;
+use proxmox_sys::error::io_err_other;
use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
use anyhow::{bail, format_err, Error};
-use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
+use proxmox_sys::fs::{CreateOptions, create_path, create_dir};
use proxmox_sys::process_locker::{ProcessLocker, ProcessLockSharedGuard, ProcessLockExclusiveGuard};
-use proxmox_sys::worker_task_context::WorkerTaskContext;
+use proxmox_sys::WorkerTaskContext;
use proxmox_sys::task_log;
use pbs_api_types::GarbageCollectionStatus;
// create lock file with correct owner/group
let lockfile_path = Self::lockfile_path(&base);
- proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
+ proxmox_sys::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
// create 64*1024 subdirs
let mut last_percentage = 0;
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
+ impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
Error
> {
use nix::dir::Dir;
})?;
let mut done = false;
- let mut inner: Option<pbs_tools::fs::ReadDir> = None;
+ let mut inner: Option<proxmox_sys::fs::ReadDir> = None;
let mut at = 0;
let mut percentage = 0;
Ok(std::iter::from_fn(move || {
let subdir: &str = &format!("{:04x}", at);
percentage = (at * 100) / 0x10000;
at += 1;
- match pbs_tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
+ match proxmox_sys::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
Ok(dir) => {
inner = Some(dir);
// start reading:
digest: &[u8; 32],
) -> Result<(bool, u64), Error> {
- //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));
+ //println!("DIGEST {}", hex::encode(digest));
let (chunk_path, digest_str) = self.chunk_path(digest);
let mut chunk_path = self.chunk_dir.clone();
let prefix = digest_to_prefix(digest);
chunk_path.push(&prefix);
- let digest_str = proxmox::tools::digest_to_hex(digest);
+ let digest_str = hex::encode(digest);
chunk_path.push(&digest_str);
(chunk_path, digest_str)
}
pub fn new(writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
let mut iv = [0u8; 16];
- proxmox::sys::linux::fill_with_random_data(&mut iv)?;
+ proxmox_sys::linux::fill_with_random_data(&mut iv)?;
let block_size = config.cipher().block_size();
let crypter = config.data_crypter(&iv, openssl::symm::Mode::Encrypt)?;
) -> Result<([u8;16], [u8;16]), Error> {
let mut iv = [0u8; 16];
- proxmox::sys::linux::fill_with_random_data(&mut iv)?;
+ proxmox_sys::linux::fill_with_random_data(&mut iv)?;
let mut tag = [0u8; 16];
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
-use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};
+use proxmox_sys::fs::{replace_file, file_read_optional_string, CreateOptions};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
-use proxmox_sys::worker_task_context::WorkerTaskContext;
+use proxmox_sys::WorkerTaskContext;
use proxmox_sys::{task_log, task_warn};
+use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus, HumanByte};
-use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::DataBlob;
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)>,
+ impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
Error
> {
self.chunk_store.get_chunk_iterator()
map_err(|err| {
format_err!(
"fast_index_verification error, stat_chunk {} failed - {}",
- proxmox::tools::digest_to_hex(&info.digest),
+ hex::encode(&info.digest),
err,
)
})?;
wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
- for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
+ for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
if let Ok(item) = item {
if let Some(file_type) = item.file_type() {
if file_type != nix::dir::Type::File { continue; }
let full_path = self.group_path(backup_group);
- let _guard = pbs_tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
+ let _guard = proxmox_sys::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", full_path);
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
- let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
+ let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
Ok(owner.trim_end().parse()?) // remove trailing newline
}
task_warn!(
worker,
"warning: unable to access non-existent chunk {}, required by {:?}",
- proxmox::tools::digest_to_hex(digest),
+ hex::encode(digest),
file_name,
);
use anyhow::{bail, format_err, Error};
-use proxmox::tools::mmap::Mmap;
+use proxmox_sys::mmap::Mmap;
use proxmox_io::ReadExt;
use proxmox_uuid::Uuid;
use proxmox_sys::process_locker::ProcessLockSharedGuard;
chunk_size,
(compressed_size * 100) / (chunk_size as u64),
is_duplicate,
- proxmox::tools::digest_to_hex(&digest)
+ hex::encode(&digest)
);
self.index.add_chunk(self.chunk_offset as u64, &digest)?;
self.chunk_buffer.truncate(0);
chunk_len,
(compressed_size * 100) / (chunk_len as u64),
is_duplicate,
- proxmox::tools::digest_to_hex(digest)
+ hex::encode(digest)
);
if is_duplicate {
mod hex_csum {
use serde::{self, Deserialize, Serializer, Deserializer};
+ use hex::FromHex;
pub fn serialize<S>(
csum: &[u8; 32],
where
S: Serializer,
{
- let s = proxmox::tools::digest_to_hex(csum);
+ let s = hex::encode(csum);
serializer.serialize_str(&s)
}
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
- proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+ <[u8; 32]>::from_hex(&s).map_err(serde::de::Error::custom)
}
}
if let Some(crypt_config) = crypt_config {
let sig = self.signature(crypt_config)?;
- manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
+ manifest["signature"] = hex::encode(&sig).into();
let fingerprint = &Fingerprint::new(crypt_config.fingerprint());
manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
}
if let Some(ref crypt_config) = crypt_config {
if let Some(signature) = signature {
- let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
+ let expected_signature = hex::encode(&Self::json_signature(&json, crypt_config)?);
let fingerprint = &json["unprotected"]["key-fingerprint"];
if fingerprint != &Value::Null {
assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
let manifest: BackupManifest = serde_json::from_value(manifest)?;
- let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
+ let expected_signature = hex::encode(&manifest.signature(&crypt_config)?);
assert_eq!(signature, expected_signature);
.wait_with_output()
.map_err(|_| format_err!("Failed to read stdout"))?;
- let output = pbs_tools::command_output(output, None)?;
+ let output = proxmox_sys::command::command_output(output, None)?;
Ok(output)
}
use anyhow::{bail, Error};
use nix::dir::Dir;
+use proxmox_sys::fs::lock_dir_noblock_shared;
+
use crate::backup_info::BackupDir;
use crate::index::IndexFile;
use crate::fixed_index::FixedIndexReader;
use crate::dynamic_index::DynamicIndexReader;
use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use crate::DataStore;
-use pbs_tools::fs::lock_dir_noblock_shared;
/// Helper to access the contents of a datastore backup snapshot
///
proxmox-time = "1"
proxmox-fuse = "0.1.1"
-
-pbs-tools = { path = "../pbs-tools" }
+proxmox-sys = "0.2"
\ No newline at end of file
let mut command = std::process::Command::new("fusermount");
command.arg("-u");
command.arg(&backing_file);
- let _ = pbs_tools::run_command(command, None);
+ let _ = proxmox_sys::command::run_command(command, None);
let _ = remove_file(&backing_file);
backing_file.set_extension("pid");
pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String>)>, Error> {
// get map of all /dev/loop mappings belonging to us
let mut loopmap = HashMap::new();
- for ent in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
+ for ent in proxmox_sys::fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
if let Ok(ent) = ent {
let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
if let Ok(file) = get_backing_file(&loopdev) {
}
Ok(
- pbs_tools::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?.filter_map(move |ent| {
+ proxmox_sys::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?.filter_map(move |ent| {
match ent {
Ok(ent) => {
let file = ent.file_name().to_string_lossy();
anyhow = "1.0"
thiserror = "1.0"
endian_trait = { version = "0.6", features = ["arrays"] }
+hex = "0.4.3"
nix = "0.19.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
regex = "1.2"
udev = "0.4"
-proxmox = "0.15.3"
proxmox-io = "1"
proxmox-lang = "1"
# api-macro is only used by the binaries, so maybe we should split them out
# router::cli is only used by binaries, so maybe we should split them out
proxmox-router = "1.1"
+proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }
fn check_buffer(buffer: &BlockHeader, seq_nr: u32) -> Result<(usize, bool), std::io::Error> {
if buffer.magic != PROXMOX_TAPE_BLOCK_HEADER_MAGIC_1_0 {
- proxmox::io_bail!("detected tape block with wrong magic number - not written by proxmox tape");
+ proxmox_sys::io_bail!("detected tape block with wrong magic number - not written by proxmox tape");
}
if seq_nr != buffer.seq_nr() {
- proxmox::io_bail!(
+ proxmox_sys::io_bail!(
"detected tape block with wrong sequence number ({} != {})",
seq_nr, buffer.seq_nr())
}
let found_end_marker = buffer.flags.contains(BlockHeaderFlags::END_OF_STREAM);
if size > buffer.payload.len() {
- proxmox::io_bail!("detected tape block with wrong payload size ({} > {}", size, buffer.payload.len());
+ proxmox_sys::io_bail!("detected tape block with wrong payload size ({} > {}", size, buffer.payload.len());
} else if size == 0 && !found_end_marker {
- proxmox::io_bail!("detected tape block with zero payload size");
+ proxmox_sys::io_bail!("detected tape block with zero payload size");
}
let bytes = reader.read_block(data)?;
if bytes != BlockHeader::SIZE {
- return Err(proxmox::io_format_err!("got wrong block size").into());
+ return Err(proxmox_sys::io_format_err!("got wrong block size").into());
}
Ok(())
let mut tmp_buf = [0u8; 512]; // use a small buffer for testing EOF
match reader.read_block(&mut tmp_buf) {
Ok(_) => {
- proxmox::io_bail!("detected tape block after block-stream end marker");
+ proxmox_sys::io_bail!("detected tape block after block-stream end marker");
}
Err(BlockReadError::EndOfFile) => {
return Ok(());
}
Err(BlockReadError::EndOfStream) => {
- proxmox::io_bail!("got unexpected end of tape");
+ proxmox_sys::io_bail!("got unexpected end of tape");
}
Err(BlockReadError::Error(err)) => {
return Err(err);
self.got_eod = true;
self.read_pos = self.buffer.payload.len();
if !self.found_end_marker && check_end_marker {
- proxmox::io_bail!("detected tape stream without end marker");
+ proxmox_sys::io_bail!("detected tape stream without end marker");
}
return Ok(0); // EOD
}
Err(BlockReadError::EndOfStream) => {
- proxmox::io_bail!("got unexpected end of tape");
+ proxmox_sys::io_bail!("got unexpected end of tape");
}
Err(BlockReadError::Error(err)) => {
return Err(err);
fn is_incomplete(&self) -> Result<bool, std::io::Error> {
if !self.got_eod {
- proxmox::io_bail!("is_incomplete failed: EOD not reached");
+ proxmox_sys::io_bail!("is_incomplete failed: EOD not reached");
}
if !self.found_end_marker {
- proxmox::io_bail!("is_incomplete failed: no end marker found");
+ proxmox_sys::io_bail!("is_incomplete failed: no end marker found");
}
Ok(self.incomplete)
fn has_end_marker(&self) -> Result<bool, std::io::Error> {
if !self.got_eod {
- proxmox::io_bail!("has_end_marker failed: EOD not reached");
+ proxmox_sys::io_bail!("has_end_marker failed: EOD not reached");
}
Ok(self.found_end_marker)
fn read(&mut self, buffer: &mut [u8]) -> Result<usize, std::io::Error> {
if self.read_error {
- proxmox::io_bail!("detected read after error - internal error");
+ proxmox_sys::io_bail!("detected read after error - internal error");
}
let mut buffer_size = self.buffer.size();
#[test]
fn large_data() -> Result<(), Error> {
- let data = proxmox::sys::linux::random_data(1024*1024*5)?;
+ let data = proxmox_sys::linux::random_data(1024*1024*5)?;
write_and_verify(&data)
}
let writer = EmulateTapeWriter::new(&mut tape_data, 1024*1024);
let mut writer = BlockedWriter::new(writer);
// write at least one block
- let data = proxmox::sys::linux::random_data(PROXMOX_TAPE_BLOCK_SIZE)?;
+ let data = proxmox_sys::linux::random_data(PROXMOX_TAPE_BLOCK_SIZE)?;
writer.write_all(&data)?;
// but do not call finish here
}
fn write_eof(&mut self) -> Result<(), std::io::Error> {
if self.wrote_eof {
- proxmox::io_bail!("BlockedWriter: detected multiple EOF writes");
+ proxmox_sys::io_bail!("BlockedWriter: detected multiple EOF writes");
}
self.wrote_eof = true;
impl <R: Read> BlockRead for EmulateTapeReader<R> {
fn read_block(&mut self, buffer: &mut [u8]) -> Result<usize, BlockReadError> {
if self.got_eof {
- return Err(BlockReadError::Error(proxmox::io_format_err!("detected read after EOF!")));
+ return Err(BlockReadError::Error(proxmox_sys::io_format_err!("detected read after EOF!")));
}
match self.reader.read_exact_or_eof(buffer)? {
false => {
// test buffer len after EOF test (to allow EOF test with small buffers in BufferedReader)
if buffer.len() != PROXMOX_TAPE_BLOCK_SIZE {
return Err(BlockReadError::Error(
- proxmox::io_format_err!(
+ proxmox_sys::io_format_err!(
"EmulateTapeReader: read_block with wrong block size ({} != {})",
buffer.len(),
PROXMOX_TAPE_BLOCK_SIZE,
fn write_block(&mut self, buffer: &[u8]) -> Result<bool, io::Error> {
if buffer.len() != PROXMOX_TAPE_BLOCK_SIZE {
- proxmox::io_bail!("EmulateTapeWriter: got write with wrong block size ({} != {}",
+ proxmox_sys::io_bail!("EmulateTapeWriter: got write with wrong block size ({} != {}",
buffer.len(), PROXMOX_TAPE_BLOCK_SIZE);
}
fn write_filemark(&mut self) -> Result<(), std::io::Error> {
if self.wrote_eof {
- proxmox::io_bail!("EmulateTapeWriter: detected multiple EOF writes");
+ proxmox_sys::io_bail!("EmulateTapeWriter: detected multiple EOF writes");
}
// do nothing, just record the call
self.wrote_eof = true;
use anyhow::{bail, format_err, Error};
use nix::fcntl::{fcntl, FcntlArg, OFlag};
-use proxmox::sys::error::SysResult;
+use proxmox_sys::error::SysResult;
+use proxmox_sys::fs::scan_subdir;
-use pbs_tools::fs::scan_subdir;
use pbs_api_types::{DeviceKind, OptionalDeviceIdentification, TapeDeviceInfo};
lazy_static::lazy_static!{
mod report_density;
pub use report_density::*;
-use proxmox::sys::error::SysResult;
+use proxmox_sys::error::SysResult;
use proxmox_io::{ReadExt, WriteExt};
use pbs_api_types::{MamAttribute, Lp17VolumeStatistics, LtoDriveAndMediaStatus};
) -> Result<(), std::io::Error> {
if count > 255 {
- proxmox::io_bail!("write_filemarks failed: got strange count '{}'", count);
+ proxmox_sys::io_bail!("write_filemarks failed: got strange count '{}'", count);
}
let mut sg_raw = SgRaw::new(&mut self.file, 16)
- .map_err(|err| proxmox::io_format_err!("write_filemarks failed (alloc) - {}", err))?;
+ .map_err(|err| proxmox_sys::io_format_err!("write_filemarks failed (alloc) - {}", err))?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
/* LEOM - ignore */
}
Err(err) => {
- proxmox::io_bail!("write filemark failed - {}", err);
+ proxmox_sys::io_bail!("write filemark failed - {}", err);
}
}
let transfer_len = data.len();
if transfer_len > 0x800000 {
- proxmox::io_bail!("write failed - data too large");
+ proxmox_sys::io_bail!("write failed - data too large");
}
let mut sg_raw = SgRaw::new(&mut self.file, 0)
return Ok(true); // LEOM
}
Err(err) => {
- proxmox::io_bail!("write failed - {}", err);
+ proxmox_sys::io_bail!("write failed - {}", err);
}
}
}
if transfer_len > 0xFFFFFF {
return Err(BlockReadError::Error(
- proxmox::io_format_err!("read failed - buffer too large")
+ proxmox_sys::io_format_err!("read failed - buffer too large")
));
}
}
Err(err) => {
return Err(BlockReadError::Error(
- proxmox::io_format_err!("read failed - {}", err)
+ proxmox_sys::io_format_err!("read failed - {}", err)
));
}
};
if data.len() != transfer_len {
return Err(BlockReadError::Error(
- proxmox::io_format_err!("read failed - unexpected block len ({} != {})", data.len(), buffer.len())
+ proxmox_sys::io_format_err!("read failed - unexpected block len ({} != {})", data.len(), buffer.len())
));
}
fn read_block(&mut self, buffer: &mut [u8]) -> Result<usize, BlockReadError> {
if self.end_of_file {
- return Err(BlockReadError::Error(proxmox::io_format_err!("detected read after EOF!")));
+ return Err(BlockReadError::Error(proxmox_sys::io_format_err!("detected read after EOF!")));
}
match self.sg_tape.read_block(buffer) {
Ok(usize) => Ok(usize),
unreachable!();
}
},
- MamFormat::BINARY => proxmox::tools::digest_to_hex(&data),
+ MamFormat::BINARY => hex::encode(&data),
};
list.push(MamAttribute {
id: head_id,
data: &[u8],
) -> Result<bool, std::io::Error> {
if header.size as usize != data.len() {
- proxmox::io_bail!("write_header with wrong size - internal error");
+ proxmox_sys::io_bail!("write_header with wrong size - internal error");
}
let header = header.to_le();
walkdir = "2"
zstd = { version = "0.6", features = [ "bindgen" ] }
-proxmox = { version = "0.15.3", default-features = false, features = [ "tokio" ] }
-proxmox-async = "0.2"
+#proxmox = { version = "0.15.3", default-features = false, features = [ "tokio" ] }
+proxmox-async = "0.3"
proxmox-borrow = "1"
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = { version = "1" }
proxmox-time = { version = "1" }
+proxmox-sys = "0.2"
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-api-types = { path = "../pbs-api-types" }
+++ /dev/null
-//! Implementation of the calls to handle POSIX access control lists
-
-// see C header file <sys/acl.h> for reference
-extern crate libc;
-
-use std::ffi::CString;
-use std::marker::PhantomData;
-use std::os::unix::ffi::OsStrExt;
-use std::os::unix::io::RawFd;
-use std::path::Path;
-use std::ptr;
-
-use libc::{c_char, c_int, c_uint, c_void};
-use nix::errno::Errno;
-use nix::NixPath;
-
-// from: acl/include/acl.h
-pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
-// acl_perm_t values
-pub type ACLPerm = c_uint;
-pub const ACL_READ: ACLPerm = 0x04;
-pub const ACL_WRITE: ACLPerm = 0x02;
-pub const ACL_EXECUTE: ACLPerm = 0x01;
-
-// acl_tag_t values
-pub type ACLTag = c_int;
-pub const ACL_UNDEFINED_TAG: ACLTag = 0x00;
-pub const ACL_USER_OBJ: ACLTag = 0x01;
-pub const ACL_USER: ACLTag = 0x02;
-pub const ACL_GROUP_OBJ: ACLTag = 0x04;
-pub const ACL_GROUP: ACLTag = 0x08;
-pub const ACL_MASK: ACLTag = 0x10;
-pub const ACL_OTHER: ACLTag = 0x20;
-
-// acl_type_t values
-pub type ACLType = c_uint;
-pub const ACL_TYPE_ACCESS: ACLType = 0x8000;
-pub const ACL_TYPE_DEFAULT: ACLType = 0x4000;
-
-// acl entry constants
-pub const ACL_FIRST_ENTRY: c_int = 0;
-pub const ACL_NEXT_ENTRY: c_int = 1;
-
-// acl to extended attribute names constants
-// from: acl/include/acl_ea.h
-pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
-pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
-pub const ACL_EA_VERSION: u32 = 0x0002;
-
-#[link(name = "acl")]
-extern "C" {
- fn acl_get_file(path: *const c_char, acl_type: ACLType) -> *mut c_void;
- fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
- fn acl_get_fd(fd: RawFd) -> *mut c_void;
- fn acl_get_entry(acl: *const c_void, entry_id: c_int, entry: *mut *mut c_void) -> c_int;
- fn acl_create_entry(acl: *mut *mut c_void, entry: *mut *mut c_void) -> c_int;
- fn acl_get_tag_type(entry: *mut c_void, tag_type: *mut ACLTag) -> c_int;
- fn acl_set_tag_type(entry: *mut c_void, tag_type: ACLTag) -> c_int;
- fn acl_get_permset(entry: *mut c_void, permset: *mut *mut c_void) -> c_int;
- fn acl_clear_perms(permset: *mut c_void) -> c_int;
- fn acl_get_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
- fn acl_add_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
- fn acl_get_qualifier(entry: *mut c_void) -> *mut c_void;
- fn acl_set_qualifier(entry: *mut c_void, qualifier: *const c_void) -> c_int;
- fn acl_init(count: c_int) -> *mut c_void;
- fn acl_valid(ptr: *const c_void) -> c_int;
- fn acl_free(ptr: *mut c_void) -> c_int;
-}
-
-#[derive(Debug)]
-pub struct ACL {
- ptr: *mut c_void,
-}
-
-impl Drop for ACL {
- fn drop(&mut self) {
- let ret = unsafe { acl_free(self.ptr) };
- if ret != 0 {
- panic!("invalid pointer encountered while dropping ACL - {}", Errno::last());
- }
- }
-}
-
-impl ACL {
- pub fn init(count: usize) -> Result<ACL, nix::errno::Errno> {
- let ptr = unsafe { acl_init(count as i32 as c_int) };
- if ptr.is_null() {
- return Err(Errno::last());
- }
-
- Ok(ACL { ptr })
- }
-
- pub fn get_file<P: AsRef<Path>>(path: P, acl_type: ACLType) -> Result<ACL, nix::errno::Errno> {
- let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
- let ptr = unsafe { acl_get_file(path_cstr.as_ptr(), acl_type) };
- if ptr.is_null() {
- return Err(Errno::last());
- }
-
- Ok(ACL { ptr })
- }
-
- pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
- path.with_nix_path(|path| {
- Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
- })?
- .map(drop)
- }
-
- pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
- let ptr = unsafe { acl_get_fd(fd) };
- if ptr.is_null() {
- return Err(Errno::last());
- }
-
- Ok(ACL { ptr })
- }
-
- pub fn create_entry(&mut self) -> Result<ACLEntry, nix::errno::Errno> {
- let mut ptr = ptr::null_mut() as *mut c_void;
- let res = unsafe { acl_create_entry(&mut self.ptr, &mut ptr) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- Ok(ACLEntry {
- ptr,
- _phantom: PhantomData,
- })
- }
-
- pub fn is_valid(&self) -> bool {
- let res = unsafe { acl_valid(self.ptr) };
- if res == 0 {
- return true;
- }
-
- false
- }
-
- pub fn entries(self) -> ACLEntriesIterator {
- ACLEntriesIterator {
- acl: self,
- current: ACL_FIRST_ENTRY,
- }
- }
-
- pub fn add_entry_full(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64)
- -> Result<(), nix::errno::Errno>
- {
- let mut entry = self.create_entry()?;
- entry.set_tag_type(tag)?;
- if let Some(qualifier) = qualifier {
- entry.set_qualifier(qualifier)?;
- }
- entry.set_permissions(permissions)?;
-
- Ok(())
- }
-}
-
-#[derive(Debug)]
-pub struct ACLEntry<'a> {
- ptr: *mut c_void,
- _phantom: PhantomData<&'a mut ()>,
-}
-
-impl<'a> ACLEntry<'a> {
- pub fn get_tag_type(&self) -> Result<ACLTag, nix::errno::Errno> {
- let mut tag = ACL_UNDEFINED_TAG;
- let res = unsafe { acl_get_tag_type(self.ptr, &mut tag as *mut ACLTag) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- Ok(tag)
- }
-
- pub fn set_tag_type(&mut self, tag: ACLTag) -> Result<(), nix::errno::Errno> {
- let res = unsafe { acl_set_tag_type(self.ptr, tag) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- Ok(())
- }
-
- pub fn get_permissions(&self) -> Result<u64, nix::errno::Errno> {
- let mut permissions = 0;
- let mut permset = ptr::null_mut() as *mut c_void;
- let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
- res = unsafe { acl_get_perm(permset, perm) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- if res == 1 {
- permissions |= perm as u64;
- }
- }
-
- Ok(permissions)
- }
-
- pub fn set_permissions(&mut self, permissions: u64) -> Result<u64, nix::errno::Errno> {
- let mut permset = ptr::null_mut() as *mut c_void;
- let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- res = unsafe { acl_clear_perms(permset) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
- if permissions & perm as u64 == perm as u64 {
- res = unsafe { acl_add_perm(permset, perm) };
- if res < 0 {
- return Err(Errno::last());
- }
- }
- }
-
- Ok(permissions)
- }
-
- pub fn get_qualifier(&self) -> Result<u64, nix::errno::Errno> {
- let qualifier = unsafe { acl_get_qualifier(self.ptr) };
- if qualifier.is_null() {
- return Err(Errno::last());
- }
- let result = unsafe { *(qualifier as *const u32) as u64 };
- let ret = unsafe { acl_free(qualifier) };
- if ret != 0 {
- panic!("invalid pointer encountered while dropping ACL qualifier - {}", Errno::last());
- }
-
- Ok(result)
- }
-
- pub fn set_qualifier(&mut self, qualifier: u64) -> Result<(), nix::errno::Errno> {
- let val = qualifier as u32;
- let val_ptr: *const u32 = &val;
- let res = unsafe { acl_set_qualifier(self.ptr, val_ptr as *const c_void) };
- if res < 0 {
- return Err(Errno::last());
- }
-
- Ok(())
- }
-}
-
-#[derive(Debug)]
-pub struct ACLEntriesIterator {
- acl: ACL,
- current: c_int,
-}
-
-impl<'a> Iterator for &'a mut ACLEntriesIterator {
- type Item = ACLEntry<'a>;
-
- fn next(&mut self) -> Option<Self::Item> {
- let mut entry_ptr = ptr::null_mut();
- let res = unsafe { acl_get_entry(self.acl.ptr, self.current, &mut entry_ptr) };
- self.current = ACL_NEXT_ENTRY;
- if res == 1 {
- return Some(ACLEntry { ptr: entry_ptr, _phantom: PhantomData });
- }
-
- None
- }
-}
-
-/// Helper to transform `PxarEntry`s user mode to acl permissions.
-pub fn mode_user_to_acl_permissions(mode: u64) -> u64 {
- (mode >> 6) & 7
-}
-
-/// Helper to transform `PxarEntry`s group mode to acl permissions.
-pub fn mode_group_to_acl_permissions(mode: u64) -> u64 {
- (mode >> 3) & 7
-}
-
-/// Helper to transform `PxarEntry`s other mode to acl permissions.
-pub fn mode_other_to_acl_permissions(mode: u64) -> u64 {
- mode & 7
-}
-
-/// Buffer to compose ACLs as extended attribute.
-pub struct ACLXAttrBuffer {
- buffer: Vec<u8>,
-}
-
-impl ACLXAttrBuffer {
- /// Create a new buffer to write ACLs as extended attribute.
- ///
- /// `version` defines the ACL_EA_VERSION found in acl/include/acl_ea.h
- pub fn new(version: u32) -> Self {
- let mut buffer = Vec::new();
- buffer.extend_from_slice(&version.to_le_bytes());
- Self { buffer }
- }
-
- /// Add ACL entry to buffer.
- pub fn add_entry(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64) {
- self.buffer.extend_from_slice(&(tag as u16).to_le_bytes());
- self.buffer.extend_from_slice(&(permissions as u16).to_le_bytes());
- match qualifier {
- Some(qualifier) => self.buffer.extend_from_slice(&(qualifier as u32).to_le_bytes()),
- None => self.buffer.extend_from_slice(&ACL_UNDEFINED_ID.to_le_bytes()),
- }
- }
-
- /// Length of the buffer in bytes.
- pub fn len(&self) -> usize {
- self.buffer.len()
- }
-
- /// The buffer always contains at least the version, it is never empty
- pub const fn is_empty(&self) -> bool { false }
-
- /// Borrow raw buffer as mut slice.
- pub fn as_mut_slice(&mut self) -> &mut [u8] {
- self.buffer.as_mut_slice()
- }
-}
impl CertInfo {
pub fn from_path(path: PathBuf) -> Result<Self, Error> {
- Self::from_pem(&proxmox::tools::fs::file_get_contents(&path)?)
+ Self::from_pem(&proxmox_sys::fs::file_get_contents(&path)?)
.map_err(|err| format_err!("failed to load certificate from {:?} - {}", path, err))
}
+++ /dev/null
-use anyhow::{bail, format_err, Error};
-
-/// Helper to check result from std::process::Command output
-///
-/// The exit_code_check() function should return true if the exit code
-/// is considered successful.
-pub fn command_output(
- output: std::process::Output,
- exit_code_check: Option<fn(i32) -> bool>,
-) -> Result<Vec<u8>, Error> {
- if !output.status.success() {
- match output.status.code() {
- Some(code) => {
- let is_ok = match exit_code_check {
- Some(check_fn) => check_fn(code),
- None => code == 0,
- };
- if !is_ok {
- let msg = String::from_utf8(output.stderr)
- .map(|m| {
- if m.is_empty() {
- String::from("no error message")
- } else {
- m
- }
- })
- .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
-
- bail!("status code: {} - {}", code, msg);
- }
- }
- None => bail!("terminated by signal"),
- }
- }
-
- Ok(output.stdout)
-}
-
-/// Helper to check result from std::process::Command output, returns String.
-///
-/// The exit_code_check() function should return true if the exit code
-/// is considered successful.
-pub fn command_output_as_string(
- output: std::process::Output,
- exit_code_check: Option<fn(i32) -> bool>,
-) -> Result<String, Error> {
- let output = command_output(output, exit_code_check)?;
- let output = String::from_utf8(output)?;
- Ok(output)
-}
-
-pub fn run_command(
- mut command: std::process::Command,
- exit_code_check: Option<fn(i32) -> bool>,
-) -> Result<String, Error> {
- let output = command
- .output()
- .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
-
- let output = command_output_as_string(output, exit_code_check)
- .map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
-
- Ok(output)
-}
+++ /dev/null
-//! File system helper utilities.
-
-use std::borrow::{Borrow, BorrowMut};
-use std::fs::File;
-use std::io::{self, BufRead};
-use std::ops::{Deref, DerefMut};
-use std::os::unix::io::{AsRawFd, RawFd};
-use std::path::Path;
-
-use anyhow::{bail, format_err, Error};
-use nix::dir;
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use regex::Regex;
-
-use proxmox::sys::error::SysError;
-use proxmox_borrow::Tied;
-
-pub type DirLockGuard = Dir;
-
-/// This wraps nix::dir::Entry with the parent directory's file descriptor.
-pub struct ReadDirEntry {
- entry: dir::Entry,
- parent_fd: RawFd,
-}
-
-impl Into<dir::Entry> for ReadDirEntry {
- fn into(self) -> dir::Entry {
- self.entry
- }
-}
-
-impl Deref for ReadDirEntry {
- type Target = dir::Entry;
-
- fn deref(&self) -> &Self::Target {
- &self.entry
- }
-}
-
-impl DerefMut for ReadDirEntry {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.entry
- }
-}
-
-impl AsRef<dir::Entry> for ReadDirEntry {
- fn as_ref(&self) -> &dir::Entry {
- &self.entry
- }
-}
-
-impl AsMut<dir::Entry> for ReadDirEntry {
- fn as_mut(&mut self) -> &mut dir::Entry {
- &mut self.entry
- }
-}
-
-impl Borrow<dir::Entry> for ReadDirEntry {
- fn borrow(&self) -> &dir::Entry {
- &self.entry
- }
-}
-
-impl BorrowMut<dir::Entry> for ReadDirEntry {
- fn borrow_mut(&mut self) -> &mut dir::Entry {
- &mut self.entry
- }
-}
-
-impl ReadDirEntry {
- #[inline]
- pub fn parent_fd(&self) -> RawFd {
- self.parent_fd
- }
-
- pub unsafe fn file_name_utf8_unchecked(&self) -> &str {
- std::str::from_utf8_unchecked(self.file_name().to_bytes())
- }
-}
-
-// Since Tied<T, U> implements Deref to U, a Tied<Dir, Iterator> already implements Iterator.
-// This is simply a wrapper with a shorter type name mapping nix::Error to anyhow::Error.
-/// Wrapper over a pair of `nix::dir::Dir` and `nix::dir::Iter`, returned by `read_subdir()`.
-pub struct ReadDir {
- iter: Tied<Dir, dyn Iterator<Item = nix::Result<dir::Entry>> + Send>,
- dir_fd: RawFd,
-}
-
-impl Iterator for ReadDir {
- type Item = Result<ReadDirEntry, Error>;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.iter.next().map(|res| {
- res.map(|entry| ReadDirEntry { entry, parent_fd: self.dir_fd })
- .map_err(Error::from)
- })
- }
-}
-
-/// Create an iterator over sub directory entries.
-/// This uses `openat` on `dirfd`, so `path` can be relative to that or an absolute path.
-pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Result<ReadDir> {
- let dir = Dir::openat(dirfd, path, OFlag::O_RDONLY, Mode::empty())?;
- let fd = dir.as_raw_fd();
- let iter = Tied::new(dir, |dir| {
- Box::new(unsafe { (*dir).iter() })
- as Box<dyn Iterator<Item = nix::Result<dir::Entry>> + Send>
- });
- Ok(ReadDir { iter, dir_fd: fd })
-}
-
-/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
-/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
-pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
- dirfd: RawFd,
- path: &P,
- regex: &'a regex::Regex,
-) -> Result<impl Iterator<Item = Result<ReadDirEntry, Error>> + 'a, nix::Error> {
- Ok(read_subdir(dirfd, path)?.filter_file_name_regex(regex))
-}
-
-/// Scan directory for matching file names with a callback.
-///
-/// Scan through all directory entries and call `callback()` function
-/// if the entry name matches the regular expression. This function
-/// used unix `openat()`, so you can pass absolute or relative file
-/// names. This function simply skips non-UTF8 encoded names.
-pub fn scandir<P, F>(
- dirfd: RawFd,
- path: &P,
- regex: &regex::Regex,
- mut callback: F,
-) -> Result<(), Error>
-where
- F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
- P: ?Sized + nix::NixPath,
-{
- for entry in scan_subdir(dirfd, path, regex)? {
- let entry = entry?;
- let file_type = match entry.file_type() {
- Some(file_type) => file_type,
- None => bail!("unable to detect file type"),
- };
-
- callback(
- entry.parent_fd(),
- unsafe { entry.file_name_utf8_unchecked() },
- file_type,
- )?;
- }
- Ok(())
-}
-
-
-/// Helper trait to provide a combinators for directory entry iterators.
-pub trait FileIterOps<T, E>
-where
- Self: Sized + Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- /// Filter by file type. This is more convenient than using the `filter` method alone as this
- /// also includes error handling and handling of files without a type (via an error).
- fn filter_file_type(self, ty: dir::Type) -> FileTypeFilter<Self, T, E> {
- FileTypeFilter { inner: self, ty }
- }
-
- /// Filter by file name. Note that file names which aren't valid utf-8 will be treated as if
- /// they do not match the pattern.
- fn filter_file_name_regex(self, regex: &Regex) -> FileNameRegexFilter<Self, T, E> {
- FileNameRegexFilter { inner: self, regex }
- }
-}
-
-impl<I, T, E> FileIterOps<T, E> for I
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
-}
-
-/// This filters files from its inner iterator by a file type. Files with no type produce an error.
-pub struct FileTypeFilter<I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- inner: I,
- ty: nix::dir::Type,
-}
-
-impl<I, T, E> Iterator for FileTypeFilter<I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
- E: Into<Error> + Send + Sync,
-{
- type Item = Result<T, Error>;
-
- fn next(&mut self) -> Option<Self::Item> {
- loop {
- let item = self.inner.next()?.map_err(|e| e.into());
- match item {
- Ok(ref entry) => match entry.borrow().file_type() {
- Some(ty) => {
- if ty == self.ty {
- return Some(item);
- } else {
- continue;
- }
- }
- None => return Some(Err(format_err!("unable to detect file type"))),
- },
- Err(_) => return Some(item),
- }
- }
- }
-}
-
-/// This filters files by name via a Regex. Files whose file name aren't valid utf-8 are skipped
-/// silently.
-pub struct FileNameRegexFilter<'a, I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
-{
- inner: I,
- regex: &'a Regex,
-}
-
-impl<I, T, E> Iterator for FileNameRegexFilter<'_, I, T, E>
-where
- I: Iterator<Item = Result<T, E>>,
- T: Borrow<dir::Entry>,
-{
- type Item = Result<T, E>;
-
- fn next(&mut self) -> Option<Self::Item> {
- loop {
- let item = self.inner.next()?;
- match item {
- Ok(ref entry) => {
- if let Ok(name) = entry.borrow().file_name().to_str() {
- if self.regex.is_match(name) {
- return Some(item);
- }
- }
- // file did not match regex or isn't valid utf-8
- continue;
- },
- Err(_) => return Some(item),
- }
- }
- }
-}
-
-// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long)
-// read Linux file system attributes (see man chattr)
-nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long);
-nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long);
-
-// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
-// read FAT file system attributes
-nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32);
-nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32);
-
-// From /usr/include/linux/fs.h
-// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
-// #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
-nix::ioctl_read!(fs_ioc_fsgetxattr, b'X', 31, FSXAttr);
-nix::ioctl_write_ptr!(fs_ioc_fssetxattr, b'X', 32, FSXAttr);
-
-#[repr(C)]
-#[derive(Debug)]
-pub struct FSXAttr {
- pub fsx_xflags: u32,
- pub fsx_extsize: u32,
- pub fsx_nextents: u32,
- pub fsx_projid: u32,
- pub fsx_cowextsize: u32,
- pub fsx_pad: [u8; 8],
-}
-
-impl Default for FSXAttr {
- fn default() -> Self {
- FSXAttr {
- fsx_xflags: 0u32,
- fsx_extsize: 0u32,
- fsx_nextents: 0u32,
- fsx_projid: 0u32,
- fsx_cowextsize: 0u32,
- fsx_pad: [0u8; 8],
- }
- }
-}
-
-/// Attempt to acquire a shared flock on the given path, 'what' and
-/// 'would_block_message' are used for error formatting.
-pub fn lock_dir_noblock_shared(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
-) -> Result<DirLockGuard, Error> {
- do_lock_dir_noblock(path, what, would_block_msg, false)
-}
-
-/// Attempt to acquire an exclusive flock on the given path, 'what' and
-/// 'would_block_message' are used for error formatting.
-pub fn lock_dir_noblock(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
-) -> Result<DirLockGuard, Error> {
- do_lock_dir_noblock(path, what, would_block_msg, true)
-}
-
-fn do_lock_dir_noblock(
- path: &std::path::Path,
- what: &str,
- would_block_msg: &str,
- exclusive: bool,
-) -> Result<DirLockGuard, Error> {
- let mut handle = Dir::open(path, OFlag::O_RDONLY, Mode::empty())
- .map_err(|err| {
- format_err!("unable to open {} directory {:?} for locking - {}", what, path, err)
- })?;
-
- // acquire in non-blocking mode, no point in waiting here since other
- // backups could still take a very long time
- proxmox::tools::fs::lock_file(&mut handle, exclusive, Some(std::time::Duration::from_nanos(0)))
- .map_err(|err| {
- format_err!(
- "unable to acquire lock on {} directory {:?} - {}", what, path,
- if err.would_block() {
- String::from(would_block_msg)
- } else {
- err.to_string()
- }
- )
- })?;
-
- Ok(handle)
-}
-
-/// Get an iterator over lines of a file, skipping empty lines and comments (lines starting with a
-/// `#`).
-pub fn file_get_non_comment_lines<P: AsRef<Path>>(
- path: P,
-) -> Result<impl Iterator<Item = io::Result<String>>, Error> {
- let path = path.as_ref();
-
- Ok(io::BufReader::new(
- File::open(path).map_err(|err| format_err!("error opening {:?}: {}", path, err))?,
- )
- .lines()
- .filter_map(|line| match line {
- Ok(line) => {
- let line = line.trim();
- if line.is_empty() || line.starts_with('#') {
- None
- } else {
- Some(Ok(line.to_string()))
- }
- }
- Err(err) => Some(Err(err)),
- }))
-}
//! I/O utilities.
-use proxmox::tools::fd::Fd;
+use proxmox_sys::fd::Fd;
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
-pub mod acl;
pub mod cert;
pub mod cli;
pub mod crypt_config;
pub mod format;
-pub mod fs;
pub mod io;
pub mod json;
pub mod lru_cache;
pub mod sync;
pub mod sys;
pub mod ticket;
-pub mod xattr;
pub mod async_lru_cache;
-
-mod command;
-pub use command::{command_output, command_output_as_string, run_command};
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
self.0
.send(Ok(buf.to_vec()))
- .map_err(proxmox::sys::error::io_err_other)
+ .map_err(proxmox_sys::error::io_err_other)
.and(Ok(buf.len()))
}
+++ /dev/null
-//! Wrapper functions for the libc xattr calls
-
-use std::ffi::CStr;
-use std::os::unix::io::RawFd;
-
-use nix::errno::Errno;
-
-use proxmox_io::vec;
-use proxmox_lang::c_str;
-
-/// `"security.capability"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_name_fcaps() -> &'static CStr {
- c_str!("security.capability")
-}
-
-/// `"system.posix_acl_access"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_acl_access() -> &'static CStr {
- c_str!("system.posix_acl_access")
-}
-
-/// `"system.posix_acl_default"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_acl_default() -> &'static CStr {
- c_str!("system.posix_acl_default")
-}
-
-/// Result of `flistxattr`, allows iterating over the attributes as a list of `&CStr`s.
-///
-/// Listing xattrs produces a list separated by zeroes, inherently making them available as `&CStr`
-/// already, so we make use of this fact and reflect this in the interface.
-pub struct ListXAttr {
- data: Vec<u8>,
-}
-
-impl ListXAttr {
- fn new(data: Vec<u8>) -> Self {
- Self { data }
- }
-}
-
-impl<'a> IntoIterator for &'a ListXAttr {
- type Item = &'a CStr;
- type IntoIter = ListXAttrIter<'a>;
-
- fn into_iter(self) -> Self::IntoIter {
- ListXAttrIter {
- data: &self.data,
- at: 0,
- }
- }
-}
-
-/// Iterator over the extended attribute entries in a `ListXAttr`.
-pub struct ListXAttrIter<'a> {
- data: &'a [u8],
- at: usize,
-}
-
-impl<'a> Iterator for ListXAttrIter<'a> {
- type Item = &'a CStr;
-
- fn next(&mut self) -> Option<&'a CStr> {
- let data = &self.data[self.at..];
- let next = data.iter().position(|b| *b == 0)? + 1;
- self.at += next;
- Some(unsafe { CStr::from_bytes_with_nul_unchecked(&data[..next]) })
- }
-}
-
-/// Return a list of extended attributes accessible as an iterator over items of type `&CStr`.
-pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
- // Initial buffer size for the attribute list, if content does not fit
- // it gets dynamically increased until big enough.
- let mut size = 256;
- let mut buffer = vec::undefined(size);
- let mut bytes = unsafe {
- libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
- };
- while bytes < 0 {
- let err = Errno::last();
- match err {
- Errno::ERANGE => {
- // Buffer was not big enough to fit the list, retry with double the size
- size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
- },
- _ => return Err(err),
- }
- // Retry to read the list with new buffer
- buffer.resize(size, 0);
- bytes = unsafe {
- libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
- };
- }
- buffer.truncate(bytes as usize);
-
- Ok(ListXAttr::new(buffer))
-}
-
-/// Get an extended attribute by name.
-///
-/// Extended attributes may not contain zeroes, which we enforce in the API by using a `&CStr`
-/// type.
-pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
- let mut size = 256;
- let mut buffer = vec::undefined(size);
- let mut bytes = unsafe {
- libc::fgetxattr(fd, name.as_ptr(), buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
- };
- while bytes < 0 {
- let err = Errno::last();
- match err {
- Errno::ERANGE => {
- // Buffer was not big enough to fit the value, retry with double the size
- size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
- },
- _ => return Err(err),
- }
- buffer.resize(size, 0);
- bytes = unsafe {
- libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
- };
- }
- buffer.resize(bytes as usize, 0);
-
- Ok(buffer)
-}
-
-/// Set an extended attribute on a file descriptor.
-pub fn fsetxattr(fd: RawFd, name: &CStr, data: &[u8]) -> Result<(), nix::errno::Errno> {
- let flags = 0 as libc::c_int;
- let result = unsafe {
- libc::fsetxattr(fd, name.as_ptr(), data.as_ptr() as *const libc::c_void, data.len(), flags)
- };
- if result < 0 {
- return Err(Errno::last());
- }
-
- Ok(())
-}
-
-pub fn fsetxattr_fcaps(fd: RawFd, fcaps: &[u8]) -> Result<(), nix::errno::Errno> {
- // TODO casync checks and removes capabilities if they are set
- fsetxattr(fd, xattr_name_fcaps(), fcaps)
-}
-
-pub fn is_security_capability(name: &CStr) -> bool {
- name.to_bytes() == xattr_name_fcaps().to_bytes()
-}
-
-pub fn is_acl(name: &CStr) -> bool {
- name.to_bytes() == xattr_acl_access().to_bytes()
- || name.to_bytes() == xattr_acl_default().to_bytes()
-}
-
-/// Check if the passed name buffer starts with a valid xattr namespace prefix
-/// and is within the length limit of 255 bytes
-pub fn is_valid_xattr_name(c_name: &CStr) -> bool {
- let name = c_name.to_bytes();
- if name.is_empty() || name.len() > 255 {
- return false;
- }
- if name.starts_with(b"user.") || name.starts_with(b"trusted.") {
- return true;
- }
- // samba saves windows ACLs there
- if name == b"security.NTACL" {
- return true;
- }
- is_security_capability(c_name)
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use std::ffi::CString;
- use std::fs::OpenOptions;
- use std::os::unix::io::AsRawFd;
-
- use nix::errno::Errno;
-
- use proxmox_lang::c_str;
-
- #[test]
- fn test_fsetxattr_fgetxattr() {
- let path = "./test-xattrs.txt";
- let file = OpenOptions::new()
- .write(true)
- .create(true)
- .open(&path)
- .unwrap();
-
- let fd = file.as_raw_fd();
-
- assert!(fsetxattr(fd, c_str!("user.attribute0"), b"value0").is_ok());
- assert!(fsetxattr(fd, c_str!("user.empty"), b"").is_ok());
-
- if nix::unistd::Uid::current() != nix::unistd::ROOT {
- assert_eq!(fsetxattr(fd, c_str!("trusted.attribute0"), b"value0"), Err(Errno::EPERM));
- }
-
- let v0 = fgetxattr(fd, c_str!("user.attribute0")).unwrap();
- let v1 = fgetxattr(fd, c_str!("user.empty")).unwrap();
-
- assert_eq!(v0, b"value0".as_ref());
- assert_eq!(v1, b"".as_ref());
- assert_eq!(fgetxattr(fd, c_str!("user.attribute1")), Err(Errno::ENODATA));
-
- std::fs::remove_file(&path).unwrap();
- }
-
- #[test]
- fn test_is_valid_xattr_name() {
- let too_long = CString::new(vec![b'a'; 265]).unwrap();
-
- assert!(!is_valid_xattr_name(&too_long));
- assert!(!is_valid_xattr_name(c_str!("system.attr")));
- assert!(is_valid_xattr_name(c_str!("user.attr")));
- assert!(is_valid_xattr_name(c_str!("trusted.attr")));
- assert!(is_valid_xattr_name(super::xattr_name_fcaps()));
- }
-}
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
-proxmox-async = "0.2"
+proxmox-async = "0.3"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
+proxmox-sys = { version = "0.2", features = [ "sortable-macro" ] }
+
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
let crypt_config = CryptConfig::new(testkey)?;
- //let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+ //let random_data = proxmox_sys::linux::random_data(1024*1024)?;
let mut random_data = vec![];
// generate pseudo random byte sequence
for i in 0..256*1024 {
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::sys::linux::tty;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox_sys::linux::tty;
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_router::cli::{
complete_file_name, format_and_print_result_full, get_output_format,
CliCommand, CliCommandMap, ColumnConfig,
let kdf = kdf.unwrap_or_default();
let mut key = [0u8; 32];
- proxmox::sys::linux::fill_with_random_data(&mut key)?;
+ proxmox_sys::linux::fill_with_random_data(&mut key)?;
match kdf {
Kdf::None => {
use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
-use proxmox::tools::fs::{file_get_json, replace_file, CreateOptions, image_size};
+use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions, image_size};
use proxmox_router::{ApiMethod, RpcEnvironment, cli::*};
use proxmox_schema::api;
use proxmox_time::{strftime_local, epoch_i64};
let crypto = crypto_parameters(&param)?;
- let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
+ let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox_sys::nodename());
let backup_type = param["backup-type"].as_str().unwrap_or("host");
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
- println!("Client name: {}", proxmox::tools::nodename());
+ println!("Client name: {}", proxmox_sys::nodename());
let start_time = std::time::Instant::now();
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};
-use proxmox::{sortable, identity};
-use proxmox::tools::fd::Fd;
+use proxmox_sys::{sortable, identity};
+use proxmox_sys::fd::Fd;
use proxmox_router::{ApiHandler, ApiMethod, RpcEnvironment, cli::*};
use proxmox_schema::*;
match pbs_fuse_loop::find_all_mappings() {
Ok(mappings) => mappings
.filter_map(|(name, _)| {
- proxmox::tools::systemd::unescape_unit(&name).ok()
+ proxmox_sys::systemd::unescape_unit(&name).ok()
}).collect(),
Err(_) => Vec::new()
}
let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
- let name_escaped = proxmox::tools::systemd::escape_unit(name, false);
+ let name_escaped = proxmox_sys::systemd::escape_unit(name, false);
let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
let loopdev = session.loopdev_path.clone();
pbs_fuse_loop::cleanup_unused_run_files(None);
let mut any = false;
for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
- let name = proxmox::tools::systemd::unescape_unit(&backing)?;
+ let name = proxmox_sys::systemd::unescape_unit(&backing)?;
println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
any = true;
}
if name.starts_with("/dev/loop") {
pbs_fuse_loop::unmap_loopdev(name)?;
} else {
- let name = proxmox::tools::systemd::escape_unit(&name, false);
+ let name = proxmox_sys::systemd::escape_unit(&name, false);
pbs_fuse_loop::unmap_name(name)?;
}
use anyhow::Error;
use serde_json::{json, Value};
-use proxmox::tools::fs::file_get_contents;
+use proxmox_sys::fs::file_get_contents;
use proxmox_router::cli::*;
use proxmox_schema::api;
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox = { version = "0.15.3" }
-proxmox-async = "0.2"
+#proxmox = { version = "0.15.3" }
+proxmox-async = "0.3"
proxmox-lang = "1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-uuid = "1"
-proxmox-sys = "0.1.2"
+proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
use serde::{Deserialize, Serialize};
use serde_json::json;
-use proxmox::tools::fs::lock_file;
+use proxmox_sys::fs::lock_file;
use pbs_client::{DEFAULT_VSOCK_PORT, BackupRepository, VsockClient};
use pbs_datastore::backup_info::BackupDir;
fn make_name(repo: &BackupRepository, snap: &BackupDir) -> String {
let full = format!("qemu_{}/{}", repo, snap);
- proxmox::tools::systemd::escape_unit(&full, false)
+ proxmox_sys::systemd::escape_unit(&full, false)
}
/// remove non-responsive VMs from given map, returns 'true' if map was modified
let resp = client
.get("api2/json/status", Some(json!({"keep-timeout": true})))
.await;
- let name = proxmox::tools::systemd::unescape_unit(n)
+ let name = proxmox_sys::systemd::unescape_unit(n)
.unwrap_or_else(|_| "<invalid name>".to_owned());
let mut extra = json!({"pid": s.pid, "cid": s.cid});
fn stop(&self, id: String) -> Async<Result<(), Error>> {
async move {
- let name = proxmox::tools::systemd::escape_unit(&id, false);
+ let name = proxmox_sys::systemd::escape_unit(&id, false);
let mut map = VMStateMap::load()?;
let map_mod = cleanup_map(&mut map.map).await;
match map.map.get(&name) {
match VMStateMap::load_read_only() {
Ok(state) => state
.iter()
- .filter_map(|(name, _)| proxmox::tools::systemd::unescape_unit(&name).ok())
+ .filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok())
.collect(),
Err(_) => Vec::new(),
}
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_router::cli::{
complete_file_name, default_table_format_options,
format_and_print_result_full, get_output_format,
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
-use proxmox::tools::fs::{create_path, file_read_string, make_tmp_file, CreateOptions};
-use proxmox::tools::fd::fd_change_cloexec;
+use proxmox_sys::fs::{create_path, file_read_string, make_tmp_file, CreateOptions};
+use proxmox_sys::fd::fd_change_cloexec;
use proxmox_sys::logrotate::LogRotate;
use pbs_client::{VsockClient, DEFAULT_VSOCK_PORT};
tower-service = "0.3.0"
url = "2.1"
-proxmox = "0.15.3"
-proxmox-async = "0.2"
+#proxmox = "0.15.3"
+proxmox-async = "0.3"
proxmox-io = "1"
proxmox-lang = "1"
-proxmox-http = { version = "0.5.0", features = [ "client" ] }
+proxmox-http = { version = "0.6", features = [ "client" ] }
proxmox-router = "1.1"
proxmox-schema = { version = "1", features = [ "api-macro", "upid-api-impl" ] }
proxmox-time = "1"
-proxmox-sys = "0.1.2"
+proxmox-sys = "0.2"
use handlebars::Handlebars;
use serde::Serialize;
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_router::{ApiMethod, Router, RpcEnvironmentType, UserInformation};
use crate::{ServerAdapter, AuthError, FileLogger, FileLogOptions, CommandSocket, RestEnvironment};
use futures::future::{self, Either};
use nix::unistd::{fork, ForkResult};
-use proxmox::tools::fd::{fd_change_cloexec, Fd};
+use proxmox_sys::fd::{fd_change_cloexec, Fd};
use proxmox_io::{ReadExt, WriteExt};
// Unfortunately FnBox is nightly-only and Box<FnOnce> is unusable, so just use Box<Fn>...
let ident = ident.as_bytes();
let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_INFO, 1) };
if fd >= 0 && fd != 1 {
- let fd = proxmox::tools::fd::Fd(fd); // add drop handler
+ let fd = proxmox_sys::fd::Fd(fd); // add drop handler
nix::unistd::dup2(fd.as_raw_fd(), 1)?;
} else {
log::error!("failed to update STDOUT journal redirection ({})", fd);
}
let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_ERR, 1) };
if fd >= 0 && fd != 2 {
- let fd = proxmox::tools::fd::Fd(fd); // add drop handler
+ let fd = proxmox_sys::fd::Fd(fd); // add drop handler
nix::unistd::dup2(fd.as_raw_fd(), 2)?;
} else {
log::error!("failed to update STDERR journal redirection ({})", fd);
use anyhow::Error;
use nix::fcntl::OFlag;
-use proxmox::tools::fs::{CreateOptions, atomic_open_or_create_file};
+use proxmox_sys::fs::{CreateOptions, atomic_open_or_create_file};
/// Options to control the behavior of a [FileLogger] instance
#[derive(Default)]
use http::request::Parts;
use http::HeaderMap;
-use proxmox::tools::fd::Fd;
-use proxmox::sys::linux::procfs::PidStat;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fd::Fd;
+use proxmox_sys::linux::procfs::PidStat;
+use proxmox_sys::fs::CreateOptions;
use proxmox_router::UserInformation;
mod compression;
/// Helper to write the PID into a file
pub fn write_pid(pid_fn: &str) -> Result<(), Error> {
let pid_str = format!("{}\n", *PID);
- proxmox::tools::fs::replace_file(pid_fn, pid_str.as_bytes(), CreateOptions::new(), false)
+ proxmox_sys::fs::replace_file(pid_fn, pid_str.as_bytes(), CreateOptions::new(), false)
}
/// Helper to read the PID from a file
pub fn read_pid(pid_fn: &str) -> Result<i32, Error> {
- let pid = proxmox::tools::fs::file_get_contents(pid_fn)?;
+ let pid = proxmox_sys::fs::file_get_contents(pid_fn)?;
let pid = std::str::from_utf8(&pid)?.trim();
pid.parse().map_err(|err| format_err!("could not parse pid - {}", err))
}
resp.map(|body| {
Body::wrap_stream(DeflateEncoder::with_quality(
TryStreamExt::map_err(body, |err| {
- proxmox::io_format_err!("error during compression: {}", err)
+ proxmox_sys::io_format_err!("error during compression: {}", err)
}),
Level::Default,
))
use nix::fcntl::OFlag;
use once_cell::sync::OnceCell;
-use proxmox::sys::linux::procfs;
-use proxmox::tools::fs::{create_path, replace_file, atomic_open_or_create_file, CreateOptions};
+use proxmox_sys::linux::procfs;
+use proxmox_sys::fs::{create_path, replace_file, atomic_open_or_create_file, CreateOptions};
use proxmox_lang::try_block;
use proxmox_schema::upid::UPID;
-use proxmox_sys::worker_task_context::{WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
use proxmox_sys::logrotate::{LogRotate, LogRotateFiles};
use crate::{CommandSocket, FileLogger, FileLogOptions};
let timeout = std::time::Duration::new(10, 0);
- let file = proxmox::tools::fs::open_file_locked(
+ let file = proxmox_sys::fs::open_file_locked(
&self.task_lock_fn,
timeout,
exclusive,
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
-proxmox = { version = "0.15.3", features = [ "sortable-macro" ] }
-proxmox-async = "0.2"
+proxmox-async = "0.3"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-time = "1"
+proxmox-sys = { version = "0.2", features = [ "sortable-macro" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }
use tokio::sync::Semaphore;
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
-use proxmox::{identity, sortable};
use proxmox_router::{
list_subdirs_api_method,
ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
use proxmox_async::zip::zip_directory;
+use proxmox_sys::fs::read_subdir;
+use proxmox_sys::{identity, sortable};
use pbs_api_types::file_restore::RestoreDaemonStatus;
use pbs_client::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
use pbs_datastore::catalog::{ArchiveEntry, DirEntryAttribute};
-use pbs_tools::fs::read_subdir;
use pbs_tools::json::required_string_param;
use pxar::encoder::aio::TokioWriter;
use lazy_static::lazy_static;
use log::{info, warn};
-use proxmox::tools::fs;
+use proxmox_sys::fs;
+use proxmox_sys::command::run_command;
use proxmox_schema::const_regex;
use pbs_api_types::BLOCKDEVICE_NAME_REGEX;
-use pbs_tools::run_command;
const_regex! {
VIRTIO_PART_REGEX = r"^vd[a-z]+(\d+)$";
// create mapping for virtio drives and .fidx files (via serial description)
// note: disks::DiskManager relies on udev, which we don't have
- for entry in pbs_tools::fs::scan_subdir(
+ for entry in proxmox_sys::fs::scan_subdir(
libc::AT_FDCWD,
"/sys/block",
&BLOCKDEVICE_NAME_REGEX,
}
let mut parts = Vec::new();
- for entry in pbs_tools::fs::scan_subdir(
+ for entry in proxmox_sys::fs::scan_subdir(
libc::AT_FDCWD,
sys_path,
&VIRTIO_PART_REGEX,
serde_json = "1.0"
serde_cbor = "0.11.1"
-proxmox = { version = "0.15.3" }
+#proxmox = { version = "0.15.3" }
proxmox-time = "1"
proxmox-schema = { version = "1", features = [ "api-macro" ] }
+proxmox-sys = "0.2"
\ No newline at end of file
use proxmox_schema::{api, parse_property_string};
use proxmox_schema::{ApiStringFormat, ApiType, IntegerSchema, Schema, StringSchema};
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_rrd::rrd::{CF, DST, RRA, RRD};
use crossbeam_channel::{bounded, TryRecvError};
use anyhow::{format_err, bail, Error};
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use crate::rrd::{DST, CF, RRD, RRA};
use nix::fcntl::OFlag;
use crossbeam_channel::Receiver;
-use proxmox::tools::fs::atomic_open_or_create_file;
+use proxmox_sys::fs::atomic_open_or_create_file;
const RRD_JOURNAL_NAME: &str = "rrd.journal";
use anyhow::{bail, Error};
-use proxmox::tools::fs::create_path;
+use proxmox_sys::fs::create_path;
use crate::rrd::{CF, DST, RRD};
use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};
-use proxmox::tools::fs::{make_tmp_file, CreateOptions};
+use proxmox_sys::fs::{make_tmp_file, CreateOptions};
use proxmox_schema::api;
use crate::rrd_v1;
use anyhow::{bail, Error};
use proxmox_rrd::rrd::RRD;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
fn compare_file(fn1: &str, fn2: &str) -> Result<(), Error> {
tokio = { version = "1.6", features = [ "rt", "rt-multi-thread" ] }
pathpatterns = "0.1.2"
-proxmox = "0.15.3"
-proxmox-async = "0.2"
+#proxmox = "0.15.3"
+proxmox-async = "0.3"
proxmox-schema = { version = "1", features = [ "api-macro" ] }
proxmox-router = "1.1"
+proxmox-sys = "0.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-client = { path = "../pbs-client" }
let mut match_list = Vec::new();
if let Some(filename) = &files_from {
- for line in pbs_tools::fs::file_get_non_comment_lines(filename)? {
+ for line in proxmox_sys::fs::file_get_non_comment_lines(filename)? {
let line = line
.map_err(|err| format_err!("error reading {}: {}", filename, err))?;
match_list.push(
use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize};
-use proxmox::tools::fs::{replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_acme_rs::account::AccountCreator;
use proxmox_acme_rs::account::AccountData as AcmeAccountData;
use proxmox_acme_rs::order::{Order, OrderData};
//! Manage Access Control Lists
use anyhow::{bail, Error};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let (mut tree, expected_digest) = pbs_config::acl::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
list.push(serde_json::from_value(entry)?);
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
use std::collections::HashMap;
use std::collections::HashSet;
-use proxmox::{identity, sortable};
+use proxmox_sys::{identity, sortable};
use proxmox_router::{
http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
};
use std::convert::TryFrom;
use anyhow::{bail, format_err, Error};
-
use serde_json::{json, Value};
-use proxmox::{identity, sortable};
+use proxmox_sys::{identity, sortable};
use proxmox_router::{
http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
};
use serde::{Serialize, Deserialize};
use serde_json::{json, Value};
use std::collections::HashMap;
+use hex::FromHex;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, SubdirMap, Permission};
use proxmox_schema::api;
let list:Vec<User> = config.convert_to_typed_array("user")?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<User, Error> {
let (config, digest) = pbs_config::user::config()?;
let user = config.lookup("user", userid.as_str())?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(user)
}
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let tokenid = Authid::from((userid, Some(token_name)));
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
config.lookup("token", &tokenid.to_string())
}
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let list:Vec<ApiToken> = config.convert_to_typed_array("token")?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
let filter_by_owner = |token: ApiToken| {
if token.tokenid.is_token() && token.tokenid.user() == &userid {
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;
-use proxmox::{identity, sortable};
-use proxmox::tools::fs::{
+use proxmox_sys::{identity, sortable};
+use proxmox_sys::fs::{
file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_router::{
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::sortable;
+use proxmox_sys::sortable;
use proxmox_router::{
list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
Permission,
list.push(SyncJobStatus { config: job, status });
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
}
// also return the configuration digest
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
use anyhow::{format_err, Error};
use serde_json::Value;
-use proxmox::sortable;
+use proxmox_sys::sortable;
use proxmox_router::{
list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
Permission,
list.push(VerificationJobStatus { config: job, status });
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
use ::serde::{Serialize};
use serde_json::{json, Value};
-use proxmox::tools::digest_to_hex;
-use proxmox::tools::fs::{replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
use pbs_datastore::{DataStore, DataBlob};
fn log_upload_stat(&self, archive_name: &str, csum: &[u8; 32], uuid: &[u8; 16], size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
self.log(format!("Upload statistics for '{}'", archive_name));
- self.log(format!("UUID: {}", digest_to_hex(uuid)));
- self.log(format!("Checksum: {}", digest_to_hex(csum)));
+ self.log(format!("UUID: {}", hex::encode(uuid)));
+ self.log(format!("Checksum: {}", hex::encode(csum)));
self.log(format!("Size: {}", size));
self.log(format!("Chunk count: {}", chunk_count));
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};
+use hex::FromHex;
-use proxmox::{sortable, identity};
+use proxmox_sys::{sortable, identity};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission,
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA,
};
-use pbs_tools::fs::lock_dir_noblock_shared;
+use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
};
let (old_csum, _) = index.compute_csum();
- let old_csum = proxmox::tools::digest_to_hex(&old_csum);
+ let old_csum = hex::encode(&old_csum);
if old_csum != csum {
bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
csum, old_csum);
for (i, item) in digest_list.iter().enumerate() {
let digest_str = item.as_str().unwrap();
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
let offset = offset_list[i].as_u64().unwrap();
let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
for (i, item) in digest_list.iter().enumerate() {
let digest_str = item.as_str().unwrap();
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
let offset = offset_list[i].as_u64().unwrap();
let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
let chunk_count = required_integer_param(¶m, "chunk-count")? as u64;
let size = required_integer_param(¶m, "size")? as u64;
let csum_str = required_string_param(¶m, "csum")?;
- let csum = proxmox::tools::hex_to_digest(csum_str)?;
+ let csum = <[u8; 32]>::from_hex(csum_str)?;
let env: &BackupEnvironment = rpcenv.as_ref();
let chunk_count = required_integer_param(¶m, "chunk-count")? as u64;
let size = required_integer_param(¶m, "size")? as u64;
let csum_str = required_string_param(¶m, "csum")?;
- let csum = proxmox::tools::hex_to_digest(csum_str)?;
+ let csum = <[u8; 32]>::from_hex(csum_str)?;
let env: &BackupEnvironment = rpcenv.as_ref();
use hyper::Body;
use hyper::http::request::Parts;
use serde_json::{json, Value};
+use hex::FromHex;
-use proxmox::{sortable, identity};
+use proxmox_sys::{sortable, identity};
use proxmox_router::{ApiResponseFuture, ApiHandler, ApiMethod, RpcEnvironment};
use proxmox_schema::*;
let encoded_size = required_integer_param(¶m, "encoded-size")? as u32;
let digest_str = required_string_param(¶m, "digest")?;
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
let env: &BackupEnvironment = rpcenv.as_ref();
UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;
env.register_fixed_chunk(wid, digest, size, compressed_size, is_duplicate)?;
- let digest_str = proxmox::tools::digest_to_hex(&digest);
+ let digest_str = hex::encode(&digest);
env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
let result = Ok(json!(digest_str));
let encoded_size = required_integer_param(¶m, "encoded-size")? as u32;
let digest_str = required_string_param(¶m, "digest")?;
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
let env: &BackupEnvironment = rpcenv.as_ref();
.await?;
env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
- let digest_str = proxmox::tools::digest_to_hex(&digest);
+ let digest_str = hex::encode(&digest);
env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
let result = Ok(json!(digest_str));
use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
-use proxmox::{identity, sortable};
+use proxmox_sys::{identity, sortable};
pub mod tfa;
pub mod openid;
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
let list = config.convert_to_typed_array("openid")?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let (mut domains, expected_digest) = domains::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let config = domains.lookup("openid", &realm)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(config)
}
let (mut domains, expected_digest) = domains::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
//! If we add more, it should be moved into a sub module.
use anyhow::Error;
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission, SubdirMap};
use proxmox_schema::api;
Some(c) => c,
None => return Ok(None),
};
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(Some(config))
}
if let Some(wa) = &mut tfa.webauthn {
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(
&digest,
&crate::config::tfa::webauthn_config_digest(&wa)?,
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
+use hex::FromHex;
use proxmox_router::{
http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
/// List ACME challenge plugins.
pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>, Error> {
let (plugins, digest) = plugin::config()?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(plugins
.iter()
.map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
/// List ACME challenge plugins.
pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<PluginConfig, Error> {
let (plugins, digest) = plugin::config()?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
match plugins.get(&id) {
Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
let (mut plugins, expected_digest) = plugin::config()?;
if let Some(digest) = digest {
- let digest = proxmox::tools::hex_to_digest(&digest)?;
+ let digest = <[u8; 32]>::from_hex(&digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::{api, parse_property_string};
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(data)
}
})
.collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let (mut config, expected_digest) = pbs_config::drive::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox_schema::{api, ApiType, parse_property_string};
use proxmox_section_config::SectionConfigData;
-use proxmox_sys::worker_task_context::WorkerTaskContext;
+use proxmox_sys::WorkerTaskContext;
use pbs_datastore::chunk_store::ChunkStore;
use pbs_config::BackupLockGuard;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let (config, digest) = pbs_config::datastore::config()?;
let store_config = config.lookup("datastore", &name)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(store_config)
}
let (mut config, expected_digest) = pbs_config::datastore::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::datastore::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
let data: LtoTapeDrive = config.lookup("lto", &name)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(data)
}
})
.collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(drive_list)
}
let (mut config, expected_digest) = pbs_config::drive::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
})
.collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
use anyhow::{bail, format_err, Error};
-use proxmox::sortable;
+use proxmox_sys::sortable;
use proxmox_router::SubdirMap;
use proxmox_router::list_subdirs_api_method;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{http_err, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
})
.collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let (config, digest) = pbs_config::remote::config()?;
let mut data: Remote = config.lookup("remote", &name)?;
data.password = "".to_string(); // do not return password in api
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(data)
}
let (mut config, expected_digest) = pbs_config::remote::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::remote::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
let list = config.convert_to_typed_array("sync")?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
let list = list
.into_iter()
bail!("permission check failed");
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(sync_job)
}
let (mut config, expected_digest) = sync::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = sync::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
})
.collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let job = config.lookup("backup", &id)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(job)
}
let mut data: TapeBackupJobConfig = config.lookup("backup", &id)?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::tape_job::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
+use hex::FromHex;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
list.push(item.into());
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let (mut config_map, expected_digest) = load_key_configs()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut key_map, _) = load_keys()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
let list: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
) -> Result<TrafficControlRule, Error> {
let (config, digest) = pbs_config::traffic_control::config()?;
let data: TrafficControlRule = config.lookup("rule", &name)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(data)
}
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Router, RpcEnvironment, Permission};
use proxmox_schema::api;
privs & required_privs != 00
}).collect();
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(verification_job)
}
let (mut config, expected_digest) = verify::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use serde_json::{json, Value};
use std::collections::HashMap;
-use proxmox::tools::fs::{replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_router::{
list_subdirs_api_method, RpcEnvironment, RpcEnvironmentType, Permission, Router, SubdirMap
};
command.arg("changelog");
command.arg("-qq"); // don't display download progress
command.arg(name);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
Ok(json!(output))
}
}
/// Get APT repository information.
pub fn get_repositories() -> Result<Value, Error> {
let (files, errors, digest) = proxmox_apt::repositories::repositories()?;
- let digest = proxmox::tools::digest_to_hex(&digest);
+ let digest = hex::encode(&digest);
let suite = proxmox_apt::repositories::get_current_release_codename()?;
let suite = proxmox_apt::repositories::get_current_release_codename()?;
if let Some(expected_digest) = digest {
- let current_digest = proxmox::tools::digest_to_hex(¤t_digest);
+ let current_digest = hex::encode(¤t_digest);
crate::tools::assert_if_modified(&expected_digest, ¤t_digest)?;
}
let (mut files, errors, current_digest) = proxmox_apt::repositories::repositories()?;
if let Some(expected_digest) = digest {
- let current_digest = proxmox::tools::digest_to_hex(¤t_digest);
+ let current_digest = hex::encode(¤t_digest);
crate::tools::assert_if_modified(&expected_digest, ¤t_digest)?;
}
fn get_certificate_pem() -> Result<String, Error> {
let cert_path = configdir!("/proxy.pem");
- let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
+ let cert_pem = proxmox_sys::fs::file_get_contents(&cert_path)?;
String::from_utf8(cert_pem)
.map_err(|_| format_err!("certificate in {:?} is not a valid PEM file", cert_path))
}
use anyhow::Error;
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
/// Get the node configuration
pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig, Error> {
let (config, digest) = crate::config::node::config()?;
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(config)
}
if let Some(digest) = digest {
// FIXME: GUI doesn't handle our non-inlined digest part here properly...
if !digest.is_empty() {
- let digest = proxmox::tools::hex_to_digest(&digest)?;
+ let digest = <[u8; 32]>::from_hex(&digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
}
let mut list = Vec::new();
let basedir = "/etc/systemd/system";
- for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
+ for item in proxmox_sys::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
let item = item?;
let name = item.file_name().to_string_lossy().to_string();
}
// disable systemd mount-unit
- let mut mount_unit_name = proxmox::tools::systemd::escape_unit(&path, true);
+ let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&path, true);
mount_unit_name.push_str(".mount");
crate::tools::systemd::disable_unit(&mount_unit_name)?;
// try to unmount, if that fails tell the user to reboot or unmount manually
let mut command = std::process::Command::new("umount");
command.arg(&path);
- match pbs_tools::run_command(command, None) {
+ match proxmox_sys::command::run_command(command, None) {
Err(_) => bail!(
"Could not umount '{}' since it is busy. It will stay mounted \
until the next reboot or until unmounted manually!",
what: &str,
) -> Result<String, Error> {
- let mut mount_unit_name = proxmox::tools::systemd::escape_unit(&mount_point, true);
+ let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&mount_point, true);
mount_unit_name.push_str(".mount");
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox::{sortable, identity};
+use proxmox_sys::{sortable, identity};
use proxmox_router::{
list_subdirs_api_method, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
};
task_log!(worker, "# {:?}", command);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
task_log!(worker, "{}", output);
if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
- let import_unit = format!("zfs-import@{}.service", proxmox::tools::systemd::escape_unit(&name, false));
+ let import_unit = format!("zfs-import@{}.service", proxmox_sys::systemd::escape_unit(&name, false));
crate::tools::systemd::enable_unit(&import_unit)?;
}
let mut command = std::process::Command::new("zfs");
command.args(&["set", &format!("compression={}", compression), &name]);
task_log!(worker, "# {:?}", command);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
task_log!(worker, "{}", output);
}
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
-use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
+use pbs_api_types::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
use pbs_api_types::{
PROXMOX_CONFIG_DIGEST_SCHEMA, FIRST_DNS_SERVER_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
let raw = file_get_contents(RESOLV_CONF_FN)?;
- result["digest"] = Value::from(proxmox::tools::digest_to_hex(&sha::sha256(&raw)));
+ result["digest"] = Value::from(hex::encode(&sha::sha256(&raw)));
let data = String::from_utf8(raw)?;
use serde_json::{json, Value};
use tokio::io::{AsyncBufReadExt, BufReader};
-use proxmox::{identity, sortable};
-use proxmox::tools::fd::fd_change_cloexec;
+use proxmox_sys::{identity, sortable};
+use proxmox_sys::fd::fd_change_cloexec;
use proxmox_router::{
ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment, Router, SubdirMap,
#[api]
/// List Nodes (only for compatiblity)
fn list_nodes() -> Result<Value, Error> {
- Ok(json!([ { "node": proxmox::tools::nodename().to_string() } ]))
+ Ok(json!([ { "node": proxmox_sys::nodename().to_string() } ]))
}
pub const SUBDIRS: SubdirMap = &[
use anyhow::{Error, bail};
use serde_json::{Value, to_value};
use ::serde::{Deserialize, Serialize};
+use hex::FromHex;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::{api, parse_property_string};
) -> Result<Value, Error> {
let (config, digest) = network::config()?;
- let digest = proxmox::tools::digest_to_hex(&digest);
+ let digest = hex::encode(&digest);
let mut list = Vec::new();
let interface = config.lookup(&iface)?;
let mut data: Value = to_value(interface)?;
- data["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ data["digest"] = hex::encode(&digest).into();
Ok(data)
}
let (mut config, expected_digest) = network::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let (mut config, expected_digest) = network::config()?;
if let Some(ref digest) = digest {
- let digest = proxmox::tools::hex_to_digest(digest)?;
+ let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox::{sortable, identity};
+use proxmox_sys::{sortable, identity};
use proxmox_router::{list_subdirs_api_method, Router, Permission, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
use anyhow::{Error, format_err, bail};
use serde_json::Value;
-use proxmox::sys::linux::procfs;
+use proxmox_sys::linux::procfs;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox::{identity, sortable};
+use proxmox_sys::{identity, sortable};
use proxmox_router::{list_subdirs_api_method, Router, RpcEnvironment, Permission, SubdirMap};
use proxmox_schema::api;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
-use proxmox::tools::fs::{file_read_firstline, replace_file, CreateOptions};
+use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
use proxmox_router::{Router, Permission};
use proxmox_schema::api;
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::Value;
+use hex::FromHex;
-use proxmox::{identity, sortable};
+use proxmox_sys::{identity, sortable};
use proxmox_router::{
http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
Router, RpcEnvironment, SubdirMap,
CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP,
BACKUP_ARCHIVE_NAME_SCHEMA,
};
-use pbs_tools::fs::lock_dir_noblock_shared;
+use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::BackupDir;
let env: &ReaderEnvironment = rpcenv.as_ref();
let digest_str = required_string_param(¶m, "digest")?;
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
if !env.check_chunk_access(digest) {
env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str));
let env2 = env.clone();
let digest_str = required_string_param(¶m, "digest")?;
- let digest = proxmox::tools::hex_to_digest(digest_str)?;
+ let digest = <[u8; 32]>::from_hex(digest_str)?;
let (path, _) = env.datastore.chunk_path(&digest);
use proxmox_lang::try_block;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
-use proxmox_sys::{task_log, task_warn, worker_task_context::WorkerTaskContext};
+use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig,
list.push(TapeBackupJobStatus { config: job, status, next_media_label });
}
- rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+ rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::{sortable, identity};
+use proxmox_sys::{sortable, identity};
use proxmox_router::{
list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::tools::fs::{replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_io::ReadExt;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, parse_property_string};
use proxmox_section_config::SectionConfigData;
use proxmox_uuid::Uuid;
-use proxmox_sys::{task_log, task_warn, worker_task_context::WorkerTaskContext};
+use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
Authid, Userid, CryptMode,
worker.check_abort()?;
if verbose {
- task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
+ task_log!(worker, "Found chunk: {}", hex::encode(&digest));
}
chunks.push(digest);
let chunk_exists = datastore2.cond_touch_chunk(&digest, false)?;
if !chunk_exists {
if verbose {
- task_log!(worker2, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
+ task_log!(worker2, "Insert chunk: {}", hex::encode(&digest));
}
bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
- // println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
+ // println!("verify and write {}", hex::encode(&digest));
chunk.verify_crc()?;
if chunk.crypt_mode()? == CryptMode::None {
chunk.decode(None, Some(&digest))?; // verify digest
datastore2.insert_chunk(&chunk, &digest)?;
} else if verbose {
- task_log!(worker2, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
+ task_log!(worker2, "Found existing chunk: {}", hex::encode(&digest));
}
Ok(())
},
impl ProxmoxAuthenticator for PBS {
fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
- let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
+ let data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
match data[username.as_str()].as_str() {
None => bail!("no password set"),
Some(enc_password) => proxmox_sys::crypt::verify_crypt_pw(password, enc_password)?,
fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
let enc_password = proxmox_sys::crypt::encrypt_pw(password)?;
- let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
+ let mut data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
data[username.as_str()] = enc_password.into();
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
let data = serde_json::to_vec_pretty(&data)?;
- proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options, true)?;
+ proxmox_sys::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options, true)?;
Ok(())
}
fn remove_password(&self, username: &UsernameRef) -> Result<(), Error> {
- let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
+ let mut data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
if let Some(map) = data.as_object_mut() {
map.remove(username.as_str());
}
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
let data = serde_json::to_vec_pretty(&data)?;
- proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options, true)?;
+ proxmox_sys::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options, true)?;
Ok(())
}
use openssl::rsa::Rsa;
use openssl::sha;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_lang::try_block;
use pbs_buildcfg::configdir;
use anyhow::{bail, format_err, Error};
-use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};
+use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{Authid, CryptMode, VerifyState, UPID, SnapshotVerifyState};
use pbs_datastore::{DataStore, DataBlob, StoreProgress};
use pbs_datastore::backup_info::{BackupGroup, BackupDir, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
-use pbs_tools::fs::lock_dir_noblock_shared;
+use proxmox_sys::fs::lock_dir_noblock_shared;
use crate::tools::ParallelHandler;
if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
true
} else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
- let digest_str = proxmox::tools::digest_to_hex(digest);
+ let digest_str = hex::encode(digest);
task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
errors.fetch_add(1, Ordering::SeqCst);
true
use proxmox_lang::try_block;
use proxmox_router::{RpcEnvironmentType, UserInformation};
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_rest_server::{daemon, AuthError, ApiConfig, RestServer, RestEnvironment, ServerAdapter};
use anyhow::Error;
use serde_json::{json, Value};
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_router::{cli::*, RpcEnvironment};
use proxmox_schema::api;
use serde_json::{json, Value};
use http::{Method, HeaderMap};
-use proxmox::sys::linux::socket::set_tcp_keepalive;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::linux::socket::set_tcp_keepalive;
+use proxmox_sys::fs::CreateOptions;
use proxmox_lang::try_block;
use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};
use proxmox_http::client::{RateLimitedStream, ShareableRateLimit};
None => (None, None),
};
- let nodename = proxmox::tools::nodename();
+ let nodename = proxmox_sys::nodename();
let user = userid.as_ref().map(|u| u.as_str()).unwrap_or("");
let csrf_token = csrf_token.unwrap_or_else(|| String::from(""));
let max_files = 20; // times twenty files gives > 100000 task entries
let user = pbs_config::backup_user()?;
- let options = proxmox::tools::fs::CreateOptions::new()
+ let options = proxmox_sys::fs::CreateOptions::new()
.owner(user.uid)
.group(user.gid);
}
fn generate_host_stats_sync() {
- use proxmox::sys::linux::procfs::{
+ use proxmox_sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
match read_proc_stat() {
use anyhow::Error;
use serde_json::json;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_router::{cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::api2;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use walkdir::WalkDir;
+use hex::FromHex;
use proxmox_router::cli::{
format_and_print_result, get_output_format, CliCommand, CliCommandMap, CommandLineInterface,
let digest_raw: Option<[u8; 32]> = digest
.map(|ref d| {
- proxmox::tools::hex_to_digest(d)
+ <[u8; 32]>::from_hex(d)
.map_err(|e| format_err!("could not parse chunk - {}", e))
})
.map_or(Ok(None), |r| r.map(Some))?;
for pos in 0..index.index_count() {
let digest = index.index_digest(pos).unwrap();
- chunk_digests.insert(proxmox::tools::digest_to_hex(digest));
+ chunk_digests.insert(hex::encode(digest));
}
json!({
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::tools::digest_to_hex;
use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
use proxmox_schema::api;
let mut data = Vec::with_capacity(4 * 1024 * 1024);
for pos in 0..index.index_count() {
let chunk_digest = index.index_digest(pos).unwrap();
- let digest_str = digest_to_hex(chunk_digest);
+ let digest_str = hex::encode(chunk_digest);
let digest_prefix = &digest_str[0..4];
let chunk_path = chunks_path.join(digest_prefix).join(digest_str);
let mut chunk_file = std::fs::File::open(&chunk_path)
use anyhow::{bail, Error};
use serde_json::Value;
-use proxmox::tools::fs::file_get_contents;
+use proxmox_sys::fs::file_get_contents;
use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
use proxmox_schema::api;
use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
use proxmox_schema::api;
-use proxmox::sys::linux::tty;
+use proxmox_sys::linux::tty;
use pbs_api_types::{
Fingerprint, Kdf, DRIVE_NAME_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox::sys::error::SysError;
-use proxmox::tools::fs::{CreateOptions, file_read_string};
+use proxmox_sys::error::SysError;
+use proxmox_sys::fs::{CreateOptions, file_read_string};
use pbs_api_types::PROXMOX_SAFE_ID_REGEX;
}
fn create_acme_subdir(dir: &str) -> nix::Result<()> {
- match proxmox::tools::fs::create_dir(dir, root_only()) {
+ match proxmox_sys::fs::create_dir(dir, root_only()) {
Ok(()) => Ok(()),
Err(err) if err.already_exists() => Ok(()),
Err(err) => Err(err),
where
F: FnMut(AcmeAccountName) -> ControlFlow<Result<(), Error>>,
{
- match pbs_tools::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) {
+ match proxmox_sys::fs::scan_subdir(-1, ACME_ACCOUNT_DIR, &PROXMOX_SAFE_ID_REGEX) {
Ok(files) => {
for file in files {
let file = file?;
// We handle this property separately in the API calls.
/// DNS plugin data (base64url encoded without padding).
- #[serde(with = "proxmox::tools::serde::string_as_base64url_nopad")]
+ #[serde(with = "proxmox_serde::string_as_base64url_nopad")]
pub data: String,
}
}
pub fn config() -> Result<(PluginData, [u8; 32]), Error> {
- let content = proxmox::tools::fs::file_read_optional_string(ACME_PLUGIN_CFG_FILENAME)?
+ let content = proxmox_sys::fs::file_read_optional_string(ACME_PLUGIN_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let expire = openssl::asn1::Asn1Time::days_from_now(365*1000)?;
x509.set_not_after(&expire)?;
- let nodename = proxmox::tools::nodename();
+ let nodename = proxmox_sys::nodename();
let mut fqdn = nodename.to_owned();
let resolv_conf = crate::api2::node::dns::read_etc_resolv_conf()?;
/// Read the Node Config.
pub fn config() -> Result<(NodeConfig, [u8; 32]), Error> {
let content =
- proxmox::tools::fs::file_read_optional_string(CONF_FILE)?.unwrap_or_else(|| "".to_string());
+ proxmox_sys::fs::file_read_optional_string(CONF_FILE)?.unwrap_or_else(|| "".to_string());
let digest = openssl::sha::sha256(content.as_bytes());
let data: NodeConfig = crate::tools::config::from_str(&content, &NodeConfig::API_SCHEMA)?;
use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;
-use proxmox::sys::error::SysError;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::error::SysError;
+use proxmox_sys::fs::CreateOptions;
use proxmox_tfa::totp::Totp;
pub use proxmox_tfa::api::{
let options = CreateOptions::new().perm(Mode::from_bits_truncate(0o0600));
let json = serde_json::to_vec(data)?;
- proxmox::tools::fs::replace_file(CONF_FILE, &json, options, true)
+ proxmox_sys::fs::replace_file(CONF_FILE, &json, options, true)
}
/// Cleanup non-existent users from the tfa config.
);
}
- proxmox::c_try!(unsafe { libc::ftruncate(self.lock.as_raw_fd(), 0) });
+ proxmox_sys::c_try!(unsafe { libc::ftruncate(self.lock.as_raw_fd(), 0) });
Ok(())
}
fn open(&self, userid: &str) -> Result<Self::Data, Error> {
crate::server::create_run_dir()?;
let options = CreateOptions::new().perm(Mode::from_bits_truncate(0o0600));
- proxmox::tools::fs::create_path(CHALLENGE_DATA_PATH, Some(options.clone()), Some(options))
+ proxmox_sys::fs::create_path(CHALLENGE_DATA_PATH, Some(options.clone()), Some(options))
.map_err(|err| {
format_err!(
"failed to crate challenge data dir {:?}: {}",
.open(&path)
.map_err(|err| format_err!("failed to create challenge file {:?}: {}", path, err))?;
- proxmox::tools::fs::lock_file(&mut file, true, None)?;
+ proxmox_sys::fs::lock_file(&mut file, true, None)?;
// the file may be empty, so read to a temporary buffer first:
let mut data = Vec::with_capacity(4096);
Err(err) => return Err(err.into()),
};
- proxmox::tools::fs::lock_file(&mut file, true, None)?;
+ proxmox_sys::fs::lock_file(&mut file, true, None)?;
let inner = serde_json::from_reader(&mut file).map_err(|err| {
format_err!("failed to read challenge data for user {}: {}", userid, err)
use anyhow::{format_err, Error};
use once_cell::sync::OnceCell;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_rrd::RRDCache;
use proxmox_rrd::rrd::{RRD, DST, CF};
use handlebars::{Handlebars, Helper, Context, RenderError, RenderContext, Output, HelperResult, TemplateError};
-use proxmox::tools::email::sendmail;
+use proxmox_sys::email::sendmail;
use proxmox_lang::try_block;
use proxmox_schema::{parse_property_string, ApiType};
// so we include html as well
let html = format!("<html><body><pre>\n{}\n<pre>", handlebars::html_escape(text));
- let nodename = proxmox::tools::nodename();
+ let nodename = proxmox_sys::nodename();
let author = format!("Proxmox Backup Server - {}", nodename);
// user will surely request that they can change this
- let nodename = proxmox::tools::nodename();
+ let nodename = proxmox_sys::nodename();
let mut fqdn = nodename.to_owned();
if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
) -> Result<(), Error> {
// update mails always go to the root@pam configured email..
if let Some(email) = lookup_user_email(Userid::root_userid()) {
- let nodename = proxmox::tools::nodename();
+ let nodename = proxmox_sys::nodename();
let subject = format!("New software packages available ({})", nodename);
let (fqdn, port) = get_server_url();
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
-use proxmox::tools::fs::{
+use proxmox_sys::fs::{
create_path, file_read_optional_string, replace_file, CreateOptions,
};
use anyhow::Error;
use serde_json::Value;
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use pbs_buildcfg;
"sync chunk writer",
4,
move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
- // println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
+ // println!("verify and write {}", hex::encode(&digest));
chunk.verify_unencrypted(size as usize, &digest)?;
target2.insert_chunk(&chunk, &digest)?;
Ok(())
target.cond_touch_chunk(&info.digest, false)
})?;
if chunk_exists {
- //task_log!(worker, "chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest));
+ //task_log!(worker, "chunk {} exists {}", pos, hex::encode(digest));
return Ok::<_, Error>(());
}
- //task_log!(worker, "sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest));
+ //task_log!(worker, "sync {} chunk {}", pos, hex::encode(digest));
let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
let raw_size = chunk.raw_size() as usize;
}
pub fn generate_report() -> String {
- use proxmox::tools::fs::file_read_optional_string;
+ use proxmox_sys::fs::file_read_optional_string;
let file_contents = files()
.iter()
use anyhow::{bail, Error};
use nix::sys::stat::Mode;
-use proxmox::tools::fs::{create_path, CreateOptions};
+use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_http::client::{RateLimit, RateLimiter, ShareableRateLimit};
use proxmox_shared_memory::{Init, SharedMemory, SharedMutex};
use anyhow::{bail, Error};
-use proxmox::tools::fs::{CreateOptions, replace_file, file_read_optional_string};
+use proxmox_sys::fs::{CreateOptions, replace_file, file_read_optional_string};
use pbs_api_types::{ScsiTapeChanger, LtoTapeDrive};
use anyhow::Error;
-use pbs_tools::run_command;
+use proxmox_sys::command::run_command;
use pbs_api_types::ScsiTapeChanger;
use pbs_tape::MtxStatus;
Fingerprint, MamAttribute, LtoDriveAndMediaStatus, LtoTapeDrive, Lp17VolumeStatistics,
};
use pbs_config::key_config::KeyConfig;
-use pbs_tools::run_command;
+use proxmox_sys::command::run_command;
use pbs_tape::{
TapeWrite, TapeRead, BlockReadError, MediaContentHeader,
sg_tape::{SgTape, TapeAlertFlags},
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
-use proxmox::{
- tools::{
- fs::{
- lock_file,
- atomic_open_or_create_file,
- file_read_optional_string,
- replace_file,
- CreateOptions,
- }
- },
+use proxmox_sys::fs::{
+ lock_file,
+ atomic_open_or_create_file,
+ file_read_optional_string,
+ replace_file,
+ CreateOptions,
};
+
use proxmox_io::ReadExt;
use proxmox_section_config::SectionConfigData;
use proxmox_uuid::Uuid;
-use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};
+use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{VirtualTapeDrive, LtoTapeDrive, Fingerprint};
use pbs_config::key_config::KeyConfig;
// Uses systemd escape_unit to compute a file name from `device_path`, the try
// to lock `/var/lock/<name>`.
fn open_device_lock(device_path: &str) -> Result<std::fs::File, Error> {
- let lock_name = proxmox::tools::systemd::escape_unit(device_path, true);
+ let lock_name = proxmox_sys::systemd::escape_unit(device_path, true);
let mut path = std::path::PathBuf::from(crate::tape::DRIVE_LOCK_DIR);
path.push(lock_name);
use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};
-use proxmox::tools::{
+use proxmox_sys::{
fs::{replace_file, CreateOptions},
};
let options = CreateOptions::new();
let timeout = std::time::Duration::new(10, 0);
- let lock = proxmox::tools::fs::open_file_locked(&lock_path, timeout, true, options)?;
+ let lock = proxmox_sys::fs::open_file_locked(&lock_path, timeout, true, options)?;
Ok(VirtualTapeHandle {
_lock: lock,
fn load_tape_index(&self, tape_name: &str) -> Result<TapeIndex, Error> {
let path = self.tape_index_path(tape_name);
- let raw = proxmox::tools::fs::file_get_contents(&path)?;
+ let raw = proxmox_sys::fs::file_get_contents(&path)?;
if raw.is_empty() {
return Ok(TapeIndex { files: 0 });
}
current_tape: None,
})?;
- let data = proxmox::tools::fs::file_get_json(&path, Some(default))?;
+ let data = proxmox_sys::fs::file_get_json(&path, Some(default))?;
let status: VirtualDriveStatus = serde_json::from_value(data)?;
Ok(status)
}
Ok(Box::new(reader))
}
None => {
- return Err(BlockReadError::Error(proxmox::io_format_err!("drive is empty (no tape loaded).")));
+ return Err(BlockReadError::Error(proxmox_sys::io_format_err!("drive is empty (no tape loaded).")));
}
}
}
Ok(writer)
}
- None => proxmox::io_bail!("drive is empty (no tape loaded)."),
+ None => proxmox_sys::io_bail!("drive is empty (no tape loaded)."),
}
}
use std::fs::File;
use std::io::Read;
-use proxmox::sys::error::SysError;
+use proxmox_sys::error::SysError;
use proxmox_uuid::Uuid;
use pbs_tape::{
while remaining != 0 {
let got = file.read(&mut file_copy_buffer[..])?;
if got as u64 > remaining {
- proxmox::io_bail!("catalog '{}' changed while reading", uuid);
+ proxmox_sys::io_bail!("catalog '{}' changed while reading", uuid);
}
writer.write_all(&file_copy_buffer[..got])?;
remaining -= got as u64;
}
if remaining > 0 {
- proxmox::io_bail!("catalog '{}' shrunk while reading", uuid);
+ proxmox_sys::io_bail!("catalog '{}' shrunk while reading", uuid);
}
Ok(())
});
fn write_all(&mut self, data: &[u8]) -> Result<bool, std::io::Error> {
match self.writer {
Some(ref mut writer) => writer.write_all(data),
- None => proxmox::io_bail!(
+ None => proxmox_sys::io_bail!(
"detected write after archive finished - internal error"),
}
}
if self.reader.is_none() {
let mut reader = (self.next_reader_fn)()
- .map_err(|err| proxmox::io_format_err!("multi-volume next failed: {}", err))?;
+ .map_err(|err| proxmox_sys::io_format_err!("multi-volume next failed: {}", err))?;
proxmox_lang::try_block!({
let part_header: MediaContentHeader = unsafe { reader.read_le_value()? };
self.reader = Some(reader);
if part_header.uuid != self.header.uuid {
- proxmox::io_bail!("got wrong part uuid");
+ proxmox_sys::io_bail!("got wrong part uuid");
}
if part_header.content_magic!= self.header.content_magic {
- proxmox::io_bail!("got wrong part content magic");
+ proxmox_sys::io_bail!("got wrong part content magic");
}
let expect_part_number = self.header.part_number + 1;
if part_header.part_number != expect_part_number {
- proxmox::io_bail!("got wrong part number ({} != {})",
+ proxmox_sys::io_bail!("got wrong part number ({} != {})",
part_header.part_number, expect_part_number);
}
Ok(())
}).map_err(|err| {
- proxmox::io_format_err!("multi-volume read content header failed: {}", err)
+ proxmox_sys::io_format_err!("multi-volume read content header failed: {}", err)
})?;
}
fn write_all(&mut self, buf: &[u8]) -> Result<bool, std::io::Error> {
if self.finished {
- proxmox::io_bail!("multi-volume writer already finished: internal error");
+ proxmox_sys::io_bail!("multi-volume writer already finished: internal error");
}
if self.got_leom {
if !self.wrote_header {
- proxmox::io_bail!("multi-volume writer: got LEOM before writing anything - internal error");
+ proxmox_sys::io_bail!("multi-volume writer: got LEOM before writing anything - internal error");
}
let mut writer = match self.writer.take() {
Some(writer) => writer,
- None => proxmox::io_bail!("multi-volume writer: no writer -internal error"),
+ None => proxmox_sys::io_bail!("multi-volume writer: no writer -internal error"),
};
self.bytes_written = writer.bytes_written();
writer.finish(true)?;
if self.writer.is_none() {
if self.header.part_number >= 255 {
- proxmox::io_bail!("multi-volume writer: too many parts");
+ proxmox_sys::io_bail!("multi-volume writer: too many parts");
}
self.writer = Some(
(self.next_writer_fn)()
- .map_err(|err| proxmox::io_format_err!("multi-volume get next volume failed: {}", err))?
+ .map_err(|err| proxmox_sys::io_format_err!("multi-volume get next volume failed: {}", err))?
);
self.got_leom = false;
self.wrote_header = false;
fn finish(&mut self, incomplete: bool) -> Result<bool, std::io::Error> {
if incomplete {
- proxmox::io_bail!(
+ proxmox_sys::io_bail!(
"incomplete flag makes no sense for multi-volume stream: internal error");
}
match self.writer.take() {
- None if self.finished => proxmox::io_bail!(
+ None if self.finished => proxmox_sys::io_bail!(
"multi-volume writer already finished: internal error"),
None => Ok(false),
Some(ref mut writer) => {
use std::pin::Pin;
use std::task::{Context, Poll};
-use proxmox::sys::error::SysError;
+use proxmox_sys::error::SysError;
use proxmox_uuid::Uuid;
use pbs_tape::{
for filename in file_list.iter() {
let mut file = snapshot_reader.open_file(filename)
- .map_err(|err| proxmox::io_format_err!("open file '{}' failed - {}", filename, err))?;
+ .map_err(|err| proxmox_sys::io_format_err!("open file '{}' failed - {}", filename, err))?;
let metadata = file.metadata()?;
let file_size = metadata.len();
let metadata: pxar::Metadata = metadata.into();
if !metadata.is_regular_file() {
- proxmox::io_bail!("file '{}' is not a regular file", filename);
+ proxmox_sys::io_bail!("file '{}' is not a regular file", filename);
}
let mut remaining = file_size;
while remaining != 0 {
let got = file.read(&mut file_copy_buffer[..])?;
if got as u64 > remaining {
- proxmox::io_bail!("file '{}' changed while reading", filename);
+ proxmox_sys::io_bail!("file '{}' changed while reading", filename);
}
out.write_all(&file_copy_buffer[..got])?;
remaining -= got as u64;
}
if remaining > 0 {
- proxmox::io_bail!("file '{}' shrunk while reading", filename);
+ proxmox_sys::io_bail!("file '{}' shrunk while reading", filename);
}
}
encoder.finish()?;
use serde::{Serialize, Deserialize};
use serde_json::json;
-use proxmox::tools::fs::{replace_file, file_get_json, CreateOptions};
+use proxmox_sys::fs::{replace_file, file_get_json, CreateOptions};
use proxmox_uuid::Uuid;
use proxmox_time::compute_next_event;
use anyhow::{bail, format_err, Error};
use endian_trait::Endian;
-use pbs_tools::fs::read_subdir;
+use proxmox_sys::fs::read_subdir;
use pbs_datastore::backup_info::BackupDir;
-use proxmox::tools::fs::{
+use proxmox_sys::fs::{
fchown,
create_path,
CreateOptions,
};
if self.log_to_stdout {
- println!("C|{}", proxmox::tools::digest_to_hex(digest));
+ println!("C|{}", hex::encode(digest));
}
self.pending.push(b'C');
use anyhow::{format_err, bail, Error};
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use crate::tape::{MediaCatalog, MediaId};
.owner(backup_user.uid)
.group(backup_user.gid);
- proxmox::tools::fs::replace_file(
+ proxmox_sys::fs::replace_file(
cache_path,
data.as_bytes(),
options,
use anyhow::{format_err, Error};
-use proxmox::tools::fs::{
+use proxmox_sys::fs::{
create_path,
CreateOptions,
};
Some(Err(err)) => bail!("{}", err),
};
- //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(digest), blob.raw_size());
+ //println!("CHUNK {} size {}", hex::encode(digest), blob.raw_size());
match writer.try_write_chunk(&digest, &blob) {
Ok(true) => {
};
let blob = datastore.load_chunk(&digest)?;
- //println!("LOAD CHUNK {}", proxmox::tools::digest_to_hex(&digest));
+ //println!("LOAD CHUNK {}", hex::encode(&digest));
match tx.send(Ok(Some((digest, blob)))) {
Ok(()) => {},
Err(err) => {
use anyhow::{Error, bail, format_err};
use apt_pkg_native::Cache;
-use proxmox::tools::fs::{file_read_optional_string, replace_file, CreateOptions};
+use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_schema::const_regex;
use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
command.arg("changelog");
command.arg("--print-uris");
command.arg(package);
- let output = pbs_tools::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
+ let output = proxmox_sys::command::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
let output = match output.splitn(2, ' ').next() {
Some(output) => {
if output.len() < 2 {
let mut command = std::process::Command::new(PVS_BIN_PATH);
command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
let mut device_set: HashSet<u64> = HashSet::new();
use ::serde::{Deserialize, Serialize};
-use proxmox::sys::error::io_err_other;
-use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
-use proxmox::{io_bail, io_format_err};
+use proxmox_sys::error::io_err_other;
+use proxmox_sys::linux::procfs::{MountInfo, mountinfo::Device};
+use proxmox_sys::{io_bail, io_format_err};
use proxmox_schema::api;
use pbs_api_types::{BLOCKDEVICE_NAME_REGEX, StorageStatus};
let mut map = HashMap::new();
- for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
+ for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
let item = item?;
let name = match item.file_name().to_str() {
Ok(name) => name,
let mut command = std::process::Command::new("lsblk");
command.args(&["--json", "-o", "path,parttype,fstype"]);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
let mut output: serde_json::Value = output.parse()?;
let mut found_dm = false;
let mut found_partitions = false;
- for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
+ for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
let item = item?;
let name = match item.file_name().to_str() {
Ok(name) => name,
let mut result = HashMap::new();
- for item in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
+ for item in proxmox_sys::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
let item = item?;
let name = item.file_name().to_str().unwrap().to_string();
command.arg("--rereadpt");
command.arg(disk_path);
- pbs_tools::run_command(command, None)?;
+ proxmox_sys::command::run_command(command, None)?;
Ok(())
}
command.arg(disk_path);
command.args(&["-U", uuid]);
- pbs_tools::run_command(command, None)?;
+ proxmox_sys::command::run_command(command, None)?;
Ok(())
}
command.args(&["-n1", "-t1:8300"]);
command.arg(disk_path);
- pbs_tools::run_command(command, None)?;
+ proxmox_sys::command::run_command(command, None)?;
let mut partitions = disk.partitions()?;
command.args(&["-t", &fs_type]);
command.arg(disk_path);
- pbs_tools::run_command(command, None)?;
+ proxmox_sys::command::run_command(command, None)?;
Ok(())
}
pub fn complete_disk_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut list = Vec::new();
- let dir = match pbs_tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
+ let dir = match proxmox_sys::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
Ok(dir) => dir,
Err(_) => return list,
};
command.args(&["-o", "export"]);
command.arg(disk_path);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
for line in output.lines() {
if let Some(uuid) = line.strip_prefix("UUID=") {
};
command.arg(disk_path);
- let output = pbs_tools::run_command(command, Some(|exitcode|
+ let output = proxmox_sys::command::run_command(command, Some(|exitcode|
(exitcode & 0b0111) == 0 // only bits 0-2 are fatal errors
))?;
path.push(pool);
path.push("io");
- let text = match proxmox::tools::fs::file_read_optional_string(&path)? {
+ let text = match proxmox_sys::fs::file_read_optional_string(&path)? {
Some(text) => text,
None => { return Ok(None); }
};
if let Some(pool) = pool { command.arg(pool); }
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
parse_zpool_list(&output)
}
let mut command = std::process::Command::new("zpool");
command.args(&["status", "-p", "-P", pool]);
- let output = pbs_tools::run_command(command, None)?;
+ let output = proxmox_sys::command::run_command(command, None)?;
parse_zpool_status(&output)
}
};
use tokio_openssl::SslStream;
-use proxmox::sys::linux::socket::set_tcp_keepalive;
+use proxmox_sys::linux::socket::set_tcp_keepalive;
use proxmox_http::http::{MaybeTlsStream, ProxyConfig};
pub fn get_hardware_address() -> Result<String, Error> {
static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";
- let contents = proxmox::tools::fs::file_get_contents(FILENAME)
+ let contents = proxmox_sys::fs::file_get_contents(FILENAME)
.map_err(|e| format_err!("Error getting host key - {}", e))?;
let digest = md5sum(&contents)
.map_err(|e| format_err!("Error digesting host key - {}", e))?;
- Ok(proxmox::tools::bin_to_hex(&digest).to_uppercase())
+ Ok(hex::encode(&digest).to_uppercase())
}
pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
use proxmox_schema::api;
-use proxmox::tools::fs::{replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_http::client::SimpleHttp;
use pbs_tools::json::json_object_to_query;
checktime: i64
) -> Result<(String, String), Error> {
// WHCMS sample code feeds the key into this, but it's just a challenge, so keep it simple
- let rand = proxmox::tools::bin_to_hex(&proxmox::sys::linux::random_data(16)?);
+ let rand = hex::encode(&proxmox_sys::linux::random_data(16)?);
let challenge = format!("{}{}", checktime, rand);
let params = json!({
if let SubscriptionStatus::ACTIVE = info.status {
let response_raw = format!("{}{}", SHARED_KEY_DATA, challenge);
- let expected = proxmox::tools::bin_to_hex(&tools::md5sum(response_raw.as_bytes())?);
+ let expected = hex::encode(&tools::md5sum(response_raw.as_bytes())?);
if expected != md5hash {
bail!("Subscription API challenge failed, expected {} != got {}", expected, md5hash);
}
/// reads in subscription information and does a basic integrity verification
pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
- let cfg = proxmox::tools::fs::file_read_optional_string(&SUBSCRIPTION_FN)?;
+ let cfg = proxmox_sys::fs::file_read_optional_string(&SUBSCRIPTION_FN)?;
let cfg = if let Some(cfg) = cfg { cfg } else { return Ok(None); };
let mut cfg = cfg.lines();
use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
-use proxmox::tools::{fs::replace_file, fs::CreateOptions};
+use proxmox_sys::{fs::replace_file, fs::CreateOptions};
lazy_static! {
fn parse_systemd_config(config: &SectionConfig, filename: &str) -> Result<SectionConfigData, Error> {
- let raw = proxmox::tools::fs::file_get_contents(filename)?;
+ let raw = proxmox_sys::fs::file_get_contents(filename)?;
let input = String::from_utf8(raw)?;
let data = config.parse(filename, &input)?;
fn test_escape_unit() -> Result<(), Error> {
fn test_escape(i: &str, expected: &str, is_path: bool) {
- use proxmox::tools::systemd::{escape_unit, unescape_unit};
+ use proxmox_sys::systemd::{escape_unit, unescape_unit};
let escaped = escape_unit(i, is_path);
assert_eq!(escaped, expected);
extern crate tokio;
extern crate nix;
-use proxmox::tools::fs::CreateOptions;
+use proxmox_sys::fs::CreateOptions;
use proxmox_lang::try_block;
-use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};
+use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{Authid, UPID};