use anyhow::{format_err, Error};
-use std::io::{Write, Seek, SeekFrom};
use std::fs::File;
-use std::sync::Arc;
+use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::OpenOptionsExt;
+use std::sync::Arc;
use futures::future::AbortHandle;
use serde_json::{json, Value};
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_tools::sha::sha256;
-use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, BackupManifest};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
+use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
+use pbs_tools::crypt_config::CryptConfig;
+use pbs_tools::sha::sha256;
-use super::{HttpClient, H2Client};
+use super::{H2Client, HttpClient};
/// Backup Reader
pub struct BackupReader {
}
impl Drop for BackupReader {
-
fn drop(&mut self) {
self.abort.abort();
}
}
impl BackupReader {
-
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
- Arc::new(Self { h2, abort, crypt_config})
+ Arc::new(Self {
+ h2,
+ abort,
+ crypt_config,
+ })
}
/// Create a new instance by upgrading the connection at '/api2/json/reader'
backup_time: i64,
debug: bool,
) -> Result<Arc<BackupReader>, Error> {
-
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"store": datastore,
"debug": debug,
});
- let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
-
- let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
+ let req = HttpClient::request_builder(
+ client.server(),
+ client.port(),
+ "GET",
+ "/api2/json/reader",
+ Some(param),
+ )
+ .unwrap();
+
+ let (h2, abort) = client
+ .start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
+ .await?;
Ok(BackupReader::new(h2, abort, crypt_config))
}
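
// Illustrative sketch, not part of the patch: a call site for the reader
// constructor above. Only the tail of its parameter list is visible in this
// hunk, so the method name (`start`) and the leading parameters are
// assumptions inferred from the `param` object.
async fn open_reader(client: HttpClient) -> Result<Arc<BackupReader>, Error> {
    BackupReader::start(
        client,
        None,       // crypt_config: Option<Arc<CryptConfig>>
        "store1",   // datastore
        "vm",       // backup-type
        "100",      // backup-id
        1640995200, // backup_time (epoch seconds)
        false,      // debug
    )
    .await
}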
/// Execute a GET request
- pub async fn get(
- &self,
- path: &str,
- param: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.get(path, param).await
}
/// Execute a PUT request
- pub async fn put(
- &self,
- path: &str,
- param: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.put(path, param).await
}
/// Execute a POST request
- pub async fn post(
- &self,
- path: &str,
- param: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.post(path, param).await
}
/// Execute a GET request and send output to a writer
- pub async fn download<W: Write + Send>(
- &self,
- file_name: &str,
- output: W,
- ) -> Result<(), Error> {
+ pub async fn download<W: Write + Send>(&self, file_name: &str, output: W) -> Result<(), Error> {
let path = "download";
let param = json!({ "file-name": file_name });
self.h2.download(path, Some(param), output).await
/// Execute a special GET request and send output to a writer
///
/// This writes random data, and is only useful to test download speed.
- pub async fn speedtest<W: Write + Send>(
- &self,
- output: W,
- ) -> Result<(), Error> {
+ pub async fn speedtest<W: Write + Send>(&self, output: W) -> Result<(), Error> {
self.h2.download("speedtest", None, output).await
}
///
/// The manifest signature is verified if we have a crypt_config.
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
-
let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(None, None)?;
- let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+ let manifest =
+ BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok((manifest, data))
}
manifest: &BackupManifest,
name: &str,
) -> Result<DataBlobReader<'_, File>, Error> {
-
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
manifest: &BackupManifest,
name: &str,
) -> Result<DynamicIndexReader, Error> {
-
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
manifest: &BackupManifest,
name: &str,
) -> Result<FixedIndexReader, Error> {
-
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
use anyhow::{format_err, Error};
-use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
+use pbs_api_types::{Authid, Userid, BACKUP_REPO_URL_REGEX, IP_V6_REGEX};
/// Reference remote backup locations
///
}
impl BackupRepository {
-
- pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
+ pub fn new(
+ auth_id: Option<Authid>,
+ host: Option<String>,
+ port: Option<u16>,
+ store: String,
+ ) -> Self {
let host = match host {
- Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
- Some(format!("[{}]", host))
- },
+ Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => Some(format!("[{}]", host)),
other => other,
};
- Self { auth_id, host, port, store }
+ Self {
+ auth_id,
+ host,
+ port,
+ store,
+ }
}
pub fn auth_id(&self) -> &Authid {
impl fmt::Display for BackupRepository {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match (&self.auth_id, &self.host, self.port) {
- (Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
+ (Some(auth_id), _, _) => write!(
+ f,
+ "{}@{}:{}:{}",
+ auth_id,
+ self.host(),
+ self.port(),
+ self.store
+ ),
(None, Some(host), None) => write!(f, "{}:{}", host, self.store),
(None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
(None, None, None) => write!(f, "{}", self.store),
/// `host` parts are optional, where `host` defaults to the local
/// host, and `user` defaults to `root@pam`.
fn from_str(url: &str) -> Result<Self, Self::Err> {
-
- let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
+ let cap = (BACKUP_REPO_URL_REGEX.regex_obj)()
+ .captures(url)
.ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
Ok(Self {
- auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
+ auth_id: cap
+ .get(1)
+ .map(|m| Authid::try_from(m.as_str().to_owned()))
+ .transpose()?,
host: cap.get(2).map(|m| m.as_str().to_owned()),
port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
store: cap[4].to_owned(),
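
// Illustrative sketch, not part of the patch: the capture groups above imply
// repository URLs of the shape "[[auth-id@]host[:port]:]store"; the FromStr
// error type (`Error`) and the concrete URL are assumptions.
fn repository_url_example() -> Result<(), Error> {
    let repo: BackupRepository = "admin@pbs@backup.example.org:8007:store1".parse()?;
    assert_eq!(repo.to_string(), "admin@pbs@backup.example.org:8007:store1");

    // a bare store name falls back to the documented defaults (localhost, root@pam)
    let local: BackupRepository = "store1".parse()?;
    assert_eq!(local.to_string(), "store1");
    Ok(())
}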
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}
-pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
- "Backup source specification ([<label>:<path>]).")
- .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
- .schema();
-
-pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
+pub const BACKUP_SOURCE_SCHEMA: Schema =
+ StringSchema::new("Backup source specification ([<label>:<path>]).")
+ .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+ .schema();
+
+pub enum BackupSpecificationType {
+ PXAR,
+ IMAGE,
+ CONFIG,
+ LOGFILE,
+}
pub struct BackupSpecification {
- pub archive_name: String, // left part
+ pub archive_name: String, // left part
pub config_string: String, // right part
pub spec_type: BackupSpecificationType,
}
pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
-
if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
let archive_name = caps.get(1).unwrap().as_str().into();
let extension = caps.get(2).unwrap().as_str();
- let config_string = caps.get(3).unwrap().as_str().into();
+ let config_string = caps.get(3).unwrap().as_str().into();
let spec_type = match extension {
"pxar" => BackupSpecificationType::PXAR,
"img" => BackupSpecificationType::IMAGE,
"log" => BackupSpecificationType::LOGFILE,
_ => bail!("unknown backup source type '{}'", extension),
};
- return Ok(BackupSpecification { archive_name, config_string, spec_type });
+ return Ok(BackupSpecification {
+ archive_name,
+ config_string,
+ spec_type,
+ });
}
bail!("unable to parse backup source specification '{}'", value);
use tokio_stream::wrappers::ReceiverStream;
use pbs_api_types::HumanByte;
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
+use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
+use pbs_tools::crypt_config::CryptConfig;
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use nix::sys::stat::Mode;
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
-use proxmox_sys::fs::{create_path, CreateOptions};
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
use proxmox_schema::api;
+use proxmox_sys::fs::{create_path, CreateOptions};
use pxar::{EntryKind, Metadata};
-use proxmox_async::runtime::block_in_place;
use pbs_datastore::catalog::{self, DirEntryAttribute};
+use proxmox_async::runtime::block_in_place;
-use crate::pxar::Flags;
use crate::pxar::fuse::{Accessor, FileEntry};
+use crate::pxar::Flags;
type CatalogReader = pbs_datastore::catalog::CatalogReader<std::fs::File>;
"find",
CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
)
- .insert(
- "exit",
- CliCommand::new(&API_METHOD_EXIT),
- )
+ .insert("exit", CliCommand::new(&API_METHOD_EXIT))
.insert_help(),
)
}
}
self.path.extend(&entry.name);
- self.extractor.set_path(OsString::from_vec(self.path.clone()));
+ self.extractor
+ .set_path(OsString::from_vec(self.path.clone()));
self.handle_entry(entry).await?;
}
let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
let dir_meta = dir_pxar.entry().metadata().clone();
let create = self.matches && match_result != Some(MatchType::Exclude);
- self.extractor.enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
+ self.extractor
+ .enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
Ok(())
}
pxar::EntryKind::File { size, .. } => {
let file_name = CString::new(entry.file_name().as_bytes())?;
let mut contents = entry.contents().await?;
- self.extractor.async_extract_file(
- &file_name,
- entry.metadata(),
- *size,
- &mut contents,
- )
- .await
+ self.extractor
+ .async_extract_file(&file_name, entry.metadata(), *size, &mut contents)
+ .await
}
_ => {
bail!(
let file_name = CString::new(entry.file_name().as_bytes())?;
match (catalog_attr, entry.kind()) {
(DirEntryAttribute::Symlink, pxar::EntryKind::Symlink(symlink)) => {
- block_in_place(|| self.extractor.extract_symlink(
- &file_name,
- entry.metadata(),
- symlink.as_os_str(),
- ))
+ block_in_place(|| {
+ self.extractor.extract_symlink(
+ &file_name,
+ entry.metadata(),
+ symlink.as_os_str(),
+ )
+ })
}
(DirEntryAttribute::Symlink, _) => {
bail!(
}
(DirEntryAttribute::Hardlink, pxar::EntryKind::Hardlink(hardlink)) => {
- block_in_place(|| self.extractor.extract_hardlink(&file_name, hardlink.as_os_str()))
+ block_in_place(|| {
+ self.extractor
+ .extract_hardlink(&file_name, hardlink.as_os_str())
+ })
}
(DirEntryAttribute::Hardlink, _) => {
bail!(
self.extract_device(attr.clone(), &file_name, device, entry.metadata())
}
- (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => {
- block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
- }
+ (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => block_in_place(|| {
+ self.extractor
+ .extract_special(&file_name, entry.metadata(), 0)
+ }),
(DirEntryAttribute::Fifo, _) => {
bail!("catalog fifo {:?} not a fifo in the archive", self.path());
}
- (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => {
- block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
- }
+ (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => block_in_place(|| {
+ self.extractor
+ .extract_special(&file_name, entry.metadata(), 0)
+ }),
(DirEntryAttribute::Socket, _) => {
bail!(
"catalog socket {:?} not a socket in the archive",
);
}
}
- block_in_place(|| self.extractor.extract_special(file_name, metadata, device.to_dev_t()))
+ block_in_place(|| {
+ self.extractor
+ .extract_special(file_name, metadata, device.to_dev_t())
+ })
}
}
use std::pin::Pin;
use std::task::{Context, Poll};
+use anyhow::Error;
use bytes::BytesMut;
-use anyhow::{Error};
use futures::ready;
use futures::stream::{Stream, TryStream};
impl<S: Unpin> ChunkStream<S> {
pub fn new(input: S, chunk_size: Option<usize>) -> Self {
- Self { input, chunker: Chunker::new(chunk_size.unwrap_or(4*1024*1024)), buffer: BytesMut::new(), scan_pos: 0}
+ Self {
+ input,
+ chunker: Chunker::new(chunk_size.unwrap_or(4 * 1024 * 1024)),
+ buffer: BytesMut::new(),
+ scan_pos: 0,
+ }
}
}
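
// Illustrative sketch, not part of the patch: feeding an in-memory byte
// stream through the dynamic chunker; `None` selects the 4 MiB default
// average chunk size set above, and the bounds match the Stream impl below.
async fn chunk_stream_example() -> Result<(), Error> {
    use futures::stream::{self, TryStreamExt};

    let input = stream::iter(vec![Ok::<_, Error>(vec![0u8; 8 * 1024 * 1024])]);
    let mut chunks = ChunkStream::new(input, None);
    while let Some(chunk) = chunks.try_next().await? {
        println!("chunk: {} bytes", chunk.len());
    }
    Ok(())
}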
S::Ok: AsRef<[u8]>,
S::Error: Into<Error>,
{
-
type Item = Result<BytesMut, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
impl<S: Unpin> FixedChunkStream<S> {
pub fn new(input: S, chunk_size: usize) -> Self {
- Self { input, chunk_size, buffer: BytesMut::new() }
+ Self {
+ input,
+ chunk_size,
+ buffer: BytesMut::new(),
+ }
}
}
{
type Item = Result<BytesMut, S::Error>;
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Result<BytesMut, S::Error>>> {
+ fn poll_next(
+ self: Pin<&mut Self>,
+ cx: &mut Context,
+ ) -> Poll<Option<Result<BytesMut, S::Error>>> {
let this = self.get_mut();
loop {
if this.buffer.len() >= this.chunk_size {
use anyhow::{bail, format_err, Error};
use futures::*;
-use http::Uri;
use http::header::HeaderValue;
+use http::Uri;
use http::{Request, Response};
-use hyper::Body;
use hyper::client::{Client, HttpConnector};
-use openssl::{ssl::{SslConnector, SslMethod}, x509::X509StoreContextRef};
-use serde_json::{json, Value};
+use hyper::Body;
+use openssl::{
+ ssl::{SslConnector, SslMethod},
+ x509::X509StoreContextRef,
+};
use percent_encoding::percent_encode;
+use serde_json::{json, Value};
use xdg::BaseDirectories;
-use proxmox_sys::linux::tty;
-use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
use proxmox_router::HttpError;
+use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
+use proxmox_sys::linux::tty;
+use proxmox_async::broadcast_future::BroadcastFuture;
use proxmox_http::client::{HttpsConnector, RateLimiter};
use proxmox_http::uri::build_authority;
-use proxmox_async::broadcast_future::BroadcastFuture;
-use pbs_api_types::{Authid, Userid, RateLimitConfig};
use pbs_api_types::percent_encoding::DEFAULT_ENCODE_SET;
+use pbs_api_types::{Authid, RateLimitConfig, Userid};
use pbs_tools::json::json_object_to_query;
use pbs_tools::ticket;
}
impl HttpClientOptions {
-
pub fn new_interactive(password: Option<String>, fingerprint: Option<String>) -> Self {
Self {
password,
/// Delete stored ticket data (logout)
pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {
-
let base = BaseDirectories::with_prefix(prefix)?;
// usually /run/user/<uid>/...
map.remove(username.as_str());
}
- replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
+ replace_file(
+ path,
+ data.to_string().as_bytes(),
+ CreateOptions::new().perm(mode),
+ false,
+ )?;
Ok(())
}
fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<(), Error> {
-
let base = BaseDirectories::with_prefix(prefix)?;
// usually ~/.config/<prefix>/fingerprints
}
fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {
-
let base = BaseDirectories::with_prefix(prefix).ok()?;
// usually ~/.config/<prefix>/fingerprints
None
}
-fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> {
-
+fn store_ticket_info(
+ prefix: &str,
+ server: &str,
+ username: &str,
+ ticket: &str,
+ token: &str,
+) -> Result<(), Error> {
let base = BaseDirectories::with_prefix(prefix)?;
// usually /run/user/<uid>/...
}
}
- replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode), false)?;
+ replace_file(
+ path,
+ new_data.to_string().as_bytes(),
+ CreateOptions::new().perm(mode),
+ false,
+ )?;
Ok(())
}
auth_id: &Authid,
mut options: HttpClientOptions,
) -> Result<Self, Error> {
-
let verified_fingerprint = Arc::new(Mutex::new(None));
let mut expected_fingerprint = options.fingerprint.take();
let interactive = options.interactive;
let fingerprint_cache = options.fingerprint_cache;
let prefix = options.prefix.clone();
- ssl_connector_builder.set_verify_callback(openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| {
- match Self::verify_callback(valid, ctx, expected_fingerprint.as_ref(), interactive) {
+ ssl_connector_builder.set_verify_callback(
+ openssl::ssl::SslVerifyMode::PEER,
+ move |valid, ctx| match Self::verify_callback(
+ valid,
+ ctx,
+ expected_fingerprint.as_ref(),
+ interactive,
+ ) {
Ok(None) => true,
Ok(Some(fingerprint)) => {
if fingerprint_cache && prefix.is_some() {
- if let Err(err) = store_fingerprint(
- prefix.as_ref().unwrap(), &server, &fingerprint) {
+ if let Err(err) =
+ store_fingerprint(prefix.as_ref().unwrap(), &server, &fingerprint)
+ {
eprintln!("{}", err);
}
}
*verified_fingerprint.lock().unwrap() = Some(fingerprint);
true
- },
+ }
Err(err) => {
eprintln!("certificate validation failed - {}", err);
false
- },
- }
- });
+ }
+ },
+ );
} else {
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
}
httpc.enforce_http(false); // we want https...
httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
- let mut https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+ let mut https = HttpsConnector::with_connector(
+ httpc,
+ ssl_connector_builder.build(),
+ PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+ );
if let Some(rate_in) = options.limit.rate_in {
let burst_in = options.limit.burst_in.unwrap_or(rate_in).as_u64();
- https.set_read_limiter(Some(Arc::new(Mutex::new(
- RateLimiter::new(rate_in.as_u64(), burst_in)
- ))));
+ https.set_read_limiter(Some(Arc::new(Mutex::new(RateLimiter::new(
+ rate_in.as_u64(),
+ burst_in,
+ )))));
}
if let Some(rate_out) = options.limit.rate_out {
let burst_out = options.limit.burst_out.unwrap_or(rate_out).as_u64();
- https.set_write_limiter(Some(Arc::new(Mutex::new(
- RateLimiter::new(rate_out.as_u64(), burst_out)
- ))));
+ https.set_write_limiter(Some(Arc::new(Mutex::new(RateLimiter::new(
+ rate_out.as_u64(),
+ burst_out,
+ )))));
}
let client = Client::builder()
- //.http2_initial_stream_window_size( (1 << 31) - 2)
- //.http2_initial_connection_window_size( (1 << 31) - 2)
+ //.http2_initial_stream_window_size( (1 << 31) - 2)
+ //.http2_initial_connection_window_size( (1 << 31) - 2)
.build::<_, Body>(https);
let password = options.password.take();
let renewal_future = async move {
loop {
- tokio::time::sleep(Duration::new(60*15, 0)).await; // 15 minutes
+ tokio::time::sleep(Duration::new(60 * 15, 0)).await; // 15 minutes
let (auth_id, ticket) = {
let authinfo = auth2.read().unwrap().clone();
(authinfo.auth_id, authinfo.ticket)
};
- match Self::credentials(client2.clone(), server2.clone(), port, auth_id.user().clone(), ticket).await {
+ match Self::credentials(
+ client2.clone(),
+ server2.clone(),
+ port,
+ auth_id.user().clone(),
+ ticket,
+ )
+ .await
+ {
Ok(auth) => {
if use_ticket_cache && prefix2.is_some() {
- let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
+ let _ = store_ticket_info(
+ prefix2.as_ref().unwrap(),
+ &server2,
+ &auth.auth_id.to_string(),
+ &auth.ticket,
+ &auth.token,
+ );
}
*auth2.write().unwrap() = auth;
- },
+ }
Err(err) => {
eprintln!("re-authentication failed: {}", err);
return;
port,
auth_id.user().clone(),
password,
- ).map_ok({
+ )
+ .map_ok({
let server = server.to_string();
let prefix = options.prefix.clone();
let authinfo = auth.clone();
move |auth| {
if use_ticket_cache && prefix.is_some() {
- let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
+ let _ = store_ticket_info(
+ prefix.as_ref().unwrap(),
+ &server,
+ &auth.auth_id.to_string(),
+ &auth.ticket,
+ &auth.token,
+ );
}
*authinfo.write().unwrap() = auth;
tokio::spawn(renewal_future);
expected_fingerprint: Option<&String>,
interactive: bool,
) -> Result<Option<String>, Error> {
-
if openssl_valid {
return Ok(None);
}
};
let depth = ctx.error_depth();
- if depth != 0 { bail!("context depth != 0") }
+ if depth != 0 {
+ bail!("context depth != 0")
+ }
let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
Ok(fp) => fp,
Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
};
let fp_string = hex::encode(&fp);
- let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
- .collect::<Vec<&str>>().join(":");
+ let fp_string = fp_string
+ .as_bytes()
+ .chunks(2)
+ .map(|v| std::str::from_utf8(v).unwrap())
+ .collect::<Vec<&str>>()
+ .join(":");
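+ // e.g. a sha256 digest (32 bytes) renders as 32 colon-separated hex
+ // pairs: "ab:cd:ef:..."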
if let Some(expected_fingerprint) = expected_fingerprint {
let expected_fingerprint = expected_fingerprint.to_lowercase();
}
pub async fn request(&self, mut req: Request<Body>) -> Result<Value, Error> {
-
let client = self.client.clone();
- let auth = self.login().await?;
+ let auth = self.login().await?;
if auth.auth_id.is_token() {
- let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
- req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+ let enc_api_token = format!(
+ "PBSAPIToken {}:{}",
+ auth.auth_id,
+ percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
+ );
+ req.headers_mut().insert(
+ "Authorization",
+ HeaderValue::from_str(&enc_api_token).unwrap(),
+ );
} else {
- let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
- req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
- req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+ let enc_ticket = format!(
+ "PBSAuthCookie={}",
+ percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
+ );
+ req.headers_mut()
+ .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+ req.headers_mut().insert(
+ "CSRFPreventionToken",
+ HeaderValue::from_str(&auth.token).unwrap(),
+ );
}
Self::api_request(client, req).await
}
- pub async fn get(
- &self,
- path: &str,
- data: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
self.request(req).await
}
- pub async fn delete(
- &self,
- path: &str,
- data: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn delete(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
self.request(req).await
}
- pub async fn post(
- &self,
- path: &str,
- data: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
self.request(req).await
}
- pub async fn put(
- &self,
- path: &str,
- data: Option<Value>,
- ) -> Result<Value, Error> {
+ pub async fn put(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder(&self.server, self.port, "PUT", path, data)?;
self.request(req).await
}
- pub async fn download(
- &self,
- path: &str,
- output: &mut (dyn Write + Send),
- ) -> Result<(), Error> {
+ pub async fn download(&self, path: &str, output: &mut (dyn Write + Send)) -> Result<(), Error> {
let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;
let client = self.client.clone();
let auth = self.login().await?;
- let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
- req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+ let enc_ticket = format!(
+ "PBSAuthCookie={}",
+ percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
+ );
+ req.headers_mut()
+ .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
- let resp = tokio::time::timeout(
- HTTP_TIMEOUT,
- client.request(req)
- )
+ let resp = tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
.await
.map_err(|_| format_err!("http download request timed out"))??;
let status = resp.status();
path: &str,
data: Option<Value>,
) -> Result<Value, Error> {
-
let query = match data {
Some(data) => Some(json_object_to_query(data)?),
None => None,
.uri(url)
.header("User-Agent", "proxmox-backup-client/1.0")
.header("Content-Type", content_type)
- .body(body).unwrap();
+ .body(body)
+ .unwrap();
self.request(req).await
}
mut req: Request<Body>,
protocol_name: String,
) -> Result<(H2Client, futures::future::AbortHandle), Error> {
-
let client = self.client.clone();
- let auth = self.login().await?;
+ let auth = self.login().await?;
if auth.auth_id.is_token() {
- let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
- req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+ let enc_api_token = format!(
+ "PBSAPIToken {}:{}",
+ auth.auth_id,
+ percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
+ );
+ req.headers_mut().insert(
+ "Authorization",
+ HeaderValue::from_str(&enc_api_token).unwrap(),
+ );
} else {
- let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
- req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
- req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+ let enc_ticket = format!(
+ "PBSAuthCookie={}",
+ percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
+ );
+ req.headers_mut()
+ .insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+ req.headers_mut().insert(
+ "CSRFPreventionToken",
+ HeaderValue::from_str(&auth.token).unwrap(),
+ );
}
- req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());
+ req.headers_mut()
+ .insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());
- let resp = tokio::time::timeout(
- HTTP_TIMEOUT,
- client.request(req)
- )
+ let resp = tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
.await
.map_err(|_| format_err!("http upgrade request timed out"))??;
let status = resp.status();
let (h2, connection) = h2::client::Builder::new()
.initial_connection_window_size(max_window_size)
.initial_window_size(max_window_size)
- .max_frame_size(4*1024*1024)
+ .max_frame_size(4 * 1024 * 1024)
.handshake(upgraded)
.await?;
- let connection = connection
- .map_err(|_| eprintln!("HTTP/2.0 connection failed"));
+ let connection = connection.map_err(|_| eprintln!("HTTP/2.0 connection failed"));
let (connection, abort) = futures::future::abortable(connection);
// A cancellable future returns an Option which is None when cancelled and
password: String,
) -> Result<AuthInfo, Error> {
let data = json!({ "username": username, "password": password });
- let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
+ let req = Self::request_builder(
+ &server,
+ port,
+ "POST",
+ "/api2/json/access/ticket",
+ Some(data),
+ )?;
let cred = Self::api_request(client, req).await?;
let auth = AuthInfo {
auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
- token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
+ token: cred["data"]["CSRFPreventionToken"]
+ .as_str()
+ .unwrap()
+ .to_owned(),
};
Ok(auth)
async fn api_request(
client: Client<HttpsConnector>,
- req: Request<Body>
+ req: Request<Body>,
) -> Result<Value, Error> {
-
Self::api_response(
- tokio::time::timeout(
- HTTP_TIMEOUT,
- client.request(req)
- )
+ tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
.await
- .map_err(|_| format_err!("http request timed out"))??
- ).await
+ .map_err(|_| format_err!("http request timed out"))??,
+ )
+ .await
}
// Read-only access to server property
self.port
}
- pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
+ pub fn request_builder(
+ server: &str,
+ port: u16,
+ method: &str,
+ path: &str,
+ data: Option<Value>,
+ ) -> Result<Request<Body>, Error> {
if let Some(data) = data {
if method == "POST" {
let url = build_uri(server, port, path, None)?;
.method(method)
.uri(url)
.header("User-Agent", "proxmox-backup-client/1.0")
- .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
+ .header(
+ hyper::header::CONTENT_TYPE,
+ "application/x-www-form-urlencoded",
+ )
.body(Body::empty())?;
Ok(request)
}
.method(method)
.uri(url)
.header("User-Agent", "proxmox-backup-client/1.0")
- .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
+ .header(
+ hyper::header::CONTENT_TYPE,
+ "application/x-www-form-urlencoded",
+ )
.body(Body::empty())?;
Ok(request)
}
}
-
#[derive(Clone)]
pub struct H2Client {
h2: h2::client::SendRequest<bytes::Bytes>,
}
impl H2Client {
-
pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
Self { h2 }
}
- pub async fn get(
- &self,
- path: &str,
- param: Option<Value>
- ) -> Result<Value, Error> {
+ pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder("localhost", "GET", path, param, None).unwrap();
self.request(req).await
}
- pub async fn put(
- &self,
- path: &str,
- param: Option<Value>
- ) -> Result<Value, Error> {
+ pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder("localhost", "PUT", path, param, None).unwrap();
self.request(req).await
}
- pub async fn post(
- &self,
- path: &str,
- param: Option<Value>
- ) -> Result<Value, Error> {
+ pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
let req = Self::request_builder("localhost", "POST", path, param, None).unwrap();
self.request(req).await
}
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
- let request = Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
+ let request =
+ Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
let mut send_request = self.h2.clone().ready().await?;
.await
}
- async fn request(
- &self,
- request: Request<()>,
- ) -> Result<Value, Error> {
-
+ async fn request(&self, request: Request<()>) -> Result<Value, Error> {
self.send_request(request, None)
- .and_then(move |response| {
- response
- .map_err(Error::from)
- .and_then(Self::h2api_response)
- })
+ .and_then(move |response| response.map_err(Error::from).and_then(Self::h2api_response))
.await
}
request: Request<()>,
data: Option<bytes::Bytes>,
) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
-
- self.h2.clone()
+ self.h2
+ .clone()
.ready()
.map_err(Error::from)
.and_then(move |mut send_request| async move {
})
}
- pub async fn h2api_response(
- response: Response<h2::RecvStream>,
- ) -> Result<Value, Error> {
+ pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
let status = response.status();
let (_head, mut body) = response.into_parts();
let query = json_object_to_query(param)?;
// We detected a problem with hyper around 6000 characters - so we try to stay on the safe side
if query.len() > 4096 {
- bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
+ bail!(
+ "h2 query data too large ({} bytes) - please encode data inside body",
+ query.len()
+ );
}
Some(query)
}
-use std::collections::{HashSet, HashMap};
+use std::collections::{HashMap, HashSet};
use std::ffi::{CStr, CString, OsStr};
use std::fmt;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
+use futures::future::BoxFuture;
+use futures::FutureExt;
use nix::dir::Dir;
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::{FileStat, Mode};
-use futures::future::BoxFuture;
-use futures::FutureExt;
use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
+use pxar::encoder::{LinkOffset, SeqWrite};
use pxar::Metadata;
-use pxar::encoder::{SeqWrite, LinkOffset};
+use proxmox_io::vec;
+use proxmox_lang::c_str;
use proxmox_sys::error::SysError;
-use proxmox_sys::fd::RawFdNum;
use proxmox_sys::fd::Fd;
+use proxmox_sys::fd::RawFdNum;
use proxmox_sys::fs::{self, acl, xattr};
-use proxmox_io::vec;
-use proxmox_lang::c_str;
use pbs_datastore::catalog::BackupCatalogWriter;
use crate::pxar::metadata::errno_is_unsupported;
-use crate::pxar::Flags;
use crate::pxar::tools::assert_single_path_component;
+use crate::pxar::Flags;
/// Pxar options for creating a pxar archive/stream
#[derive(Default, Clone)]
pub verbose: bool,
}
-
fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
let mut fs_stat = std::mem::MaybeUninit::uninit();
let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
file_copy_buffer: vec::undefined(4 * 1024 * 1024),
};
- archiver.archive_dir_contents(&mut encoder, source_dir, true).await?;
+ archiver
+ .archive_dir_contents(&mut encoder, source_dir, true)
+ .await?;
encoder.finish().await?;
Ok(())
}
let file_name = file_entry.name.to_bytes();
if is_root && file_name == b".pxarexclude-cli" {
- self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count).await?;
+ self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count)
+ .await?;
continue;
}
(self.callback)(&file_entry.path)?;
self.path = file_entry.path;
- self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat).await
+ self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat)
+ .await
.map_err(|err| self.wrap_err(err))?;
}
self.path = old_path;
self.patterns.truncate(old_patterns_count);
Ok(())
- }.boxed()
+ }
+ .boxed()
}
/// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
Ok(None)
}
Err(nix::Error::Sys(Errno::EACCES)) => {
- writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
+ writeln!(
+ self.errors,
+ "failed to open file: {:?}: access denied",
+ file_name
+ )?;
Ok(None)
}
Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
continue;
}
Err(other) => Err(Error::from(other)),
- }
+ };
}
}
let _ = writeln!(
self.errors,
"ignoring .pxarexclude after read error in {:?}: {}",
- self.path,
- err,
+ self.path, err,
);
self.patterns.truncate(old_pattern_count);
return Ok(());
) -> Result<(), Error> {
let content = generate_pxar_excludes_cli(&self.patterns[..patterns_count]);
if let Some(ref catalog) = self.catalog {
- catalog.lock().unwrap().add_file(file_name, content.len() as u64, 0)?;
+ catalog
+ .lock()
+ .unwrap()
+ .add_file(file_name, content.len() as u64, 0)?;
}
let mut metadata = Metadata::default();
metadata.stat.mode = pxar::format::mode::IFREG | 0o600;
- let mut file = encoder.create_file(&metadata, ".pxarexclude-cli", content.len() as u64).await?;
+ let mut file = encoder
+ .create_file(&metadata, ".pxarexclude-cli", content.len() as u64)
+ .await?;
file.write_all(&content).await?;
Ok(())
self.entry_counter += 1;
if self.entry_counter > self.entry_limit {
- bail!("exceeded allowed number of file entries (> {})",self.entry_limit);
+ bail!(
+ "exceeded allowed number of file entries (> {})",
+ self.entry_limit
+ );
}
file_list.push(FileListEntry {
name: file_name,
path: full_path,
- stat
+ stat,
});
}
}
fn report_vanished_file(&mut self) -> Result<(), Error> {
- writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
+ writeln!(
+ self.errors,
+ "warning: file vanished while reading: {:?}",
+ self.path
+ )?;
Ok(())
}
None => return Ok(()),
};
- let metadata = get_metadata(fd.as_raw_fd(), stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
+ let metadata = get_metadata(
+ fd.as_raw_fd(),
+ stat,
+ self.flags(),
+ self.fs_magic,
+ &mut self.fs_feature_flags,
+ )?;
let match_path = PathBuf::from("/").join(self.path.clone());
if self
let file_size = stat.st_size as u64;
if let Some(ref catalog) = self.catalog {
- catalog.lock().unwrap().add_file(c_file_name, file_size, stat.st_mtime)?;
+ catalog
+ .lock()
+ .unwrap()
+ .add_file(c_file_name, file_size, stat.st_mtime)?;
}
- let offset: LinkOffset =
- self.add_regular_file(encoder, fd, file_name, &metadata, file_size).await?;
+ let offset: LinkOffset = self
+ .add_regular_file(encoder, fd, file_name, &metadata, file_size)
+ .await?;
if stat.st_nlink > 1 {
- self.hardlinks.insert(link_info, (self.path.clone(), offset));
+ self.hardlinks
+ .insert(link_info, (self.path.clone(), offset));
}
Ok(())
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().start_directory(c_file_name)?;
}
- let result = self.add_directory(encoder, dir, c_file_name, &metadata, stat).await;
+ let result = self
+ .add_directory(encoder, dir, c_file_name, &metadata, stat)
+ .await;
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().end_directory()?;
}
metadata: &Metadata,
stat: &FileStat,
) -> Result<(), Error> {
- Ok(encoder.add_device(
- metadata,
- file_name,
- pxar::format::Device::from_dev_t(stat.st_rdev),
- ).await?)
+ Ok(encoder
+ .add_device(
+ metadata,
+ file_name,
+ pxar::format::Device::from_dev_t(stat.st_rdev),
+ )
+ .await?)
}
}
-fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64, fs_feature_flags: &mut Flags) -> Result<Metadata, Error> {
+fn get_metadata(
+ fd: RawFd,
+ stat: &FileStat,
+ flags: Flags,
+ fs_magic: i64,
+ fs_feature_flags: &mut Flags,
+) -> Result<Metadata, Error> {
// required for some of these
let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
Ok(meta)
}
-fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
+fn get_fcaps(
+ meta: &mut Metadata,
+ fd: RawFd,
+ flags: Flags,
+ fs_feature_flags: &mut Flags,
+) -> Result<(), Error> {
if !flags.contains(Flags::WITH_FCAPS) {
return Ok(());
}
Err(Errno::EOPNOTSUPP) => {
fs_feature_flags.remove(Flags::WITH_XATTRS);
return Ok(());
- },
+ }
Err(Errno::EBADF) => return Ok(()), // symlinks
Err(err) => bail!("failed to read xattrs: {}", err),
};
Ok(())
}
-fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
+fn get_acl(
+ metadata: &mut Metadata,
+ proc_path: &Path,
+ flags: Flags,
+ fs_feature_flags: &mut Flags,
+) -> Result<(), Error> {
if !flags.contains(Flags::WITH_ACL) {
return Ok(());
}
}
}
-// form /usr/include/linux/fs.h
-const FS_APPEND_FL: c_long = 0x0000_0020;
-const FS_NOATIME_FL: c_long = 0x0000_0080;
-const FS_COMPR_FL: c_long = 0x0000_0004;
-const FS_NOCOW_FL: c_long = 0x0080_0000;
-const FS_NODUMP_FL: c_long = 0x0000_0040;
-const FS_DIRSYNC_FL: c_long = 0x0001_0000;
-const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
-const FS_SYNC_FL: c_long = 0x0000_0008;
-const FS_NOCOMP_FL: c_long = 0x0000_0400;
-const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
-
-pub(crate) const INITIAL_FS_FLAGS: c_long =
- FS_NOATIME_FL
- | FS_COMPR_FL
- | FS_NOCOW_FL
- | FS_NOCOMP_FL
- | FS_PROJINHERIT_FL;
+#[rustfmt::skip]
+mod fs_flags {
+ use libc::c_long;
+ // from /usr/include/linux/fs.h
+ pub const FS_APPEND_FL: c_long = 0x0000_0020;
+ pub const FS_NOATIME_FL: c_long = 0x0000_0080;
+ pub const FS_COMPR_FL: c_long = 0x0000_0004;
+ pub const FS_NOCOW_FL: c_long = 0x0080_0000;
+ pub const FS_NODUMP_FL: c_long = 0x0000_0040;
+ pub const FS_DIRSYNC_FL: c_long = 0x0001_0000;
+ pub const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
+ pub const FS_SYNC_FL: c_long = 0x0000_0008;
+ pub const FS_NOCOMP_FL: c_long = 0x0000_0400;
+ pub const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
+
+ // from /usr/include/linux/msdos_fs.h
+ pub const ATTR_HIDDEN: u32 = 2;
+ pub const ATTR_SYS: u32 = 4;
+ pub const ATTR_ARCH: u32 = 32;
+
+ pub(crate) const INITIAL_FS_FLAGS: c_long =
+ FS_NOATIME_FL
+ | FS_COMPR_FL
+ | FS_NOCOW_FL
+ | FS_NOCOMP_FL
+ | FS_PROJINHERIT_FL;
+
+}
+use fs_flags::*; // for code formatting/rustfmt
#[rustfmt::skip]
const CHATTR_MAP: [(Flags, c_long); 10] = [
( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];
-// from /usr/include/linux/msdos_fs.h
-const ATTR_HIDDEN: u32 = 2;
-const ATTR_SYS: u32 = 4;
-const ATTR_ARCH: u32 = 32;
-
#[rustfmt::skip]
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
use proxmox_sys::linux::magic::*;
match magic {
MSDOS_SUPER_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_FAT_ATTRS
- },
+ Flags::WITH_2SEC_TIME | Flags::WITH_READ_ONLY | Flags::WITH_FAT_ATTRS
+ }
EXT4_SUPER_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_FLAG_APPEND |
- Flags::WITH_FLAG_NOATIME |
- Flags::WITH_FLAG_NODUMP |
- Flags::WITH_FLAG_DIRSYNC |
- Flags::WITH_FLAG_IMMUTABLE |
- Flags::WITH_FLAG_SYNC |
- Flags::WITH_XATTRS |
- Flags::WITH_ACL |
- Flags::WITH_SELINUX |
- Flags::WITH_FCAPS |
- Flags::WITH_QUOTA_PROJID
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_FLAG_APPEND
+ | Flags::WITH_FLAG_NOATIME
+ | Flags::WITH_FLAG_NODUMP
+ | Flags::WITH_FLAG_DIRSYNC
+ | Flags::WITH_FLAG_IMMUTABLE
+ | Flags::WITH_FLAG_SYNC
+ | Flags::WITH_XATTRS
+ | Flags::WITH_ACL
+ | Flags::WITH_SELINUX
+ | Flags::WITH_FCAPS
+ | Flags::WITH_QUOTA_PROJID
+ }
XFS_SUPER_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_FLAG_APPEND |
- Flags::WITH_FLAG_NOATIME |
- Flags::WITH_FLAG_NODUMP |
- Flags::WITH_FLAG_IMMUTABLE |
- Flags::WITH_FLAG_SYNC |
- Flags::WITH_XATTRS |
- Flags::WITH_ACL |
- Flags::WITH_SELINUX |
- Flags::WITH_FCAPS |
- Flags::WITH_QUOTA_PROJID
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_FLAG_APPEND
+ | Flags::WITH_FLAG_NOATIME
+ | Flags::WITH_FLAG_NODUMP
+ | Flags::WITH_FLAG_IMMUTABLE
+ | Flags::WITH_FLAG_SYNC
+ | Flags::WITH_XATTRS
+ | Flags::WITH_ACL
+ | Flags::WITH_SELINUX
+ | Flags::WITH_FCAPS
+ | Flags::WITH_QUOTA_PROJID
+ }
ZFS_SUPER_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_FLAG_APPEND |
- Flags::WITH_FLAG_NOATIME |
- Flags::WITH_FLAG_NODUMP |
- Flags::WITH_FLAG_DIRSYNC |
- Flags::WITH_FLAG_IMMUTABLE |
- Flags::WITH_FLAG_SYNC |
- Flags::WITH_XATTRS |
- Flags::WITH_ACL |
- Flags::WITH_SELINUX |
- Flags::WITH_FCAPS |
- Flags::WITH_QUOTA_PROJID
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_FLAG_APPEND
+ | Flags::WITH_FLAG_NOATIME
+ | Flags::WITH_FLAG_NODUMP
+ | Flags::WITH_FLAG_DIRSYNC
+ | Flags::WITH_FLAG_IMMUTABLE
+ | Flags::WITH_FLAG_SYNC
+ | Flags::WITH_XATTRS
+ | Flags::WITH_ACL
+ | Flags::WITH_SELINUX
+ | Flags::WITH_FCAPS
+ | Flags::WITH_QUOTA_PROJID
+ }
BTRFS_SUPER_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_FLAG_APPEND |
- Flags::WITH_FLAG_NOATIME |
- Flags::WITH_FLAG_COMPR |
- Flags::WITH_FLAG_NOCOW |
- Flags::WITH_FLAG_NODUMP |
- Flags::WITH_FLAG_DIRSYNC |
- Flags::WITH_FLAG_IMMUTABLE |
- Flags::WITH_FLAG_SYNC |
- Flags::WITH_FLAG_NOCOMP |
- Flags::WITH_XATTRS |
- Flags::WITH_ACL |
- Flags::WITH_SELINUX |
- Flags::WITH_SUBVOLUME |
- Flags::WITH_SUBVOLUME_RO |
- Flags::WITH_FCAPS
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_FLAG_APPEND
+ | Flags::WITH_FLAG_NOATIME
+ | Flags::WITH_FLAG_COMPR
+ | Flags::WITH_FLAG_NOCOW
+ | Flags::WITH_FLAG_NODUMP
+ | Flags::WITH_FLAG_DIRSYNC
+ | Flags::WITH_FLAG_IMMUTABLE
+ | Flags::WITH_FLAG_SYNC
+ | Flags::WITH_FLAG_NOCOMP
+ | Flags::WITH_XATTRS
+ | Flags::WITH_ACL
+ | Flags::WITH_SELINUX
+ | Flags::WITH_SUBVOLUME
+ | Flags::WITH_SUBVOLUME_RO
+ | Flags::WITH_FCAPS
+ }
TMPFS_MAGIC => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_ACL |
- Flags::WITH_SELINUX
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_ACL
+ | Flags::WITH_SELINUX
+ }
// FUSE mounts are special as the supported feature set
// is not clear a priori.
- FUSE_SUPER_MAGIC => {
- Flags::WITH_FUSE
- },
+ FUSE_SUPER_MAGIC => Flags::WITH_FUSE,
_ => {
- Flags::WITH_2SEC_TIME |
- Flags::WITH_READ_ONLY |
- Flags::WITH_PERMISSIONS |
- Flags::WITH_SYMLINKS |
- Flags::WITH_DEVICE_NODES |
- Flags::WITH_FIFOS |
- Flags::WITH_SOCKETS |
- Flags::WITH_XATTRS |
- Flags::WITH_ACL |
- Flags::WITH_FCAPS
- },
+ Flags::WITH_2SEC_TIME
+ | Flags::WITH_READ_ONLY
+ | Flags::WITH_PERMISSIONS
+ | Flags::WITH_SYMLINKS
+ | Flags::WITH_DEVICE_NODES
+ | Flags::WITH_FIFOS
+ | Flags::WITH_SOCKETS
+ | Flags::WITH_XATTRS
+ | Flags::WITH_ACL
+ | Flags::WITH_FCAPS
+ }
}
}
}
async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
let entry = unsafe {
- self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
+ self.accessor
+ .open_file_at_range(&self.get_lookup(inode)?.entry_range_info)
+ .await?
};
to_stat(inode, &entry)
}
async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
let lookup = self.get_lookup(inode)?;
- let metadata = self
- .open_entry(&lookup)
- .await?
- .into_entry()
- .into_metadata();
+ let metadata = self.open_entry(&lookup).await?.into_entry().into_metadata();
let mut xattrs = metadata.xattrs;
}
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
- matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
+ matches!(
+ errno,
+ Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL
+ )
}
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;
-use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
+use pxar::{format::StatxTimestamp, mode, Entry, EntryKind, Metadata};
/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
+use futures::future::{AbortHandle, Abortable};
use futures::stream::Stream;
-use futures::future::{Abortable, AbortHandle};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
},
Some(catalog),
options,
- ).await {
+ )
+ .await
+ {
let mut error = error2.lock().unwrap();
*error = Some(err.to_string());
}
) -> Result<Self, Error> {
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
- Self::new(
- dir,
- catalog,
- options,
- )
+ Self::new(dir, catalog, options)
}
}
-use std::future::Future;
use std::collections::HashMap;
+use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use proxmox_async::runtime::block_on;
-use pbs_tools::crypt_config::CryptConfig;
use pbs_api_types::CryptMode;
use pbs_datastore::data_blob::DataBlob;
-use pbs_datastore::read_chunk::ReadChunk;
use pbs_datastore::read_chunk::AsyncReadChunk;
+use pbs_datastore::read_chunk::ReadChunk;
+use pbs_tools::crypt_config::CryptConfig;
use super::BackupReader;
pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
- self.client
- .download_chunk(digest, &mut chunk_data)
- .await?;
+ self.client.download_chunk(digest, &mut chunk_data).await?;
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
match self.crypt_mode {
- CryptMode::Encrypt => {
- match chunk.crypt_mode()? {
- CryptMode::Encrypt => Ok(chunk),
- CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
+ CryptMode::Encrypt => match chunk.crypt_mode()? {
+ CryptMode::Encrypt => Ok(chunk),
+ CryptMode::SignOnly | CryptMode::None => {
+ bail!("Index and chunk CryptMode don't match.")
}
},
- CryptMode::SignOnly | CryptMode::None => {
- match chunk.crypt_mode()? {
- CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
- CryptMode::SignOnly | CryptMode::None => Ok(chunk),
- }
+ CryptMode::SignOnly | CryptMode::None => match chunk.crypt_mode()? {
+ CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
+ CryptMode::SignOnly | CryptMode::None => Ok(chunk),
},
}
}
let chunk = Self::read_raw_chunk(self, digest).await?;
- let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
+ let raw_data =
+ chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
let use_cache = self.cache_hint.contains_key(digest);
if use_cache {
-use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
+use std::sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc,
+};
use anyhow::{bail, Error};
+use futures::*;
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
-use futures::*;
use proxmox_router::cli::format_and_print_result;
upid_str: &str,
strip_date: bool,
) -> Result<(), Error> {
-
let mut signal_stream = signal(SignalKind::interrupt())?;
let abort_count = Arc::new(AtomicUsize::new(0));
let abort_count2 = Arc::clone(&abort_count);
};
let request_future = async move {
-
let mut start = 1;
let limit = 500;
loop {
-
let abort = abort_count.load(Ordering::Relaxed);
if abort > 0 {
- let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
+ let path = format!(
+ "api2/json/nodes/localhost/tasks/{}",
+ percent_encode_component(upid_str)
+ );
let _ = client.delete(&path, None).await?;
}
let param = json!({ "start": start, "limit": limit, "test-status": true });
- let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
+ let path = format!(
+ "api2/json/nodes/localhost/tasks/{}/log",
+ percent_encode_component(upid_str)
+ );
let result = client.get(&path, Some(param)).await?;
let active = result["active"].as_bool().unwrap();
for item in data {
let n = item["n"].as_u64().unwrap();
let t = item["t"].as_str().unwrap();
- if n != start { bail!("got wrong line number in response data ({} != {}", n, start); }
+ if n != start {
+ bail!("got wrong line number in response data ({} != {}", n, start);
+ }
if strip_date && t.len() > 27 && &t[25..27] == ": " {
let line = &t[27..];
println!("{}", line);
break;
}
} else if lines != limit {
- bail!("got wrong number of lines from server ({} != {})", lines, limit);
+ bail!(
+ "got wrong number of lines from server ({} != {})",
+ lines,
+ limit
+ );
}
}
Ok(())
};
- futures::select!{
+ futures::select! {
request = request_future.fuse() => request?,
abort = abort_future.fuse() => abort?,
};
use std::convert::TryFrom;
-use std::path::PathBuf;
-use std::os::unix::io::{FromRawFd, RawFd};
use std::io::Read;
+use std::os::unix::io::{FromRawFd, RawFd};
+use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox_sys::linux::tty;
-use proxmox_sys::fs::file_get_contents;
use proxmox_schema::*;
+use proxmox_sys::fs::file_get_contents;
+use proxmox_sys::linux::tty;
use pbs_api_types::CryptMode;
let key_fd = match param.get("keyfd") {
Some(Value::Number(key_fd)) => Some(
- RawFd::try_from(key_fd
- .as_i64()
- .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+ RawFd::try_from(
+ key_fd
+ .as_i64()
+ .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?,
)
- .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+ .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?,
),
Some(_) => bail!("bad --keyfd parameter type"),
None => None,
let master_pubkey_fd = match param.get("master-pubkey-fd") {
Some(Value::Number(key_fd)) => Some(
- RawFd::try_from(key_fd
- .as_i64()
- .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
+ RawFd::try_from(
+ key_fd
+ .as_i64()
+ .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?,
)
- .map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
+ .map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?,
),
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
None => None,
if keep_keyfd_open {
// don't close fd if requested, and try to reset seek position
std::mem::forget(input);
- unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
+ unsafe {
+ libc::lseek(fd, 0, libc::SEEK_SET);
+ }
}
Some(KeyWithSource::from_fd(data))
}
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
- use serde_json::json;
use proxmox_sys::fs::{replace_file, CreateOptions};
+ use serde_json::json;
- let some_key = vec![1;1];
- let default_key = vec![2;1];
+ let some_key = vec![1; 1];
+ let default_key = vec![2; 1];
- let some_master_key = vec![3;1];
- let default_master_key = vec![4;1];
+ let some_master_key = vec![3; 1];
+ let default_master_key = vec![4; 1];
let testdir = create_testdir("key_source")?;
};
replace_file(&keypath, &some_key, CreateOptions::default(), false)?;
- replace_file(&master_keypath, &some_master_key, CreateOptions::default(), false)?;
+ replace_file(
+ &master_keypath,
+ &some_master_key,
+ CreateOptions::default(),
+ false,
+ )?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
- let res = crypto_parameters(&json!({"keyfile": keypath}));
+ let res = crypto_parameters(&json!({ "keyfile": keypath }));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+ assert!(crypto_parameters(&json!({ "keyfile": invalid_keypath })).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err()
+ );
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err()
+ );
// now set a default key
- unsafe { set_test_encryption_key(Ok(Some(default_key))); }
+ unsafe {
+ set_test_encryption_key(Ok(Some(default_key)));
+ }
// and repeat
assert_eq!(res.unwrap(), default_key_res);
// keyfile param == key from keyfile
- let res = crypto_parameters(&json!({"keyfile": keypath}));
+ let res = crypto_parameters(&json!({ "keyfile": keypath }));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+ assert!(crypto_parameters(&json!({ "keyfile": invalid_keypath })).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err()
+ );
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err()
+ );
// now make default key retrieval error
- unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
+ unsafe {
+ set_test_encryption_key(Err(format_err!("test error")));
+ }
// and repeat
assert!(crypto_parameters(&json!({})).is_err());
// keyfile param == key from keyfile
- let res = crypto_parameters(&json!({"keyfile": keypath}));
+ let res = crypto_parameters(&json!({ "keyfile": keypath }));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+ assert!(crypto_parameters(&json!({ "keyfile": invalid_keypath })).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err()
+ );
+ assert!(
+ crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err()
+ );
// now remove default key again
- unsafe { set_test_encryption_key(Ok(None)); }
+ unsafe {
+ set_test_encryption_key(Ok(None));
+ }
// set a default master key
- unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key))); }
+ unsafe {
+ set_test_default_master_pubkey(Ok(Some(default_master_key)));
+ }
// and use an explicit master key
- assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
+ assert!(crypto_parameters(&json!({ "master-pubkey-file": master_keypath })).is_err());
// just a default == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
// same with fallback to default master key
- let res = crypto_parameters(&json!({"keyfile": keypath}));
+ let res = crypto_parameters(&json!({ "keyfile": keypath }));
assert_eq!(res.unwrap(), some_key_default_master_res);
// crypt mode none == error
- assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
+ assert!(crypto_parameters(
+ &json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})
+ )
+ .is_err());
// with just default master key == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt without enc key == error
- assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
+ assert!(crypto_parameters(
+ &json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})
+ )
+ .is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
- assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
+ assert!(crypto_parameters(
+ &json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})
+ )
+ .is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode encrypt with keyfile == key from keyfile with correct mode
- let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
+ let res = crypto_parameters(
+ &json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}),
+ );
assert_eq!(res.unwrap(), some_key_some_master_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// invalid master keyfile parameter always errors when a key is passed, even with a valid
// default master key
- assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
+ assert!(
+ crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath}))
+ .is_err()
+ );
+ assert!(crypto_parameters(
+ &json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})
+ )
+ .is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
- assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
+ assert!(crypto_parameters(
+ &json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})
+ )
+ .is_err());
Ok(())
}
//! Shared tools useful for common CLI clients.
use std::collections::HashMap;
+use std::env::VarError::{NotPresent, NotUnicode};
use std::fs::File;
+use std::io::{BufRead, BufReader};
use std::os::unix::io::FromRawFd;
-use std::env::VarError::{NotUnicode, NotPresent};
-use std::io::{BufReader, BufRead};
use std::process::Command;
use anyhow::{bail, format_err, Context, Error};
use serde_json::{json, Value};
use xdg::BaseDirectories;
-use proxmox_schema::*;
use proxmox_router::cli::{complete_file_name, shellword_split};
+use proxmox_schema::*;
use proxmox_sys::fs::file_get_json;
-use pbs_api_types::{BACKUP_REPO_URL, Authid, RateLimitConfig, UserWithTokens};
+use pbs_api_types::{Authid, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL};
use pbs_datastore::BackupDir;
use pbs_tools::json::json_object_to_query;
///
/// Only return the first line of data (without CRLF).
pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
-
let firstline = |data: String| -> String {
match data.lines().next() {
Some(line) => line.to_string(),
match std::env::var(base_name) {
Ok(p) => return Ok(Some(firstline(p))),
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", base_name)),
- Err(NotPresent) => {},
+ Err(NotPresent) => {}
};
let env_name = format!("{}_FD", base_name);
match std::env::var(&env_name) {
Ok(fd_str) => {
- let fd: i32 = fd_str.parse()
- .map_err(|err| format_err!("unable to parse file descriptor in ENV({}): {}", env_name, err))?;
+ let fd: i32 = fd_str.parse().map_err(|err| {
+ format_err!(
+ "unable to parse file descriptor in ENV({}): {}",
+ env_name,
+ err
+ )
+ })?;
let mut file = unsafe { File::from_raw_fd(fd) };
return Ok(Some(firstline_file(&mut file)?));
}
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
- Err(NotPresent) => {},
+ Err(NotPresent) => {}
}
let env_name = format!("{}_FILE", base_name);
return Ok(Some(firstline_file(&mut file)?));
}
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
- Err(NotPresent) => {},
+ Err(NotPresent) => {}
}
let env_name = format!("{}_CMD", base_name);
return Ok(Some(firstline(output)));
}
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", env_name)),
- Err(NotPresent) => {},
+ Err(NotPresent) => {}
}
Ok(None)
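// Hypothetical usage of the lookup order implemented above: the plain
// variable wins, then the `_FD`, `_FILE` and `_CMD` variants are tried in
// turn, and only the first line of the secret is returned.
fn secret_lookup_example() -> Result<(), Error> {
    std::env::set_var("PBS_PASSWORD", "top-secret\nsecond line is ignored");
    let secret = get_secret_from_env("PBS_PASSWORD")?;
    assert_eq!(secret.as_deref(), Some("top-secret"));
    Ok(())
}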
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
- let options = HttpClientOptions::new_interactive(password, fingerprint)
- .rate_limit(rate_limit);
+ let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit);
HttpClient::new(server, port, auth_id, options)
}
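// A sketch of how the helper above might be called; `connect_do` is an
// assumed name for the enclosing function (it is not visible in this hunk),
// so this is illustrative only:
//
//     let client = connect_do(repo.host(), repo.port(), repo.auth_id(), rate_limit)?;
//     let status = client.get("api2/json/status", None).await?;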
/// like get, but simply ignore errors and return Null instead
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
-
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD).unwrap_or(None);
// ticket cache, but no questions asked
- let options = HttpClientOptions::new_interactive(password, fingerprint)
- .interactive(false);
+ let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false);
let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
Ok(v) => v,
}
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
-
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
proxmox_async::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
-pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-
+pub async fn complete_group_or_snapshot_do(
+ arg: &str,
+ param: &HashMap<String, String>,
+) -> Vec<String> {
if arg.matches('/').count() < 2 {
let groups = complete_backup_group_do(param).await;
let mut result = vec![];
}
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
-
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
if let Some(list) = data.as_array() {
for item in list {
- if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
- (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
- {
+ if let (Some(backup_id), Some(backup_type), Some(backup_time)) = (
+ item["backup-id"].as_str(),
+ item["backup-type"].as_str(),
+ item["backup-time"].as_i64(),
+ ) {
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
}
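// Illustrative only: each entry pushed above is a snapshot's relative path.
// The timestamp here is arbitrary, and the exact formatting is whatever
// BackupDir produces:
if let Ok(snapshot) = BackupDir::new("vm", "100", 1633024800) {
    // yields something like "vm/100/2021-09-30T18:00:00Z"
    println!("{}", snapshot.relative_path().display());
}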
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
-
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
};
let snapshot: BackupDir = match param.get("snapshot") {
- Some(path) => {
- match path.parse() {
- Ok(v) => v,
- _ => return result,
- }
- }
+ Some(path) => match path.parse() {
+ Ok(v) => v,
+ _ => return result,
+ },
_ => return result,
};
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
- })).unwrap();
+ }))
+ .unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
}
pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-
let mut result = vec![];
let mut size = 64;
loop {
result.push(size.to_string());
size *= 2;
- if size > 4096 { break; }
+ if size > 4096 {
+ break;
+ }
}
result
}
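// Quick check of the completions produced above (the values are chunk sizes,
// presumably in KiB); a hypothetical test, not present in the source:
#[test]
fn chunk_size_completions() {
    let sizes = complete_chunk_size("", &HashMap::new());
    assert_eq!(sizes, ["64", "128", "256", "512", "1024", "2048", "4096"]);
}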
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
-
let mut result = vec![];
let repo = match extract_repository_from_map(param) {