git.proxmox.com Git - proxmox-backup.git/commitdiff
move client to pbs-client subcrate
author    Wolfgang Bumiller <w.bumiller@proxmox.com>
          Mon, 19 Jul 2021 08:50:18 +0000 (10:50 +0200)
committer Wolfgang Bumiller <w.bumiller@proxmox.com>
          Mon, 19 Jul 2021 10:58:43 +0000 (12:58 +0200)
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
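
For code built on top of the client, the visible change is the import path: the
`proxmox_backup::client` and `proxmox_backup::pxar` modules become the standalone
`pbs-client` crate, and the user/token API types move into `pbs-api-types`. The
typical adjustment, mirroring the `examples/` hunks below:

    // before: client code lived inside the proxmox-backup binary crate
    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};

    // after: standalone workspace crates
    use pbs_api_types::Authid;
    use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
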
100 files changed:
Cargo.toml
Makefile
examples/download-speed.rs
examples/upload-speed.rs
pbs-api-types/src/lib.rs
pbs-api-types/src/user.rs [new file with mode: 0644]
pbs-api-types/src/userid.rs
pbs-client/Cargo.toml [new file with mode: 0644]
pbs-client/src/backup_reader.rs [new file with mode: 0644]
pbs-client/src/backup_repo.rs [new file with mode: 0644]
pbs-client/src/backup_specification.rs [new file with mode: 0644]
pbs-client/src/backup_writer.rs [new file with mode: 0644]
pbs-client/src/catalog_shell.rs [new file with mode: 0644]
pbs-client/src/http_client.rs [new file with mode: 0644]
pbs-client/src/lib.rs [new file with mode: 0644]
pbs-client/src/merge_known_chunks.rs [new file with mode: 0644]
pbs-client/src/pipe_to_stream.rs [new file with mode: 0644]
pbs-client/src/pxar/create.rs [new file with mode: 0644]
pbs-client/src/pxar/dir_stack.rs [new file with mode: 0644]
pbs-client/src/pxar/extract.rs [new file with mode: 0644]
pbs-client/src/pxar/flags.rs [new file with mode: 0644]
pbs-client/src/pxar/fuse.rs [new file with mode: 0644]
pbs-client/src/pxar/metadata.rs [new file with mode: 0644]
pbs-client/src/pxar/mod.rs [new file with mode: 0644]
pbs-client/src/pxar/tools.rs [new file with mode: 0644]
pbs-client/src/pxar_backup_stream.rs [new file with mode: 0644]
pbs-client/src/remote_chunk_reader.rs [new file with mode: 0644]
pbs-client/src/task_log.rs [new file with mode: 0644]
pbs-client/src/tools/key_source.rs [new file with mode: 0644]
pbs-client/src/tools/mod.rs [new file with mode: 0644]
pbs-client/src/vsock_client.rs [new file with mode: 0644]
pbs-tools/Cargo.toml
pbs-tools/src/acl.rs [new file with mode: 0644]
pbs-tools/src/compression.rs [new file with mode: 0644]
pbs-tools/src/fs.rs
pbs-tools/src/lib.rs
pbs-tools/src/ops.rs [new file with mode: 0644]
pbs-tools/src/str.rs
pbs-tools/src/xattr.rs [new file with mode: 0644]
pbs-tools/src/zip.rs [new file with mode: 0644]
src/api2/access/user.rs
src/api2/admin/datastore.rs
src/api2/backup/mod.rs
src/api2/config/remote.rs
src/api2/pull.rs
src/api2/reader/mod.rs
src/api2/types/mod.rs
src/backup/catalog_shell.rs [deleted file]
src/backup/mod.rs
src/bin/dump-catalog-shell-cli.rs
src/bin/proxmox-backup-client.rs
src/bin/proxmox-backup-manager.rs
src/bin/proxmox-file-restore.rs
src/bin/proxmox-restore-daemon.rs
src/bin/proxmox-tape.rs
src/bin/proxmox_backup_client/benchmark.rs
src/bin/proxmox_backup_client/catalog.rs
src/bin/proxmox_backup_client/key.rs
src/bin/proxmox_backup_client/mount.rs
src/bin/proxmox_backup_client/snapshot.rs
src/bin/proxmox_backup_client/task.rs
src/bin/proxmox_backup_manager/datastore.rs
src/bin/proxmox_client_tools/key_source.rs [deleted file]
src/bin/proxmox_client_tools/mod.rs [deleted file]
src/bin/proxmox_file_restore/block_driver.rs
src/bin/proxmox_file_restore/block_driver_qemu.rs
src/bin/proxmox_file_restore/qemu_helper.rs
src/bin/proxmox_restore_daemon/api.rs
src/bin/proxmox_tape/backup_job.rs
src/bin/pxar.rs
src/client/backup_reader.rs [deleted file]
src/client/backup_repo.rs [deleted file]
src/client/backup_specification.rs [deleted file]
src/client/backup_writer.rs [deleted file]
src/client/http_client.rs [deleted file]
src/client/merge_known_chunks.rs [deleted file]
src/client/mod.rs [deleted file]
src/client/pipe_to_stream.rs [deleted file]
src/client/pxar_backup_stream.rs [deleted file]
src/client/remote_chunk_reader.rs [deleted file]
src/client/task_log.rs [deleted file]
src/client/vsock_client.rs [deleted file]
src/config/user.rs
src/lib.rs
src/pxar/create.rs [deleted file]
src/pxar/dir_stack.rs [deleted file]
src/pxar/extract.rs [deleted file]
src/pxar/flags.rs [deleted file]
src/pxar/fuse.rs [deleted file]
src/pxar/metadata.rs [deleted file]
src/pxar/mod.rs [deleted file]
src/pxar/tools.rs [deleted file]
src/server/pull.rs
src/server/rest.rs
src/tools/acl.rs [deleted file]
src/tools/compression.rs
src/tools/mod.rs
src/tools/xattr.rs [deleted file]
src/tools/zip.rs [deleted file]
tests/catar.rs

index 9c41605ac1f3b262be802bb55cbd5cc1857d1c7c..8dcd50d15ae7a8e9bce3d26c1c55ecd9e28acb15 100644 (file)
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -22,6 +22,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
 [workspace]
 members = [
     "pbs-buildcfg",
+    "pbs-client",
     "pbs-datastore",
     "pbs-runtime",
     "pbs-systemd",
@@ -95,6 +96,7 @@ proxmox-openid = "0.6.0"
 
 pbs-api-types = { path = "pbs-api-types" }
 pbs-buildcfg = { path = "pbs-buildcfg" }
+pbs-client = { path = "pbs-client" }
 pbs-datastore = { path = "pbs-datastore" }
 pbs-runtime = { path = "pbs-runtime" }
 pbs-systemd = { path = "pbs-systemd" }
index 0ca0457eda747267aaca9f52a17762175aab359b..e0fcaf62e4378aa1b4b55073355e1d91c71979dc 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -33,6 +33,7 @@ RESTORE_BIN := \
 SUBCRATES := \
        pbs-api-types \
        pbs-buildcfg \
+       pbs-client \
        pbs-datastore \
        pbs-runtime \
        pbs-systemd \
index 90b007e06cc40e487b77ae1d7fb70a0706067918..471d30f0bf85fca05a081ecd0eac800e0de61165 100644 (file)
--- a/examples/download-speed.rs
+++ b/examples/download-speed.rs
@@ -2,8 +2,8 @@ use std::io::Write;
 
 use anyhow::{Error};
 
-use proxmox_backup::api2::types::Authid;
-use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
+use pbs_api_types::Authid;
+use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
 
 pub struct DummyWriter {
     bytes: usize,
index c2c549cae2abded0b3c8b76f65d6b693d1e17475..7c2fd0e7dbb52530ea03e24b7993f876b96d527e 100644 (file)
--- a/examples/upload-speed.rs
+++ b/examples/upload-speed.rs
@@ -1,7 +1,7 @@
 use anyhow::{Error};
 
-use proxmox_backup::api2::types::Authid;
-use proxmox_backup::client::*;
+use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
+use pbs_api_types::Authid;
 
 async fn upload_speed() -> Result<f64, Error> {
 
index c07699d11a126ac70182dcb48f3d0dd15f62a045..00d1e3a53c803fe5f7ae586c7649fb1ff4a3bf5d 100644 (file)
--- a/pbs-api-types/src/lib.rs
+++ b/pbs-api-types/src/lib.rs
@@ -40,6 +40,13 @@ pub use userid::{Tokenname, TokennameRef};
 pub use userid::{Username, UsernameRef};
 pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
 
+#[macro_use]
+mod user;
+pub use user::{ApiToken, User, UserWithTokens};
+pub use user::{
+    EMAIL_SCHEMA, ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
+};
+
 pub mod upid;
 pub use upid::UPID;
 
@@ -146,35 +153,33 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
         .format(&FINGERPRINT_SHA256_FORMAT)
         .schema();
 
-pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new(
-    "Number of daily backups to keep.")
+pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
     .minimum(1)
     .schema();
 
-pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new(
-    "Number of hourly backups to keep.")
-    .minimum(1)
-    .schema();
+pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
+    IntegerSchema::new("Number of hourly backups to keep.")
+        .minimum(1)
+        .schema();
 
-pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new(
-    "Number of backups to keep.")
+pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
     .minimum(1)
     .schema();
 
-pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new(
-    "Number of monthly backups to keep.")
-    .minimum(1)
-    .schema();
+pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
+    IntegerSchema::new("Number of monthly backups to keep.")
+        .minimum(1)
+        .schema();
 
-pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new(
-    "Number of weekly backups to keep.")
-    .minimum(1)
-    .schema();
+pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
+    IntegerSchema::new("Number of weekly backups to keep.")
+        .minimum(1)
+        .schema();
 
-pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
-    "Number of yearly backups to keep.")
-    .minimum(1)
-    .schema();
+pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
+    IntegerSchema::new("Number of yearly backups to keep.")
+        .minimum(1)
+        .schema();
 
 pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
@@ -186,6 +191,14 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
 
+pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
+    "Prevent changes if current configuration file has different \
+    SHA256 digest. This can be used to prevent concurrent \
+    modifications.",
+)
+.format(&PVE_CONFIG_DIGEST_FORMAT)
+.schema();
+
 pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
 
 /// API schema format definition for repository URLs
@@ -411,7 +424,7 @@ pub struct GroupListItem {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub owner: Option<Authid>,
     /// The first line from group "notes"
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
 }
 
diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs
new file mode 100644 (file)
index 0000000..9111cce
--- /dev/null
@@ -0,0 +1,205 @@
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+use proxmox::api::schema::{BooleanSchema, IntegerSchema, Schema, StringSchema};
+
+use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
+use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
+
+pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
+    "Enable the account (default). You can set this to '0' to disable the account.")
+    .default(true)
+    .schema();
+
+pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
+    "Account expiration date (seconds since epoch). '0' means no expiration date.")
+    .default(0)
+    .minimum(0)
+    .schema();
+
+pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(2)
+    .max_length(64)
+    .schema();
+
+pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(2)
+    .max_length(64)
+    .schema();
+
+pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(2)
+    .max_length(64)
+    .schema();
+
+#[api(
+    properties: {
+        userid: {
+            type: Userid,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        enable: {
+            optional: true,
+            schema: ENABLE_USER_SCHEMA,
+        },
+        expire: {
+            optional: true,
+            schema: EXPIRE_USER_SCHEMA,
+        },
+        firstname: {
+            optional: true,
+            schema: FIRST_NAME_SCHEMA,
+        },
+        lastname: {
+            schema: LAST_NAME_SCHEMA,
+            optional: true,
+         },
+        email: {
+            schema: EMAIL_SCHEMA,
+            optional: true,
+        },
+        tokens: {
+            type: Array,
+            optional: true,
+            description: "List of user's API tokens.",
+            items: {
+                type: ApiToken
+            },
+        },
+    }
+)]
+#[derive(Serialize,Deserialize)]
+/// User properties with added list of ApiTokens
+pub struct UserWithTokens {
+    pub userid: Userid,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub enable: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub expire: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub firstname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub lastname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub email: Option<String>,
+    #[serde(skip_serializing_if="Vec::is_empty", default)]
+    pub tokens: Vec<ApiToken>,
+}
+
+#[api(
+    properties: {
+        tokenid: {
+            schema: PROXMOX_TOKEN_ID_SCHEMA,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        enable: {
+            optional: true,
+            schema: ENABLE_USER_SCHEMA,
+        },
+        expire: {
+            optional: true,
+            schema: EXPIRE_USER_SCHEMA,
+        },
+    }
+)]
+#[derive(Serialize,Deserialize)]
+/// ApiToken properties.
+pub struct ApiToken {
+    pub tokenid: Authid,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub enable: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub expire: Option<i64>,
+}
+
+impl ApiToken {
+    pub fn is_active(&self) -> bool {
+        if !self.enable.unwrap_or(true) {
+            return false;
+        }
+        if let Some(expire) = self.expire {
+            let now = proxmox::tools::time::epoch_i64();
+            if expire > 0 && expire <= now {
+                return false;
+            }
+        }
+        true
+    }
+}
+
+#[api(
+    properties: {
+        userid: {
+            type: Userid,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        enable: {
+            optional: true,
+            schema: ENABLE_USER_SCHEMA,
+        },
+        expire: {
+            optional: true,
+            schema: EXPIRE_USER_SCHEMA,
+        },
+        firstname: {
+            optional: true,
+            schema: FIRST_NAME_SCHEMA,
+        },
+        lastname: {
+            schema: LAST_NAME_SCHEMA,
+            optional: true,
+         },
+        email: {
+            schema: EMAIL_SCHEMA,
+            optional: true,
+        },
+    }
+)]
+#[derive(Serialize,Deserialize)]
+/// User properties.
+pub struct User {
+    pub userid: Userid,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub enable: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub expire: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub firstname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub lastname: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub email: Option<String>,
+}
+
+impl User {
+    pub fn is_active(&self) -> bool {
+        if !self.enable.unwrap_or(true) {
+            return false;
+        }
+        if let Some(expire) = self.expire {
+            let now = proxmox::tools::time::epoch_i64();
+            if expire > 0 && expire <= now {
+                return false;
+            }
+        }
+        true
+    }
+}
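
A minimal sketch of the semantics encoded above (values are illustrative): an unset
`enable` counts as enabled and an `expire` of zero means "no expiration date", so
`is_active()` returns true here.

    use pbs_api_types::User;

    fn example() -> Result<(), anyhow::Error> {
        let user = User {
            userid: "root@pam".parse()?,
            comment: None,
            enable: None,    // unset => enabled (schema default is true)
            expire: Some(0), // 0 => no expiration date
            firstname: None,
            lastname: None,
            email: None,
        };
        assert!(user.is_active());
        Ok(())
    }
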
index 08335b93a9e1371902920aa513037cdea109f38b..e931181e6c8eeb282cdf385b263d7aa2966b76b7 100644 (file)
--- a/pbs-api-types/src/userid.rs
+++ b/pbs-api-types/src/userid.rs
@@ -98,7 +98,6 @@ pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
         .max_length(32);
 pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
 
-
 #[api(
     type: String,
     format: &PROXMOX_USER_NAME_FORMAT,
diff --git a/pbs-client/Cargo.toml b/pbs-client/Cargo.toml
new file mode 100644 (file)
index 0000000..c5dbf14
--- /dev/null
@@ -0,0 +1,40 @@
+[package]
+name = "pbs-client"
+version = "0.1.0"
+authors = ["Wolfgang Bumiller <w.bumiller@proxmox.com>"]
+edition = "2018"
+description = "The main proxmox backup client crate"
+
+[dependencies]
+anyhow = "1.0"
+bitflags = "1.2.1"
+bytes = "1.0"
+futures = "0.3"
+h2 = { version = "0.3", features = [ "stream" ] }
+http = "0.2"
+hyper = { version = "0.14", features = [ "full" ] }
+lazy_static = "1.4"
+libc = "0.2"
+nix = "0.19.1"
+openssl = "0.10"
+percent-encoding = "2.1"
+pin-project = "1.0"
+regex = "1.2"
+rustyline = "7"
+serde_json = "1.0"
+tokio = { version = "1.6", features = [ "fs", "signal" ] }
+tokio-stream = "0.1.0"
+tower-service = "0.3.0"
+xdg = "2.2"
+
+pathpatterns = "0.1.2"
+proxmox = { version = "0.11.5", default-features = false, features = [ "cli" ] }
+proxmox-fuse = "0.1.1"
+proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
+pxar = { version = "0.10.1", features = [ "tokio-io" ] }
+
+pbs-api-types = { path = "../pbs-api-types" }
+pbs-buildcfg = { path = "../pbs-buildcfg" }
+pbs-datastore = { path = "../pbs-datastore" }
+pbs-runtime = { path = "../pbs-runtime" }
+pbs-tools = { path = "../pbs-tools" }
diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs
new file mode 100644 (file)
index 0000000..1702d44
--- /dev/null
@@ -0,0 +1,229 @@
+use anyhow::{format_err, Error};
+use std::io::{Write, Seek, SeekFrom};
+use std::fs::File;
+use std::sync::Arc;
+use std::os::unix::fs::OpenOptionsExt;
+
+use futures::future::AbortHandle;
+use serde_json::{json, Value};
+
+use proxmox::tools::digest_to_hex;
+
+use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, CryptConfig, BackupManifest};
+use pbs_datastore::data_blob::DataBlob;
+use pbs_datastore::data_blob_reader::DataBlobReader;
+use pbs_datastore::dynamic_index::DynamicIndexReader;
+use pbs_datastore::fixed_index::FixedIndexReader;
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
+use pbs_tools::sha::sha256;
+
+use super::{HttpClient, H2Client};
+
+/// Backup Reader
+pub struct BackupReader {
+    h2: H2Client,
+    abort: AbortHandle,
+    crypt_config: Option<Arc<CryptConfig>>,
+}
+
+impl Drop for BackupReader {
+
+    fn drop(&mut self) {
+        self.abort.abort();
+    }
+}
+
+impl BackupReader {
+
+    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
+        Arc::new(Self { h2, abort, crypt_config})
+    }
+
+    /// Create a new instance by upgrading the connection at '/api2/json/reader'
+    pub async fn start(
+        client: HttpClient,
+        crypt_config: Option<Arc<CryptConfig>>,
+        datastore: &str,
+        backup_type: &str,
+        backup_id: &str,
+        backup_time: i64,
+        debug: bool,
+    ) -> Result<Arc<BackupReader>, Error> {
+
+        let param = json!({
+            "backup-type": backup_type,
+            "backup-id": backup_id,
+            "backup-time": backup_time,
+            "store": datastore,
+            "debug": debug,
+        });
+        let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
+
+        let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
+
+        Ok(BackupReader::new(h2, abort, crypt_config))
+    }
+
+    /// Execute a GET request
+    pub async fn get(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
+        self.h2.get(path, param).await
+    }
+
+    /// Execute a PUT request
+    pub async fn put(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
+        self.h2.put(path, param).await
+    }
+
+    /// Execute a POST request
+    pub async fn post(
+        &self,
+        path: &str,
+        param: Option<Value>,
+    ) -> Result<Value, Error> {
+        self.h2.post(path, param).await
+    }
+
+    /// Execute a GET request and send output to a writer
+    pub async fn download<W: Write + Send>(
+        &self,
+        file_name: &str,
+        output: W,
+    ) -> Result<(), Error> {
+        let path = "download";
+        let param = json!({ "file-name": file_name });
+        self.h2.download(path, Some(param), output).await
+    }
+
+    /// Execute a special GET request and send output to a writer
+    ///
+    /// This writes random data, and is only useful to test download speed.
+    pub async fn speedtest<W: Write + Send>(
+        &self,
+        output: W,
+    ) -> Result<(), Error> {
+        self.h2.download("speedtest", None, output).await
+    }
+
+    /// Download a specific chunk
+    pub async fn download_chunk<W: Write + Send>(
+        &self,
+        digest: &[u8; 32],
+        output: W,
+    ) -> Result<(), Error> {
+        let path = "chunk";
+        let param = json!({ "digest": digest_to_hex(digest) });
+        self.h2.download(path, Some(param), output).await
+    }
+
+    pub fn force_close(self) {
+        self.abort.abort();
+    }
+
+    /// Download backup manifest (index.json)
+    ///
+    /// The manifest signature is verified if we have a crypt_config.
+    pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
+
+        let mut raw_data = Vec::with_capacity(64 * 1024);
+        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
+        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
+        // no expected digest available
+        let data = blob.decode(None, None)?;
+
+        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+        Ok((manifest, data))
+    }
+
+    /// Download a .blob file
+    ///
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
+    /// the provided manifest.
+    pub async fn download_blob(
+        &self,
+        manifest: &BackupManifest,
+        name: &str,
+    ) -> Result<DataBlobReader<'_, File>, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        self.download(name, &mut tmpfile).await?;
+
+        tmpfile.seek(SeekFrom::Start(0))?;
+        let (csum, size) = sha256(&mut tmpfile)?;
+        manifest.verify_file(name, &csum, size)?;
+
+        tmpfile.seek(SeekFrom::Start(0))?;
+
+        DataBlobReader::new(tmpfile, self.crypt_config.clone())
+    }
+
+    /// Download dynamic index file
+    ///
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
+    /// the provided manifest.
+    pub async fn download_dynamic_index(
+        &self,
+        manifest: &BackupManifest,
+        name: &str,
+    ) -> Result<DynamicIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        self.download(name, &mut tmpfile).await?;
+
+        let index = DynamicIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
+
+    // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(name, &csum, size)?;
+
+        Ok(index)
+    }
+
+    /// Download fixed index file
+    ///
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
+    /// the provided manifest.
+    pub async fn download_fixed_index(
+        &self,
+        manifest: &BackupManifest,
+        name: &str,
+    ) -> Result<FixedIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        self.download(name, &mut tmpfile).await?;
+
+        let index = FixedIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
+
+    // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(name, &csum, size)?;
+
+        Ok(index)
+    }
+}
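
A hedged usage sketch for the reader above (datastore and snapshot values are
placeholders; `client` is an already authenticated `HttpClient`):

    use pbs_client::{BackupReader, HttpClient};

    async fn fetch_manifest(client: HttpClient) -> Result<(), anyhow::Error> {
        // upgrades the HTTP connection at /api2/json/reader
        let reader = BackupReader::start(
            client, None, "store1", "vm", "100", 1626684618, false,
        ).await?;
        // fetches index.json; the signature is verified if a crypt config is set
        let (_manifest, _raw_data) = reader.download_manifest().await?;
        Ok(())
    }
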
diff --git a/pbs-client/src/backup_repo.rs b/pbs-client/src/backup_repo.rs
new file mode 100644 (file)
index 0000000..dc9b8ec
--- /dev/null
@@ -0,0 +1,101 @@
+use std::convert::TryFrom;
+use std::fmt;
+
+use anyhow::{format_err, Error};
+
+use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
+
+/// Reference remote backup locations
+///
+
+#[derive(Debug)]
+pub struct BackupRepository {
+    /// The user name used for Authentication
+    auth_id: Option<Authid>,
+    /// The host name or IP address
+    host: Option<String>,
+    /// The port
+    port: Option<u16>,
+    /// The name of the datastore
+    store: String,
+}
+
+impl BackupRepository {
+
+    pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
+        let host = match host {
+            Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
+                Some(format!("[{}]", host))
+            },
+            other => other,
+        };
+        Self { auth_id, host, port, store }
+    }
+
+    pub fn auth_id(&self) -> &Authid {
+        if let Some(ref auth_id) = self.auth_id {
+            return auth_id;
+        }
+
+        &Authid::root_auth_id()
+    }
+
+    pub fn user(&self) -> &Userid {
+        if let Some(auth_id) = &self.auth_id {
+            return auth_id.user();
+        }
+
+        Userid::root_userid()
+    }
+
+    pub fn host(&self) -> &str {
+        if let Some(ref host) = self.host {
+            return host;
+        }
+        "localhost"
+    }
+
+    pub fn port(&self) -> u16 {
+        if let Some(port) = self.port {
+            return port;
+        }
+        8007
+    }
+
+    pub fn store(&self) -> &str {
+        &self.store
+    }
+}
+
+impl fmt::Display for BackupRepository {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match (&self.auth_id, &self.host, self.port) {
+            (Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
+            (None, Some(host), None) => write!(f, "{}:{}", host, self.store),
+            (None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
+            (None, None, None) => write!(f, "{}", self.store),
+        }
+    }
+}
+
+impl std::str::FromStr for BackupRepository {
+    type Err = Error;
+
+    /// Parse a repository URL.
+    ///
+    /// This parses strings like `user@host:datastore`. The `user` and
+    /// `host` parts are optional, where `host` defaults to the local
+    /// host, and `user` defaults to `root@pam`.
+    fn from_str(url: &str) -> Result<Self, Self::Err> {
+
+        let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
+            .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
+
+        Ok(Self {
+            auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
+            host: cap.get(2).map(|m| m.as_str().to_owned()),
+            port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
+            store: cap[4].to_owned(),
+        })
+    }
+}
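
The `FromStr`/`Display` pair above lets repository specs round-trip; a small sketch
using the documented defaults (`root@pam`, `localhost`, port 8007):

    use pbs_client::BackupRepository;

    fn example() -> Result<(), anyhow::Error> {
        let repo: BackupRepository = "root@pam@localhost:store1".parse()?;
        assert_eq!(repo.port(), 8007); // no port given => default

        // user and host are optional and fall back to the defaults
        let local: BackupRepository = "store1".parse()?;
        assert_eq!(local.host(), "localhost");
        Ok(())
    }
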
diff --git a/pbs-client/src/backup_specification.rs b/pbs-client/src/backup_specification.rs
new file mode 100644 (file)
index 0000000..627c183
--- /dev/null
@@ -0,0 +1,39 @@
+use anyhow::{bail, Error};
+
+use proxmox::api::schema::*;
+
+proxmox::const_regex! {
+    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
+}
+
+pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
+    "Backup source specification ([<label>:<path>]).")
+    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+    .schema();
+
+pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
+
+pub struct BackupSpecification {
+    pub archive_name: String, // left part
+    pub config_string: String, // right part
+    pub spec_type: BackupSpecificationType,
+}
+
+pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
+
+    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
+        let archive_name = caps.get(1).unwrap().as_str().into();
+        let extension = caps.get(2).unwrap().as_str();
+        let config_string = caps.get(3).unwrap().as_str().into();
+        let spec_type = match extension {
+            "pxar" => BackupSpecificationType::PXAR,
+            "img" => BackupSpecificationType::IMAGE,
+            "conf" => BackupSpecificationType::CONFIG,
+            "log" => BackupSpecificationType::LOGFILE,
+            _ => bail!("unknown backup source type '{}'", extension),
+        };
+        return Ok(BackupSpecification { archive_name, config_string, spec_type });
+    }
+
+    bail!("unable to parse backup source specification '{}'", value);
+}
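
Per the regex above, a spec splits into a label (including the extension, which
selects the source type) and a path. A sketch, assuming these items are re-exported
at the crate root like the other modules:

    use pbs_client::{parse_backup_specification, BackupSpecificationType};

    fn example() -> Result<(), anyhow::Error> {
        let spec = parse_backup_specification("root.pxar:/")?;
        assert_eq!(spec.archive_name, "root.pxar"); // capture group 1
        assert_eq!(spec.config_string, "/");        // capture group 3
        assert!(matches!(spec.spec_type, BackupSpecificationType::PXAR));
        Ok(())
    }
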
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
new file mode 100644 (file)
index 0000000..5c15f27
--- /dev/null
@@ -0,0 +1,842 @@
+use std::collections::HashSet;
+use std::future::Future;
+use std::os::unix::fs::OpenOptionsExt;
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, format_err, Error};
+use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
+use futures::stream::{Stream, StreamExt, TryStreamExt};
+use serde_json::{json, Value};
+use tokio::io::AsyncReadExt;
+use tokio::sync::{mpsc, oneshot};
+use tokio_stream::wrappers::ReceiverStream;
+
+use proxmox::tools::digest_to_hex;
+
+use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1, CryptConfig};
+use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
+use pbs_datastore::dynamic_index::DynamicIndexReader;
+use pbs_datastore::fixed_index::FixedIndexReader;
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
+use pbs_tools::format::HumanByte;
+
+use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
+
+use super::{H2Client, HttpClient};
+
+pub struct BackupWriter {
+    h2: H2Client,
+    abort: AbortHandle,
+    verbose: bool,
+    crypt_config: Option<Arc<CryptConfig>>,
+}
+
+impl Drop for BackupWriter {
+    fn drop(&mut self) {
+        self.abort.abort();
+    }
+}
+
+pub struct BackupStats {
+    pub size: u64,
+    pub csum: [u8; 32],
+}
+
+/// Options for uploading blobs/streams to the server
+#[derive(Default, Clone)]
+pub struct UploadOptions {
+    pub previous_manifest: Option<Arc<BackupManifest>>,
+    pub compress: bool,
+    pub encrypt: bool,
+    pub fixed_size: Option<u64>,
+}
+
+struct UploadStats {
+    chunk_count: usize,
+    chunk_reused: usize,
+    size: usize,
+    size_reused: usize,
+    size_compressed: usize,
+    duration: std::time::Duration,
+    csum: [u8; 32],
+}
+
+type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
+type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
+
+impl BackupWriter {
+    fn new(
+        h2: H2Client,
+        abort: AbortHandle,
+        crypt_config: Option<Arc<CryptConfig>>,
+        verbose: bool,
+    ) -> Arc<Self> {
+        Arc::new(Self {
+            h2,
+            abort,
+            crypt_config,
+            verbose,
+        })
+    }
+
+    // FIXME: extract into (flattened) parameter struct?
+    #[allow(clippy::too_many_arguments)]
+    pub async fn start(
+        client: HttpClient,
+        crypt_config: Option<Arc<CryptConfig>>,
+        datastore: &str,
+        backup_type: &str,
+        backup_id: &str,
+        backup_time: i64,
+        debug: bool,
+        benchmark: bool,
+    ) -> Result<Arc<BackupWriter>, Error> {
+        let param = json!({
+            "backup-type": backup_type,
+            "backup-id": backup_id,
+            "backup-time": backup_time,
+            "store": datastore,
+            "debug": debug,
+            "benchmark": benchmark
+        });
+
+        let req = HttpClient::request_builder(
+            client.server(),
+            client.port(),
+            "GET",
+            "/api2/json/backup",
+            Some(param),
+        )
+        .unwrap();
+
+        let (h2, abort) = client
+            .start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
+            .await?;
+
+        Ok(BackupWriter::new(h2, abort, crypt_config, debug))
+    }
+
+    pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+        self.h2.get(path, param).await
+    }
+
+    pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+        self.h2.put(path, param).await
+    }
+
+    pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
+        self.h2.post(path, param).await
+    }
+
+    pub async fn upload_post(
+        &self,
+        path: &str,
+        param: Option<Value>,
+        content_type: &str,
+        data: Vec<u8>,
+    ) -> Result<Value, Error> {
+        self.h2
+            .upload("POST", path, param, content_type, data)
+            .await
+    }
+
+    pub async fn send_upload_request(
+        &self,
+        method: &str,
+        path: &str,
+        param: Option<Value>,
+        content_type: &str,
+        data: Vec<u8>,
+    ) -> Result<h2::client::ResponseFuture, Error> {
+        let request =
+            H2Client::request_builder("localhost", method, path, param, Some(content_type))
+                .unwrap();
+        let response_future = self
+            .h2
+            .send_request(request, Some(bytes::Bytes::from(data.clone())))
+            .await?;
+        Ok(response_future)
+    }
+
+    pub async fn upload_put(
+        &self,
+        path: &str,
+        param: Option<Value>,
+        content_type: &str,
+        data: Vec<u8>,
+    ) -> Result<Value, Error> {
+        self.h2.upload("PUT", path, param, content_type, data).await
+    }
+
+    pub async fn finish(self: Arc<Self>) -> Result<(), Error> {
+        let h2 = self.h2.clone();
+
+        h2.post("finish", None)
+            .map_ok(move |_| {
+                self.abort.abort();
+            })
+            .await
+    }
+
+    pub fn cancel(&self) {
+        self.abort.abort();
+    }
+
+    pub async fn upload_blob<R: std::io::Read>(
+        &self,
+        mut reader: R,
+        file_name: &str,
+    ) -> Result<BackupStats, Error> {
+        let mut raw_data = Vec::new();
+        // fixme: avoid loading into memory
+        reader.read_to_end(&mut raw_data)?;
+
+        let csum = openssl::sha::sha256(&raw_data);
+        let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
+        let size = raw_data.len() as u64;
+        let _value = self
+            .h2
+            .upload(
+                "POST",
+                "blob",
+                Some(param),
+                "application/octet-stream",
+                raw_data,
+            )
+            .await?;
+        Ok(BackupStats { size, csum })
+    }
+
+    pub async fn upload_blob_from_data(
+        &self,
+        data: Vec<u8>,
+        file_name: &str,
+        options: UploadOptions,
+    ) -> Result<BackupStats, Error> {
+        let blob = match (options.encrypt, &self.crypt_config) {
+            (false, _) => DataBlob::encode(&data, None, options.compress)?,
+            (true, None) => bail!("requested encryption without a crypt config"),
+            (true, Some(crypt_config)) => {
+                DataBlob::encode(&data, Some(crypt_config), options.compress)?
+            }
+        };
+
+        let raw_data = blob.into_inner();
+        let size = raw_data.len() as u64;
+
+        let csum = openssl::sha::sha256(&raw_data);
+        let param = json!({"encoded-size": size, "file-name": file_name });
+        let _value = self
+            .h2
+            .upload(
+                "POST",
+                "blob",
+                Some(param),
+                "application/octet-stream",
+                raw_data,
+            )
+            .await?;
+        Ok(BackupStats { size, csum })
+    }
+
+    pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
+        &self,
+        src_path: P,
+        file_name: &str,
+        options: UploadOptions,
+    ) -> Result<BackupStats, Error> {
+        let src_path = src_path.as_ref();
+
+        let mut file = tokio::fs::File::open(src_path)
+            .await
+            .map_err(|err| format_err!("unable to open file {:?} - {}", src_path, err))?;
+
+        let mut contents = Vec::new();
+
+        file.read_to_end(&mut contents)
+            .await
+            .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
+
+        self.upload_blob_from_data(contents, file_name, options)
+            .await
+    }
+
+    pub async fn upload_stream(
+        &self,
+        archive_name: &str,
+        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
+        options: UploadOptions,
+    ) -> Result<BackupStats, Error> {
+        let known_chunks = Arc::new(Mutex::new(HashSet::new()));
+
+        let mut param = json!({ "archive-name": archive_name });
+        let prefix = if let Some(size) = options.fixed_size {
+            param["size"] = size.into();
+            "fixed"
+        } else {
+            "dynamic"
+        };
+
+        if options.encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
+        let index_path = format!("{}_index", prefix);
+        let close_path = format!("{}_close", prefix);
+
+        if let Some(manifest) = options.previous_manifest {
+            // try, but ignore errors
+            match ArchiveType::from_path(archive_name) {
+                Ok(ArchiveType::FixedIndex) => {
+                    let _ = self
+                        .download_previous_fixed_index(
+                            archive_name,
+                            &manifest,
+                            known_chunks.clone(),
+                        )
+                        .await;
+                }
+                Ok(ArchiveType::DynamicIndex) => {
+                    let _ = self
+                        .download_previous_dynamic_index(
+                            archive_name,
+                            &manifest,
+                            known_chunks.clone(),
+                        )
+                        .await;
+                }
+                _ => { /* do nothing */ }
+            }
+        }
+
+        let wid = self
+            .h2
+            .post(&index_path, Some(param))
+            .await?
+            .as_u64()
+            .unwrap();
+
+        let upload_stats = Self::upload_chunk_info_stream(
+            self.h2.clone(),
+            wid,
+            stream,
+            &prefix,
+            known_chunks.clone(),
+            if options.encrypt {
+                self.crypt_config.clone()
+            } else {
+                None
+            },
+            options.compress,
+            self.verbose,
+        )
+        .await?;
+
+        let size_dirty = upload_stats.size - upload_stats.size_reused;
+        let size: HumanByte = upload_stats.size.into();
+        let archive = if self.verbose {
+            archive_name.to_string()
+        } else {
+            pbs_tools::format::strip_server_file_extension(archive_name)
+        };
+        if archive_name != CATALOG_NAME {
+            let speed: HumanByte =
+                ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
+            let size_dirty: HumanByte = size_dirty.into();
+            let size_compressed: HumanByte = upload_stats.size_compressed.into();
+            println!(
+                "{}: had to backup {} of {} (compressed {}) in {:.2}s",
+                archive,
+                size_dirty,
+                size,
+                size_compressed,
+                upload_stats.duration.as_secs_f64()
+            );
+            println!("{}: average backup speed: {}/s", archive, speed);
+        } else {
+            println!("Uploaded backup catalog ({})", size);
+        }
+
+        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
+            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
+            let reused: HumanByte = upload_stats.size_reused.into();
+            println!(
+                "{}: backup was done incrementally, reused {} ({:.1}%)",
+                archive, reused, reused_percent
+            );
+        }
+        if self.verbose && upload_stats.chunk_count > 0 {
+            println!(
+                "{}: Reused {} from {} chunks.",
+                archive, upload_stats.chunk_reused, upload_stats.chunk_count
+            );
+            println!(
+                "{}: Average chunk size was {}.",
+                archive,
+                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
+            );
+            println!(
+                "{}: Average time per request: {} microseconds.",
+                archive,
+                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
+            );
+        }
+
+        let param = json!({
+            "wid": wid ,
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
+        });
+        let _value = self.h2.post(&close_path, Some(param)).await?;
+        Ok(BackupStats {
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
+        })
+    }
+
+    fn response_queue(
+        verbose: bool,
+    ) -> (
+        mpsc::Sender<h2::client::ResponseFuture>,
+        oneshot::Receiver<Result<(), Error>>,
+    ) {
+        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
+        let (verify_result_tx, verify_result_rx) = oneshot::channel();
+
+        // FIXME: check if this works as expected as replacement for the combinator below?
+        // tokio::spawn(async move {
+        //     let result: Result<(), Error> = (async move {
+        //         while let Some(response) = verify_queue_rx.recv().await {
+        //             match H2Client::h2api_response(response.await?).await {
+        //                 Ok(result) => println!("RESPONSE: {:?}", result),
+        //                 Err(err) => bail!("pipelined request failed: {}", err),
+        //             }
+        //         }
+        //         Ok(())
+        //     }).await;
+        //     let _ignore_closed_channel = verify_result_tx.send(result);
+        // });
+        // old code for reference?
+        tokio::spawn(
+            ReceiverStream::new(verify_queue_rx)
+                .map(Ok::<_, Error>)
+                .try_for_each(move |response: h2::client::ResponseFuture| {
+                    response
+                        .map_err(Error::from)
+                        .and_then(H2Client::h2api_response)
+                        .map_ok(move |result| {
+                            if verbose {
+                                println!("RESPONSE: {:?}", result)
+                            }
+                        })
+                        .map_err(|err| format_err!("pipelined request failed: {}", err))
+                })
+                .map(|result| {
+                    let _ignore_closed_channel = verify_result_tx.send(result);
+                }),
+        );
+
+        (verify_queue_tx, verify_result_rx)
+    }
+
+    fn append_chunk_queue(
+        h2: H2Client,
+        wid: u64,
+        path: String,
+        verbose: bool,
+    ) -> (UploadQueueSender, UploadResultReceiver) {
+        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
+        let (verify_result_tx, verify_result_rx) = oneshot::channel();
+
+        // FIXME: async-block-ify this code!
+        tokio::spawn(
+            ReceiverStream::new(verify_queue_rx)
+                .map(Ok::<_, Error>)
+                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
+                    match (response, merged_chunk_info) {
+                        (Some(response), MergedChunkInfo::Known(list)) => {
+                            Either::Left(
+                                response
+                                    .map_err(Error::from)
+                                    .and_then(H2Client::h2api_response)
+                                    .and_then(move |_result| {
+                                        future::ok(MergedChunkInfo::Known(list))
+                                    })
+                            )
+                        }
+                        (None, MergedChunkInfo::Known(list)) => {
+                            Either::Right(future::ok(MergedChunkInfo::Known(list)))
+                        }
+                        _ => unreachable!(),
+                    }
+                })
+                .merge_known_chunks()
+                .and_then(move |merged_chunk_info| {
+                    match merged_chunk_info {
+                        MergedChunkInfo::Known(chunk_list) => {
+                            let mut digest_list = vec![];
+                            let mut offset_list = vec![];
+                            for (offset, digest) in chunk_list {
+                                digest_list.push(digest_to_hex(&digest));
+                                offset_list.push(offset);
+                            }
+                            if verbose { println!("append chunks list len ({})", digest_list.len()); }
+                            let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
+                            let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
+                            let param_data = bytes::Bytes::from(param.to_string().into_bytes());
+                            let upload_data = Some(param_data);
+                            h2.send_request(request, upload_data)
+                                .and_then(move |response| {
+                                    response
+                                        .map_err(Error::from)
+                                        .and_then(H2Client::h2api_response)
+                                        .map_ok(|_| ())
+                                })
+                                .map_err(|err| format_err!("pipelined request failed: {}", err))
+                        }
+                        _ => unreachable!(),
+                    }
+                })
+                .try_for_each(|_| future::ok(()))
+                .map(|result| {
+                      let _ignore_closed_channel = verify_result_tx.send(result);
+                })
+        );
+
+        (verify_queue_tx, verify_result_rx)
+    }
+
+    pub async fn download_previous_fixed_index(
+        &self,
+        archive_name: &str,
+        manifest: &BackupManifest,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    ) -> Result<FixedIndexReader, Error> {
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        let param = json!({ "archive-name": archive_name });
+        self.h2
+            .download("previous", Some(param), &mut tmpfile)
+            .await?;
+
+        let index = FixedIndexReader::new(tmpfile).map_err(|err| {
+            format_err!("unable to read fixed index '{}' - {}", archive_name, err)
+        })?;
+        // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
+        }
+
+        if self.verbose {
+            println!(
+                "{}: known chunks list length is {}",
+                archive_name,
+                index.index_count()
+            );
+        }
+
+        Ok(index)
+    }
+
+    pub async fn download_previous_dynamic_index(
+        &self,
+        archive_name: &str,
+        manifest: &BackupManifest,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    ) -> Result<DynamicIndexReader, Error> {
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        let param = json!({ "archive-name": archive_name });
+        self.h2
+            .download("previous", Some(param), &mut tmpfile)
+            .await?;
+
+        let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
+            format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
+        })?;
+        // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
+        }
+
+        if self.verbose {
+            println!(
+                "{}: known chunks list length is {}",
+                archive_name,
+                index.index_count()
+            );
+        }
+
+        Ok(index)
+    }
+
+    /// Retrieve backup time of last backup
+    pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
+        let data = self.h2.get("previous_backup_time", None).await?;
+        serde_json::from_value(data).map_err(|err| {
+            format_err!(
+                "Failed to parse backup time value returned by server - {}",
+                err
+            )
+        })
+    }
+
+    /// Download backup manifest (index.json) of last backup
+    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
+        let mut raw_data = Vec::with_capacity(64 * 1024);
+
+        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
+        self.h2
+            .download("previous", Some(param), &mut raw_data)
+            .await?;
+
+        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
+        // no expected digest available
+        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
+
+        let manifest =
+            BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+        Ok(manifest)
+    }
+
+    // We have no `self` here for `h2` and `verbose`; the only other argument shared with
+    // another function on this path is `wid`, so those three could go into a struct, but
+    // there is no real benefit since this is a private method.
+    #[allow(clippy::too_many_arguments)]
+    fn upload_chunk_info_stream(
+        h2: H2Client,
+        wid: u64,
+        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
+        prefix: &str,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+        crypt_config: Option<Arc<CryptConfig>>,
+        compress: bool,
+        verbose: bool,
+    ) -> impl Future<Output = Result<UploadStats, Error>> {
+        let total_chunks = Arc::new(AtomicUsize::new(0));
+        let total_chunks2 = total_chunks.clone();
+        let known_chunk_count = Arc::new(AtomicUsize::new(0));
+        let known_chunk_count2 = known_chunk_count.clone();
+
+        let stream_len = Arc::new(AtomicUsize::new(0));
+        let stream_len2 = stream_len.clone();
+        let compressed_stream_len = Arc::new(AtomicU64::new(0));
+        let compressed_stream_len2 = compressed_stream_len.clone();
+        let reused_len = Arc::new(AtomicUsize::new(0));
+        let reused_len2 = reused_len.clone();
+
+        let append_chunk_path = format!("{}_index", prefix);
+        let upload_chunk_path = format!("{}_chunk", prefix);
+        let is_fixed_chunk_size = prefix == "fixed";
+
+        let (upload_queue, upload_result) =
+            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);
+
+        let start_time = std::time::Instant::now();
+
+        let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
+        let index_csum_2 = index_csum.clone();
+
+        stream
+            .and_then(move |data| {
+                let chunk_len = data.len();
+
+                total_chunks.fetch_add(1, Ordering::SeqCst);
+                let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
+
+                let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
+
+                if let Some(ref crypt_config) = crypt_config {
+                    chunk_builder = chunk_builder.crypt_config(crypt_config);
+                }
+
+                let mut known_chunks = known_chunks.lock().unwrap();
+                let digest = chunk_builder.digest();
+
+                let mut guard = index_csum.lock().unwrap();
+                let csum = guard.as_mut().unwrap();
+
+                let chunk_end = offset + chunk_len as u64;
+
+                if !is_fixed_chunk_size {
+                    csum.update(&chunk_end.to_le_bytes());
+                }
+                csum.update(digest);
+
+                let chunk_is_known = known_chunks.contains(digest);
+                if chunk_is_known {
+                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
+                    reused_len.fetch_add(chunk_len, Ordering::SeqCst);
+                    future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
+                } else {
+                    let compressed_stream_len2 = compressed_stream_len.clone();
+                    known_chunks.insert(*digest);
+                    future::ready(chunk_builder.build().map(move |(chunk, digest)| {
+                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
+                        MergedChunkInfo::New(ChunkInfo {
+                            chunk,
+                            digest,
+                            chunk_len: chunk_len as u64,
+                            offset,
+                        })
+                    }))
+                }
+            })
+            .merge_known_chunks()
+            .try_for_each(move |merged_chunk_info| {
+                let upload_queue = upload_queue.clone();
+
+                if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
+                    let offset = chunk_info.offset;
+                    let digest = chunk_info.digest;
+                    let digest_str = digest_to_hex(&digest);
+
+                    /* too verbose, needs finer verbosity setting granularity
+                    if verbose {
+                        println!("upload new chunk {} ({} bytes, offset {})", digest_str,
+                                 chunk_info.chunk_len, offset);
+                    }
+                    */
+
+                    let chunk_data = chunk_info.chunk.into_inner();
+                    let param = json!({
+                        "wid": wid,
+                        "digest": digest_str,
+                        "size": chunk_info.chunk_len,
+                        "encoded-size": chunk_data.len(),
+                    });
+
+                    let ct = "application/octet-stream";
+                    let request = H2Client::request_builder(
+                        "localhost",
+                        "POST",
+                        &upload_chunk_path,
+                        Some(param),
+                        Some(ct),
+                    )
+                    .unwrap();
+                    let upload_data = Some(bytes::Bytes::from(chunk_data));
+
+                    let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
+
+                    Either::Left(h2.send_request(request, upload_data).and_then(
+                        move |response| async move {
+                            upload_queue
+                                .send((new_info, Some(response)))
+                                .await
+                                .map_err(|err| {
+                                    format_err!("failed to send to upload queue: {}", err)
+                                })
+                        },
+                    ))
+                } else {
+                    Either::Right(async move {
+                        upload_queue
+                            .send((merged_chunk_info, None))
+                            .await
+                            .map_err(|err| format_err!("failed to send to upload queue: {}", err))
+                    })
+                }
+            })
+            .then(move |result| async move { upload_result.await?.and(result) }.boxed())
+            .and_then(move |_| {
+                let duration = start_time.elapsed();
+                let chunk_count = total_chunks2.load(Ordering::SeqCst);
+                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
+                let size = stream_len2.load(Ordering::SeqCst);
+                let size_reused = reused_len2.load(Ordering::SeqCst);
+                let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
+
+                let mut guard = index_csum_2.lock().unwrap();
+                let csum = guard.take().unwrap().finish();
+
+                futures::future::ok(UploadStats {
+                    chunk_count,
+                    chunk_reused,
+                    size,
+                    size_reused,
+                    size_compressed,
+                    duration,
+                    csum,
+                })
+            })
+    }
+
+    /// Upload speed test - prints result to stderr
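+    ///
+    /// Returns the measured upload rate in bytes per second. A minimal usage
+    /// sketch (illustrative; `client` is assumed to be an already established
+    /// `BackupWriter` session):
+    ///
+    /// ```ignore
+    /// let bytes_per_sec = client.upload_speedtest(true).await?;
+    /// eprintln!("speed: {:.2} MiB/s", bytes_per_sec / (1024.0 * 1024.0));
+    /// ```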
+    pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
+        let mut data = vec![];
+        // generate a deterministic test byte sequence (little-endian counter values)
+        for i in 0..1024 * 1024 {
+            for j in 0..4 {
+                let byte = ((i >> (j << 3)) & 0xff) as u8;
+                data.push(byte);
+            }
+        }
+
+        let item_len = data.len();
+
+        let mut repeat = 0;
+
+        let (upload_queue, upload_result) = Self::response_queue(verbose);
+
+        let start_time = std::time::Instant::now();
+
+        loop {
+            if start_time.elapsed().as_secs() >= 5 {
+                break;
+            }
+            // count only requests that are actually sent
+            repeat += 1;
+
+            if verbose {
+                eprintln!("send test data ({} bytes)", data.len());
+            }
+            let request =
+                H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
+            let request_future = self
+                .h2
+                .send_request(request, Some(bytes::Bytes::from(data.clone())))
+                .await?;
+
+            upload_queue.send(request_future).await?;
+        }
+
+        drop(upload_queue); // close queue
+
+        let _ = upload_result.await?;
+
+        eprintln!(
+            "Uploaded {} chunks in {} seconds.",
+            repeat,
+            start_time.elapsed().as_secs()
+        );
+        let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
+        eprintln!(
+            "Time per request: {} microseconds.",
+            (start_time.elapsed().as_micros()) / (repeat as u128)
+        );
+
+        Ok(speed)
+    }
+}
diff --git a/pbs-client/src/catalog_shell.rs b/pbs-client/src/catalog_shell.rs
new file mode 100644 (file)
index 0000000..defa623
--- /dev/null
@@ -0,0 +1,1282 @@
+use std::collections::HashMap;
+use std::ffi::{CStr, CString, OsStr, OsString};
+use std::future::Future;
+use std::io::Write;
+use std::mem;
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
+use proxmox::api::api;
+use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
+use proxmox::tools::fs::{create_path, CreateOptions};
+use pxar::{EntryKind, Metadata};
+
+use pbs_runtime::block_in_place;
+use pbs_datastore::catalog::{self, DirEntryAttribute};
+use pbs_tools::ops::ControlFlow;
+
+use crate::pxar::Flags;
+use crate::pxar::fuse::{Accessor, FileEntry};
+
+type CatalogReader = pbs_datastore::catalog::CatalogReader<std::fs::File>;
+
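+// Presumably mirrors the kernel's limit of 40 nested symlinks, so path
+// resolution in the shell errors out (ELOOP-like) instead of looping forever.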
+const MAX_SYMLINK_COUNT: usize = 40;
+
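+// Hands the running `Shell` to completion callbacks and api-macro command
+// handlers. Stored as a `usize`-encoded pointer to avoid `Sync` requirements
+// on `static mut`; only valid while `Shell::shell()` is running.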
+static mut SHELL: Option<usize> = None;
+
+/// This list defines all the shell commands and their properties
+/// using the api schema
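+///
+/// The resulting shell understands the following commands (see the
+/// definitions below for their parameters):
+///
+/// ```text
+/// pwd, cd, ls, stat, select, deselect, clear-selected,
+/// list-selected, restore-selected, restore, find, exit, help
+/// ```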
+pub fn catalog_shell_cli() -> CommandLineInterface {
+    CommandLineInterface::Nested(
+        CliCommandMap::new()
+            .insert("pwd", CliCommand::new(&API_METHOD_PWD_COMMAND))
+            .insert(
+                "cd",
+                CliCommand::new(&API_METHOD_CD_COMMAND)
+                    .arg_param(&["path"])
+                    .completion_cb("path", complete_path),
+            )
+            .insert(
+                "ls",
+                CliCommand::new(&API_METHOD_LS_COMMAND)
+                    .arg_param(&["path"])
+                    .completion_cb("path", complete_path),
+            )
+            .insert(
+                "stat",
+                CliCommand::new(&API_METHOD_STAT_COMMAND)
+                    .arg_param(&["path"])
+                    .completion_cb("path", complete_path),
+            )
+            .insert(
+                "select",
+                CliCommand::new(&API_METHOD_SELECT_COMMAND)
+                    .arg_param(&["path"])
+                    .completion_cb("path", complete_path),
+            )
+            .insert(
+                "deselect",
+                CliCommand::new(&API_METHOD_DESELECT_COMMAND)
+                    .arg_param(&["path"])
+                    .completion_cb("path", complete_path),
+            )
+            .insert(
+                "clear-selected",
+                CliCommand::new(&API_METHOD_CLEAR_SELECTED_COMMAND),
+            )
+            .insert(
+                "list-selected",
+                CliCommand::new(&API_METHOD_LIST_SELECTED_COMMAND),
+            )
+            .insert(
+                "restore-selected",
+                CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
+                    .arg_param(&["target"])
+                    .completion_cb("target", pbs_tools::fs::complete_file_name),
+            )
+            .insert(
+                "restore",
+                CliCommand::new(&API_METHOD_RESTORE_COMMAND)
+                    .arg_param(&["target"])
+                    .completion_cb("target", pbs_tools::fs::complete_file_name),
+            )
+            .insert(
+                "find",
+                CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
+            )
+            .insert(
+                "exit",
+                CliCommand::new(&API_METHOD_EXIT),
+            )
+            .insert_help(),
+    )
+}
+
+fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<String> {
+    let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
+    match shell.complete_path(complete_me) {
+        Ok(list) => list,
+        Err(err) => {
+            eprintln!("error during completion: {}", err);
+            Vec::new()
+        }
+    }
+}
+
+// Just an empty wrapper so that it is displayed in help/docs; we check for
+// 'exit' again in the read loop and break there.
+#[api(input: { properties: {} })]
+/// Exit the shell
+async fn exit() -> Result<(), Error> {
+    Ok(())
+}
+
+#[api(input: { properties: {} })]
+/// List the current working directory.
+async fn pwd_command() -> Result<(), Error> {
+    Shell::with(move |shell| shell.pwd()).await
+}
+
+#[api(
+    input: {
+        properties: {
+            path: {
+                type: String,
+                optional: true,
+                description: "target path."
+            }
+        }
+    }
+)]
+/// Change the current working directory to the new directory
+async fn cd_command(path: Option<String>) -> Result<(), Error> {
+    let path = path.as_ref().map(Path::new);
+    Shell::with(move |shell| shell.cd(path)).await
+}
+
+#[api(
+    input: {
+        properties: {
+            path: {
+                type: String,
+                optional: true,
+                description: "target path."
+            }
+        }
+    }
+)]
+/// List the content of working directory or given path.
+async fn ls_command(path: Option<String>) -> Result<(), Error> {
+    let path = path.as_ref().map(Path::new);
+    Shell::with(move |shell| shell.ls(path)).await
+}
+
+#[api(
+    input: {
+        properties: {
+            path: {
+                type: String,
+                description: "target path."
+            }
+        }
+    }
+)]
+/// Read the metadata for a given directory entry.
+///
+/// This is expensive because the data has to be read from the pxar archive, which means reading
+/// over the network.
+async fn stat_command(path: String) -> Result<(), Error> {
+    Shell::with(move |shell| shell.stat(PathBuf::from(path))).await
+}
+
+#[api(
+    input: {
+        properties: {
+            path: {
+                type: String,
+                description: "target path."
+            }
+        }
+    }
+)]
+/// Select an entry for restore.
+///
+/// This will return an error if an invalid path was provided; selecting an
+/// already selected path only prints a notice.
+async fn select_command(path: String) -> Result<(), Error> {
+    Shell::with(move |shell| shell.select(PathBuf::from(path))).await
+}
+
+#[api(
+    input: {
+        properties: {
+            path: {
+                type: String,
+                description: "path to entry to remove from list."
+            }
+        }
+    }
+)]
+/// Deselect an entry for restore.
+///
+/// This will return an error if the entry was not found in the list of entries
+/// selected for restore.
+async fn deselect_command(path: String) -> Result<(), Error> {
+    Shell::with(move |shell| shell.deselect(PathBuf::from(path))).await
+}
+
+#[api(input: { properties: {} })]
+/// Clear the list of files selected for restore.
+async fn clear_selected_command() -> Result<(), Error> {
+    Shell::with(move |shell| shell.deselect_all()).await
+}
+
+#[api(
+    input: {
+        properties: {
+            patterns: {
+                type: Boolean,
+                description: "List match patterns instead of the matching files.",
+                optional: true,
+                default: false,
+            }
+        }
+    }
+)]
+/// List entries currently selected for restore.
+async fn list_selected_command(patterns: bool) -> Result<(), Error> {
+    Shell::with(move |shell| shell.list_selected(patterns)).await
+}
+
+#[api(
+    input: {
+        properties: {
+            pattern: {
+                type: String,
+                description: "Match pattern for matching files in the catalog."
+            },
+            select: {
+                type: bool,
+                optional: true,
+                default: false,
+                description: "Add matching filenames to list for restore."
+            }
+        }
+    }
+)]
+/// Find entries in the catalog matching the given match pattern.
+async fn find_command(pattern: String, select: bool) -> Result<(), Error> {
+    Shell::with(move |shell| shell.find(pattern, select)).await
+}
+
+#[api(
+    input: {
+        properties: {
+            target: {
+                type: String,
+                description: "target path for restore on local filesystem."
+            }
+        }
+    }
+)]
+/// Restore the selected entries to the given target path.
+///
+/// Target must not exist on the client's filesystem.
+async fn restore_selected_command(target: String) -> Result<(), Error> {
+    Shell::with(move |shell| shell.restore_selected(PathBuf::from(target))).await
+}
+
+#[api(
+    input: {
+        properties: {
+            target: {
+                type: String,
+                description: "target path for restore on local filesystem."
+            },
+            pattern: {
+                type: String,
+                optional: true,
+                description: "match pattern to limit files for restore."
+            }
+        }
+    }
+)]
+/// Restore the sub-archive given by the current working directory to target.
+///
+/// By further providing a pattern, the restore can be limited to a narrower
+/// subset of this sub-archive.
+/// If pattern is not present or empty, the full archive is restored to target.
+async fn restore_command(target: String, pattern: Option<String>) -> Result<(), Error> {
+    Shell::with(move |shell| shell.restore(PathBuf::from(target), pattern)).await
+}
+
+/// TODO: Should we use this to fix `step()`? Make path resolution behave more like what is
+/// described in the path_resolution(7) man page.
+///
+/// The `Path` type's component iterator does not tell us anything about trailing slashes or
+/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
+/// here:
+enum PathComponent<'a> {
+    Root,
+    CurDir,
+    ParentDir,
+    Normal(&'a OsStr),
+    TrailingSlash,
+}
+
+struct PathComponentIter<'a> {
+    path: &'a [u8],
+    state: u8, // 0=beginning, 1=ongoing, 2=trailing, 3=finished (fused)
+}
+
+impl std::iter::FusedIterator for PathComponentIter<'_> {}
+
+impl<'a> Iterator for PathComponentIter<'a> {
+    type Item = PathComponent<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.path.is_empty() {
+            return None;
+        }
+
+        if self.state == 0 {
+            self.state = 1;
+            if self.path[0] == b'/' {
+                // absolute path
+                self.path = &self.path[1..];
+                return Some(PathComponent::Root);
+            }
+        }
+
+        // skip slashes
+        let had_slashes = self.path[0] == b'/';
+        while self.path.get(0).copied() == Some(b'/') {
+            self.path = &self.path[1..];
+        }
+
+        Some(match self.path {
+            [] if had_slashes => PathComponent::TrailingSlash,
+            [] => return None,
+            [b'.'] | [b'.', b'/', ..] => {
+                self.path = &self.path[1..];
+                PathComponent::CurDir
+            }
+            [b'.', b'.'] | [b'.', b'.', b'/', ..] => {
+                self.path = &self.path[2..];
+                PathComponent::ParentDir
+            }
+            _ => {
+                let end = self
+                    .path
+                    .iter()
+                    .position(|&b| b == b'/')
+                    .unwrap_or(self.path.len());
+                let (out, rest) = self.path.split_at(end);
+                self.path = rest;
+                PathComponent::Normal(OsStr::from_bytes(out))
+            }
+        })
+    }
+}
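+// Illustrative expectation for the iterator above: the bytes of "/usr/./lib/"
+// yield Root, Normal("usr"), CurDir, Normal("lib"), TrailingSlash.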
+
+pub struct Shell {
+    /// Readline instance handling input and callbacks
+    rl: rustyline::Editor<CliHelper>,
+
+    /// Interactive prompt.
+    prompt: String,
+
+    /// Catalog reader instance to navigate
+    catalog: CatalogReader,
+
+    /// List of selected paths for restore
+    selected: HashMap<OsString, MatchEntry>,
+
+    /// pxar accessor instance for the current pxar archive
+    accessor: Accessor,
+
+    /// The current position in the archive.
+    position: Vec<PathStackEntry>,
+}
+
+#[derive(Clone)]
+struct PathStackEntry {
+    /// This is always available. We mainly navigate through the catalog.
+    catalog: catalog::DirEntry,
+
+    /// Whenever we need something from the actual archive we fill this out. This is cached along
+    /// the entire path.
+    pxar: Option<FileEntry>,
+}
+
+impl PathStackEntry {
+    fn new(dir_entry: catalog::DirEntry) -> Self {
+        Self {
+            pxar: None,
+            catalog: dir_entry,
+        }
+    }
+}
+
+impl Shell {
+    /// Create a new shell for the given catalog and pxar archive.
+    pub async fn new(
+        mut catalog: CatalogReader,
+        archive_name: &str,
+        archive: Accessor,
+    ) -> Result<Self, Error> {
+        let cli_helper = CliHelper::new(catalog_shell_cli());
+        let mut rl = rustyline::Editor::<CliHelper>::new();
+        rl.set_helper(Some(cli_helper));
+
+        let catalog_root = catalog.root()?;
+        let archive_root = catalog
+            .lookup(&catalog_root, archive_name.as_bytes())?
+            .ok_or_else(|| format_err!("archive not found in catalog"))?;
+        let position = vec![PathStackEntry::new(archive_root)];
+
+        let mut this = Self {
+            rl,
+            prompt: String::new(),
+            catalog,
+            selected: HashMap::new(),
+            accessor: archive,
+            position,
+        };
+        this.update_prompt();
+        Ok(this)
+    }
+
+    async fn with<'a, Fut, R, F>(call: F) -> Result<R, Error>
+    where
+        F: FnOnce(&'a mut Shell) -> Fut,
+        Fut: Future<Output = Result<R, Error>>,
+        F: 'a,
+        Fut: 'a,
+        R: 'static,
+    {
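+        // SAFETY: `shell()` stores a pointer to the live `Shell` in SHELL
+        // before entering its read loop, and commands run single-threaded
+        // within that loop, so decoding the pointer here is sound for the
+        // duration of the call (an assumption inherited from this design).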
+        let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
+        call(&mut *shell).await
+    }
+
+    pub async fn shell(mut self) -> Result<(), Error> {
+        let this = &mut self;
+        unsafe {
+            SHELL = Some(this as *mut Shell as usize);
+        }
+        while let Ok(line) = this.rl.readline(&this.prompt) {
+            if line == "exit" {
+                break;
+            }
+            let helper = this.rl.helper().unwrap();
+            let args = match cli::shellword_split(&line) {
+                Ok(args) => args,
+                Err(err) => {
+                    println!("Error: {}", err);
+                    continue;
+                }
+            };
+
+            let _ =
+                cli::handle_command_future(helper.cmd_def(), "", args, cli::CliEnvironment::new())
+                    .await;
+            this.rl.add_history_entry(line);
+            this.update_prompt();
+        }
+        Ok(())
+    }
+
+    fn update_prompt(&mut self) {
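+        // Produces prompts like `pxar:/ > ` at the archive root and
+        // `pxar:/usr/share > ` further down (illustrative).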
+        self.prompt = "pxar:".to_string();
+        if self.position.len() <= 1 {
+            self.prompt.push('/');
+        } else {
+            for p in self.position.iter().skip(1) {
+                if !p.catalog.name.starts_with(b"/") {
+                    self.prompt.push('/');
+                }
+                match std::str::from_utf8(&p.catalog.name) {
+                    Ok(entry) => self.prompt.push_str(entry),
+                    Err(_) => self.prompt.push_str("<non-utf8-dir>"),
+                }
+            }
+        }
+        self.prompt.push_str(" > ");
+    }
+
+    async fn pwd(&mut self) -> Result<(), Error> {
+        let stack = Self::lookup(
+            &self.position,
+            &mut self.catalog,
+            &self.accessor,
+            None,
+            &mut Some(0),
+        )
+        .await?;
+        let path = Self::format_path_stack(&stack);
+        println!("{:?}", path);
+        Ok(())
+    }
+
+    fn new_path_stack(&self) -> Vec<PathStackEntry> {
+        self.position[..1].to_vec()
+    }
+
+    async fn resolve_symlink(
+        stack: &mut Vec<PathStackEntry>,
+        catalog: &mut CatalogReader,
+        accessor: &Accessor,
+        follow_symlinks: &mut Option<usize>,
+    ) -> Result<(), Error> {
+        if let Some(ref mut symlink_count) = follow_symlinks {
+            *symlink_count += 1;
+            if *symlink_count > MAX_SYMLINK_COUNT {
+                bail!("too many levels of symbolic links");
+            }
+
+            let file = Self::walk_pxar_archive(accessor, &mut stack[..]).await?;
+
+            let path = match file.entry().kind() {
+                EntryKind::Symlink(symlink) => Path::new(symlink.as_os_str()),
+                _ => bail!("symlink in the catalog was not a symlink in the archive"),
+            };
+
+            let new_stack =
+                Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
+
+            *stack = new_stack;
+
+            Ok(())
+        } else {
+            bail!("target is a symlink");
+        }
+    }
+
+    /// Walk a path and add it to the path stack.
+    ///
+    /// If the symlink count is used, symlinks will be followed until we hit the cap and
+    /// error out.
+    async fn step(
+        stack: &mut Vec<PathStackEntry>,
+        catalog: &mut CatalogReader,
+        accessor: &Accessor,
+        component: std::path::Component<'_>,
+        follow_symlinks: &mut Option<usize>,
+    ) -> Result<(), Error> {
+        use std::path::Component;
+        match component {
+            Component::Prefix(_) => bail!("invalid path component (prefix)"),
+            Component::RootDir => stack.truncate(1),
+            Component::CurDir => {
+                if stack.last().unwrap().catalog.is_symlink() {
+                    Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
+                }
+            }
+            Component::ParentDir => drop(stack.pop()),
+            Component::Normal(entry) => {
+                if stack.last().unwrap().catalog.is_symlink() {
+                    Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
+                }
+                match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
+                    Some(dir) => stack.push(PathStackEntry::new(dir)),
+                    None => bail!("no such file or directory: {:?}", entry),
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn step_nofollow(
+        stack: &mut Vec<PathStackEntry>,
+        catalog: &mut CatalogReader,
+        component: std::path::Component<'_>,
+    ) -> Result<(), Error> {
+        use std::path::Component;
+        match component {
+            Component::Prefix(_) => bail!("invalid path component (prefix)"),
+            Component::RootDir => stack.truncate(1),
+            Component::CurDir => {
+                if stack.last().unwrap().catalog.is_symlink() {
+                    bail!("target is a symlink");
+                }
+            }
+            Component::ParentDir => drop(stack.pop()),
+            Component::Normal(entry) => {
+                if stack.last().unwrap().catalog.is_symlink() {
+                    bail!("target is a symlink");
+                } else {
+                    match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
+                        Some(dir) => stack.push(PathStackEntry::new(dir)),
+                        None => bail!("no such file or directory: {:?}", entry),
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// The pxar accessor is required to resolve symbolic links
+    async fn walk_catalog(
+        stack: &mut Vec<PathStackEntry>,
+        catalog: &mut CatalogReader,
+        accessor: &Accessor,
+        path: &Path,
+        follow_symlinks: &mut Option<usize>,
+    ) -> Result<(), Error> {
+        for c in path.components() {
+            Self::step(stack, catalog, accessor, c, follow_symlinks).await?;
+        }
+        Ok(())
+    }
+
+    /// Non-async version cannot follow symlinks.
+    fn walk_catalog_nofollow(
+        stack: &mut Vec<PathStackEntry>,
+        catalog: &mut CatalogReader,
+        path: &Path,
+    ) -> Result<(), Error> {
+        for c in path.components() {
+            Self::step_nofollow(stack, catalog, c)?;
+        }
+        Ok(())
+    }
+
+    /// This assumes that there are no more symlinks in the path stack.
+    async fn walk_pxar_archive(
+        accessor: &Accessor,
+        mut stack: &mut [PathStackEntry],
+    ) -> Result<FileEntry, Error> {
+        if stack[0].pxar.is_none() {
+            stack[0].pxar = Some(accessor.open_root().await?.lookup_self().await?);
+        }
+
+        // Now walk the directory stack:
+        let mut at = 1;
+        while at < stack.len() {
+            if stack[at].pxar.is_some() {
+                at += 1;
+                continue;
+            }
+
+            let parent = stack[at - 1].pxar.as_ref().unwrap();
+            let dir = parent.enter_directory().await?;
+            let name = Path::new(OsStr::from_bytes(&stack[at].catalog.name));
+            stack[at].pxar = Some(
+                dir.lookup(name)
+                    .await?
+                    .ok_or_else(|| format_err!("no such entry in pxar file: {:?}", name))?,
+            );
+
+            at += 1;
+        }
+
+        Ok(stack.last().unwrap().pxar.clone().unwrap())
+    }
+
+    fn complete_path(&mut self, input: &str) -> Result<Vec<String>, Error> {
+        let mut tmp_stack;
+        let (parent, base, part) = match input.rfind('/') {
+            Some(ind) => {
+                let (base, part) = input.split_at(ind + 1);
+                let path = PathBuf::from(base);
+                if path.is_absolute() {
+                    tmp_stack = self.new_path_stack();
+                } else {
+                    tmp_stack = self.position.clone();
+                }
+                Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
+                (&tmp_stack.last().unwrap().catalog, base, part)
+            }
+            None => (&self.position.last().unwrap().catalog, "", input),
+        };
+
+        let entries = self.catalog.read_dir(parent)?;
+
+        let mut out = Vec::new();
+        for entry in entries {
+            let mut name = base.to_string();
+            if entry.name.starts_with(part.as_bytes()) {
+                name.push_str(std::str::from_utf8(&entry.name)?);
+                if entry.is_directory() {
+                    name.push('/');
+                }
+                out.push(name);
+            }
+        }
+
+        Ok(out)
+    }
+
+    // Break async recursion here: lookup -> walk_catalog -> step -> lookup
+    fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
+        stack: &'s [PathStackEntry],
+        catalog: &'c mut CatalogReader,
+        accessor: &'a Accessor,
+        path: Option<&'p Path>,
+        follow_symlinks: &'y mut Option<usize>,
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<PathStackEntry>, Error>> + Send + 'future>>
+    where
+        's: 'future,
+        'c: 'future,
+        'a: 'future,
+        'p: 'future,
+        'y: 'future,
+    {
+        Box::pin(async move {
+            Ok(match path {
+                None => stack.to_vec(),
+                Some(path) => {
+                    let mut stack = if path.is_absolute() {
+                        stack[..1].to_vec()
+                    } else {
+                        stack.to_vec()
+                    };
+                    Self::walk_catalog(&mut stack, catalog, accessor, path, follow_symlinks)
+                        .await?;
+                    stack
+                }
+            })
+        })
+    }
+
+    async fn ls(&mut self, path: Option<&Path>) -> Result<(), Error> {
+        let stack = Self::lookup(
+            &self.position,
+            &mut self.catalog,
+            &self.accessor,
+            path,
+            &mut Some(0),
+        )
+        .await?;
+
+        let last = stack.last().unwrap();
+        if last.catalog.is_directory() {
+            let items = self.catalog.read_dir(&last.catalog)?;
+            let mut out = std::io::stdout();
+            // FIXME: columnize
+            for item in items {
+                out.write_all(&item.name)?;
+                out.write_all(b"\n")?;
+            }
+        } else {
+            let mut out = std::io::stdout();
+            out.write_all(&last.catalog.name)?;
+            out.write_all(b"\n")?;
+        }
+        Ok(())
+    }
+
+    async fn stat(&mut self, path: PathBuf) -> Result<(), Error> {
+        let mut stack = Self::lookup(
+            &self.position,
+            &mut self.catalog,
+            &self.accessor,
+            Some(&path),
+            &mut Some(0),
+        )
+        .await?;
+
+        let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
+        std::io::stdout()
+            .write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
+        Ok(())
+    }
+
+    async fn cd(&mut self, path: Option<&Path>) -> Result<(), Error> {
+        match path {
+            Some(path) => {
+                let new_position = Self::lookup(
+                    &self.position,
+                    &mut self.catalog,
+                    &self.accessor,
+                    Some(path),
+                    &mut None,
+                )
+                .await?;
+                if !new_position.last().unwrap().catalog.is_directory() {
+                    bail!("not a directory");
+                }
+                self.position = new_position;
+            }
+            None => self.position.truncate(1),
+        }
+        self.update_prompt();
+        Ok(())
+    }
+
+    /// This stack must have been canonicalized already!
+    fn format_path_stack(stack: &[PathStackEntry]) -> OsString {
+        if stack.len() <= 1 {
+            return OsString::from("/");
+        }
+
+        let mut out = OsString::new();
+        for c in stack.iter().skip(1) {
+            out.push("/");
+            out.push(OsStr::from_bytes(&c.catalog.name));
+        }
+
+        out
+    }
+
+    async fn select(&mut self, path: PathBuf) -> Result<(), Error> {
+        let stack = Self::lookup(
+            &self.position,
+            &mut self.catalog,
+            &self.accessor,
+            Some(&path),
+            &mut Some(0),
+        )
+        .await?;
+
+        let path = Self::format_path_stack(&stack);
+        let entry = MatchEntry::include(MatchPattern::Literal(path.as_bytes().to_vec()));
+        if self.selected.insert(path.clone(), entry).is_some() {
+            println!("path already selected: {:?}", path);
+        } else {
+            println!("added path: {:?}", path);
+        }
+
+        Ok(())
+    }
+
+    async fn deselect(&mut self, path: PathBuf) -> Result<(), Error> {
+        let stack = Self::lookup(
+            &self.position,
+            &mut self.catalog,
+            &self.accessor,
+            Some(&path),
+            &mut Some(0),
+        )
+        .await?;
+
+        let path = Self::format_path_stack(&stack);
+
+        if self.selected.remove(&path).is_some() {
+            println!("removed path from selection: {:?}", path);
+        } else {
+            println!("path not selected: {:?}", path);
+        }
+
+        Ok(())
+    }
+
+    async fn deselect_all(&mut self) -> Result<(), Error> {
+        self.selected.clear();
+        println!("cleared selection");
+        Ok(())
+    }
+
+    async fn list_selected(&mut self, patterns: bool) -> Result<(), Error> {
+        if patterns {
+            self.list_selected_patterns().await
+        } else {
+            self.list_matching_files().await
+        }
+    }
+
+    async fn list_selected_patterns(&self) -> Result<(), Error> {
+        for entry in self.selected.keys() {
+            println!("{:?}", entry);
+        }
+        Ok(())
+    }
+
+    fn build_match_list(&self) -> Vec<MatchEntry> {
+        let mut list = Vec::with_capacity(self.selected.len());
+        for entry in self.selected.values() {
+            list.push(entry.clone());
+        }
+        list
+    }
+
+    async fn list_matching_files(&mut self) -> Result<(), Error> {
+        let matches = self.build_match_list();
+
+        self.catalog.find(
+            &self.position[0].catalog,
+            &mut Vec::new(),
+            &matches,
+            &mut |path: &[u8]| -> Result<(), Error> {
+                let mut out = std::io::stdout();
+                out.write_all(path)?;
+                out.write_all(b"\n")?;
+                Ok(())
+            },
+        )?;
+
+        Ok(())
+    }
+
+    async fn find(&mut self, pattern: String, select: bool) -> Result<(), Error> {
+        let pattern_os = OsString::from(pattern.clone());
+        let pattern_entry =
+            MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;
+
+        let mut found_some = false;
+        self.catalog.find(
+            &self.position[0].catalog,
+            &mut Vec::new(),
+            &[&pattern_entry],
+            &mut |path: &[u8]| -> Result<(), Error> {
+                found_some = true;
+                let mut out = std::io::stdout();
+                out.write_all(path)?;
+                out.write_all(b"\n")?;
+                Ok(())
+            },
+        )?;
+
+        if found_some && select {
+            self.selected.insert(pattern_os, pattern_entry);
+        }
+
+        Ok(())
+    }
+
+    async fn restore_selected(&mut self, destination: PathBuf) -> Result<(), Error> {
+        if self.selected.is_empty() {
+            bail!("no entries selected");
+        }
+
+        let match_list = self.build_match_list();
+
+        self.restore_with_match_list(destination, &match_list).await
+    }
+
+    async fn restore(
+        &mut self,
+        destination: PathBuf,
+        pattern: Option<String>,
+    ) -> Result<(), Error> {
+        let tmp;
+        let match_list: &[MatchEntry] = match pattern {
+            None => &[],
+            Some(pattern) => {
+                tmp = [MatchEntry::parse_pattern(
+                    pattern,
+                    PatternFlag::PATH_NAME,
+                    MatchType::Include,
+                )?];
+                &tmp
+            }
+        };
+
+        self.restore_with_match_list(destination, match_list).await
+    }
+
+    async fn restore_with_match_list(
+        &mut self,
+        destination: PathBuf,
+        match_list: &[MatchEntry],
+    ) -> Result<(), Error> {
+        create_path(
+            &destination,
+            None,
+            Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
+        )
+        .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
+
+        let rootdir = Dir::open(
+            &destination,
+            OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
+            Mode::empty(),
+        )
+        .map_err(|err| {
+            format_err!("unable to open target directory {:?}: {}", destination, err,)
+        })?;
+
+        let mut dir_stack = self.new_path_stack();
+        Self::walk_pxar_archive(&self.accessor, &mut dir_stack).await?;
+        let root_meta = dir_stack
+            .last()
+            .unwrap()
+            .pxar
+            .as_ref()
+            .unwrap()
+            .entry()
+            .metadata()
+            .clone();
+
+        let extractor =
+            crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
+
+        let mut extractor = ExtractorState::new(
+            &mut self.catalog,
+            dir_stack,
+            extractor,
+            &match_list,
+            &self.accessor,
+        )?;
+
+        extractor.extract().await
+    }
+}
+
+struct ExtractorState<'a> {
+    path: Vec<u8>,
+    path_len: usize,
+    path_len_stack: Vec<usize>,
+
+    dir_stack: Vec<PathStackEntry>,
+
+    matches: bool,
+    matches_stack: Vec<bool>,
+
+    read_dir: <Vec<catalog::DirEntry> as IntoIterator>::IntoIter,
+    read_dir_stack: Vec<<Vec<catalog::DirEntry> as IntoIterator>::IntoIter>,
+
+    extractor: crate::pxar::extract::Extractor,
+
+    catalog: &'a mut CatalogReader,
+    match_list: &'a [MatchEntry],
+    accessor: &'a Accessor,
+}
+
+impl<'a> ExtractorState<'a> {
+    pub fn new(
+        catalog: &'a mut CatalogReader,
+        dir_stack: Vec<PathStackEntry>,
+        extractor: crate::pxar::extract::Extractor,
+        match_list: &'a [MatchEntry],
+        accessor: &'a Accessor,
+    ) -> Result<Self, Error> {
+        let read_dir = catalog
+            .read_dir(&dir_stack.last().unwrap().catalog)?
+            .into_iter();
+        Ok(Self {
+            path: Vec::new(),
+            path_len: 0,
+            path_len_stack: Vec::new(),
+
+            dir_stack,
+
+            matches: match_list.is_empty(),
+            matches_stack: Vec::new(),
+
+            read_dir,
+            read_dir_stack: Vec::new(),
+
+            extractor,
+
+            catalog,
+            match_list,
+            accessor,
+        })
+    }
+
+    pub async fn extract(&mut self) -> Result<(), Error> {
+        loop {
+            let entry = match self.read_dir.next() {
+                Some(entry) => entry,
+                None => match self.handle_end_of_directory()? {
+                    ControlFlow::Break(()) => break, // done with root directory
+                    ControlFlow::Continue(()) => continue,
+                },
+            };
+
+            self.path.truncate(self.path_len);
+            if !entry.name.starts_with(b"/") {
+                self.path.reserve(entry.name.len() + 1);
+                self.path.push(b'/');
+            }
+            self.path.extend(&entry.name);
+
+            self.extractor.set_path(OsString::from_vec(self.path.clone()));
+            self.handle_entry(entry).await?;
+        }
+
+        Ok(())
+    }
+
+    fn handle_end_of_directory(&mut self) -> Result<ControlFlow<()>, Error> {
+        // go up a directory:
+        self.read_dir = match self.read_dir_stack.pop() {
+            Some(r) => r,
+            None => return Ok(ControlFlow::Break(())), // out of root directory
+        };
+
+        self.matches = self
+            .matches_stack
+            .pop()
+            .ok_or_else(|| format_err!("internal iterator error (matches_stack)"))?;
+
+        self.dir_stack
+            .pop()
+            .ok_or_else(|| format_err!("internal iterator error (dir_stack)"))?;
+
+        self.path_len = self
+            .path_len_stack
+            .pop()
+            .ok_or_else(|| format_err!("internal iterator error (path_len_stack)"))?;
+
+        self.extractor.leave_directory()?;
+
+        Ok(ControlFlow::CONTINUE)
+    }
+
+    async fn handle_new_directory(
+        &mut self,
+        entry: catalog::DirEntry,
+        match_result: Option<MatchType>,
+    ) -> Result<(), Error> {
+        // enter a new directory:
+        self.read_dir_stack.push(mem::replace(
+            &mut self.read_dir,
+            self.catalog.read_dir(&entry)?.into_iter(),
+        ));
+        self.matches_stack.push(self.matches);
+        self.dir_stack.push(PathStackEntry::new(entry));
+        self.path_len_stack.push(self.path_len);
+        self.path_len = self.path.len();
+
+        Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+        let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
+        let dir_meta = dir_pxar.entry().metadata().clone();
+        let create = self.matches && match_result != Some(MatchType::Exclude);
+        self.extractor.enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
+
+        Ok(())
+    }
+
+    pub async fn handle_entry(&mut self, entry: catalog::DirEntry) -> Result<(), Error> {
+        let match_result = self.match_list.matches(&self.path, entry.get_file_mode());
+        let did_match = match match_result {
+            Some(MatchType::Include) => true,
+            Some(MatchType::Exclude) => false,
+            None => self.matches,
+        };
+
+        match (did_match, &entry.attr) {
+            (_, DirEntryAttribute::Directory { .. }) => {
+                self.handle_new_directory(entry, match_result).await?;
+            }
+            (true, DirEntryAttribute::File { .. }) => {
+                self.dir_stack.push(PathStackEntry::new(entry));
+                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+                self.extract_file(file).await?;
+                self.dir_stack.pop();
+            }
+            (true, DirEntryAttribute::Symlink)
+            | (true, DirEntryAttribute::BlockDevice)
+            | (true, DirEntryAttribute::CharDevice)
+            | (true, DirEntryAttribute::Fifo)
+            | (true, DirEntryAttribute::Socket)
+            | (true, DirEntryAttribute::Hardlink) => {
+                let attr = entry.attr.clone();
+                self.dir_stack.push(PathStackEntry::new(entry));
+                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+                self.extract_special(file, attr).await?;
+                self.dir_stack.pop();
+            }
+            (false, _) => (), // skip
+        }
+
+        Ok(())
+    }
+
+    fn path(&self) -> &OsStr {
+        OsStr::from_bytes(&self.path)
+    }
+
+    async fn extract_file(&mut self, entry: FileEntry) -> Result<(), Error> {
+        match entry.kind() {
+            pxar::EntryKind::File { size, .. } => {
+                let file_name = CString::new(entry.file_name().as_bytes())?;
+                let mut contents = entry.contents().await?;
+                self.extractor.async_extract_file(
+                    &file_name,
+                    entry.metadata(),
+                    *size,
+                    &mut contents,
+                )
+                .await
+            }
+            _ => {
+                bail!(
+                    "catalog file {:?} not a regular file in the archive",
+                    self.path()
+                );
+            }
+        }
+    }
+
+    async fn extract_special(
+        &mut self,
+        entry: FileEntry,
+        catalog_attr: DirEntryAttribute,
+    ) -> Result<(), Error> {
+        let file_name = CString::new(entry.file_name().as_bytes())?;
+        match (catalog_attr, entry.kind()) {
+            (DirEntryAttribute::Symlink, pxar::EntryKind::Symlink(symlink)) => {
+                block_in_place(|| self.extractor.extract_symlink(
+                    &file_name,
+                    entry.metadata(),
+                    symlink.as_os_str(),
+                ))
+            }
+            (DirEntryAttribute::Symlink, _) => {
+                bail!(
+                    "catalog symlink {:?} not a symlink in the archive",
+                    self.path()
+                );
+            }
+
+            (DirEntryAttribute::Hardlink, pxar::EntryKind::Hardlink(hardlink)) => {
+                block_in_place(|| self.extractor.extract_hardlink(&file_name, hardlink.as_os_str()))
+            }
+            (DirEntryAttribute::Hardlink, _) => {
+                bail!(
+                    "catalog hardlink {:?} not a hardlink in the archive",
+                    self.path()
+                );
+            }
+
+            (ref attr, pxar::EntryKind::Device(device)) => {
+                self.extract_device(attr.clone(), &file_name, device, entry.metadata())
+            }
+
+            (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => {
+                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
+            }
+            (DirEntryAttribute::Fifo, _) => {
+                bail!("catalog fifo {:?} not a fifo in the archive", self.path());
+            }
+
+            (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => {
+                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
+            }
+            (DirEntryAttribute::Socket, _) => {
+                bail!(
+                    "catalog socket {:?} not a socket in the archive",
+                    self.path()
+                );
+            }
+
+            attr => bail!("unhandled file type {:?} for {:?}", attr, self.path()),
+        }
+    }
+
+    fn extract_device(
+        &mut self,
+        attr: DirEntryAttribute,
+        file_name: &CStr,
+        device: &pxar::format::Device,
+        metadata: &Metadata,
+    ) -> Result<(), Error> {
+        match attr {
+            DirEntryAttribute::BlockDevice => {
+                if !metadata.stat.is_blockdev() {
+                    bail!(
+                        "catalog block device {:?} is not a block device in the archive",
+                        self.path(),
+                    );
+                }
+            }
+            DirEntryAttribute::CharDevice => {
+                if !metadata.stat.is_chardev() {
+                    bail!(
+                        "catalog character device {:?} is not a character device in the archive",
+                        self.path(),
+                    );
+                }
+            }
+            _ => {
+                bail!(
+                    "unexpected file type for {:?} in the catalog, \
+                     which is a device special file in the archive",
+                    self.path(),
+                );
+            }
+        }
+        block_in_place(|| self.extractor.extract_special(file_name, metadata, device.to_dev_t()))
+    }
+}
diff --git a/pbs-client/src/http_client.rs b/pbs-client/src/http_client.rs
new file mode 100644 (file)
index 0000000..a83b8d3
--- /dev/null
@@ -0,0 +1,1013 @@
+use std::io::Write;
+use std::sync::{Arc, Mutex, RwLock};
+use std::time::Duration;
+
+use anyhow::{bail, format_err, Error};
+use futures::*;
+use http::Uri;
+use http::header::HeaderValue;
+use http::{Request, Response};
+use hyper::Body;
+use hyper::client::{Client, HttpConnector};
+use openssl::{ssl::{SslConnector, SslMethod}, x509::X509StoreContextRef};
+use serde_json::{json, Value};
+use percent_encoding::percent_encode;
+use xdg::BaseDirectories;
+
+use proxmox::{
+    api::error::HttpError,
+    sys::linux::tty,
+    tools::fs::{file_get_json, replace_file, CreateOptions},
+};
+
+use proxmox_http::client::HttpsConnector;
+use proxmox_http::uri::build_authority;
+
+use pbs_api_types::{Authid, Userid};
+use pbs_tools::broadcast_future::BroadcastFuture;
+use pbs_tools::json::json_object_to_query;
+use pbs_tools::ticket;
+use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;
+
+use super::pipe_to_stream::PipeToSendStream;
+use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
+
+/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
+/// certain error conditions. Keep it generous to avoid false positives under high load.
+const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
+
+#[derive(Clone)]
+pub struct AuthInfo {
+    pub auth_id: Authid,
+    pub ticket: String,
+    pub token: String,
+}
+
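+/// Connection options for [`HttpClient`], built up in builder style.
+///
+/// A minimal sketch (values are illustrative):
+///
+/// ```ignore
+/// let options = HttpClientOptions::new_interactive(None, None)
+///     .verify_cert(true)
+///     .ticket_cache(true);
+/// ```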
+pub struct HttpClientOptions {
+    prefix: Option<String>,
+    password: Option<String>,
+    fingerprint: Option<String>,
+    interactive: bool,
+    ticket_cache: bool,
+    fingerprint_cache: bool,
+    verify_cert: bool,
+}
+
+impl HttpClientOptions {
+
+    pub fn new_interactive(password: Option<String>, fingerprint: Option<String>) -> Self {
+        Self {
+            password,
+            fingerprint,
+            fingerprint_cache: true,
+            ticket_cache: true,
+            interactive: true,
+            prefix: Some("proxmox-backup".to_string()),
+            ..Self::default()
+        }
+    }
+
+    pub fn new_non_interactive(password: String, fingerprint: Option<String>) -> Self {
+        Self {
+            password: Some(password),
+            fingerprint,
+            ..Self::default()
+        }
+    }
+
+    pub fn prefix(mut self, prefix: Option<String>) -> Self {
+        self.prefix = prefix;
+        self
+    }
+
+    pub fn password(mut self, password: Option<String>) -> Self {
+        self.password = password;
+        self
+    }
+
+    pub fn fingerprint(mut self, fingerprint: Option<String>) -> Self {
+        self.fingerprint = fingerprint;
+        self
+    }
+
+    pub fn interactive(mut self, interactive: bool) -> Self {
+        self.interactive = interactive;
+        self
+    }
+
+    pub fn ticket_cache(mut self, ticket_cache: bool) -> Self {
+        self.ticket_cache = ticket_cache;
+        self
+    }
+
+    pub fn fingerprint_cache(mut self, fingerprint_cache: bool) -> Self {
+        self.fingerprint_cache = fingerprint_cache;
+        self
+    }
+
+    pub fn verify_cert(mut self, verify_cert: bool) -> Self {
+        self.verify_cert = verify_cert;
+        self
+    }
+}
+
+impl Default for HttpClientOptions {
+    fn default() -> Self {
+        Self {
+            prefix: None,
+            password: None,
+            fingerprint: None,
+            interactive: false,
+            ticket_cache: false,
+            fingerprint_cache: false,
+            verify_cert: true,
+        }
+    }
+}
+
+/// HTTP(S) API client
+pub struct HttpClient {
+    client: Client<HttpsConnector>,
+    server: String,
+    port: u16,
+    fingerprint: Arc<Mutex<Option<String>>>,
+    first_auth: Option<BroadcastFuture<()>>,
+    auth: Arc<RwLock<AuthInfo>>,
+    ticket_abort: futures::future::AbortHandle,
+    _options: HttpClientOptions,
+}
+
+/// Delete stored ticket data (logout)
+pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {
+
+    let base = BaseDirectories::with_prefix(prefix)?;
+
+    // usually /run/user/<uid>/...
+    let path = base.place_runtime_file("tickets")?;
+
+    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
+
+    let mut data = file_get_json(&path, Some(json!({})))?;
+
+    if let Some(map) = data[server].as_object_mut() {
+        map.remove(username.as_str());
+    }
+
+    replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
+
+    Ok(())
+}
+
+fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<(), Error> {
+
+    let base = BaseDirectories::with_prefix(prefix)?;
+
+    // usually ~/.config/<prefix>/fingerprints
+    let path = base.place_config_file("fingerprints")?;
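+    // The file holds one `<server> <fingerprint>` pair per line, e.g.
+    // (illustrative): `pbs.example.org 64:d3:...:62`.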
+
+    let raw = match std::fs::read_to_string(&path) {
+        Ok(v) => v,
+        Err(err) => {
+            if err.kind() == std::io::ErrorKind::NotFound {
+                String::new()
+            } else {
+                bail!("unable to read fingerprints from {:?} - {}", path, err);
+            }
+        }
+    };
+
+    let mut result = String::new();
+
+    raw.split('\n').for_each(|line| {
+        let items: Vec<String> = line.split_whitespace().map(String::from).collect();
+        if items.len() == 2 {
+            if items[0] == server {
+                // found, add later with new fingerprint
+            } else {
+                result.push_str(line);
+                result.push('\n');
+            }
+        }
+    });
+
+    result.push_str(server);
+    result.push(' ');
+    result.push_str(fingerprint);
+    result.push('\n');
+
+    replace_file(path, result.as_bytes(), CreateOptions::new())?;
+
+    Ok(())
+}
+
+fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {
+
+    let base = BaseDirectories::with_prefix(prefix).ok()?;
+
+    // usually ~/.config/<prefix>/fingerprints
+    let path = base.place_config_file("fingerprints").ok()?;
+
+    let raw = std::fs::read_to_string(&path).ok()?;
+
+    for line in raw.split('\n') {
+        let items: Vec<String> = line.split_whitespace().map(String::from).collect();
+        if items.len() == 2 && items[0] == server {
+            return Some(items[1].clone());
+        }
+    }
+
+    None
+}
+
+fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> {
+
+    let base = BaseDirectories::with_prefix(prefix)?;
+
+    // usually /run/user/<uid>/...
+    let path = base.place_runtime_file("tickets")?;
+
+    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
+
+    let mut data = file_get_json(&path, Some(json!({})))?;
+
+    let now = proxmox::tools::time::epoch_i64();
+
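+    // Tickets are kept per server and per user (illustrative layout):
+    // { "<server>": { "<user>": { "timestamp": <epoch>, "ticket": "...", "token": "..." } } }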
+    data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
+
+    let mut new_data = json!({});
+
+    let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
+
+    let empty = serde_json::map::Map::new();
+    for (server, info) in data.as_object().unwrap_or(&empty) {
+        for (user, uinfo) in info.as_object().unwrap_or(&empty) {
+            if let Some(timestamp) = uinfo["timestamp"].as_i64() {
+                let age = now - timestamp;
+                if age < ticket_lifetime {
+                    new_data[server][user] = uinfo.clone();
+                }
+            }
+        }
+    }
+
+    replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
+
+    Ok(())
+}
+
+fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(String, String)> {
+    let base = BaseDirectories::with_prefix(prefix).ok()?;
+
+    // usually /run/user/<uid>/...
+    let path = base.place_runtime_file("tickets").ok()?;
+    let data = file_get_json(&path, None).ok()?;
+    let now = proxmox::tools::time::epoch_i64();
+    let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
+    let uinfo = data[server][userid.as_str()].as_object()?;
+    let timestamp = uinfo["timestamp"].as_i64()?;
+    let age = now - timestamp;
+
+    if age < ticket_lifetime {
+        let ticket = uinfo["ticket"].as_str()?;
+        let token = uinfo["token"].as_str()?;
+        Some((ticket.to_owned(), token.to_owned()))
+    } else {
+        None
+    }
+}
+
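+/// Builds a URI of the form `https://<server>:<port>/<path>[?<query>]`.
+///
+/// For example (illustrative), `build_uri("pbs.example.org", 8007, "api2/json/version", None)`
+/// yields `https://pbs.example.org:8007/api2/json/version`.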
+fn build_uri(server: &str, port: u16, path: &str, query: Option<String>) -> Result<Uri, Error> {
+    Uri::builder()
+        .scheme("https")
+        .authority(build_authority(server, port)?)
+        .path_and_query(match query {
+            Some(query) => format!("/{}?{}", path, query),
+            None => format!("/{}", path),
+        })
+        .build()
+        .map_err(|err| format_err!("error building uri - {}", err))
+}
+
+impl HttpClient {
+    pub fn new(
+        server: &str,
+        port: u16,
+        auth_id: &Authid,
+        mut options: HttpClientOptions,
+    ) -> Result<Self, Error> {
+
+        let verified_fingerprint = Arc::new(Mutex::new(None));
+
+        let mut expected_fingerprint = options.fingerprint.take();
+
+        if expected_fingerprint.is_some() {
+            // do not store fingerprints passed via options in cache
+            options.fingerprint_cache = false;
+        } else if options.fingerprint_cache && options.prefix.is_some() {
+            expected_fingerprint = load_fingerprint(options.prefix.as_ref().unwrap(), server);
+        }
+
+        let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
+
+        if options.verify_cert {
+            let server = server.to_string();
+            let verified_fingerprint = verified_fingerprint.clone();
+            let interactive = options.interactive;
+            let fingerprint_cache = options.fingerprint_cache;
+            let prefix = options.prefix.clone();
+            ssl_connector_builder.set_verify_callback(openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| {
+                match Self::verify_callback(valid, ctx, expected_fingerprint.as_ref(), interactive) {
+                    Ok(None) => true,
+                    Ok(Some(fingerprint)) => {
+                        if fingerprint_cache && prefix.is_some() {
+                            if let Err(err) = store_fingerprint(
+                                prefix.as_ref().unwrap(), &server, &fingerprint) {
+                                eprintln!("{}", err);
+                            }
+                        }
+                        *verified_fingerprint.lock().unwrap() = Some(fingerprint);
+                        true
+                    },
+                    Err(err) => {
+                        eprintln!("certificate validation failed - {}", err);
+                        false
+                    },
+                }
+            });
+        } else {
+            ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
+        }
+
+        let mut httpc = HttpConnector::new();
+        httpc.set_nodelay(true); // important for h2 download performance!
+        httpc.enforce_http(false); // we want https...
+
+        httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
+        let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
+        let client = Client::builder()
+        //.http2_initial_stream_window_size( (1 << 31) - 2)
+        //.http2_initial_connection_window_size( (1 << 31) - 2)
+            .build::<_, Body>(https);
+
+        let password = options.password.take();
+        let use_ticket_cache = options.ticket_cache && options.prefix.is_some();
+
+        let password = if let Some(password) = password {
+            password
+        } else {
+            let userid = if auth_id.is_token() {
+                bail!("API token secret must be provided!");
+            } else {
+                auth_id.user()
+            };
+            let mut ticket_info = None;
+            if use_ticket_cache {
+                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
+            }
+            if let Some((ticket, _token)) = ticket_info {
+                ticket
+            } else {
+                Self::get_password(userid, options.interactive)?
+            }
+        };
+
+        let auth = Arc::new(RwLock::new(AuthInfo {
+            auth_id: auth_id.clone(),
+            ticket: password.clone(),
+            token: "".to_string(),
+        }));
+
+        let server2 = server.to_string();
+        let client2 = client.clone();
+        let auth2 = auth.clone();
+        let prefix2 = options.prefix.clone();
+
+        let renewal_future = async move {
+            loop {
+                tokio::time::sleep(Duration::new(60 * 15, 0)).await; // 15 minutes
+                let (auth_id, ticket) = {
+                    let authinfo = auth2.read().unwrap().clone();
+                    (authinfo.auth_id, authinfo.ticket)
+                };
+                match Self::credentials(client2.clone(), server2.clone(), port, auth_id.user().clone(), ticket).await {
+                    Ok(auth) => {
+                        if use_ticket_cache && prefix2.is_some() {
+                            let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
+                        }
+                        *auth2.write().unwrap() = auth;
+                    },
+                    Err(err) => {
+                        eprintln!("re-authentication failed: {}", err);
+                        return;
+                    }
+                }
+            }
+        };
+
+        let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
+
+        let login_future = Self::credentials(
+            client.clone(),
+            server.to_owned(),
+            port,
+            auth_id.user().clone(),
+            password,
+        ).map_ok({
+            let server = server.to_string();
+            let prefix = options.prefix.clone();
+            let authinfo = auth.clone();
+
+            move |auth| {
+                if use_ticket_cache && prefix.is_some() {
+                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
+                }
+                *authinfo.write().unwrap() = auth;
+                tokio::spawn(renewal_future);
+            }
+        });
+
+        let first_auth = if auth_id.is_token() {
+            // TODO check access here?
+            None
+        } else {
+            Some(BroadcastFuture::new(Box::new(login_future)))
+        };
+
+        Ok(Self {
+            client,
+            server: String::from(server),
+            port,
+            fingerprint: verified_fingerprint,
+            auth,
+            ticket_abort,
+            first_auth,
+            _options: options,
+        })
+    }
+
+    /// Login
+    ///
+    /// Login is done on demand, so this is only required if you need
+    /// access to the authentication data in 'AuthInfo'.
+    ///
+    /// Note: tickets are periodically renewed, so this can also be used
+    /// to query the current ticket.
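+    ///
+    /// A hypothetical usage sketch (assumes an async context and an existing client):
+    /// ```ignore
+    /// let auth = client.login().await?;
+    /// eprintln!("connected as {}", auth.auth_id);
+    /// ```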
+    pub async fn login(&self) -> Result<AuthInfo, Error> {
+        if let Some(future) = &self.first_auth {
+            future.listen().await?;
+        }
+
+        let authinfo = self.auth.read().unwrap();
+        Ok(authinfo.clone())
+    }
+
+    /// Returns the certificate fingerprint that was explicitly verified for
+    /// this connection (expected match or interactive confirmation), if any.
+    pub fn fingerprint(&self) -> Option<String> {
+        (*self.fingerprint.lock().unwrap()).clone()
+    }
+
+    fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
+        // If we're on a TTY, query the user for a password
+        if interactive && tty::stdin_isatty() {
+            let msg = format!("Password for \"{}\": ", username);
+            return Ok(String::from_utf8(tty::read_password(&msg)?)?);
+        }
+
+        bail!("no password input mechanism available");
+    }
+
+    fn verify_callback(
+        openssl_valid: bool,
+        ctx: &mut X509StoreContextRef,
+        expected_fingerprint: Option<&String>,
+        interactive: bool,
+    ) -> Result<Option<String>, Error> {
+
+        if openssl_valid {
+            return Ok(None);
+        }
+
+        let cert = match ctx.current_cert() {
+            Some(cert) => cert,
+            None => bail!("context lacks current certificate."),
+        };
+
+        let depth = ctx.error_depth();
+        if depth != 0 { bail!("context depth != 0") }
+
+        let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
+            Ok(fp) => fp,
+            Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
+        };
+        let fp_string = proxmox::tools::digest_to_hex(&fp);
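+        // render as colon-separated hex byte pairs, e.g. "ab:cd:ef:...":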
+        let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
+            .collect::<Vec<&str>>().join(":");
+
+        if let Some(expected_fingerprint) = expected_fingerprint {
+            let expected_fingerprint = expected_fingerprint.to_lowercase();
+            if expected_fingerprint == fp_string {
+                return Ok(Some(fp_string));
+            } else {
+                eprintln!("WARNING: certificate fingerprint does not match expected fingerprint!");
+                eprintln!("expected:    {}", expected_fingerprint);
+            }
+        }
+
+        // If we're on a TTY, query the user
+        if interactive && tty::stdin_isatty() {
+            eprintln!("fingerprint: {}", fp_string);
+            loop {
+                eprint!("Are you sure you want to continue connecting? (y/n): ");
+                let _ = std::io::stdout().flush();
+                use std::io::{BufRead, BufReader};
+                let mut line = String::new();
+                match BufReader::new(std::io::stdin()).read_line(&mut line) {
+                    Ok(_) => {
+                        let trimmed = line.trim();
+                        if trimmed == "y" || trimmed == "Y" {
+                            return Ok(Some(fp_string));
+                        } else if trimmed == "n" || trimmed == "N" {
+                            bail!("Certificate fingerprint was not confirmed.");
+                        } else {
+                            continue;
+                        }
+                    }
+                    Err(err) => bail!("Certificate fingerprint was not confirmed - {}.", err),
+                }
+            }
+        }
+
+        bail!("Certificate fingerprint was not confirmed.");
+    }
+
+    pub async fn request(&self, mut req: Request<Body>) -> Result<Value, Error> {
+
+        let client = self.client.clone();
+
+        let auth = self.login().await?;
+        if auth.auth_id.is_token() {
+            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+        } else {
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+        }
+
+        Self::api_request(client, req).await
+    }
+
+    pub async fn get(
+        &self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
+        self.request(req).await
+    }
+
+    pub async fn delete(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
+        self.request(req).await
+    }
+
+    pub async fn post(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
+        self.request(req).await
+    }
+
+    pub async fn put(
+        &mut self,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder(&self.server, self.port, "PUT", path, data)?;
+        self.request(req).await
+    }
+
+    pub async fn download(
+        &mut self,
+        path: &str,
+        output: &mut (dyn Write + Send),
+    ) -> Result<(), Error> {
+        let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;
+
+        let client = self.client.clone();
+
+        let auth = self.login().await?;
+
+        let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+        req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+
+        let resp = tokio::time::timeout(
+            HTTP_TIMEOUT,
+            client.request(req)
+        )
+            .await
+            .map_err(|_| format_err!("http download request timed out"))??;
+        let status = resp.status();
+        if !status.is_success() {
+            // propagate the server's error message instead of discarding it
+            HttpClient::api_response(resp).await?;
+            bail!("unknown error");
+        } else {
+            resp.into_body()
+                .map_err(Error::from)
+                .try_fold(output, move |acc, chunk| async move {
+                    acc.write_all(&chunk)?;
+                    Ok::<_, Error>(acc)
+                })
+                .await?;
+        }
+        Ok(())
+    }
+
+    pub async fn upload(
+        &mut self,
+        content_type: &str,
+        body: Body,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Value, Error> {
+
+        let query = match data {
+            Some(data) => Some(json_object_to_query(data)?),
+            None => None,
+        };
+        let url = build_uri(&self.server, self.port, path, query)?;
+
+        let req = Request::builder()
+            .method("POST")
+            .uri(url)
+            .header("User-Agent", "proxmox-backup-client/1.0")
+            .header("Content-Type", content_type)
+            .body(body).unwrap();
+
+        self.request(req).await
+    }
+
+    pub async fn start_h2_connection(
+        &self,
+        mut req: Request<Body>,
+        protocol_name: String,
+    ) -> Result<(H2Client, futures::future::AbortHandle), Error> {
+
+        let client = self.client.clone();
+        let auth = self.login().await?;
+
+        if auth.auth_id.is_token() {
+            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
+        } else {
+            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
+            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
+            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
+        }
+
+        req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());
+
+        let resp = tokio::time::timeout(
+            HTTP_TIMEOUT,
+            client.request(req)
+        )
+            .await
+            .map_err(|_| format_err!("http upgrade request timed out"))??;
+        let status = resp.status();
+
+        if status != http::StatusCode::SWITCHING_PROTOCOLS {
+            Self::api_response(resp).await?;
+            bail!("unknown error");
+        }
+
+        let upgraded = hyper::upgrade::on(resp).await?;
+
+        let max_window_size = (1 << 31) - 2;
+
+        let (h2, connection) = h2::client::Builder::new()
+            .initial_connection_window_size(max_window_size)
+            .initial_window_size(max_window_size)
+            .max_frame_size(4*1024*1024)
+            .handshake(upgraded)
+            .await?;
+
+        let connection = connection
+            .map_err(|_| eprintln!("HTTP/2.0 connection failed"));
+
+        let (connection, abort) = futures::future::abortable(connection);
+        // An abortable future resolves to None when cancelled and to Some(_)
+        // when it finishes. Since we don't care about the result, map it away:
+        let connection = connection.map(|_| ());
+
+        // Spawn a new task to drive the connection state
+        tokio::spawn(connection);
+
+        // Wait until the `SendRequest` handle has available capacity.
+        let c = h2.ready().await?;
+        Ok((H2Client::new(c), abort))
+    }
+
+    async fn credentials(
+        client: Client<HttpsConnector>,
+        server: String,
+        port: u16,
+        username: Userid,
+        password: String,
+    ) -> Result<AuthInfo, Error> {
+        let data = json!({ "username": username, "password": password });
+        let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
+        let cred = Self::api_request(client, req).await?;
+        let auth = AuthInfo {
+            auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
+            ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
+            token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
+        };
+
+        Ok(auth)
+    }
+
+    async fn api_response(response: Response<Body>) -> Result<Value, Error> {
+        let status = response.status();
+        let data = hyper::body::to_bytes(response.into_body()).await?;
+
+        let text = String::from_utf8(data.to_vec())?;
+        if status.is_success() {
+            if text.is_empty() {
+                Ok(Value::Null)
+            } else {
+                let value: Value = serde_json::from_str(&text)?;
+                Ok(value)
+            }
+        } else {
+            Err(Error::from(HttpError::new(status, text)))
+        }
+    }
+
+    async fn api_request(
+        client: Client<HttpsConnector>,
+        req: Request<Body>
+    ) -> Result<Value, Error> {
+
+        Self::api_response(
+            tokio::time::timeout(
+                HTTP_TIMEOUT,
+                client.request(req)
+            )
+                .await
+                .map_err(|_| format_err!("http request timed out"))??
+        ).await
+    }
+
+    // Read-only access to server property
+    pub fn server(&self) -> &str {
+        &self.server
+    }
+
+    pub fn port(&self) -> u16 {
+        self.port
+    }
+
+    pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
+        if let Some(data) = data {
+            if method == "POST" {
+                let url = build_uri(server, port, path, None)?;
+                let request = Request::builder()
+                    .method(method)
+                    .uri(url)
+                    .header("User-Agent", "proxmox-backup-client/1.0")
+                    .header(hyper::header::CONTENT_TYPE, "application/json")
+                    .body(Body::from(data.to_string()))?;
+                Ok(request)
+            } else {
+                let query = json_object_to_query(data)?;
+                let url = build_uri(server, port, path, Some(query))?;
+                let request = Request::builder()
+                    .method(method)
+                    .uri(url)
+                    .header("User-Agent", "proxmox-backup-client/1.0")
+                    .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
+                    .body(Body::empty())?;
+                Ok(request)
+            }
+        } else {
+            let url = build_uri(server, port, path, None)?;
+            let request = Request::builder()
+                .method(method)
+                .uri(url)
+                .header("User-Agent", "proxmox-backup-client/1.0")
+                .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
+                .body(Body::empty())?;
+
+            Ok(request)
+        }
+    }
+}
+
+impl Drop for HttpClient {
+    fn drop(&mut self) {
+        self.ticket_abort.abort();
+    }
+}
+
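+/// Cloneable handle around an established HTTP/2 `SendRequest`, offering
+/// JSON API convenience methods (get/put/post, download, upload).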
+#[derive(Clone)]
+pub struct H2Client {
+    h2: h2::client::SendRequest<bytes::Bytes>,
+}
+
+impl H2Client {
+
+    pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
+        Self { h2 }
+    }
+
+    pub async fn get(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder("localhost", "GET", path, param, None).unwrap();
+        self.request(req).await
+    }
+
+    pub async fn put(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder("localhost", "PUT", path, param, None).unwrap();
+        self.request(req).await
+    }
+
+    pub async fn post(
+        &self,
+        path: &str,
+        param: Option<Value>
+    ) -> Result<Value, Error> {
+        let req = Self::request_builder("localhost", "POST", path, param, None).unwrap();
+        self.request(req).await
+    }
+
+    pub async fn download<W: Write + Send>(
+        &self,
+        path: &str,
+        param: Option<Value>,
+        mut output: W,
+    ) -> Result<(), Error> {
+        let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();
+
+        let response_future = self.send_request(request, None).await?;
+
+        let resp = response_future.await?;
+
+        let status = resp.status();
+        if !status.is_success() {
+            H2Client::h2api_response(resp).await?; // raise error
+            unreachable!();
+        }
+
+        let mut body = resp.into_body();
+        while let Some(chunk) = body.data().await {
+            let chunk = chunk?;
+            body.flow_control().release_capacity(chunk.len())?;
+            output.write_all(&chunk)?;
+        }
+
+        Ok(())
+    }
+
+    pub async fn upload(
+        &self,
+        method: &str, // POST or PUT
+        path: &str,
+        param: Option<Value>,
+        content_type: &str,
+        data: Vec<u8>,
+    ) -> Result<Value, Error> {
+        let request = Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
+
+        let mut send_request = self.h2.clone().ready().await?;
+
+        let (response, stream) = send_request.send_request(request, false).unwrap();
+
+        PipeToSendStream::new(bytes::Bytes::from(data), stream).await?;
+
+        response
+            .map_err(Error::from)
+            .and_then(Self::h2api_response)
+            .await
+    }
+
+    async fn request(
+        &self,
+        request: Request<()>,
+    ) -> Result<Value, Error> {
+
+        self.send_request(request, None)
+            .and_then(move |response| {
+                response
+                    .map_err(Error::from)
+                    .and_then(Self::h2api_response)
+            })
+            .await
+    }
+
+    pub fn send_request(
+        &self,
+        request: Request<()>,
+        data: Option<bytes::Bytes>,
+    ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
+
+        self.h2.clone()
+            .ready()
+            .map_err(Error::from)
+            .and_then(move |mut send_request| async move {
+                if let Some(data) = data {
+                    let (response, stream) = send_request.send_request(request, false).unwrap();
+                    PipeToSendStream::new(data, stream).await?;
+                    Ok(response)
+                } else {
+                    let (response, _stream) = send_request.send_request(request, true).unwrap();
+                    Ok(response)
+                }
+            })
+    }
+
+    pub async fn h2api_response(
+        response: Response<h2::RecvStream>,
+    ) -> Result<Value, Error> {
+        let status = response.status();
+
+        let (_head, mut body) = response.into_parts();
+
+        let mut data = Vec::new();
+        while let Some(chunk) = body.data().await {
+            let chunk = chunk?;
+            // Whenever data is received, the caller is responsible for
+            // releasing capacity back to the server once it has freed
+            // the data from memory.
+            // Let the server send more data.
+            body.flow_control().release_capacity(chunk.len())?;
+            data.extend(chunk);
+        }
+
+        let text = String::from_utf8(data)?;
+        if status.is_success() {
+            if text.is_empty() {
+                Ok(Value::Null)
+            } else {
+                let mut value: Value = serde_json::from_str(&text)?;
+                if let Some(map) = value.as_object_mut() {
+                    if let Some(data) = map.remove("data") {
+                        return Ok(data);
+                    }
+                }
+                bail!("got result without data property");
+            }
+        } else {
+            Err(Error::from(HttpError::new(status, text)))
+        }
+    }
+
+    // Note: parameters are always encoded into the URL (query string)
+    pub fn request_builder(
+        server: &str,
+        method: &str,
+        path: &str,
+        param: Option<Value>,
+        content_type: Option<&str>,
+    ) -> Result<Request<()>, Error> {
+        let path = path.trim_matches('/');
+
+        let content_type = content_type.unwrap_or("application/x-www-form-urlencoded");
+        let query = match param {
+            Some(param) => {
+                let query = json_object_to_query(param)?;
+                // We observed problems with hyper at around 6000 characters, so stay well below that
+                if query.len() > 4096 {
+                    bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
+                }
+                Some(query)
+            }
+            None => None,
+        };
+
+        let url = build_uri(server, 8007, path, query)?;
+        let request = Request::builder()
+            .method(method)
+            .uri(url)
+            .header("User-Agent", "proxmox-backup-client/1.0")
+            .header(hyper::header::CONTENT_TYPE, content_type)
+            .body(())?;
+        Ok(request)
+    }
+}
diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs
new file mode 100644 (file)
index 0000000..78e9dad
--- /dev/null
@@ -0,0 +1,70 @@
+//! Client side interface to the proxmox backup server
+//!
+//! This library implements the client-side interface for accessing the
+//! backup server via HTTPS.
+
+use anyhow::Error;
+
+use pbs_api_types::{Authid, Userid};
+use pbs_tools::ticket::Ticket;
+use pbs_tools::cert::CertInfo;
+use pbs_tools::auth::private_auth_key;
+
+pub mod catalog_shell;
+pub mod pxar;
+pub mod tools;
+
+mod merge_known_chunks;
+pub mod pipe_to_stream;
+
+mod http_client;
+pub use http_client::*;
+
+mod vsock_client;
+pub use vsock_client::*;
+
+mod task_log;
+pub use task_log::*;
+
+mod backup_reader;
+pub use backup_reader::*;
+
+mod backup_writer;
+pub use backup_writer::*;
+
+mod remote_chunk_reader;
+pub use remote_chunk_reader::*;
+
+mod pxar_backup_stream;
+pub use pxar_backup_stream::*;
+
+mod backup_repo;
+pub use backup_repo::*;
+
+mod backup_specification;
+pub use backup_specification::*;
+
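+/// TCP keepalive time (in seconds) used for client connections.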
+pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
+
+/// Connect to localhost:8007 as root@pam
+///
+/// This automatically creates a ticket if run as the 'root' user.
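+///
+/// A minimal usage sketch (assumes a tokio runtime and a local PBS instance):
+/// ```ignore
+/// let client = connect_to_localhost()?;
+/// let version = client.get("api2/json/version", None).await?;
+/// ```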
+pub fn connect_to_localhost() -> Result<HttpClient, Error> {
+
+    let uid = nix::unistd::Uid::current();
+
+    let client = if uid.is_root() {
+        let ticket = Ticket::new("PBS", Userid::root_userid())?
+            .sign(private_auth_key(), None)?;
+        let fingerprint = CertInfo::new()?.fingerprint()?;
+        let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
+
+        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
+    } else {
+        let options = HttpClientOptions::new_interactive(None, None);
+
+        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
+    };
+
+    Ok(client)
+}
diff --git a/pbs-client/src/merge_known_chunks.rs b/pbs-client/src/merge_known_chunks.rs
new file mode 100644 (file)
index 0000000..ef7a8f9
--- /dev/null
@@ -0,0 +1,97 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use anyhow::Error;
+use futures::{ready, Stream};
+use pin_project::pin_project;
+
+use pbs_datastore::data_blob::ChunkInfo;
+
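+/// A unit of chunk information flowing through the upload pipeline: either a
+/// batch of chunks already known to the server (offset and digest only) or a
+/// single new chunk that still needs to be uploaded.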
+pub enum MergedChunkInfo {
+    Known(Vec<(u64, [u8; 32])>),
+    New(ChunkInfo),
+}
+
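+/// Stream adaptor: `merge_known_chunks()` coalesces consecutive
+/// `MergedChunkInfo::Known` items into batches (flushed at 64 entries)
+/// while passing `New` chunks through unchanged.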
+pub trait MergeKnownChunks: Sized {
+    fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
+}
+
+#[pin_project]
+pub struct MergeKnownChunksQueue<S> {
+    #[pin]
+    input: S,
+    buffer: Option<MergedChunkInfo>,
+}
+
+impl<S> MergeKnownChunks for S
+where
+    S: Stream<Item = Result<MergedChunkInfo, Error>>,
+{
+    fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self> {
+        MergeKnownChunksQueue {
+            input: self,
+            buffer: None,
+        }
+    }
+}
+
+impl<S> Stream for MergeKnownChunksQueue<S>
+where
+    S: Stream<Item = Result<MergedChunkInfo, Error>>,
+{
+    type Item = Result<MergedChunkInfo, Error>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+
+        loop {
+            match ready!(this.input.as_mut().poll_next(cx)) {
+                Some(Err(err)) => return Poll::Ready(Some(Err(err))),
+                None => {
+                    if let Some(last) = this.buffer.take() {
+                        return Poll::Ready(Some(Ok(last)));
+                    } else {
+                        return Poll::Ready(None);
+                    }
+                }
+                Some(Ok(merged_chunk_info)) => {
+                    match merged_chunk_info {
+                        MergedChunkInfo::Known(list) => {
+                            let last = this.buffer.take();
+
+                            match last {
+                                None => {
+                                    *this.buffer = Some(MergedChunkInfo::Known(list));
+                                    // continue
+                                }
+                                Some(MergedChunkInfo::Known(mut last_list)) => {
+                                    last_list.extend_from_slice(&list);
+                                    let len = last_list.len();
+                                    *this.buffer = Some(MergedChunkInfo::Known(last_list));
+
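+                                    // emit the accumulated batch once it is
+                                    // large enough to be worth forwarding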
+                                    if len >= 64 {
+                                        return Poll::Ready(this.buffer.take().map(Ok));
+                                    }
+                                    // continue
+                                }
+                                Some(MergedChunkInfo::New(_)) => {
+                                    *this.buffer = Some(MergedChunkInfo::Known(list));
+                                    return Poll::Ready(last.map(Ok));
+                                }
+                            }
+                        }
+                        MergedChunkInfo::New(chunk_info) => {
+                            let new = MergedChunkInfo::New(chunk_info);
+                            if let Some(last) = this.buffer.take() {
+                                *this.buffer = Some(new);
+                                return Poll::Ready(Some(Ok(last)));
+                            } else {
+                                return Poll::Ready(Some(Ok(new)));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/pbs-client/src/pipe_to_stream.rs b/pbs-client/src/pipe_to_stream.rs
new file mode 100644 (file)
index 0000000..d461b1d
--- /dev/null
@@ -0,0 +1,70 @@
+// Implement simple flow control for h2 client
+//
+// See also: hyper/src/proto/h2/mod.rs
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use anyhow::{format_err, Error};
+use bytes::Bytes;
+use futures::{ready, Future};
+use h2::SendStream;
+
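+/// Future that sends a single `Bytes` buffer over an h2 `SendStream`: it
+/// waits for send-window capacity, then writes the data and closes the stream.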
+pub struct PipeToSendStream {
+    body_tx: SendStream<Bytes>,
+    data: Option<Bytes>,
+}
+
+impl PipeToSendStream {
+    pub fn new(data: Bytes, tx: SendStream<Bytes>) -> PipeToSendStream {
+        PipeToSendStream {
+            body_tx: tx,
+            data: Some(data),
+        }
+    }
+}
+
+impl Future for PipeToSendStream {
+    type Output = Result<(), Error>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        if this.data.is_some() {
+            // just reserve 1 byte to make sure there's some
+            // capacity available. h2 will handle the capacity
+            // management for the actual body chunk.
+            this.body_tx.reserve_capacity(1);
+
+            if this.body_tx.capacity() == 0 {
+                loop {
+                    match ready!(this.body_tx.poll_capacity(cx)) {
+                        Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
+                        Some(Ok(0)) => {}
+                        Some(Ok(_)) => break,
+                        None => return Poll::Ready(Err(format_err!("protocol canceled"))),
+                    }
+                }
+            } else if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
+                return Poll::Ready(Err(match reset {
+                    Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
+                    Err(err) => Error::from(err),
+                }));
+            }
+
+            this.body_tx
+                .send_data(this.data.take().unwrap(), true)
+                .map_err(Error::from)?;
+
+            Poll::Ready(Ok(()))
+        } else if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
+            Poll::Ready(Err(match reset {
+                Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
+                Err(err) => Error::from(err),
+            }))
+        } else {
+            Poll::Ready(Ok(()))
+        }
+    }
+}
diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
new file mode 100644 (file)
index 0000000..96888c7
--- /dev/null
@@ -0,0 +1,1079 @@
+use std::collections::{HashSet, HashMap};
+use std::ffi::{CStr, CString, OsStr};
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::errno::Errno;
+use nix::fcntl::OFlag;
+use nix::sys::stat::{FileStat, Mode};
+use futures::future::BoxFuture;
+use futures::FutureExt;
+
+use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
+use pxar::Metadata;
+use pxar::encoder::{SeqWrite, LinkOffset};
+
+use proxmox::c_str;
+use proxmox::sys::error::SysError;
+use proxmox::tools::fd::RawFdNum;
+use proxmox::tools::vec;
+use proxmox::tools::fd::Fd;
+
+use pbs_datastore::catalog::BackupCatalogWriter;
+use pbs_tools::{acl, fs, xattr};
+use pbs_tools::str::strip_ascii_whitespace;
+
+use crate::pxar::metadata::errno_is_unsupported;
+use crate::pxar::Flags;
+use crate::pxar::tools::assert_single_path_component;
+
+/// Options for creating a pxar archive or stream.
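+///
+/// A minimal construction sketch (the field values are illustrative, not defaults):
+/// ```ignore
+/// let options = PxarCreateOptions {
+///     entries_max: 1024 * 1024,
+///     skip_lost_and_found: true,
+///     ..PxarCreateOptions::default()
+/// };
+/// ```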
+#[derive(Default, Clone)]
+pub struct PxarCreateOptions {
+    /// Device/mountpoint st_dev numbers that should be included. None for no limitation.
+    pub device_set: Option<HashSet<u64>>,
+    /// Exclusion patterns
+    pub patterns: Vec<MatchEntry>,
+    /// Maximum number of entries to hold in memory
+    pub entries_max: usize,
+    /// Skip lost+found directory
+    pub skip_lost_and_found: bool,
+    /// Verbose output
+    pub verbose: bool,
+}
+
+fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
+    let mut fs_stat = std::mem::MaybeUninit::uninit();
+    let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
+    Errno::result(res)?;
+    let fs_stat = unsafe { fs_stat.assume_init() };
+
+    Ok(fs_stat.f_type)
+}
+
+#[rustfmt::skip]
+pub fn is_virtual_file_system(magic: i64) -> bool {
+    use proxmox::sys::linux::magic::*;
+
+    matches!(magic, BINFMTFS_MAGIC |
+        CGROUP2_SUPER_MAGIC |
+        CGROUP_SUPER_MAGIC |
+        CONFIGFS_MAGIC |
+        DEBUGFS_MAGIC |
+        DEVPTS_SUPER_MAGIC |
+        EFIVARFS_MAGIC |
+        FUSE_CTL_SUPER_MAGIC |
+        HUGETLBFS_MAGIC |
+        MQUEUE_MAGIC |
+        NFSD_MAGIC |
+        PROC_SUPER_MAGIC |
+        PSTOREFS_MAGIC |
+        RPCAUTH_GSSMAGIC |
+        SECURITYFS_MAGIC |
+        SELINUX_MAGIC |
+        SMACK_MAGIC |
+        SYSFS_MAGIC)
+}
+
+#[derive(Debug)]
+struct ArchiveError {
+    path: PathBuf,
+    error: Error,
+}
+
+impl ArchiveError {
+    fn new(path: PathBuf, error: Error) -> Self {
+        Self { path, error }
+    }
+}
+
+impl std::error::Error for ArchiveError {}
+
+impl fmt::Display for ArchiveError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "error at {:?}: {}", self.path, self.error)
+    }
+}
+
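+/// Inode identity (device + inode number) used to detect hardlinks while archiving.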
+#[derive(Eq, PartialEq, Hash)]
+struct HardLinkInfo {
+    st_dev: u64,
+    st_ino: u64,
+}
+
+/// TODO: make a builder for the create_archive call for fewer parameters and add a method to add a
+/// logger which does not write to stderr.
+struct Logger;
+
+impl std::io::Write for Logger {
+    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+        std::io::stderr().write(data)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        std::io::stderr().flush()
+    }
+}
+
+/// Error output channel; like `Logger`, this currently writes to stderr.
+struct ErrorReporter;
+
+impl std::io::Write for ErrorReporter {
+    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+        std::io::stderr().write(data)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        std::io::stderr().flush()
+    }
+}
+
+struct Archiver {
+    feature_flags: Flags,
+    fs_feature_flags: Flags,
+    fs_magic: i64,
+    patterns: Vec<MatchEntry>,
+    callback: Box<dyn FnMut(&Path) -> Result<(), Error> + Send>,
+    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
+    path: PathBuf,
+    entry_counter: usize,
+    entry_limit: usize,
+    current_st_dev: libc::dev_t,
+    device_set: Option<HashSet<u64>>,
+    hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
+    errors: ErrorReporter,
+    logger: Logger,
+    file_copy_buffer: Vec<u8>,
+}
+
+type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;
+
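+/// Create a pxar archive from the contents of `source_dir`, writing the
+/// encoded stream to `writer`.
+///
+/// A hypothetical call sketch; `dir`, `writer` and the chosen flags are
+/// assumptions, not fixed values:
+/// ```ignore
+/// create_archive(
+///     dir,                          // nix::dir::Dir of the source directory
+///     writer,                       // any `SeqWrite + Send` sink
+///     Flags::DEFAULT,               // feature flags (assumed default constant)
+///     |path| { eprintln!("{:?}", path); Ok(()) }, // per-entry progress callback
+///     None,                         // no catalog writer
+///     PxarCreateOptions::default(),
+/// ).await?;
+/// ```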
+pub async fn create_archive<T, F>(
+    source_dir: Dir,
+    mut writer: T,
+    feature_flags: Flags,
+    callback: F,
+    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
+    options: PxarCreateOptions,
+) -> Result<(), Error>
+where
+    T: SeqWrite + Send,
+    F: FnMut(&Path) -> Result<(), Error> + Send + 'static,
+{
+    let fs_magic = detect_fs_type(source_dir.as_raw_fd())?;
+    if is_virtual_file_system(fs_magic) {
+        bail!("refusing to backup a virtual file system");
+    }
+
+    let mut fs_feature_flags = Flags::from_magic(fs_magic);
+
+    let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?;
+    let metadata = get_metadata(
+        source_dir.as_raw_fd(),
+        &stat,
+        feature_flags & fs_feature_flags,
+        fs_magic,
+        &mut fs_feature_flags,
+    )
+    .map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;
+
+    let mut device_set = options.device_set.clone();
+    if let Some(ref mut set) = device_set {
+        set.insert(stat.st_dev);
+    }
+
+    let mut encoder = Encoder::new(&mut writer, &metadata).await?;
+
+    let mut patterns = options.patterns;
+
+    if options.skip_lost_and_found {
+        patterns.push(MatchEntry::parse_pattern(
+            "lost+found",
+            PatternFlag::PATH_NAME,
+            MatchType::Exclude,
+        )?);
+    }
+
+    let mut archiver = Archiver {
+        feature_flags,
+        fs_feature_flags,
+        fs_magic,
+        callback: Box::new(callback),
+        patterns,
+        catalog,
+        path: PathBuf::new(),
+        entry_counter: 0,
+        entry_limit: options.entries_max,
+        current_st_dev: stat.st_dev,
+        device_set,
+        hardlinks: HashMap::new(),
+        errors: ErrorReporter,
+        logger: Logger,
+        file_copy_buffer: vec::undefined(4 * 1024 * 1024),
+    };
+
+    archiver.archive_dir_contents(&mut encoder, source_dir, true).await?;
+    encoder.finish().await?;
+    Ok(())
+}
+
+struct FileListEntry {
+    name: CString,
+    path: PathBuf,
+    stat: FileStat,
+}
+
+impl Archiver {
+    /// Get the currently effective feature flags. (Requested flags masked by the file system
+    /// feature flags).
+    fn flags(&self) -> Flags {
+        self.feature_flags & self.fs_feature_flags
+    }
+
+    fn wrap_err(&self, err: Error) -> Error {
+        if err.downcast_ref::<ArchiveError>().is_some() {
+            err
+        } else {
+            ArchiveError::new(self.path.clone(), err).into()
+        }
+    }
+
+    fn archive_dir_contents<'a, 'b, T: SeqWrite + Send>(
+        &'a mut self,
+        encoder: &'a mut Encoder<'b, T>,
+        mut dir: Dir,
+        is_root: bool,
+    ) -> BoxFuture<'a, Result<(), Error>> {
+        async move {
+            let entry_counter = self.entry_counter;
+
+            let old_patterns_count = self.patterns.len();
+            self.read_pxar_excludes(dir.as_raw_fd())?;
+
+            let mut file_list = self.generate_directory_file_list(&mut dir, is_root)?;
+
+            if is_root && old_patterns_count > 0 {
+                file_list.push(FileListEntry {
+                    name: CString::new(".pxarexclude-cli").unwrap(),
+                    path: PathBuf::new(),
+                    stat: unsafe { std::mem::zeroed() },
+                });
+            }
+
+            let dir_fd = dir.as_raw_fd();
+
+            let old_path = std::mem::take(&mut self.path);
+
+            for file_entry in file_list {
+                let file_name = file_entry.name.to_bytes();
+
+                if is_root && file_name == b".pxarexclude-cli" {
+                    self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count).await?;
+                    continue;
+                }
+
+                (self.callback)(&file_entry.path)?;
+                self.path = file_entry.path;
+                self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat).await
+                    .map_err(|err| self.wrap_err(err))?;
+            }
+            self.path = old_path;
+            self.entry_counter = entry_counter;
+            self.patterns.truncate(old_patterns_count);
+
+            Ok(())
+        }.boxed()
+    }
+
+    /// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
+    ///
+    /// The `existed` flag is set when iterating through a directory to note that we know the file
+    /// is supposed to exist, so we warn if it doesn't.
+    fn open_file(
+        &mut self,
+        parent: RawFd,
+        file_name: &CStr,
+        oflags: OFlag,
+        existed: bool,
+    ) -> Result<Option<Fd>, Error> {
+        // common flags we always want to use:
+        let oflags = oflags | OFlag::O_CLOEXEC | OFlag::O_NOCTTY;
+
+        let mut noatime = OFlag::O_NOATIME;
+        loop {
+            return match Fd::openat(
+                &unsafe { RawFdNum::from_raw_fd(parent) },
+                file_name,
+                oflags | noatime,
+                Mode::empty(),
+            ) {
+                Ok(fd) => Ok(Some(fd)),
+                Err(nix::Error::Sys(Errno::ENOENT)) => {
+                    if existed {
+                        self.report_vanished_file()?;
+                    }
+                    Ok(None)
+                }
+                Err(nix::Error::Sys(Errno::EACCES)) => {
+                    writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
+                    Ok(None)
+                }
+                Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
+                    // Retry without O_NOATIME:
+                    noatime = OFlag::empty();
+                    continue;
+                }
+                Err(other) => Err(Error::from(other)),
+            }
+        }
+    }
+
+    fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
+        let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? {
+            Some(fd) => fd,
+            None => return Ok(()),
+        };
+
+        let old_pattern_count = self.patterns.len();
+
+        let path_bytes = self.path.as_os_str().as_bytes();
+
+        let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
+
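+        // Illustrative .pxarexclude content (one pattern per line):
+        //   # comment
+        //   *.tmp          exclude matches anywhere below this directory
+        //   /cache         exclude, anchored to this directory
+        //   !/cache/keep   re-include, anchored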
+        use io::BufRead;
+        for line in io::BufReader::new(file).split(b'\n') {
+            let line = match line {
+                Ok(line) => line,
+                Err(err) => {
+                    let _ = writeln!(
+                        self.errors,
+                        "ignoring .pxarexclude after read error in {:?}: {}",
+                        self.path,
+                        err,
+                    );
+                    self.patterns.truncate(old_pattern_count);
+                    return Ok(());
+                }
+            };
+
+            let line = strip_ascii_whitespace(&line);
+
+            if line.is_empty() || line[0] == b'#' {
+                continue;
+            }
+
+            let mut buf;
+            let (line, mode, anchored) = if line[0] == b'/' {
+                buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
+                buf.extend(path_bytes);
+                buf.extend(line);
+                (&buf[..], MatchType::Exclude, true)
+            } else if line.starts_with(b"!/") {
+                // inverted case with absolute path
+                buf = Vec::with_capacity(path_bytes.len() + line.len());
+                buf.extend(path_bytes);
+                buf.extend(&line[1..]); // without the '!'
+                (&buf[..], MatchType::Include, true)
+            } else if line.starts_with(b"!") {
+                (&line[1..], MatchType::Include, false)
+            } else {
+                (line, MatchType::Exclude, false)
+            };
+
+            match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
+                Ok(pattern) => {
+                    if anchored {
+                        self.patterns.push(pattern.add_flags(MatchFlag::ANCHORED));
+                    } else {
+                        self.patterns.push(pattern);
+                    }
+                }
+                Err(err) => {
+                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn encode_pxarexclude_cli<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        file_name: &CStr,
+        patterns_count: usize,
+    ) -> Result<(), Error> {
+        let content = generate_pxar_excludes_cli(&self.patterns[..patterns_count]);
+        if let Some(ref catalog) = self.catalog {
+            catalog.lock().unwrap().add_file(file_name, content.len() as u64, 0)?;
+        }
+
+        let mut metadata = Metadata::default();
+        metadata.stat.mode = pxar::format::mode::IFREG | 0o600;
+
+        let mut file = encoder.create_file(&metadata, ".pxarexclude-cli", content.len() as u64).await?;
+        file.write_all(&content).await?;
+
+        Ok(())
+    }
+
+    fn generate_directory_file_list(
+        &mut self,
+        dir: &mut Dir,
+        is_root: bool,
+    ) -> Result<Vec<FileListEntry>, Error> {
+        let dir_fd = dir.as_raw_fd();
+
+        let mut file_list = Vec::new();
+
+        for file in dir.iter() {
+            let file = file?;
+
+            let file_name = file.file_name().to_owned();
+            let file_name_bytes = file_name.to_bytes();
+            if file_name_bytes == b"." || file_name_bytes == b".." {
+                continue;
+            }
+
+            if is_root && file_name_bytes == b".pxarexclude-cli" {
+                continue;
+            }
+
+            let os_file_name = OsStr::from_bytes(file_name_bytes);
+            assert_single_path_component(os_file_name)?;
+            let full_path = self.path.join(os_file_name);
+
+            let stat = match nix::sys::stat::fstatat(
+                dir_fd,
+                file_name.as_c_str(),
+                nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
+            ) {
+                Ok(stat) => stat,
+                Err(ref err) if err.not_found() => continue,
+                Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
+            };
+
+            let match_path = PathBuf::from("/").join(full_path.clone());
+            if self
+                .patterns
+                .matches(match_path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
+                == Some(MatchType::Exclude)
+            {
+                continue;
+            }
+
+            self.entry_counter += 1;
+            if self.entry_counter > self.entry_limit {
+                bail!("exceeded allowed number of file entries (> {})",self.entry_limit);
+            }
+
+            file_list.push(FileListEntry {
+                name: file_name,
+                path: full_path,
+                stat
+            });
+        }
+
+        file_list.sort_unstable_by(|a, b| a.name.cmp(&b.name));
+
+        Ok(file_list)
+    }
+
+    fn report_vanished_file(&mut self) -> Result<(), Error> {
+        writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
+        Ok(())
+    }
+
+    fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
+        writeln!(
+            self.errors,
+            "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
+            self.path,
+        )?;
+        Ok(())
+    }
+
+    fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
+        writeln!(
+            self.errors,
+            "warning: file size increased while reading: {:?}, file will be truncated!",
+            self.path,
+        )?;
+        Ok(())
+    }
+
+    async fn add_entry<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        parent: RawFd,
+        c_file_name: &CStr,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        use pxar::format::mode;
+
+        let file_mode = stat.st_mode & libc::S_IFMT;
+        let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
+            OFlag::empty()
+        } else {
+            OFlag::O_PATH
+        };
+
+        let fd = self.open_file(
+            parent,
+            c_file_name,
+            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW,
+            true,
+        )?;
+
+        let fd = match fd {
+            Some(fd) => fd,
+            None => return Ok(()),
+        };
+
+        let metadata = get_metadata(
+            fd.as_raw_fd(),
+            stat,
+            self.flags(),
+            self.fs_magic,
+            &mut self.fs_feature_flags,
+        )?;
+
+        if self
+            .patterns
+            .matches(self.path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
+            == Some(MatchType::Exclude)
+        {
+            return Ok(());
+        }
+
+        let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
+        match metadata.file_type() {
+            mode::IFREG => {
+                let link_info = HardLinkInfo {
+                    st_dev: stat.st_dev,
+                    st_ino: stat.st_ino,
+                };
+
+                if stat.st_nlink > 1 {
+                    if let Some((path, offset)) = self.hardlinks.get(&link_info) {
+                        if let Some(ref catalog) = self.catalog {
+                            catalog.lock().unwrap().add_hardlink(c_file_name)?;
+                        }
+
+                        encoder.add_hardlink(file_name, path, *offset).await?;
+
+                        return Ok(());
+                    }
+                }
+
+                let file_size = stat.st_size as u64;
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_file(c_file_name, file_size, stat.st_mtime)?;
+                }
+
+                let offset: LinkOffset =
+                    self.add_regular_file(encoder, fd, file_name, &metadata, file_size).await?;
+
+                if stat.st_nlink > 1 {
+                    self.hardlinks.insert(link_info, (self.path.clone(), offset));
+                }
+
+                Ok(())
+            }
+            mode::IFDIR => {
+                let dir = Dir::from_fd(fd.into_raw_fd())?;
+
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().start_directory(c_file_name)?;
+                }
+                let result = self.add_directory(encoder, dir, c_file_name, &metadata, stat).await;
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().end_directory()?;
+                }
+                result
+            }
+            mode::IFSOCK => {
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_socket(c_file_name)?;
+                }
+
+                Ok(encoder.add_socket(&metadata, file_name).await?)
+            }
+            mode::IFIFO => {
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_fifo(c_file_name)?;
+                }
+
+                Ok(encoder.add_fifo(&metadata, file_name).await?)
+            }
+            mode::IFLNK => {
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_symlink(c_file_name)?;
+                }
+
+                self.add_symlink(encoder, fd, file_name, &metadata).await
+            }
+            mode::IFBLK => {
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_block_device(c_file_name)?;
+                }
+
+                self.add_device(encoder, file_name, &metadata, &stat).await
+            }
+            mode::IFCHR => {
+                if let Some(ref catalog) = self.catalog {
+                    catalog.lock().unwrap().add_char_device(c_file_name)?;
+                }
+
+                self.add_device(encoder, file_name, &metadata, &stat).await
+            }
+            other => bail!(
+                "encountered unknown file type: 0x{:x} (0o{:o})",
+                other,
+                other
+            ),
+        }
+    }
+
+    async fn add_directory<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        dir: Dir,
+        dir_name: &CStr,
+        metadata: &Metadata,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        let dir_name = OsStr::from_bytes(dir_name.to_bytes());
+
+        let mut encoder = encoder.create_directory(dir_name, &metadata).await?;
+
+        let old_fs_magic = self.fs_magic;
+        let old_fs_feature_flags = self.fs_feature_flags;
+        let old_st_dev = self.current_st_dev;
+
+        let mut skip_contents = false;
+        if old_st_dev != stat.st_dev {
+            self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
+            self.fs_feature_flags = Flags::from_magic(self.fs_magic);
+            self.current_st_dev = stat.st_dev;
+
+            if is_virtual_file_system(self.fs_magic) {
+                skip_contents = true;
+            } else if let Some(set) = &self.device_set {
+                skip_contents = !set.contains(&stat.st_dev);
+            }
+        }
+
+        let result = if skip_contents {
+            writeln!(self.logger, "skipping mount point: {:?}", self.path)?;
+            Ok(())
+        } else {
+            self.archive_dir_contents(&mut encoder, dir, false).await
+        };
+
+        self.fs_magic = old_fs_magic;
+        self.fs_feature_flags = old_fs_feature_flags;
+        self.current_st_dev = old_st_dev;
+
+        encoder.finish().await?;
+        result
+    }
+
+    async fn add_regular_file<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        fd: Fd,
+        file_name: &Path,
+        metadata: &Metadata,
+        file_size: u64,
+    ) -> Result<LinkOffset, Error> {
+        let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
+        let mut remaining = file_size;
+        let mut out = encoder.create_file(metadata, file_name, file_size).await?;
+        while remaining != 0 {
+            let mut got = match file.read(&mut self.file_copy_buffer[..]) {
+                Ok(0) => break,
+                Ok(got) => got,
+                Err(err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
+                Err(err) => bail!(err),
+            };
+            if got as u64 > remaining {
+                self.report_file_grew_while_reading()?;
+                got = remaining as usize;
+            }
+            out.write_all(&self.file_copy_buffer[..got]).await?;
+            remaining -= got as u64;
+        }
+        if remaining > 0 {
+            self.report_file_shrunk_while_reading()?;
+            let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
+            vec::clear(&mut self.file_copy_buffer[..to_zero]);
+            while remaining != 0 {
+                let fill = remaining.min(self.file_copy_buffer.len() as u64) as usize;
+                out.write_all(&self.file_copy_buffer[..fill]).await?;
+                remaining -= fill as u64;
+            }
+        }
+
+        Ok(out.file_offset())
+    }
+
+    async fn add_symlink<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        fd: Fd,
+        file_name: &Path,
+        metadata: &Metadata,
+    ) -> Result<(), Error> {
+        let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
+        encoder.add_symlink(metadata, file_name, dest).await?;
+        Ok(())
+    }
+
+    async fn add_device<T: SeqWrite + Send>(
+        &mut self,
+        encoder: &mut Encoder<'_, T>,
+        file_name: &Path,
+        metadata: &Metadata,
+        stat: &FileStat,
+    ) -> Result<(), Error> {
+        Ok(encoder.add_device(
+            metadata,
+            file_name,
+            pxar::format::Device::from_dev_t(stat.st_rdev),
+        ).await?)
+    }
+}
+
+fn get_metadata(
+    fd: RawFd,
+    stat: &FileStat,
+    flags: Flags,
+    fs_magic: i64,
+    fs_feature_flags: &mut Flags,
+) -> Result<Metadata, Error> {
+    // required for some of these
+    let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
+
+    let mut meta = Metadata {
+        stat: pxar::Stat {
+            mode: u64::from(stat.st_mode),
+            flags: 0,
+            uid: stat.st_uid,
+            gid: stat.st_gid,
+            mtime: pxar::format::StatxTimestamp::new(stat.st_mtime, stat.st_mtime_nsec as u32),
+        },
+        ..Default::default()
+    };
+
+    get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags, fs_feature_flags)?;
+    get_chattr(&mut meta, fd)?;
+    get_fat_attr(&mut meta, fd, fs_magic)?;
+    get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
+    Ok(meta)
+}
+
+fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
+    if !flags.contains(Flags::WITH_FCAPS) {
+        return Ok(());
+    }
+
+    match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) {
+        Ok(data) => {
+            meta.fcaps = Some(pxar::format::FCaps { data });
+            Ok(())
+        }
+        Err(Errno::ENODATA) => Ok(()),
+        Err(Errno::EOPNOTSUPP) => {
+            fs_feature_flags.remove(Flags::WITH_FCAPS);
+            Ok(())
+        }
+        Err(Errno::EBADF) => Ok(()), // symlinks
+        Err(err) => bail!("failed to read file capabilities: {}", err),
+    }
+}
+
+fn get_xattr_fcaps_acl(
+    meta: &mut Metadata,
+    fd: RawFd,
+    proc_path: &Path,
+    flags: Flags,
+    fs_feature_flags: &mut Flags,
+) -> Result<(), Error> {
+    if !flags.contains(Flags::WITH_XATTRS) {
+        return Ok(());
+    }
+
+    let xattrs = match xattr::flistxattr(fd) {
+        Ok(names) => names,
+        Err(Errno::EOPNOTSUPP) => {
+            fs_feature_flags.remove(Flags::WITH_XATTRS);
+            return Ok(());
+        },
+        Err(Errno::EBADF) => return Ok(()), // symlinks
+        Err(err) => bail!("failed to read xattrs: {}", err),
+    };
+
+    for attr in &xattrs {
+        if xattr::is_security_capability(&attr) {
+            get_fcaps(meta, fd, flags, fs_feature_flags)?;
+            continue;
+        }
+
+        if xattr::is_acl(&attr) {
+            get_acl(meta, proc_path, flags, fs_feature_flags)?;
+            continue;
+        }
+
+        if !xattr::is_valid_xattr_name(&attr) {
+            continue;
+        }
+
+        match xattr::fgetxattr(fd, attr) {
+            Ok(data) => meta
+                .xattrs
+                .push(pxar::format::XAttr::new(attr.to_bytes(), data)),
+            Err(Errno::ENODATA) => (), // it got removed while we were iterating...
+            Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
+            Err(Errno::EBADF) => (),   // symlinks, shouldn't be able to reach this either
+            Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
+        }
+    }
+
+    Ok(())
+}
+
+fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
+    let mut attr: libc::c_long = 0;
+
+    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read file attributes: {}", err),
+    }
+
+    metadata.stat.flags |= Flags::from_chattr(attr).bits();
+
+    Ok(())
+}
+
+fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
+    use proxmox::sys::linux::magic::*;
+
+    if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
+        return Ok(());
+    }
+
+    let mut attr: u32 = 0;
+
+    match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read fat attributes: {}", err),
+    }
+
+    metadata.stat.flags |= Flags::from_fat_attr(attr).bits();
+
+    Ok(())
+}
+
+/// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
+fn get_quota_project_id(
+    metadata: &mut Metadata,
+    fd: RawFd,
+    flags: Flags,
+    magic: i64,
+) -> Result<(), Error> {
+    if !(metadata.is_dir() || metadata.is_regular_file()) {
+        return Ok(());
+    }
+
+    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
+        return Ok(());
+    }
+
+    use proxmox::sys::linux::magic::*;
+
+    match magic {
+        EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
+        _ => return Ok(()),
+    }
+
+    let mut fsxattr = fs::FSXAttr::default();
+    let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
+
+    // On some FUSE filesystems the ioctl may not be supported. In that case
+    // the error is ignored and the projid keeps its default value of 0.
+    if let Err(err) = res {
+        let errno = err
+            .as_errno()
+            .ok_or_else(|| format_err!("error while reading quota project id"))?;
+        if errno_is_unsupported(errno) {
+            return Ok(());
+        } else {
+            bail!("error while reading quota project id ({})", errno);
+        }
+    }
+
+    let projid = fsxattr.fsx_projid as u64;
+    if projid != 0 {
+        metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
+    }
+    Ok(())
+}
+
+fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
+    if !flags.contains(Flags::WITH_ACL) {
+        return Ok(());
+    }
+
+    if metadata.is_symlink() {
+        return Ok(());
+    }
+
+    get_acl_do(metadata, proc_path, acl::ACL_TYPE_ACCESS, fs_feature_flags)?;
+
+    if metadata.is_dir() {
+        get_acl_do(metadata, proc_path, acl::ACL_TYPE_DEFAULT, fs_feature_flags)?;
+    }
+
+    Ok(())
+}
+
+fn get_acl_do(
+    metadata: &mut Metadata,
+    proc_path: &Path,
+    acl_type: acl::ACLType,
+    fs_feature_flags: &mut Flags,
+) -> Result<(), Error> {
+    // To read ACLs of type ACL_TYPE_DEFAULT we have to construct a path for
+    // acl_get_file(), since acl_get_fd() only allows getting ACL_TYPE_ACCESS
+    // attributes.
+    let acl = match acl::ACL::get_file(&proc_path, acl_type) {
+        Ok(acl) => acl,
+        // Don't bail if underlying endpoint does not support acls
+        Err(Errno::EOPNOTSUPP) => {
+            fs_feature_flags.remove(Flags::WITH_ACL);
+            return Ok(());
+        }
+        // Don't bail if the endpoint cannot carry acls
+        Err(Errno::EBADF) => return Ok(()),
+        // Don't bail if there is no data
+        Err(Errno::ENODATA) => return Ok(()),
+        Err(err) => bail!("error while reading ACL - {}", err),
+    };
+
+    process_acl(metadata, acl, acl_type)
+}
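The `/proc/self/fd` indirection used for `proc_path` is a general Linux trick: it turns a file descriptor back into a path that path-based APIs can consume. A minimal sketch of the idea (file name hypothetical):

    use std::os::unix::io::AsRawFd;
    use std::path::PathBuf;

    let file = std::fs::File::open("/etc/hostname")?;
    // Path-based APIs can now operate on the exact inode the fd refers to:
    let proc_path = PathBuf::from(format!("/proc/self/fd/{}", file.as_raw_fd()));
    let reopened = std::fs::File::open(&proc_path)?; // fresh descriptor, same inode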
+
+fn process_acl(
+    metadata: &mut Metadata,
+    acl: acl::ACL,
+    acl_type: acl::ACLType,
+) -> Result<(), Error> {
+    use pxar::format::acl as pxar_acl;
+    use pxar::format::acl::{Group, GroupObject, Permissions, User};
+
+    let mut acl_user = Vec::new();
+    let mut acl_group = Vec::new();
+    let mut acl_group_obj = None;
+    let mut acl_default = None;
+    let mut user_obj_permissions = None;
+    let mut group_obj_permissions = None;
+    let mut other_permissions = None;
+    let mut mask_permissions = None;
+
+    for entry in &mut acl.entries() {
+        let tag = entry.get_tag_type()?;
+        let permissions = entry.get_permissions()?;
+        match tag {
+            acl::ACL_USER_OBJ => user_obj_permissions = Some(Permissions(permissions)),
+            acl::ACL_GROUP_OBJ => group_obj_permissions = Some(Permissions(permissions)),
+            acl::ACL_OTHER => other_permissions = Some(Permissions(permissions)),
+            acl::ACL_MASK => mask_permissions = Some(Permissions(permissions)),
+            acl::ACL_USER => {
+                acl_user.push(User {
+                    uid: entry.get_qualifier()?,
+                    permissions: Permissions(permissions),
+                });
+            }
+            acl::ACL_GROUP => {
+                acl_group.push(Group {
+                    gid: entry.get_qualifier()?,
+                    permissions: Permissions(permissions),
+                });
+            }
+            _ => bail!("Unexpected ACL tag encountered!"),
+        }
+    }
+
+    acl_user.sort();
+    acl_group.sort();
+
+    match acl_type {
+        acl::ACL_TYPE_ACCESS => {
+            // If a mask entry is present, the mask permissions are mapped to
+            // the stat group permissions. Only then do the ACL group-object
+            // permissions need to be stored separately; otherwise they are
+            // identical to the stat group permissions anyway.
+            if let (Some(gop), true) = (group_obj_permissions, mask_permissions.is_some()) {
+                acl_group_obj = Some(GroupObject { permissions: gop });
+            }
+
+            metadata.acl.users = acl_user;
+            metadata.acl.groups = acl_group;
+            metadata.acl.group_obj = acl_group_obj;
+        }
+        acl::ACL_TYPE_DEFAULT => {
+            if user_obj_permissions != None
+                || group_obj_permissions != None
+                || other_permissions != None
+                || mask_permissions != None
+            {
+                acl_default = Some(pxar_acl::Default {
+                    // The value is set to UINT64_MAX as placeholder if one
+                    // of the permissions is not set
+                    user_obj_permissions: user_obj_permissions.unwrap_or(Permissions::NO_MASK),
+                    group_obj_permissions: group_obj_permissions.unwrap_or(Permissions::NO_MASK),
+                    other_permissions: other_permissions.unwrap_or(Permissions::NO_MASK),
+                    mask_permissions: mask_permissions.unwrap_or(Permissions::NO_MASK),
+                });
+            }
+
+            metadata.acl.default_users = acl_user;
+            metadata.acl.default_groups = acl_group;
+            metadata.acl.default = acl_default;
+        }
+        _ => bail!("Unexpected ACL type encountered"),
+    }
+
+    Ok(())
+}
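A worked example of the access-ACL branch above, with hypothetical permission values: given GROUP_OBJ = r-x (0o5) and MASK = rw- (0o6), stat's group bits carry the mask, so the real group-owner permissions would be lost unless stored explicitly:

    let group_obj_permissions = Some(Permissions(0o5));
    let mask_permissions = Some(Permissions(0o6));
    // mirror of the condition in process_acl():
    let acl_group_obj = match (group_obj_permissions, mask_permissions.is_some()) {
        (Some(gop), true) => Some(GroupObject { permissions: gop }),
        _ => None, // without a mask, stat's group bits already say it all
    };
    assert_eq!(acl_group_obj.map(|g| g.permissions.0), Some(0o5));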
+
+/// Note that our pattern lists are "positive". `MatchType::Include` means the file is included.
+/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
+/// prefix.
+fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
+    use pathpatterns::MatchPattern;
+
+    let mut content = Vec::new();
+
+    for pattern in patterns {
+        match pattern.match_type() {
+            MatchType::Include => content.push(b'!'),
+            MatchType::Exclude => (),
+        }
+
+        match pattern.pattern() {
+            MatchPattern::Literal(lit) => content.extend(lit),
+            MatchPattern::Pattern(pat) => content.extend(pat.pattern().to_bytes()),
+        }
+
+        if pattern.match_flags() == MatchFlag::MATCH_DIRECTORIES && content.last() != Some(&b'/') {
+            content.push(b'/');
+        }
+
+        content.push(b'\n');
+    }
+
+    content
+}
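For illustration (patterns hypothetical): an include pattern `etc/**`, a plain exclude pattern `tmp`, and a directories-only exclude `cache` would serialize to the following exclude-file content, with the include inverted via the `'!'` prefix:

    !etc/**
    tmp
    cache/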
diff --git a/pbs-client/src/pxar/dir_stack.rs b/pbs-client/src/pxar/dir_stack.rs
new file mode 100644 (file)
index 0000000..86740ff
--- /dev/null
@@ -0,0 +1,162 @@
+use std::ffi::OsString;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::path::{Path, PathBuf};
+
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::{mkdirat, Mode};
+
+use proxmox::sys::error::SysError;
+use proxmox::tools::fd::BorrowedFd;
+use pxar::Metadata;
+
+use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
+
+pub struct PxarDir {
+    file_name: OsString,
+    metadata: Metadata,
+    dir: Option<Dir>,
+}
+
+impl PxarDir {
+    pub fn new(file_name: OsString, metadata: Metadata) -> Self {
+        Self {
+            file_name,
+            metadata,
+            dir: None,
+        }
+    }
+
+    pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
+        Self {
+            file_name: OsString::from("."),
+            metadata,
+            dir: Some(dir),
+        }
+    }
+
+    fn create_dir(
+        &mut self,
+        parent: RawFd,
+        allow_existing_dirs: bool,
+    ) -> Result<BorrowedFd, Error> {
+        match mkdirat(
+            parent,
+            self.file_name.as_os_str(),
+            perms_from_metadata(&self.metadata)?,
+        ) {
+            Ok(()) => (),
+            Err(err) => {
+                if !(allow_existing_dirs && err.already_exists()) {
+                    return Err(err.into());
+                }
+            }
+        }
+
+        self.open_dir(parent)
+    }
+
+    fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
+        let dir = Dir::openat(
+            parent,
+            self.file_name.as_os_str(),
+            OFlag::O_DIRECTORY,
+            Mode::empty(),
+        )?;
+
+        let fd = BorrowedFd::new(&dir);
+        self.dir = Some(dir);
+
+        Ok(fd)
+    }
+
+    pub fn try_as_borrowed_fd(&self) -> Option<BorrowedFd> {
+        self.dir.as_ref().map(BorrowedFd::new)
+    }
+
+    pub fn metadata(&self) -> &Metadata {
+        &self.metadata
+    }
+}
+
+pub struct PxarDirStack {
+    dirs: Vec<PxarDir>,
+    path: PathBuf,
+    created: usize,
+}
+
+impl PxarDirStack {
+    pub fn new(root: Dir, metadata: Metadata) -> Self {
+        Self {
+            dirs: vec![PxarDir::with_dir(root, metadata)],
+            path: PathBuf::from("/"),
+            created: 1, // the root directory exists
+        }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.dirs.is_empty()
+    }
+
+    pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
+        assert_single_path_component(&file_name)?;
+        self.path.push(&file_name);
+        self.dirs.push(PxarDir::new(file_name, metadata));
+        Ok(())
+    }
+
+    pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
+        let out = self.dirs.pop();
+        if !self.path.pop() {
+            if self.path.as_os_str() == "/" {
+                // we just finished the root directory, make sure this can only happen once:
+                self.path = PathBuf::new();
+            } else {
+                bail!("lost track of path");
+            }
+        }
+        self.created = self.created.min(self.dirs.len());
+        Ok(out)
+    }
+
+    pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<BorrowedFd, Error> {
+        // should not be possible given the way we use it:
+        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
+
+        let dirs_len = self.dirs.len();
+        let mut fd = self.dirs[self.created - 1]
+            .try_as_borrowed_fd()
+            .ok_or_else(|| format_err!("lost track of directory file descriptors"))?
+            .as_raw_fd();
+
+        while self.created < dirs_len {
+            fd = self.dirs[self.created]
+                .create_dir(fd, allow_existing_dirs)?
+                .as_raw_fd();
+            self.created += 1;
+        }
+
+        self.dirs[self.created - 1]
+            .try_as_borrowed_fd()
+            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
+    }
+
+    pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
+        let _: BorrowedFd = self.last_dir_fd(allow_existing_dirs)?;
+        Ok(())
+    }
+
+    pub fn root_dir_fd(&self) -> Result<BorrowedFd, Error> {
+        // should not be possible given the way we use it:
+        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
+
+        self.dirs[0]
+            .try_as_borrowed_fd()
+            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
+    }
+
+    pub fn path(&self) -> &Path {
+        &self.path
+    }
+}
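The `created` counter is what makes directory creation lazy: `push` only records a directory, and `last_dir_fd` materializes any not-yet-created ancestors on first use, so excluded empty subtrees never touch the disk. A hypothetical usage sketch (assumes `root_dir: Dir` and the metadata values are in scope):

    let mut stack = PxarDirStack::new(root_dir, root_metadata);
    stack.push(OsString::from("a"), dir_metadata.clone())?;
    stack.push(OsString::from("b"), dir_metadata.clone())?;
    // Neither "a" nor "b" exists on disk yet; this creates both, then
    // returns a borrowed fd for "b":
    let fd = stack.last_dir_fd(true)?;
    assert_eq!(stack.path(), Path::new("/a/b"));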
diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs
new file mode 100644 (file)
index 0000000..a03b5a5
--- /dev/null
@@ -0,0 +1,864 @@
+//! Code for extraction of pxar contents onto the file system.
+
+use std::convert::TryFrom;
+use std::ffi::{CStr, CString, OsStr, OsString};
+use std::io;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use std::pin::Pin;
+
+use futures::future::Future;
+use anyhow::{bail, format_err, Error};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pathpatterns::{MatchEntry, MatchList, MatchType};
+use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
+use pxar::decoder::aio::Decoder;
+use pxar::format::Device;
+use pxar::{Entry, EntryKind, Metadata};
+
+use proxmox::c_result;
+use proxmox::tools::{
+    fs::{create_path, CreateOptions},
+    io::{sparse_copy, sparse_copy_async},
+};
+
+use pbs_tools::zip::{ZipEncoder, ZipEntry};
+
+use crate::pxar::dir_stack::PxarDirStack;
+use crate::pxar::metadata;
+use crate::pxar::Flags;
+
+pub struct PxarExtractOptions<'a> {
+    pub match_list: &'a [MatchEntry],
+    pub extract_match_default: bool,
+    pub allow_existing_dirs: bool,
+    pub on_error: Option<ErrorHandler>,
+}
+
+pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
+
+pub fn extract_archive<T, F>(
+    mut decoder: pxar::decoder::Decoder<T>,
+    destination: &Path,
+    feature_flags: Flags,
+    mut callback: F,
+    options: PxarExtractOptions,
+) -> Result<(), Error>
+where
+    T: pxar::decoder::SeqRead,
+    F: FnMut(&Path),
+{
+    // we use this to keep track of our directory-traversal
+    decoder.enable_goodbye_entries(true);
+
+    let root = decoder
+        .next()
+        .ok_or_else(|| format_err!("found empty pxar archive"))?
+        .map_err(|err| format_err!("error reading pxar archive: {}", err))?;
+
+    if !root.is_dir() {
+        bail!("pxar archive does not start with a directory entry!");
+    }
+
+    create_path(
+        &destination,
+        None,
+        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
+    )
+    .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
+
+    let dir = Dir::open(
+        destination,
+        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
+        Mode::empty(),
+    )
+    .map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
+
+    let mut extractor = Extractor::new(
+        dir,
+        root.metadata().clone(),
+        options.allow_existing_dirs,
+        feature_flags,
+    );
+
+    if let Some(on_error) = options.on_error {
+        extractor.on_error(on_error);
+    }
+
+    let mut match_stack = Vec::new();
+    let mut err_path_stack = vec![OsString::from("/")];
+    let mut current_match = options.extract_match_default;
+    while let Some(entry) = decoder.next() {
+        let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
+
+        let file_name_os = entry.file_name();
+
+        // safety check: a file entry in an archive must never contain slashes:
+        if file_name_os.as_bytes().contains(&b'/') {
+            bail!("archive file entry contains slashes, which is invalid and a security concern");
+        }
+
+        let file_name = CString::new(file_name_os.as_bytes())
+            .map_err(|_| format_err!("encountered file name with null-bytes"))?;
+
+        let metadata = entry.metadata();
+
+        extractor.set_path(entry.path().as_os_str().to_owned());
+
+        let match_result = options.match_list.matches(
+            entry.path().as_os_str().as_bytes(),
+            Some(metadata.file_type() as u32),
+        );
+
+        let did_match = match match_result {
+            Some(MatchType::Include) => true,
+            Some(MatchType::Exclude) => false,
+            None => current_match,
+        };
+        match (did_match, entry.kind()) {
+            (_, EntryKind::Directory) => {
+                callback(entry.path());
+
+                let create = current_match && match_result != Some(MatchType::Exclude);
+                extractor
+                    .enter_directory(file_name_os.to_owned(), metadata.clone(), create)
+                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+
+                // We're starting a new directory, push our old matching state and replace it with
+                // our new one:
+                match_stack.push(current_match);
+                current_match = did_match;
+
+                // When we hit the goodbye table we'll try to apply metadata to the directory, but
+                // the Goodbye entry will not contain the path, so push it to our path stack for
+                // error messages:
+                err_path_stack.push(extractor.clone_path());
+
+                Ok(())
+            }
+            (_, EntryKind::GoodbyeTable) => {
+                // go up a directory
+
+                extractor.set_path(err_path_stack.pop().ok_or_else(|| {
+                    format_err!(
+                        "error at entry {:?}: unexpected end of directory",
+                        file_name_os
+                    )
+                })?);
+
+                extractor
+                    .leave_directory()
+                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+
+                // We left a directory, also get back our previous matching state. This is in sync
+                // with `dir_stack` so this should never be empty except for the final goodbye
+                // table, in which case we get back to the default of `true`.
+                current_match = match_stack.pop().unwrap_or(true);
+
+                Ok(())
+            }
+            (true, EntryKind::Symlink(link)) => {
+                callback(entry.path());
+                extractor.extract_symlink(&file_name, metadata, link.as_ref())
+            }
+            (true, EntryKind::Hardlink(link)) => {
+                callback(entry.path());
+                extractor.extract_hardlink(&file_name, link.as_os_str())
+            }
+            (true, EntryKind::Device(dev)) => {
+                if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
+                    callback(entry.path());
+                    extractor.extract_device(&file_name, metadata, dev)
+                } else {
+                    Ok(())
+                }
+            }
+            (true, EntryKind::Fifo) => {
+                if extractor.contains_flags(Flags::WITH_FIFOS) {
+                    callback(entry.path());
+                    extractor.extract_special(&file_name, metadata, 0)
+                } else {
+                    Ok(())
+                }
+            }
+            (true, EntryKind::Socket) => {
+                if extractor.contains_flags(Flags::WITH_SOCKETS) {
+                    callback(entry.path());
+                    extractor.extract_special(&file_name, metadata, 0)
+                } else {
+                    Ok(())
+                }
+            }
+            (true, EntryKind::File { size, .. }) => extractor.extract_file(
+                &file_name,
+                metadata,
+                *size,
+                &mut decoder.contents().ok_or_else(|| {
+                    format_err!("found regular file entry without contents in archive")
+                })?,
+            ),
+            (false, _) => Ok(()), // skip this
+        }
+        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+    }
+
+    if !extractor.dir_stack.is_empty() {
+        bail!("unexpected eof while decoding pxar archive");
+    }
+
+    Ok(())
+}
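A hypothetical call site, extracting everything while downgrading per-entry errors to warnings (the handler, callback, and destination path are illustrative; `decoder` is assumed to be in scope):

    let options = PxarExtractOptions {
        match_list: &[],
        extract_match_default: true,
        allow_existing_dirs: true,
        on_error: Some(Box::new(|err| {
            eprintln!("extraction warning: {}", err);
            Ok(()) // keep going instead of aborting
        })),
    };
    extract_archive(
        decoder,
        Path::new("/tmp/restore"),
        Flags::DEFAULT,
        |path| println!("{:?}", path),
        options,
    )?;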
+
+/// Common state for file extraction.
+pub struct Extractor {
+    feature_flags: Flags,
+    allow_existing_dirs: bool,
+    dir_stack: PxarDirStack,
+
+    /// For better error output we need to track the current path in the Extractor state.
+    current_path: Arc<Mutex<OsString>>,
+
+    /// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
+    /// continue extracting or the passed error as `Err` to bail out.
+    on_error: ErrorHandler,
+}
+
+impl Extractor {
+    /// Create a new extractor state for a target directory.
+    pub fn new(
+        root_dir: Dir,
+        metadata: Metadata,
+        allow_existing_dirs: bool,
+        feature_flags: Flags,
+    ) -> Self {
+        Self {
+            dir_stack: PxarDirStack::new(root_dir, metadata),
+            allow_existing_dirs,
+            feature_flags,
+            current_path: Arc::new(Mutex::new(OsString::new())),
+            on_error: Box::new(Err),
+        }
+    }
+
+    /// We call this on errors. The error will be reformatted to include `current_path`. The
+    /// callback should decide whether this error was fatal (simply return it) to bail out early,
+    /// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
+    /// extracting.
+    pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
+        let path = Arc::clone(&self.current_path);
+        self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
+            on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
+        });
+    }
+
+    pub fn set_path(&mut self, path: OsString) {
+        *self.current_path.lock().unwrap() = path;
+    }
+
+    pub fn clone_path(&self) -> OsString {
+        self.current_path.lock().unwrap().clone()
+    }
+
+    /// When encountering a directory during extraction, this is used to keep track of it. If
+    /// `create` is true it is immediately created and its metadata will be updated once we leave
+    /// it. If `create` is false it will only be created if it is going to have any actual content.
+    pub fn enter_directory(
+        &mut self,
+        file_name: OsString,
+        metadata: Metadata,
+        create: bool,
+    ) -> Result<(), Error> {
+        self.dir_stack.push(file_name, metadata)?;
+
+        if create {
+            self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
+        }
+
+        Ok(())
+    }
+
+    /// When done with a directory we can apply its metadata if it has been created.
+    pub fn leave_directory(&mut self) -> Result<(), Error> {
+        let path_info = self.dir_stack.path().to_owned();
+
+        let dir = self
+            .dir_stack
+            .pop()
+            .map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
+            .ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
+
+        if let Some(fd) = dir.try_as_borrowed_fd() {
+            metadata::apply(
+                self.feature_flags,
+                dir.metadata(),
+                fd.as_raw_fd(),
+                &path_info,
+                &mut self.on_error,
+            )
+            .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
+        }
+
+        Ok(())
+    }
+
+    fn contains_flags(&self, flag: Flags) -> bool {
+        self.feature_flags.contains(flag)
+    }
+
+    fn parent_fd(&mut self) -> Result<RawFd, Error> {
+        self.dir_stack
+            .last_dir_fd(self.allow_existing_dirs)
+            .map(|d| d.as_raw_fd())
+            .map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
+    }
+
+    pub fn extract_symlink(
+        &mut self,
+        file_name: &CStr,
+        metadata: &Metadata,
+        link: &OsStr,
+    ) -> Result<(), Error> {
+        let parent = self.parent_fd()?;
+        nix::unistd::symlinkat(link, Some(parent), file_name)?;
+        metadata::apply_at(
+            self.feature_flags,
+            metadata,
+            parent,
+            file_name,
+            self.dir_stack.path(),
+            &mut self.on_error,
+        )
+    }
+
+    pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
+        crate::pxar::tools::assert_relative_path(link)?;
+
+        let parent = self.parent_fd()?;
+        let root = self.dir_stack.root_dir_fd()?;
+        let target = CString::new(link.as_bytes())?;
+        nix::unistd::linkat(
+            Some(root.as_raw_fd()),
+            target.as_c_str(),
+            Some(parent),
+            file_name,
+            nix::unistd::LinkatFlags::NoSymlinkFollow,
+        )?;
+
+        Ok(())
+    }
+
+    pub fn extract_device(
+        &mut self,
+        file_name: &CStr,
+        metadata: &Metadata,
+        device: &Device,
+    ) -> Result<(), Error> {
+        self.extract_special(file_name, metadata, device.to_dev_t())
+    }
+
+    pub fn extract_special(
+        &mut self,
+        file_name: &CStr,
+        metadata: &Metadata,
+        device: libc::dev_t,
+    ) -> Result<(), Error> {
+        let mode = metadata.stat.mode;
+        let mode = u32::try_from(mode).map_err(|_| {
+            format_err!(
+                "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
+                mode,
+                mode,
+            )
+        })?;
+        let parent = self.parent_fd()?;
+        unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
+            .map_err(|err| format_err!("failed to create device node: {}", err))?;
+
+        metadata::apply_at(
+            self.feature_flags,
+            metadata,
+            parent,
+            file_name,
+            self.dir_stack.path(),
+            &mut self.on_error,
+        )
+    }
+
+    pub fn extract_file(
+        &mut self,
+        file_name: &CStr,
+        metadata: &Metadata,
+        size: u64,
+        contents: &mut dyn io::Read,
+    ) -> Result<(), Error> {
+        let parent = self.parent_fd()?;
+        let mut file = unsafe {
+            std::fs::File::from_raw_fd(
+                nix::fcntl::openat(
+                    parent,
+                    file_name,
+                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
+                    Mode::from_bits(0o600).unwrap(),
+                )
+                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
+            )
+        };
+
+        metadata::apply_initial_flags(
+            self.feature_flags,
+            metadata,
+            file.as_raw_fd(),
+            &mut self.on_error,
+        )
+        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
+
+        let result = sparse_copy(&mut *contents, &mut file)
+            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
+
+        if size != result.written {
+            bail!(
+                "extracted {} bytes of a file of {} bytes",
+                result.written,
+                size
+            );
+        }
+
+        if result.seeked_last {
+            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
+                Ok(_) => false,
+                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
+                Err(err) => bail!("error setting file size: {}", err),
+            } {}
+        }
+
+        metadata::apply(
+            self.feature_flags,
+            metadata,
+            file.as_raw_fd(),
+            self.dir_stack.path(),
+            &mut self.on_error,
+        )
+    }
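The `ftruncate` loop exists because `sparse_copy` seeks over zero runs instead of writing them; if the file ends in a hole, the descriptor's length still sits at the last written byte. A standalone sketch of the same fix-up (hypothetical helper, mirroring the nix API used above):

    fn finish_sparse(fd: std::os::unix::io::RawFd, size: u64) -> Result<(), anyhow::Error> {
        loop {
            match nix::unistd::ftruncate(fd, size as i64) {
                Ok(()) => return Ok(()),
                // retry on signal interruption, like the inline loop above
                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => continue,
                Err(err) => anyhow::bail!("error setting file size: {}", err),
            }
        }
    }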
+
+    pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
+        &mut self,
+        file_name: &CStr,
+        metadata: &Metadata,
+        size: u64,
+        contents: &mut T,
+    ) -> Result<(), Error> {
+        let parent = self.parent_fd()?;
+        let mut file = tokio::fs::File::from_std(unsafe {
+            std::fs::File::from_raw_fd(
+                nix::fcntl::openat(
+                    parent,
+                    file_name,
+                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
+                    Mode::from_bits(0o600).unwrap(),
+                )
+                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
+            )
+        });
+
+        metadata::apply_initial_flags(
+            self.feature_flags,
+            metadata,
+            file.as_raw_fd(),
+            &mut self.on_error,
+        )
+        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
+
+        let result = sparse_copy_async(&mut *contents, &mut file)
+            .await
+            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
+
+        if size != result.written {
+            bail!(
+                "extracted {} bytes of a file of {} bytes",
+                result.written,
+                size
+            );
+        }
+
+        if result.seeked_last {
+            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
+                Ok(_) => false,
+                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
+                Err(err) => bail!("error setting file size: {}", err),
+            } {}
+        }
+
+        metadata::apply(
+            self.feature_flags,
+            metadata,
+            file.as_raw_fd(),
+            self.dir_stack.path(),
+            &mut self.on_error,
+        )
+    }
+}
+
+pub async fn create_zip<T, W, P>(
+    output: W,
+    decoder: Accessor<T>,
+    path: P,
+    verbose: bool,
+) -> Result<(), Error>
+where
+    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
+    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
+    P: AsRef<Path>,
+{
+    let root = decoder.open_root().await?;
+    let file = root
+        .lookup(&path).await?
+        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+
+    let mut prefix = PathBuf::new();
+    let mut components = file.entry().path().components();
+    components.next_back(); // discard the last component
+    for comp in components {
+        prefix.push(comp);
+    }
+
+    let mut zipencoder = ZipEncoder::new(output);
+    let mut decoder = decoder;
+    recurse_files_zip(&mut zipencoder, &mut decoder, &prefix, file, verbose)
+        .await
+        .map_err(|err| {
+            eprintln!("error during creating of zip: {}", err);
+            err
+        })?;
+
+    zipencoder
+        .finish()
+        .await
+        .map_err(|err| {
+            eprintln!("error during finishing of zip: {}", err);
+            err
+        })
+}
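A hypothetical call site (accessor and paths illustrative), streaming the subtree `etc` of an archive into a zip file:

    let output = tokio::fs::File::create("/tmp/etc.zip").await?;
    create_zip(output, accessor.clone(), "etc", true).await?;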
+
+fn recurse_files_zip<'a, T, W>(
+    zip: &'a mut ZipEncoder<W>,
+    decoder: &'a mut Accessor<T>,
+    prefix: &'a Path,
+    file: FileEntry<T>,
+    verbose: bool,
+) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
+where
+    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
+    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
+{
+    Box::pin(async move {
+        let metadata = file.entry().metadata();
+        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
+
+        match file.kind() {
+            EntryKind::File { .. } => {
+                if verbose {
+                    eprintln!("adding '{}' to zip", path.display());
+                }
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    true,
+                );
+                zip.add_entry(entry, Some(file.contents().await?))
+                   .await
+                   .map_err(|err| format_err!("could not send file entry: {}", err))?;
+            }
+            EntryKind::Hardlink(_) => {
+                let realfile = decoder.follow_hardlink(&file).await?;
+                if verbose {
+                    eprintln!("adding '{}' to zip", path.display());
+                }
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    true,
+                );
+                zip.add_entry(entry, Some(realfile.contents().await?))
+                   .await
+                   .map_err(|err| format_err!("could not send file entry: {}", err))?;
+            }
+            EntryKind::Directory => {
+                let dir = file.enter_directory().await?;
+                let mut readdir = dir.read_dir();
+                if verbose {
+                    eprintln!("adding '{}' to zip", path.display());
+                }
+                let entry = ZipEntry::new(
+                    path,
+                    metadata.stat.mtime.secs,
+                    metadata.stat.mode as u16,
+                    false,
+                );
+                zip.add_entry::<FileContents<T>>(entry, None).await?;
+                while let Some(entry) = readdir.next().await {
+                    let entry = entry?.decode_entry().await?;
+                    recurse_files_zip(zip, decoder, prefix, entry, verbose).await?;
+                }
+            }
+            _ => {} // ignore all else
+        };
+
+        Ok(())
+    })
+}
+
+fn get_extractor<DEST>(destination: DEST, metadata: Metadata) -> Result<Extractor, Error>
+where
+    DEST: AsRef<Path>,
+{
+    create_path(
+        &destination,
+        None,
+        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
+    )
+    .map_err(|err| {
+        format_err!(
+            "error creating directory {:?}: {}",
+            destination.as_ref(),
+            err
+        )
+    })?;
+
+    let dir = Dir::open(
+        destination.as_ref(),
+        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
+        Mode::empty(),
+    )
+    .map_err(|err| {
+        format_err!(
+            "unable to open target directory {:?}: {}",
+            destination.as_ref(),
+            err,
+        )
+    })?;
+
+    Ok(Extractor::new(dir, metadata, false, Flags::DEFAULT))
+}
+
+pub async fn extract_sub_dir<T, DEST, PATH>(
+    destination: DEST,
+    decoder: Accessor<T>,
+    path: PATH,
+    verbose: bool,
+) -> Result<(), Error>
+where
+    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
+    DEST: AsRef<Path>,
+    PATH: AsRef<Path>,
+{
+    let root = decoder.open_root().await?;
+
+    let mut extractor = get_extractor(
+        destination,
+        root.lookup_self().await?.entry().metadata().clone(),
+    )?;
+
+    let file = root
+        .lookup(&path)
+        .await?
+        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+
+    recurse_files_extractor(&mut extractor, file, verbose).await
+}
+
+pub async fn extract_sub_dir_seq<S, DEST>(
+    destination: DEST,
+    mut decoder: Decoder<S>,
+    verbose: bool,
+) -> Result<(), Error>
+where
+    S: pxar::decoder::SeqRead + Unpin + Send + 'static,
+    DEST: AsRef<Path>,
+{
+    decoder.enable_goodbye_entries(true);
+    let root = match decoder.next().await {
+        Some(Ok(root)) => root,
+        Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
+        None => bail!("cannot extract empty archive"),
+    };
+
+    let mut extractor = get_extractor(destination, root.metadata().clone())?;
+
+    if let Err(err) = seq_files_extractor(&mut extractor, decoder, verbose).await {
+        eprintln!("error extracting pxar archive: {}", err);
+    }
+
+    Ok(())
+}
+
+fn extract_special(
+    extractor: &mut Extractor,
+    entry: &Entry,
+    file_name: &CStr,
+) -> Result<(), Error> {
+    let metadata = entry.metadata();
+    match entry.kind() {
+        EntryKind::Symlink(link) => {
+            extractor.extract_symlink(file_name, metadata, link.as_ref())?;
+        }
+        EntryKind::Hardlink(link) => {
+            extractor.extract_hardlink(file_name, link.as_os_str())?;
+        }
+        EntryKind::Device(dev) => {
+            if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
+                extractor.extract_device(file_name, metadata, dev)?;
+            }
+        }
+        EntryKind::Fifo => {
+            if extractor.contains_flags(Flags::WITH_FIFOS) {
+                extractor.extract_special(file_name, metadata, 0)?;
+            }
+        }
+        EntryKind::Socket => {
+            if extractor.contains_flags(Flags::WITH_SOCKETS) {
+                extractor.extract_special(file_name, metadata, 0)?;
+            }
+        }
+        _ => bail!("extract_special used with unsupported entry kind"),
+    }
+    Ok(())
+}
+
+fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
+    let file_name_os = entry.file_name().to_owned();
+
+    // safety check: a file entry in an archive must never contain slashes:
+    if file_name_os.as_bytes().contains(&b'/') {
+        bail!("archive file entry contains slashes, which is invalid and a security concern");
+    }
+
+    let file_name = CString::new(file_name_os.as_bytes())
+        .map_err(|_| format_err!("encountered file name with null-bytes"))?;
+
+    Ok((file_name_os, file_name))
+}
+
+async fn recurse_files_extractor<'a, T>(
+    extractor: &'a mut Extractor,
+    file: FileEntry<T>,
+    verbose: bool,
+) -> Result<(), Error>
+where
+    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
+{
+    let entry = file.entry();
+    let metadata = entry.metadata();
+    let (file_name_os, file_name) = get_filename(entry)?;
+
+    if verbose {
+        eprintln!("extracting: {}", file.path().display());
+    }
+
+    match file.kind() {
+        EntryKind::Directory => {
+            extractor
+                .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
+                .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+
+            let dir = file.enter_directory().await?;
+            let mut seq_decoder = dir.decode_full().await?;
+            seq_decoder.enable_goodbye_entries(true);
+            seq_files_extractor(extractor, seq_decoder, verbose).await?;
+            extractor.leave_directory()?;
+        }
+        EntryKind::File { size, .. } => {
+            extractor
+                .async_extract_file(
+                    &file_name,
+                    metadata,
+                    *size,
+                    &mut file.contents().await.map_err(|_| {
+                        format_err!("found regular file entry without contents in archive")
+                    })?,
+                )
+                .await?
+        }
+        EntryKind::GoodbyeTable => {} // ignore
+        _ => extract_special(extractor, entry, &file_name)?,
+    }
+    Ok(())
+}
+
+async fn seq_files_extractor<'a, T>(
+    extractor: &'a mut Extractor,
+    mut decoder: pxar::decoder::aio::Decoder<T>,
+    verbose: bool,
+) -> Result<(), Error>
+where
+    T: pxar::decoder::SeqRead,
+{
+    let mut dir_level = 0;
+    loop {
+        let entry = match decoder.next().await {
+            Some(entry) => entry?,
+            None => return Ok(()),
+        };
+
+        let metadata = entry.metadata();
+        let (file_name_os, file_name) = get_filename(&entry)?;
+
+        if verbose && !matches!(entry.kind(), EntryKind::GoodbyeTable) {
+            eprintln!("extracting: {}", entry.path().display());
+        }
+
+        if let Err(err) = async {
+            match entry.kind() {
+                EntryKind::Directory => {
+                    dir_level += 1;
+                    extractor
+                        .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
+                        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
+                }
+                EntryKind::File { size, .. } => {
+                    extractor
+                        .async_extract_file(
+                            &file_name,
+                            metadata,
+                            *size,
+                            &mut decoder.contents().ok_or_else(|| {
+                                format_err!("found regular file entry without contents in archive")
+                            })?,
+                        )
+                        .await?
+                }
+                EntryKind::GoodbyeTable => {
+                    dir_level -= 1;
+                    extractor.leave_directory()?;
+                }
+                _ => extract_special(extractor, &entry, &file_name)?,
+            }
+            Ok(()) as Result<(), Error>
+        }
+        .await
+        {
+            let display = entry.path().display().to_string();
+            eprintln!(
+                "error extracting {}: {}",
+                if matches!(entry.kind(), EntryKind::GoodbyeTable) {
+                    "<directory>"
+                } else {
+                    &display
+                },
+                err
+            );
+        }
+
+        if dir_level < 0 {
+            // we've encountered one more Goodbye than Directory, meaning we've left the dir we
+            // started in - exit early, otherwise the extractor might panic
+            return Ok(());
+        }
+    }
+}
diff --git a/pbs-client/src/pxar/flags.rs b/pbs-client/src/pxar/flags.rs
new file mode 100644 (file)
index 0000000..eca5ee9
--- /dev/null
@@ -0,0 +1,378 @@
+//! Feature flags for *pxar* allow controlling what is stored in, and restored
+//! from, the archive.
+//! The flags for the features known to be supported by a given filesystem can
+//! be derived from the superblock's magic number.
+
+use libc::c_long;
+
+use bitflags::bitflags;
+
+bitflags! {
+    pub struct Flags: u64 {
+        /// FAT-style 2s time granularity
+        const WITH_2SEC_TIME                   = 0x40;
+        /// Preserve read only flag of files
+        const WITH_READ_ONLY                   = 0x80;
+        /// Preserve unix permissions
+        const WITH_PERMISSIONS                 = 0x100;
+        /// Include symbolic links
+        const WITH_SYMLINKS                    = 0x200;
+        /// Include device nodes
+        const WITH_DEVICE_NODES                = 0x400;
+        /// Include FIFOs
+        const WITH_FIFOS                       = 0x800;
+        /// Include Sockets
+        const WITH_SOCKETS                     = 0x1000;
+
+        /// Preserve DOS file flag `HIDDEN`
+        const WITH_FLAG_HIDDEN                 = 0x2000;
+        /// Preserve DOS file flag `SYSTEM`
+        const WITH_FLAG_SYSTEM                 = 0x4000;
+        /// Preserve DOS file flag `ARCHIVE`
+        const WITH_FLAG_ARCHIVE                = 0x8000;
+
+        // chattr() flags
+        /// Linux file attribute `APPEND`
+        const WITH_FLAG_APPEND                 = 0x10000;
+        /// Linux file attribute `NOATIME`
+        const WITH_FLAG_NOATIME                = 0x20000;
+        /// Linux file attribute `COMPR`
+        const WITH_FLAG_COMPR                  = 0x40000;
+        /// Linux file attribute `NOCOW`
+        const WITH_FLAG_NOCOW                  = 0x80000;
+        /// Linux file attribute `NODUMP`
+        const WITH_FLAG_NODUMP                 = 0x0010_0000;
+        /// Linux file attribute `DIRSYNC`
+        const WITH_FLAG_DIRSYNC                = 0x0020_0000;
+        /// Linux file attribute `IMMUTABLE`
+        const WITH_FLAG_IMMUTABLE              = 0x0040_0000;
+        /// Linux file attribute `SYNC`
+        const WITH_FLAG_SYNC                   = 0x0080_0000;
+        /// Linux file attribute `NOCOMP`
+        const WITH_FLAG_NOCOMP                 = 0x0100_0000;
+        /// Linux file attribute `PROJINHERIT`
+        const WITH_FLAG_PROJINHERIT            = 0x0200_0000;
+
+
+        /// Preserve BTRFS subvolume flag
+        const WITH_SUBVOLUME                   = 0x0400_0000;
+        /// Preserve BTRFS read-only subvolume flag
+        const WITH_SUBVOLUME_RO                = 0x0800_0000;
+
+        /// Preserve Extended Attribute metadata
+        const WITH_XATTRS                      = 0x1000_0000;
+        /// Preserve Access Control List metadata
+        const WITH_ACL                         = 0x2000_0000;
+        /// Preserve SELinux security context
+        const WITH_SELINUX                     = 0x4000_0000;
+        /// Preserve "security.capability" xattr
+        const WITH_FCAPS                       = 0x8000_0000;
+
+        /// Preserve XFS/ext4/ZFS project quota ID
+        const WITH_QUOTA_PROJID                = 0x0001_0000_0000;
+
+        /// Support ".pxarexclude" files
+        const EXCLUDE_FILE                     = 0x1000_0000_0000_0000;
+        /// Exclude submounts
+        const EXCLUDE_SUBMOUNTS                = 0x4000_0000_0000_0000;
+        /// Exclude entries with chattr flag NODUMP
+        const EXCLUDE_NODUMP                   = 0x8000_0000_0000_0000;
+
+        // Definitions of typical feature flags for the *pxar* encoder/decoder.
+        // This avoids expensive syscalls for features a filesystem does not support.
+
+        /// All chattr file attributes
+        const WITH_CHATTR =
+            Flags::WITH_FLAG_APPEND.bits() |
+            Flags::WITH_FLAG_NOATIME.bits() |
+            Flags::WITH_FLAG_COMPR.bits() |
+            Flags::WITH_FLAG_NOCOW.bits() |
+            Flags::WITH_FLAG_NODUMP.bits() |
+            Flags::WITH_FLAG_DIRSYNC.bits() |
+            Flags::WITH_FLAG_IMMUTABLE.bits() |
+            Flags::WITH_FLAG_SYNC.bits() |
+            Flags::WITH_FLAG_NOCOMP.bits() |
+            Flags::WITH_FLAG_PROJINHERIT.bits();
+
+        /// All FAT file attributes
+        const WITH_FAT_ATTRS =
+            Flags::WITH_FLAG_HIDDEN.bits() |
+            Flags::WITH_FLAG_SYSTEM.bits() |
+            Flags::WITH_FLAG_ARCHIVE.bits();
+
+        /// All bits that may also be exposed via fuse
+        const WITH_FUSE =
+            Flags::WITH_2SEC_TIME.bits() |
+            Flags::WITH_READ_ONLY.bits() |
+            Flags::WITH_PERMISSIONS.bits() |
+            Flags::WITH_SYMLINKS.bits() |
+            Flags::WITH_DEVICE_NODES.bits() |
+            Flags::WITH_FIFOS.bits() |
+            Flags::WITH_SOCKETS.bits() |
+            Flags::WITH_FAT_ATTRS.bits() |
+            Flags::WITH_CHATTR.bits() |
+            Flags::WITH_XATTRS.bits();
+
+
+        /// Default feature flags for encoder/decoder
+        const DEFAULT =
+            Flags::WITH_SYMLINKS.bits() |
+            Flags::WITH_DEVICE_NODES.bits() |
+            Flags::WITH_FIFOS.bits() |
+            Flags::WITH_SOCKETS.bits() |
+            Flags::WITH_FLAG_HIDDEN.bits() |
+            Flags::WITH_FLAG_SYSTEM.bits() |
+            Flags::WITH_FLAG_ARCHIVE.bits() |
+            Flags::WITH_FLAG_APPEND.bits() |
+            Flags::WITH_FLAG_NOATIME.bits() |
+            Flags::WITH_FLAG_COMPR.bits() |
+            Flags::WITH_FLAG_NOCOW.bits() |
+            //WITH_FLAG_NODUMP.bits() |
+            Flags::WITH_FLAG_DIRSYNC.bits() |
+            Flags::WITH_FLAG_IMMUTABLE.bits() |
+            Flags::WITH_FLAG_SYNC.bits() |
+            Flags::WITH_FLAG_NOCOMP.bits() |
+            Flags::WITH_FLAG_PROJINHERIT.bits() |
+            Flags::WITH_SUBVOLUME.bits() |
+            Flags::WITH_SUBVOLUME_RO.bits() |
+            Flags::WITH_XATTRS.bits() |
+            Flags::WITH_ACL.bits() |
+            Flags::WITH_SELINUX.bits() |
+            Flags::WITH_FCAPS.bits() |
+            Flags::WITH_QUOTA_PROJID.bits() |
+            Flags::EXCLUDE_NODUMP.bits() |
+            Flags::EXCLUDE_FILE.bits();
+    }
+}
+
+impl Default for Flags {
+    fn default() -> Flags {
+        Flags::DEFAULT
+    }
+}
+
+// from /usr/include/linux/fs.h
+const FS_APPEND_FL: c_long =      0x0000_0020;
+const FS_NOATIME_FL: c_long =     0x0000_0080;
+const FS_COMPR_FL: c_long =       0x0000_0004;
+const FS_NOCOW_FL: c_long =       0x0080_0000;
+const FS_NODUMP_FL: c_long =      0x0000_0040;
+const FS_DIRSYNC_FL: c_long =     0x0001_0000;
+const FS_IMMUTABLE_FL: c_long =   0x0000_0010;
+const FS_SYNC_FL: c_long =        0x0000_0008;
+const FS_NOCOMP_FL: c_long =      0x0000_0400;
+const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
+
+pub(crate) const INITIAL_FS_FLAGS: c_long =
+    FS_NOATIME_FL
+    | FS_COMPR_FL
+    | FS_NOCOW_FL
+    | FS_NOCOMP_FL
+    | FS_PROJINHERIT_FL;
+
+#[rustfmt::skip]
+const CHATTR_MAP: [(Flags, c_long); 10] = [
+    ( Flags::WITH_FLAG_APPEND,      FS_APPEND_FL      ),
+    ( Flags::WITH_FLAG_NOATIME,     FS_NOATIME_FL     ),
+    ( Flags::WITH_FLAG_COMPR,       FS_COMPR_FL       ),
+    ( Flags::WITH_FLAG_NOCOW,       FS_NOCOW_FL       ),
+    ( Flags::WITH_FLAG_NODUMP,      FS_NODUMP_FL      ),
+    ( Flags::WITH_FLAG_DIRSYNC,     FS_DIRSYNC_FL     ),
+    ( Flags::WITH_FLAG_IMMUTABLE,   FS_IMMUTABLE_FL   ),
+    ( Flags::WITH_FLAG_SYNC,        FS_SYNC_FL        ),
+    ( Flags::WITH_FLAG_NOCOMP,      FS_NOCOMP_FL      ),
+    ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
+];
+
+// from /usr/include/linux/msdos_fs.h
+const ATTR_HIDDEN: u32 =      2;
+const ATTR_SYS: u32 =         4;
+const ATTR_ARCH: u32 =       32;
+
+#[rustfmt::skip]
+const FAT_ATTR_MAP: [(Flags, u32); 3] = [
+    ( Flags::WITH_FLAG_HIDDEN,  ATTR_HIDDEN ),
+    ( Flags::WITH_FLAG_SYSTEM,  ATTR_SYS    ),
+    ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH   ),
+];
+
+impl Flags {
+    /// Get a set of feature flags from file attributes.
+    pub fn from_chattr(attr: c_long) -> Flags {
+        let mut flags = Flags::empty();
+
+        for (fe_flag, fs_flag) in &CHATTR_MAP {
+            if (attr & fs_flag) != 0 {
+                flags |= *fe_flag;
+            }
+        }
+
+        flags
+    }
+
+    /// Get the chattr bit representation of these feature flags.
+    pub fn to_chattr(self) -> c_long {
+        let mut flags: c_long = 0;
+
+        for (fe_flag, fs_flag) in &CHATTR_MAP {
+            if self.contains(*fe_flag) {
+                flags |= *fs_flag;
+            }
+        }
+
+        flags
+    }
+
+    pub fn to_initial_chattr(self) -> c_long {
+        self.to_chattr() & INITIAL_FS_FLAGS
+    }
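A round-trip sketch over `CHATTR_MAP` (using the constants defined above): bits covered by the map survive both directions, anything else is dropped:

    let flags = Flags::from_chattr(FS_APPEND_FL | FS_SYNC_FL);
    assert!(flags.contains(Flags::WITH_FLAG_APPEND | Flags::WITH_FLAG_SYNC));
    assert_eq!(flags.to_chattr(), FS_APPEND_FL | FS_SYNC_FL);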
+
+    /// Get a set of feature flags from FAT attributes.
+    pub fn from_fat_attr(attr: u32) -> Flags {
+        let mut flags = Flags::empty();
+
+        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
+            if (attr & fs_flag) != 0 {
+                flags |= *fe_flag;
+            }
+        }
+
+        flags
+    }
+
+    /// Get the FAT attribute bit representation of these feature flags.
+    pub fn to_fat_attr(self) -> u32 {
+        let mut flags = 0u32;
+
+        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
+            if self.contains(*fe_flag) {
+                flags |= *fs_flag;
+            }
+        }
+
+        flags
+    }
+
+    /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
+    pub fn from_magic(magic: i64) -> Flags {
+        use proxmox::sys::linux::magic::*;
+        match magic {
+            MSDOS_SUPER_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_FAT_ATTRS
+            },
+            EXT4_SUPER_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_FLAG_APPEND |
+                Flags::WITH_FLAG_NOATIME |
+                Flags::WITH_FLAG_NODUMP |
+                Flags::WITH_FLAG_DIRSYNC |
+                Flags::WITH_FLAG_IMMUTABLE |
+                Flags::WITH_FLAG_SYNC |
+                Flags::WITH_XATTRS |
+                Flags::WITH_ACL |
+                Flags::WITH_SELINUX |
+                Flags::WITH_FCAPS |
+                Flags::WITH_QUOTA_PROJID
+            },
+            XFS_SUPER_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_FLAG_APPEND |
+                Flags::WITH_FLAG_NOATIME |
+                Flags::WITH_FLAG_NODUMP |
+                Flags::WITH_FLAG_IMMUTABLE |
+                Flags::WITH_FLAG_SYNC |
+                Flags::WITH_XATTRS |
+                Flags::WITH_ACL |
+                Flags::WITH_SELINUX |
+                Flags::WITH_FCAPS |
+                Flags::WITH_QUOTA_PROJID
+            },
+            ZFS_SUPER_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_FLAG_APPEND |
+                Flags::WITH_FLAG_NOATIME |
+                Flags::WITH_FLAG_NODUMP |
+                Flags::WITH_FLAG_DIRSYNC |
+                Flags::WITH_FLAG_IMMUTABLE |
+                Flags::WITH_FLAG_SYNC |
+                Flags::WITH_XATTRS |
+                Flags::WITH_ACL |
+                Flags::WITH_SELINUX |
+                Flags::WITH_FCAPS |
+                Flags::WITH_QUOTA_PROJID
+            },
+            BTRFS_SUPER_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_FLAG_APPEND |
+                Flags::WITH_FLAG_NOATIME |
+                Flags::WITH_FLAG_COMPR |
+                Flags::WITH_FLAG_NOCOW |
+                Flags::WITH_FLAG_NODUMP |
+                Flags::WITH_FLAG_DIRSYNC |
+                Flags::WITH_FLAG_IMMUTABLE |
+                Flags::WITH_FLAG_SYNC |
+                Flags::WITH_FLAG_NOCOMP |
+                Flags::WITH_XATTRS |
+                Flags::WITH_ACL |
+                Flags::WITH_SELINUX |
+                Flags::WITH_SUBVOLUME |
+                Flags::WITH_SUBVOLUME_RO |
+                Flags::WITH_FCAPS
+            },
+            TMPFS_MAGIC => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_ACL |
+                Flags::WITH_SELINUX
+            },
+            // FUSE mounts are special as the supported feature set
+            // is not clear a priori.
+            FUSE_SUPER_MAGIC => {
+                Flags::WITH_FUSE
+            },
+            _ => {
+                Flags::WITH_2SEC_TIME |
+                Flags::WITH_READ_ONLY |
+                Flags::WITH_PERMISSIONS |
+                Flags::WITH_SYMLINKS |
+                Flags::WITH_DEVICE_NODES |
+                Flags::WITH_FIFOS |
+                Flags::WITH_SOCKETS |
+                Flags::WITH_XATTRS |
+                Flags::WITH_ACL |
+                Flags::WITH_FCAPS
+            },
+        }
+    }
+}
diff --git a/pbs-client/src/pxar/fuse.rs b/pbs-client/src/pxar/fuse.rs
new file mode 100644 (file)
index 0000000..1f5b1dd
--- /dev/null
@@ -0,0 +1,690 @@
+//! Asynchronous fuse implementation.
+
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::ffi::{OsStr, OsString};
+use std::future::Future;
+use std::io;
+use std::mem;
+use std::ops::Range;
+use std::os::unix::ffi::OsStrExt;
+use std::path::Path;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, RwLock};
+use std::task::{Context, Poll};
+
+use anyhow::{format_err, Error};
+use futures::channel::mpsc::UnboundedSender;
+use futures::select;
+use futures::sink::SinkExt;
+use futures::stream::{StreamExt, TryStreamExt};
+
+use proxmox::tools::vec;
+use pxar::accessor::{self, EntryRangeInfo, ReadAt};
+
+use proxmox_fuse::requests::{self, FuseRequest};
+use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
+
+use pbs_tools::xattr;
+
+/// We mark non-directory inodes this way so we know how to access them.
+const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
+
+#[inline]
+fn is_dir_inode(inode: u64) -> bool {
+    0 == (inode & NON_DIRECTORY_INODE)
+}
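+// Illustrative sketch (added for clarity, not part of the original commit): the
+// high bit acts as a type tag, so any archive offset below 2^63 round-trips:
+//
+//     let inode = offset | NON_DIRECTORY_INODE;        // mark as non-directory
+//     assert!(!is_dir_inode(inode));
+//     let offset_again = inode & !NON_DIRECTORY_INODE; // recover the offset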
+
+/// Our reader type instance used for accessors.
+pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;
+
+/// Our Accessor type instance.
+pub type Accessor = accessor::aio::Accessor<Reader>;
+
+/// Our Directory type instance.
+pub type Directory = accessor::aio::Directory<Reader>;
+
+/// Our FileEntry type instance.
+pub type FileEntry = accessor::aio::FileEntry<Reader>;
+
+/// Our FileContents type instance.
+pub type FileContents = accessor::aio::FileContents<Reader>;
+
+pub struct Session {
+    fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
+}
+
+impl Session {
+    /// Create a fuse session for an archive.
+    pub async fn mount_path(
+        archive_path: &Path,
+        options: &OsStr,
+        verbose: bool,
+        mountpoint: &Path,
+    ) -> Result<Self, Error> {
+        // TODO: Add a buffered/caching ReadAt layer?
+        let file = std::fs::File::open(archive_path)?;
+        let file_size = file.metadata()?.len();
+        let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
+        let accessor = Accessor::new(reader, file_size).await?;
+        Self::mount(accessor, options, verbose, mountpoint)
+    }
+
+    /// Create a new fuse session for the given pxar `Accessor`.
+    pub fn mount(
+        accessor: Accessor,
+        options: &OsStr,
+        verbose: bool,
+        path: &Path,
+    ) -> Result<Self, Error> {
+        let fuse = Fuse::builder("pxar-mount")?
+            .debug()
+            .options_os(options)?
+            .enable_readdirplus()
+            .enable_read()
+            .enable_readlink()
+            .enable_read_xattr()
+            .build()?
+            .mount(path)?;
+
+        let session = SessionImpl::new(accessor, verbose);
+
+        Ok(Self {
+            fut: Box::pin(session.main(fuse)),
+        })
+    }
+}
+
+impl Future for Session {
+    type Output = Result<(), Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+        Pin::new(&mut self.fut).poll(cx)
+    }
+}
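+// Usage sketch (illustrative only; archive name, mount options and mountpoint are
+// assumptions, and a tokio runtime is required):
+//
+//     let session = Session::mount_path(
+//         Path::new("backup.pxar"),
+//         OsStr::new("ro"),
+//         false,                   // verbose
+//         Path::new("/mnt/pxar"),
+//     ).await?;
+//     session.await?; // drives the fuse main loop until the archive is unmounted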
+
+/// We use this to return an errno value back to the kernel.
+macro_rules! io_return {
+    ($errno:expr) => {
+        return Err(::std::io::Error::from_raw_os_error($errno).into());
+    };
+}
+
+/// Format an "other" error, see `io_bail` below for details.
+macro_rules! io_format_err {
+    ($($fmt:tt)*) => {
+        ::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
+    }
+}
+
+/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
+/// request to be answered with a generic `EIO` error code. The error message contained in here
+/// will be printed to stderr if the verbose flag is used, otherwise silently dropped.
+macro_rules! io_bail {
+    ($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
+}
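+// Usage sketch (illustrative): a request handler replies with a specific errno,
+// or with a generic EIO carrying a formatted message:
+//
+//     if !is_dir_inode(inode) { io_return!(libc::ENOTDIR); }
+//     io_bail!("unexpected entry kind: {:?}", kind);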
+
+/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
+/// accessed.
+struct Lookup {
+    refs: AtomicUsize,
+
+    inode: u64,
+    parent: u64,
+    entry_range_info: EntryRangeInfo,
+    content_range: Option<Range<u64>>,
+}
+
+impl Lookup {
+    fn new(
+        inode: u64,
+        parent: u64,
+        entry_range_info: EntryRangeInfo,
+        content_range: Option<Range<u64>>,
+    ) -> Box<Lookup> {
+        Box::new(Self {
+            refs: AtomicUsize::new(1),
+            inode,
+            parent,
+            entry_range_info,
+            content_range,
+        })
+    }
+
+    /// Decrease the reference count by `count`. Note that this must not include the reference held
+    /// by `self` itself, so this must not decrease the count below 2.
+    fn forget(&self, count: usize) -> Result<(), Error> {
+        loop {
+            let old = self.refs.load(Ordering::Acquire);
+            if count >= old {
+                io_bail!("reference count underflow");
+            }
+            let new = old - count;
+            match self
+                .refs
+                .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
+            {
+                Ok(_) => break Ok(()),
+                Err(_) => continue,
+            }
+        }
+    }
+
+    fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
+        if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
+            panic!("atomic refcount increased from 0 to 1");
+        }
+
+        LookupRef {
+            session,
+            lookup: self as *const Lookup,
+        }
+    }
+}
+
+struct LookupRef<'a> {
+    session: &'a SessionImpl,
+    lookup: *const Lookup,
+}
+
+unsafe impl<'a> Send for LookupRef<'a> {}
+unsafe impl<'a> Sync for LookupRef<'a> {}
+
+impl<'a> Clone for LookupRef<'a> {
+    fn clone(&self) -> Self {
+        self.get_ref(self.session)
+    }
+}
+
+impl<'a> std::ops::Deref for LookupRef<'a> {
+    type Target = Lookup;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.lookup }
+    }
+}
+
+impl<'a> Drop for LookupRef<'a> {
+    fn drop(&mut self) {
+        if self.lookup.is_null() {
+            return;
+        }
+
+        if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
+            let inode = self.inode;
+            drop(self.session.lookups.write().unwrap().remove(&inode));
+        }
+    }
+}
+
+impl<'a> LookupRef<'a> {
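+    /// Hand the reference over to the kernel: nulling the pointer keeps `Drop` from
+    /// decrementing the refcount; the kernel releases it later via a `Forget` request.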
+    fn leak(mut self) -> &'a Lookup {
+        unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
+    }
+}
+
+struct SessionImpl {
+    accessor: Accessor,
+    verbose: bool,
+    lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
+}
+
+impl SessionImpl {
+    fn new(accessor: Accessor, verbose: bool) -> Self {
+        let root = Lookup::new(
+            ROOT_ID,
+            ROOT_ID,
+            EntryRangeInfo::toplevel(0..accessor.size()),
+            None,
+        );
+
+        let mut tree = BTreeMap::new();
+        tree.insert(ROOT_ID, root);
+
+        Self {
+            accessor,
+            verbose,
+            lookups: RwLock::new(tree),
+        }
+    }
+
+    /// Here's how we deal with errors:
+    ///
+    /// Any error will be printed if the verbose flag was set, otherwise the message will be
+    /// silently dropped.
+    ///
+    /// Opaque errors will cause the fuse main loop to bail out with that error.
+    ///
+    /// `io::Error`s will cause the fuse request to be responded to with the given `io::Error`. An
+    /// `io::ErrorKind::Other` translates to a generic `EIO`.
+    async fn handle_err(
+        &self,
+        request: impl FuseRequest,
+        err: Error,
+        mut sender: UnboundedSender<Error>,
+    ) {
+        let final_result = match err.downcast::<io::Error>() {
+            Ok(err) => {
+                if err.kind() == io::ErrorKind::Other && self.verbose {
+                    eprintln!("an IO error occurred: {}", err);
+                }
+
+                // fail the request
+                request.io_fail(err).map_err(Error::from)
+            }
+            Err(err) => {
+                // `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
+                if self.verbose {
+                    eprintln!("internal error: {}, bailing out", err);
+                }
+                Err(err)
+            }
+        };
+        if let Err(err) = final_result {
+            // either we failed to send the error code to fuse, or the above was not an
+            // `io::Error`, so in this case notify the main loop:
+            sender
+                .send(err)
+                .await
+                .expect("failed to propagate error to main loop");
+        }
+    }
+
+    async fn main(self, fuse: Fuse) -> Result<(), Error> {
+        Arc::new(self).main_do(fuse).await
+    }
+
+    async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
+        let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
+        let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
+        loop {
+            select! {
+                request = fuse.try_next() => match request? {
+                    Some(request) => {
+                        tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
+                    }
+                    None => break,
+                },
+                err = err_recv.next() => match err {
+                    Some(err) => if self.verbose {
+                        eprintln!("cancelling fuse main loop due to error: {}", err);
+                        return Err(err);
+                    },
+                    None => panic!("error channel was closed unexpectedly"),
+                },
+            }
+        }
+        Ok(())
+    }
+
+    async fn handle_request(
+        self: Arc<Self>,
+        request: Request,
+        mut err_sender: UnboundedSender<Error>,
+    ) {
+        let result: Result<(), Error> = match request {
+            Request::Lookup(request) => {
+                match self.lookup(request.parent, &request.file_name).await {
+                    Ok((entry, lookup)) => match request.reply(&entry) {
+                        Ok(()) => {
+                            lookup.leak();
+                            Ok(())
+                        }
+                        Err(err) => Err(Error::from(err)),
+                    },
+                    Err(err) => return self.handle_err(request, err, err_sender).await,
+                }
+            }
+            Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
+                Ok(()) => {
+                    request.reply();
+                    Ok(())
+                }
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::Getattr(request) => match self.getattr(request.inode).await {
+                Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
+                Ok(lookups) => match request.reply() {
+                    Ok(()) => {
+                        for i in lookups {
+                            i.leak();
+                        }
+                        Ok(())
+                    }
+                    Err(err) => Err(Error::from(err)),
+                },
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::Read(request) => {
+                match self.read(request.inode, request.size, request.offset).await {
+                    Ok(data) => request.reply(&data).map_err(Error::from),
+                    Err(err) => return self.handle_err(request, err, err_sender).await,
+                }
+            }
+            Request::Readlink(request) => match self.readlink(request.inode).await {
+                Ok(data) => request.reply(&data).map_err(Error::from),
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
+                Ok(data) => request
+                    .reply(
+                        data.into_iter()
+                            .fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
+                    )
+                    .map_err(Error::from),
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
+                Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
+                Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
+                Err(err) => return self.handle_err(request, err, err_sender).await,
+            },
+            Request::GetXAttrSize(request) => {
+                match self.getxattr(request.inode, &request.attr_name).await {
+                    Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
+                    Err(err) => return self.handle_err(request, err, err_sender).await,
+                }
+            }
+            Request::GetXAttr(request) => {
+                match self.getxattr(request.inode, &request.attr_name).await {
+                    Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
+                    Err(err) => return self.handle_err(request, err, err_sender).await,
+                }
+            }
+            other => {
+                if self.verbose {
+                    eprintln!("Received unexpected fuse request");
+                }
+                other.fail(libc::ENOSYS).map_err(Error::from)
+            }
+        };
+
+        if let Err(err) = result {
+            err_sender
+                .send(err)
+                .await
+                .expect("failed to propagate error to main loop");
+        }
+    }
+
+    fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
+        let lookups = self.lookups.read().unwrap();
+        if let Some(lookup) = lookups.get(&inode) {
+            return Ok(lookup.get_ref(self));
+        }
+        io_return!(libc::ENOENT);
+    }
+
+    async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
+        if inode == ROOT_ID {
+            Ok(self.accessor.open_root().await?)
+        } else if !is_dir_inode(inode) {
+            io_return!(libc::ENOTDIR);
+        } else {
+            Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
+        }
+    }
+
+    async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
+        unsafe {
+            self.accessor
+                .open_file_at_range(&lookup.entry_range_info)
+                .await
+        }
+    }
+
+    fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
+        if is_dir_inode(lookup.inode) {
+            io_return!(libc::EISDIR);
+        }
+
+        match lookup.content_range.clone() {
+            Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
+            None => io_return!(libc::EBADF),
+        }
+    }
+
+    fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
+        let lookups = self.lookups.read().unwrap();
+        if let Some(lookup) = lookups.get(&inode) {
+            return Ok(lookup.get_ref(self));
+        }
+        drop(lookups);
+
+        let entry = Lookup::new(
+            inode,
+            parent,
+            entry.entry_range_info().clone(),
+            entry.content_range()?,
+        );
+        let reference = entry.get_ref(self);
+        entry.refs.store(1, Ordering::Release);
+
+        let mut lookups = self.lookups.write().unwrap();
+        if let Some(lookup) = lookups.get(&inode) {
+            return Ok(lookup.get_ref(self));
+        }
+
+        lookups.insert(inode, entry);
+        drop(lookups);
+        Ok(reference)
+    }
+
+    fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
+        let node = self.get_lookup(inode)?;
+        node.forget(count)?;
+        Ok(())
+    }
+
+    async fn lookup(
+        &'_ self,
+        parent: u64,
+        file_name: &OsStr,
+    ) -> Result<(EntryParam, LookupRef<'_>), Error> {
+        let dir = self.open_dir(parent).await?;
+
+        let entry = match { dir }.lookup(file_name).await? {
+            Some(entry) => entry,
+            None => io_return!(libc::ENOENT),
+        };
+
+        let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
+            // we don't know the file's end-offset, so we'll just allow the decoder to decode the
+            // entire rest of the archive until we figure out something better...
+            let entry = self.accessor.follow_hardlink(&entry).await?;
+
+            if let pxar::EntryKind::Hardlink(_) = entry.kind() {
+                // hardlinks must not point to other hardlinks...
+                io_return!(libc::ELOOP);
+            }
+
+            entry
+        } else {
+            entry
+        };
+
+        let response = to_entry(&entry)?;
+        let inode = response.inode;
+        Ok((response, self.make_lookup(parent, inode, &entry)?))
+    }
+
+    async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
+        let entry = unsafe {
+            self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
+        };
+        to_stat(inode, &entry)
+    }
+
+    async fn readdirplus(
+        &'_ self,
+        request: &mut requests::ReaddirPlus,
+    ) -> Result<Vec<LookupRef<'_>>, Error> {
+        let mut lookups = Vec::new();
+        let offset = usize::try_from(request.offset)
+            .map_err(|_| io_format_err!("directory offset out of range"))?;
+
+        let dir = self.open_dir(request.inode).await?;
+        let dir_lookup = self.get_lookup(request.inode)?;
+
+        let entry_count = dir.read_dir().count() as isize;
+
+        let mut next = offset as isize;
+        let mut iter = dir.read_dir().skip(offset);
+        while let Some(file) = iter.next().await {
+            next += 1;
+            let file = file?.decode_entry().await?;
+            let stat = to_stat(to_inode(&file), &file)?;
+            let name = file.file_name();
+            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+                ReplyBufState::Ok => (),
+                ReplyBufState::Full => return Ok(lookups),
+            }
+            lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
+        }
+
+        if next == entry_count {
+            next += 1;
+            let file = dir.lookup_self().await?;
+            let stat = to_stat(to_inode(&file), &file)?;
+            let name = OsStr::new(".");
+            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+                ReplyBufState::Ok => (),
+                ReplyBufState::Full => return Ok(lookups),
+            }
+            lookups.push(LookupRef::clone(&dir_lookup));
+        }
+
+        if next == entry_count + 1 {
+            next += 1;
+            let lookup = self.get_lookup(dir_lookup.parent)?;
+            let parent_dir = self.open_dir(lookup.inode).await?;
+            let file = parent_dir.lookup_self().await?;
+            let stat = to_stat(to_inode(&file), &file)?;
+            let name = OsStr::new("..");
+            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
+                ReplyBufState::Ok => (),
+                ReplyBufState::Full => return Ok(lookups),
+            }
+            lookups.push(lookup);
+        }
+
+        Ok(lookups)
+    }
+
+    async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
+        let file = self.get_lookup(inode)?;
+        let content = self.open_content(&file)?;
+        let mut buf = vec::undefined(len);
+        let got = content.read_at(&mut buf, offset).await?;
+        buf.truncate(got);
+        Ok(buf)
+    }
+
+    async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
+        let lookup = self.get_lookup(inode)?;
+        let file = self.open_entry(&lookup).await?;
+        match file.get_symlink() {
+            None => io_return!(libc::EINVAL),
+            Some(link) => Ok(link.to_owned()),
+        }
+    }
+
+    async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
+        let lookup = self.get_lookup(inode)?;
+        let metadata = self
+            .open_entry(&lookup)
+            .await?
+            .into_entry()
+            .into_metadata();
+
+        let mut xattrs = metadata.xattrs;
+
+        use pxar::format::XAttr;
+
+        if let Some(fcaps) = metadata.fcaps {
+            xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data));
+        }
+
+        // TODO: Special cases:
+        //     b"system.posix_acl_access
+        //     b"system.posix_acl_default
+        //
+        // For these we need to be able to create posix acl format entries, at that point we could
+        // just ditch libacl as well...
+
+        Ok(xattrs)
+    }
+
+    async fn listxattrs_into(
+        &self,
+        request: &mut requests::ListXAttr,
+    ) -> Result<ReplyBufState, Error> {
+        let xattrs = self.listxattrs(request.inode).await?;
+
+        for entry in xattrs {
+            match request.add_c_string(entry.name()) {
+                ReplyBufState::Ok => (),
+                ReplyBufState::Full => return Ok(ReplyBufState::Full),
+            }
+        }
+
+        Ok(ReplyBufState::Ok)
+    }
+
+    async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
+        // TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
+        // xattr for an entry...
+        let xattrs = self.listxattrs(inode).await?;
+        for entry in xattrs {
+            if entry.name().to_bytes() == xattr.as_bytes() {
+                return Ok(entry);
+            }
+        }
+        io_return!(libc::ENODATA);
+    }
+}
+
+#[inline]
+fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
+    to_entry_param(to_inode(&entry), &entry)
+}
+
+#[inline]
+fn to_inode(entry: &FileEntry) -> u64 {
+    if entry.is_dir() {
+        entry.entry_range_info().entry_range.end
+    } else {
+        entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
+    }
+}
+
+fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
+    Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
+}
+
+fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
+    let nlink = if entry.is_dir() { 2 } else { 1 };
+
+    let metadata = entry.metadata();
+
+    let mut stat: libc::stat = unsafe { mem::zeroed() };
+    stat.st_ino = inode;
+    stat.st_nlink = nlink;
+    stat.st_mode = u32::try_from(metadata.stat.mode)
+        .map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
+    stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
+        .map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
+    stat.st_uid = metadata.stat.uid;
+    stat.st_gid = metadata.stat.gid;
+    stat.st_atime = metadata.stat.mtime.secs;
+    stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
+    stat.st_mtime = metadata.stat.mtime.secs;
+    stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
+    stat.st_ctime = metadata.stat.mtime.secs;
+    stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
+    Ok(stat)
+}
diff --git a/pbs-client/src/pxar/metadata.rs b/pbs-client/src/pxar/metadata.rs
new file mode 100644 (file)
index 0000000..2d27270
--- /dev/null
@@ -0,0 +1,407 @@
+use std::ffi::{CStr, CString};
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::Path;
+
+use anyhow::{bail, format_err, Error};
+use nix::errno::Errno;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pxar::Metadata;
+
+use proxmox::c_result;
+use proxmox::sys::error::SysError;
+use proxmox::tools::fd::RawFdNum;
+
+use pbs_tools::{acl, fs, xattr};
+
+use crate::pxar::tools::perms_from_metadata;
+use crate::pxar::Flags;
+
+//
+// utility functions
+//
+
+fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
+    if err.is_errno(Errno::EOPNOTSUPP) {
+        Ok(())
+    } else {
+        Err(err)
+    }
+}
+
+fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
+    if err.is_errno(Errno::EOPNOTSUPP) {
+        *not_supp = true;
+        Ok(())
+    } else {
+        Err(err)
+    }
+}
+
+fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
+    // restore mtime
+    const UTIME_OMIT: i64 = (1 << 30) - 2;
+
+    [
+        libc::timespec {
+            tv_sec: 0,
+            tv_nsec: UTIME_OMIT,
+        },
+        libc::timespec {
+            tv_sec: mtime.secs,
+            tv_nsec: mtime.nanos as _,
+        },
+    ]
+}
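+// Illustrative note: passing this pair to `utimensat(2)` (as done in `apply` below)
+// updates only the mtime, while `UTIME_OMIT` leaves the atime untouched.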
+
+//
+// metadata application:
+//
+
+pub fn apply_at(
+    flags: Flags,
+    metadata: &Metadata,
+    parent: RawFd,
+    file_name: &CStr,
+    path_info: &Path,
+    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
+) -> Result<(), Error> {
+    let fd = proxmox::tools::fd::Fd::openat(
+        &unsafe { RawFdNum::from_raw_fd(parent) },
+        file_name,
+        OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
+        Mode::empty(),
+    )?;
+
+    apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
+}
+
+pub fn apply_initial_flags(
+    flags: Flags,
+    metadata: &Metadata,
+    fd: RawFd,
+    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
+) -> Result<(), Error> {
+    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
+    apply_chattr(
+        fd,
+        entry_flags.to_initial_chattr(),
+        flags.to_initial_chattr(),
+    )
+    .or_else(on_error)?;
+    Ok(())
+}
+
+pub fn apply(
+    flags: Flags,
+    metadata: &Metadata,
+    fd: RawFd,
+    path_info: &Path,
+    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
+) -> Result<(), Error> {
+    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
+
+    unsafe {
+        // UID and GID first, as this fails if we lose access anyway.
+        c_result!(libc::chown(
+            c_proc_path.as_ptr(),
+            metadata.stat.uid,
+            metadata.stat.gid
+        ))
+        .map(drop)
+        .or_else(allow_notsupp)
+        .map_err(|err| format_err!("failed to set ownership: {}", err))
+        .or_else(&mut *on_error)?;
+    }
+
+    let mut skip_xattrs = false;
+    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
+        .or_else(&mut *on_error)?;
+    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
+    apply_acls(flags, &c_proc_path, metadata, path_info)
+        .map_err(|err| format_err!("failed to apply acls: {}", err))
+        .or_else(&mut *on_error)?;
+    apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;
+
+    // Finally mode and time. We may lose access with mode, but changing the mode also
+    // affects times.
+    if !metadata.is_symlink() {
+        c_result!(unsafe {
+            libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
+        })
+        .map(drop)
+        .or_else(allow_notsupp)
+        .map_err(|err| format_err!("failed to change file mode: {}", err))
+        .or_else(&mut *on_error)?;
+    }
+
+    let res = c_result!(unsafe {
+        libc::utimensat(
+            libc::AT_FDCWD,
+            c_proc_path.as_ptr(),
+            timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
+            0,
+        )
+    });
+    match res {
+        Ok(_) => (),
+        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
+        Err(err) => {
+            on_error(format_err!(
+                "failed to restore mtime attribute on {:?}: {}",
+                path_info,
+                err
+            ))?;
+        }
+    }
+
+    if metadata.stat.flags != 0 {
+        apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
+    }
+
+    Ok(())
+}
+
+fn add_fcaps(
+    flags: Flags,
+    c_proc_path: *const libc::c_char,
+    metadata: &Metadata,
+    skip_xattrs: &mut bool,
+) -> Result<(), Error> {
+    if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
+        return Ok(());
+    }
+    let fcaps = match metadata.fcaps.as_ref() {
+        Some(fcaps) => fcaps,
+        None => return Ok(()),
+    };
+
+    c_result!(unsafe {
+        libc::setxattr(
+            c_proc_path,
+            xattr::xattr_name_fcaps().as_ptr(),
+            fcaps.data.as_ptr() as *const libc::c_void,
+            fcaps.data.len(),
+            0,
+        )
+    })
+    .map(drop)
+    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))
+    .map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;
+
+    Ok(())
+}
+
+fn apply_xattrs(
+    flags: Flags,
+    c_proc_path: *const libc::c_char,
+    metadata: &Metadata,
+    skip_xattrs: &mut bool,
+) -> Result<(), Error> {
+    if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
+        return Ok(());
+    }
+
+    for xattr in &metadata.xattrs {
+        if *skip_xattrs {
+            return Ok(());
+        }
+
+        if !xattr::is_valid_xattr_name(xattr.name()) {
+            eprintln!("skipping invalid xattr named {:?}", xattr.name());
+            continue;
+        }
+
+        c_result!(unsafe {
+            libc::setxattr(
+                c_proc_path,
+                xattr.name().as_ptr() as *const libc::c_char,
+                xattr.value().as_ptr() as *const libc::c_void,
+                xattr.value().len(),
+                0,
+            )
+        })
+        .map(drop)
+        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
+        .map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
+    }
+
+    Ok(())
+}
+
+fn apply_acls(
+    flags: Flags,
+    c_proc_path: &CStr,
+    metadata: &Metadata,
+    path_info: &Path,
+) -> Result<(), Error> {
+    if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
+        return Ok(());
+    }
+
+    let mut acl = acl::ACL::init(5)?;
+
+    // acl type access:
+    acl.add_entry_full(
+        acl::ACL_USER_OBJ,
+        None,
+        acl::mode_user_to_acl_permissions(metadata.stat.mode),
+    )?;
+
+    acl.add_entry_full(
+        acl::ACL_OTHER,
+        None,
+        acl::mode_other_to_acl_permissions(metadata.stat.mode),
+    )?;
+
+    match metadata.acl.group_obj.as_ref() {
+        Some(group_obj) => {
+            acl.add_entry_full(
+                acl::ACL_MASK,
+                None,
+                acl::mode_group_to_acl_permissions(metadata.stat.mode),
+            )?;
+            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
+        }
+        None => {
+            let mode = acl::mode_group_to_acl_permissions(metadata.stat.mode);
+
+            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;
+
+            if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
+                eprintln!(
+                    "Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
+                    path_info,
+                );
+                acl.add_entry_full(acl::ACL_MASK, None, mode)?;
+            }
+        }
+    }
+
+    for user in &metadata.acl.users {
+        acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
+    }
+
+    for group in &metadata.acl.groups {
+        acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
+    }
+
+    if !acl.is_valid() {
+        bail!("Error while restoring ACL - ACL invalid");
+    }
+
+    acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
+    drop(acl);
+
+    // acl type default:
+    if let Some(default) = metadata.acl.default.as_ref() {
+        let mut acl = acl::ACL::init(5)?;
+
+        acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
+
+        acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
+
+        acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
+
+        if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
+            acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
+        }
+
+        for user in &metadata.acl.default_users {
+            acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
+        }
+
+        for group in &metadata.acl.default_groups {
+            acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
+        }
+
+        if !acl.is_valid() {
+            bail!("Error while restoring ACL - ACL invalid");
+        }
+
+        acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
+    }
+
+    Ok(())
+}
+
+fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
+    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
+        return Ok(());
+    }
+
+    let projid = match metadata.quota_project_id {
+        Some(projid) => projid,
+        None => return Ok(()),
+    };
+
+    let mut fsxattr = fs::FSXAttr::default();
+    unsafe {
+        fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
+            format_err!(
+                "error while getting fsxattr to restore quota project id - {}",
+                err
+            )
+        })?;
+
+        fsxattr.fsx_projid = projid.projid as u32;
+
+        fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
+            format_err!(
+                "error while setting fsxattr to restore quota project id - {}",
+                err
+            )
+        })?;
+    }
+
+    Ok(())
+}
+
+pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
+    matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
+}
+
+fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
+    if chattr == 0 {
+        return Ok(());
+    }
+
+    let mut fattr: libc::c_long = 0;
+    match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
+        Ok(_) => (),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
+            return Ok(());
+        }
+        Err(err) => bail!("failed to read file attributes: {}", err),
+    }
+
+    let attr = (chattr & mask) | (fattr & !mask);
+
+    if attr == fattr {
+        return Ok(());
+    }
+
+    match unsafe { fs::write_attr_fd(fd, &attr) } {
+        Ok(_) => Ok(()),
+        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
+        Err(err) => bail!("failed to set file attributes: {}", err),
+    }
+}
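+// Illustrative example: with `chattr` containing only the append-only bit and a
+// `mask` covering append-only and noatime, both masked bits are taken from the
+// archive while every other bit keeps its current on-disk value, so unrelated
+// attributes are never clobbered.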
+
+fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
+    let entry_flags = Flags::from_bits_truncate(entry_flags);
+
+    apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;
+
+    let fatattr = (flags & entry_flags).to_fat_attr();
+    if fatattr != 0 {
+        match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
+            Ok(_) => (),
+            Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
+            Err(err) => bail!("failed to set file FAT attributes: {}", err),
+        }
+    }
+
+    Ok(())
+}
diff --git a/pbs-client/src/pxar/mod.rs b/pbs-client/src/pxar/mod.rs
new file mode 100644 (file)
index 0000000..f20a1f9
--- /dev/null
@@ -0,0 +1,71 @@
+//! *pxar* Implementation (proxmox file archive format)
+//!
+//! This code implements a slightly modified version of the *catar*
+//! format used in the [casync](https://github.com/systemd/casync)
+//! toolkit (we are not 100% binary compatible). It is a file archive
+//! format designed by Lennart Poettering, aimed specifically at
+//! efficient deduplication.
+
+//! Every archive contains items in the following order:
+//!  * `ENTRY`              -- containing general stat() data and related bits
+//!   * `USER`              -- user name as text, if enabled
+//!   * `GROUP`             -- group name as text, if enabled
+//!   * `XATTR`             -- one extended attribute
+//!   * ...                 -- more of these when there are multiple defined
+//!   * `ACL_USER`          -- one `USER ACL` entry
+//!   * ...                 -- more of these when there are multiple defined
+//!   * `ACL_GROUP`         -- one `GROUP ACL` entry
+//!   * ...                 -- more of these when there are multiple defined
+//!   * `ACL_GROUP_OBJ`     -- The `ACL_GROUP_OBJ`
+//!   * `ACL_DEFAULT`       -- The various default ACL fields if there's one defined
+//!   * `ACL_DEFAULT_USER`  -- one USER ACL entry
+//!   * ...                 -- more of these when multiple are defined
+//!   * `ACL_DEFAULT_GROUP` -- one GROUP ACL entry
+//!   * ...                 -- more of these when multiple are defined
+//!   * `FCAPS`             -- file capability in Linux disk format
+//!   * `QUOTA_PROJECT_ID`  -- the ext4/xfs quota project ID
+//!   * `PAYLOAD`           -- file contents, if it is a regular file
+//!   * `SYMLINK`           -- symlink target, if it is one
+//!   * `DEVICE`            -- device major/minor, if it is a block/char device
+//!
+//!   If we are serializing a directory, then this is followed by:
+//!
+//!   * `FILENAME`          -- name of the first directory entry (strictly ordered!)
+//!   * `<archive>`         -- serialization of the first directory entry's metadata and
+//!                            contents, following the exact same archive format
+//!   * `FILENAME`          -- name of the second directory entry (strictly ordered!)
+//!   * `<archive>`         -- serialization of the second directory entry
+//!   * ...
+//!   * `GOODBYE`           -- lookup table at the end of a list of directory entries
+
+//!
+//! The original format has no way to deal with hardlinks, so we
+//! extended the format by a special `HARDLINK` tag, which can replace
+//! an `ENTRY` tag. The `HARDLINK` tag contains a 64-bit offset which
+//! points to the linked `ENTRY` inside the archive, followed by the
+//! full path name of that `ENTRY`. `HARDLINK`s may not have further data
+//! (user, group, acl, ...) because this is already defined by the
+//! linked `ENTRY`.
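+//!
+//! For example (an illustrative sketch, not byte-exact), a directory containing a
+//! single file `foo` with one extended attribute serializes roughly as:
+//!
+//! `ENTRY(dir)`, `FILENAME("foo")`, `ENTRY(file)`, `XATTR`, `PAYLOAD`, `GOODBYE`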
+
+pub(crate) mod create;
+pub(crate) mod dir_stack;
+pub(crate) mod extract;
+pub mod fuse;
+pub(crate) mod metadata;
+pub(crate) mod tools;
+
+mod flags;
+pub use flags::Flags;
+
+pub use create::{create_archive, PxarCreateOptions};
+pub use extract::{
+    create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
+    PxarExtractOptions,
+};
+
+/// The format requires building sorted directory lookup tables in
+/// memory, so we restrict the number of allowed entries to limit the
+/// maximum memory usage.
+pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
+
+pub use tools::{format_multi_line_entry, format_single_line_entry};
diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs
new file mode 100644 (file)
index 0000000..3fd0fc0
--- /dev/null
@@ -0,0 +1,202 @@
+//! Some common methods used within the pxar code.
+
+use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::path::Path;
+
+use anyhow::{bail, format_err, Error};
+use nix::sys::stat::Mode;
+
+use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
+
+/// Get the file permissions as `nix::Mode`
+pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
+    let mode = meta.stat.get_permission_bits();
+    u32::try_from(mode)
+        .map_err(drop)
+        .and_then(|mode| Mode::from_bits(mode).ok_or(()))
+        .map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
+}
+
+/// Make sure path is relative and not '.' or '..'.
+pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
+    assert_relative_path_do(Path::new(path))
+}
+
+/// Make sure path is a single component and not '.' or '..'.
+pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
+    assert_single_path_component_do(Path::new(path))
+}
+
+fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
+    if !path.is_relative() {
+        bail!("bad absolute file name in archive: {:?}", path);
+    }
+
+    Ok(())
+}
+
+fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
+    assert_relative_path_do(path)?;
+
+    let mut components = path.components();
+    match components.next() {
+        Some(std::path::Component::Normal(_)) => (),
+        _ => bail!("invalid path component in archive: {:?}", path),
+    }
+
+    if components.next().is_some() {
+        bail!(
+            "invalid path with multiple components in archive: {:?}",
+            path
+        );
+    }
+
+    Ok(())
+}
+
+#[rustfmt::skip]
+fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
+    [
+        if 0 != c & 4 { b'r' } else { b'-' },
+        if 0 != c & 2 { b'w' } else { b'-' },
+        match (c & 1, special) {
+            (0, false) => b'-',
+            (0, true) => special_no_x,
+            (_, false) => b'x',
+            (_, true) => special_x,
+        }
+    ]
+}
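+// Illustrative examples: for the user triplet of mode 0o754 this yields `rwx`;
+// with a special bit set, the executable slot becomes `s`/`S`:
+//
+//     assert_eq!(symbolic_mode(7, false, b's', b'S'), *b"rwx");
+//     assert_eq!(symbolic_mode(7, true,  b's', b'S'), *b"rws");
+//     assert_eq!(symbolic_mode(4, true,  b's', b'S'), *b"r-S");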
+
+fn mode_string(entry: &Entry) -> String {
+    // https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
+    // additionally we use:
+    //     a capital 'L' file type character for hard links
+    //     a second '+' after the mode to show non-acl xattr presence
+    //
+    // Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
+
+    let meta = entry.metadata();
+    let mode = meta.stat.mode;
+    let type_char = if entry.is_hardlink() {
+        'L'
+    } else {
+        match mode & mode::IFMT {
+            mode::IFREG => '-',
+            mode::IFBLK => 'b',
+            mode::IFCHR => 'c',
+            mode::IFDIR => 'd',
+            mode::IFLNK => 'l',
+            mode::IFIFO => 'p',
+            mode::IFSOCK => 's',
+            _ => '?',
+        }
+    };
+
+    let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
+    let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
+    let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');
+
+    let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
+
+    let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
+
+    format!(
+        "{}{}{}{}{}{}",
+        type_char,
+        unsafe { std::str::from_utf8_unchecked(&fmt_u) },
+        unsafe { std::str::from_utf8_unchecked(&fmt_g) },
+        unsafe { std::str::from_utf8_unchecked(&fmt_o) },
+        has_acls,
+        has_xattrs,
+    )
+}
+
+fn format_mtime(mtime: &StatxTimestamp) -> String {
+    if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
+        return s;
+    }
+    format!("{}.{}", mtime.secs, mtime.nanos)
+}
+
+pub fn format_single_line_entry(entry: &Entry) -> String {
+    let mode_string = mode_string(entry);
+
+    let meta = entry.metadata();
+
+    let (size, link) = match entry.kind() {
+        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
+        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
+        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
+        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
+        _ => ("0".to_string(), String::new()),
+    };
+
+    format!(
+        "{} {:<13} {} {:>8} {:?}{}",
+        mode_string,
+        format!("{}/{}", meta.stat.uid, meta.stat.gid),
+        format_mtime(&meta.stat.mtime),
+        size,
+        entry.path(),
+        link,
+    )
+}
+
+pub fn format_multi_line_entry(entry: &Entry) -> String {
+    let mode_string = mode_string(entry);
+
+    let meta = entry.metadata();
+
+    let (size, link, type_name) = match entry.kind() {
+        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
+        EntryKind::Symlink(link) => (
+            "0".to_string(),
+            format!(" -> {:?}", link.as_os_str()),
+            "symlink",
+        ),
+        EntryKind::Hardlink(link) => (
+            "0".to_string(),
+            format!(" -> {:?}", link.as_os_str()),
+            "symlink",
+        ),
+        EntryKind::Device(dev) => (
+            format!("{},{}", dev.major, dev.minor),
+            String::new(),
+            if meta.stat.is_chardev() {
+                "characters pecial file"
+            } else if meta.stat.is_blockdev() {
+                "block special file"
+            } else {
+                "device"
+            },
+        ),
+        EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
+        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
+        EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
+        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
+    };
+
+    let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
+        Ok(name) => std::borrow::Cow::Borrowed(name),
+        Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
+    };
+
+    format!(
+        "  File: {}{}\n  \
+           Size: {:<13} Type: {}\n\
+         Access: ({:o}/{})  Uid: {:<5} Gid: {:<5}\n\
+         Modify: {}\n",
+        file_name,
+        link,
+        size,
+        type_name,
+        meta.file_mode(),
+        mode_string,
+        meta.stat.uid,
+        meta.stat.gid,
+        format_mtime(&meta.stat.mtime),
+    )
+}
diff --git a/pbs-client/src/pxar_backup_stream.rs b/pbs-client/src/pxar_backup_stream.rs
new file mode 100644 (file)
index 0000000..d39eb6c
--- /dev/null
@@ -0,0 +1,125 @@
+use std::io::Write;
+use std::path::Path;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::{Context, Poll};
+
+use anyhow::{format_err, Error};
+use futures::stream::Stream;
+use futures::future::{Abortable, AbortHandle};
+use nix::dir::Dir;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+
+use pbs_datastore::catalog::CatalogWriter;
+use pbs_tools::sync::StdChannelWriter;
+use pbs_tools::tokio::TokioWriterAdapter;
+
+/// Stream implementation to encode and upload .pxar archives.
+///
+/// The hyper client needs an async Stream for file upload, so we
+/// spawn a separate task to encode the .pxar data and pipe it to the
+/// consumer.
+pub struct PxarBackupStream {
+    rx: Option<std::sync::mpsc::Receiver<Result<Vec<u8>, Error>>>,
+    handle: Option<AbortHandle>,
+    error: Arc<Mutex<Option<String>>>,
+}
+
+impl Drop for PxarBackupStream {
+    fn drop(&mut self) {
+        self.rx = None;
+        self.handle.take().unwrap().abort();
+    }
+}
+
+impl PxarBackupStream {
+    pub fn new<W: Write + Send + 'static>(
+        dir: Dir,
+        catalog: Arc<Mutex<CatalogWriter<W>>>,
+        options: crate::pxar::PxarCreateOptions,
+    ) -> Result<Self, Error> {
+        let (tx, rx) = std::sync::mpsc::sync_channel(10);
+
+        let buffer_size = 256 * 1024;
+
+        let error = Arc::new(Mutex::new(None));
+        let error2 = Arc::clone(&error);
+        let handler = async move {
+            let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
+                buffer_size,
+                StdChannelWriter::new(tx),
+            ));
+
+            let verbose = options.verbose;
+
+            let writer = pxar::encoder::sync::StandardWriter::new(writer);
+            if let Err(err) = crate::pxar::create_archive(
+                dir,
+                writer,
+                crate::pxar::Flags::DEFAULT,
+                move |path| {
+                    if verbose {
+                        println!("{:?}", path);
+                    }
+                    Ok(())
+                },
+                Some(catalog),
+                options,
+            ).await {
+                let mut error = error2.lock().unwrap();
+                *error = Some(err.to_string());
+            }
+        };
+
+        let (handle, registration) = AbortHandle::new_pair();
+        let future = Abortable::new(handler, registration);
+        tokio::spawn(future);
+
+        Ok(Self {
+            rx: Some(rx),
+            handle: Some(handle),
+            error,
+        })
+    }
+
+    pub fn open<W: Write + Send + 'static>(
+        dirname: &Path,
+        catalog: Arc<Mutex<CatalogWriter<W>>>,
+        options: crate::pxar::PxarCreateOptions,
+    ) -> Result<Self, Error> {
+        let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
+
+        Self::new(
+            dir,
+            catalog,
+            options,
+        )
+    }
+}
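+// Usage sketch (illustrative; `catalog` and `options` are assumed to exist):
+//
+//     let stream = PxarBackupStream::open(Path::new("/some/dir"), catalog, options)?;
+//     // the stream yields `Result<Vec<u8>, Error>` chunks suitable as an async
+//     // HTTP request body, e.g. via `hyper::Body::wrap_stream(stream)`.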
+
+impl Stream for PxarBackupStream {
+    type Item = Result<Vec<u8>, Error>;
+
+    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
+        {
+            // limit lock scope
+            let error = self.error.lock().unwrap();
+            if let Some(ref msg) = *error {
+                return Poll::Ready(Some(Err(format_err!("{}", msg))));
+            }
+        }
+
+        match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
+            Ok(data) => Poll::Ready(Some(data)),
+            Err(_) => {
+                let error = self.error.lock().unwrap();
+                if let Some(ref msg) = *error {
+                    return Poll::Ready(Some(Err(format_err!("{}", msg))));
+                }
+                Poll::Ready(None) // channel closed, no error
+            }
+        }
+    }
+}
diff --git a/pbs-client/src/remote_chunk_reader.rs b/pbs-client/src/remote_chunk_reader.rs
new file mode 100644 (file)
index 0000000..61b6fb0
--- /dev/null
@@ -0,0 +1,125 @@
+use std::future::Future;
+use std::collections::HashMap;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, Error};
+
+use pbs_datastore::{CryptConfig, CryptMode};
+use pbs_datastore::data_blob::DataBlob;
+use pbs_datastore::read_chunk::ReadChunk;
+use pbs_datastore::read_chunk::AsyncReadChunk;
+use pbs_runtime::block_on;
+
+use super::BackupReader;
+
+/// Read chunks from a remote host using a `BackupReader`
+#[derive(Clone)]
+pub struct RemoteChunkReader {
+    client: Arc<BackupReader>,
+    crypt_config: Option<Arc<CryptConfig>>,
+    crypt_mode: CryptMode,
+    cache_hint: Arc<HashMap<[u8; 32], usize>>,
+    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
+}
+
+impl RemoteChunkReader {
+    /// Create a new instance.
+    ///
+    /// Chunks listed in `cache_hint` are cached and kept in RAM.
+    pub fn new(
+        client: Arc<BackupReader>,
+        crypt_config: Option<Arc<CryptConfig>>,
+        crypt_mode: CryptMode,
+        cache_hint: HashMap<[u8; 32], usize>,
+    ) -> Self {
+        Self {
+            client,
+            crypt_config,
+            crypt_mode,
+            cache_hint: Arc::new(cache_hint),
+            cache: Arc::new(Mutex::new(HashMap::new())),
+        }
+    }
+
+    /// Download a raw chunk. This only verifies the (untrusted) CRC32; use
+    /// `DataBlob::verify_unencrypted` or `DataBlob::decode` before storing/processing it further.
+    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+        let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
+
+        self.client
+            .download_chunk(&digest, &mut chunk_data)
+            .await?;
+
+        let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
+
+        match self.crypt_mode {
+            CryptMode::Encrypt => {
+                match chunk.crypt_mode()? {
+                    CryptMode::Encrypt => Ok(chunk),
+                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
+                }
+            },
+            CryptMode::SignOnly | CryptMode::None => {
+                match chunk.crypt_mode()? {
+                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
+                    CryptMode::SignOnly | CryptMode::None => Ok(chunk),
+                }
+            },
+        }
+    }
+}
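+// Usage sketch (illustrative; `client` wraps an established `BackupReader` session):
+//
+//     let reader = RemoteChunkReader::new(client, crypt_config, CryptMode::Encrypt, cache_hint);
+//     let data = AsyncReadChunk::read_chunk(&reader, &digest).await?; // decoded payload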
+
+impl ReadChunk for RemoteChunkReader {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+        block_on(Self::read_raw_chunk(self, digest))
+    }
+
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
+            return Ok(raw_data.to_vec());
+        }
+
+        let chunk = ReadChunk::read_raw_chunk(self, digest)?;
+
+        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
+
+        let use_cache = self.cache_hint.contains_key(digest);
+        if use_cache {
+            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
+        }
+
+        Ok(raw_data)
+    }
+}
+
+impl AsyncReadChunk for RemoteChunkReader {
+    fn read_raw_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
+        Box::pin(Self::read_raw_chunk(self, digest))
+    }
+
+    fn read_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
+        Box::pin(async move {
+            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
+                return Ok(raw_data.to_vec());
+            }
+
+            let chunk = Self::read_raw_chunk(self, digest).await?;
+
+            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
+
+            let use_cache = self.cache_hint.contains_key(digest);
+            if use_cache {
+                (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
+            }
+
+            Ok(raw_data)
+        })
+    }
+}
diff --git a/pbs-client/src/task_log.rs b/pbs-client/src/task_log.rs
new file mode 100644 (file)
index 0000000..1d1af14
--- /dev/null
@@ -0,0 +1,117 @@
+use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
+
+use anyhow::{bail, Error};
+use serde_json::{json, Value};
+use tokio::signal::unix::{signal, SignalKind};
+use futures::*;
+
+use proxmox::api::cli::format_and_print_result;
+
+use pbs_tools::percent_encoding::percent_encode_component;
+
+use super::HttpClient;
+
+/// Display task log on console
+///
+/// This polls the task API and prints the log to the console. It also
+/// catches interrupt signals, and sends a abort request to the task if
+/// the user presses CTRL-C. Two interrupts cause an immediate end of
+/// the loop. The task may still run in that case.
+pub async fn display_task_log(
+    client: &mut HttpClient,
+    upid_str: &str,
+    strip_date: bool,
+) -> Result<(), Error> {
+
+    let mut signal_stream = signal(SignalKind::interrupt())?;
+    let abort_count = Arc::new(AtomicUsize::new(0));
+    let abort_count2 = Arc::clone(&abort_count);
+
+    let abort_future = async move {
+        while signal_stream.recv().await.is_some() {
+            println!("got shutdown request (SIGINT)");
+            let prev_count = abort_count2.fetch_add(1, Ordering::SeqCst);
+            if prev_count >= 1 {
+                println!("forced exit (task still running)");
+                break;
+            }
+        }
+        Ok::<_, Error>(())
+    };
+
+    let request_future = async move {
+
+        let mut start = 1;
+        let limit = 500;
+
+        loop {
+
+            let abort = abort_count.load(Ordering::Relaxed);
+            if abort > 0 {
+                let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
+                let _ = client.delete(&path, None).await?;
+            }
+
+            let param = json!({ "start": start, "limit": limit, "test-status": true });
+
+            let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
+            let result = client.get(&path, Some(param)).await?;
+
+            let active = result["active"].as_bool().unwrap();
+            let total = result["total"].as_u64().unwrap();
+            let data = result["data"].as_array().unwrap();
+
+            let lines = data.len();
+
+            for item in data {
+                let n = item["n"].as_u64().unwrap();
+                let t = item["t"].as_str().unwrap();
+                if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
+                if strip_date && t.len() > 27 && &t[25..27] == ": " {
+                    let line = &t[27..];
+                    println!("{}", line);
+                } else {
+                    println!("{}", t);
+                }
+                start += 1;
+            }
+
+            if start > total {
+                if active {
+                    tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
+                } else {
+                    break;
+                }
+            } else if lines != limit {
+                bail!("got wrong number of lines from server ({} != {})", lines, limit);
+            }
+        }
+
+        Ok(())
+    };
+
+    futures::select!{
+        request = request_future.fuse() => request?,
+        abort = abort_future.fuse() => abort?,
+    };
+
+    Ok(())
+}
+
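+// Usage sketch (illustrative, not part of this module): callers typically get
+// the UPID from the response of the API call that started the task and then
+// follow its log; the endpoint below is an assumption for the example.
+//
+//     let result = client.post("api2/json/nodes/localhost/gc", None).await?;
+//     if let Some(upid) = result["data"].as_str() {
+//         display_task_log(&mut client, upid, true).await?;
+//     }
+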
+/// Display task result (upid), or view task log - depending on output format
+pub async fn view_task_result(
+    client: &mut HttpClient,
+    result: Value,
+    output_format: &str,
+) -> Result<(), Error> {
+    let data = &result["data"];
+    if output_format == "text" {
+        if let Some(upid) = data.as_str() {
+            display_task_log(client, upid, true).await?;
+        }
+    } else {
+        format_and_print_result(&data, &output_format);
+    }
+
+    Ok(())
+}
diff --git a/pbs-client/src/tools/key_source.rs b/pbs-client/src/tools/key_source.rs
new file mode 100644 (file)
index 0000000..340fe23
--- /dev/null
@@ -0,0 +1,585 @@
+use std::convert::TryFrom;
+use std::path::PathBuf;
+use std::os::unix::io::{FromRawFd, RawFd};
+use std::io::Read;
+
+use anyhow::{bail, format_err, Error};
+use serde_json::Value;
+
+use proxmox::api::schema::*;
+use proxmox::sys::linux::tty;
+use proxmox::tools::fs::file_get_contents;
+
+use pbs_api_types::CryptMode;
+
+pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
+pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
+
+pub const KEYFILE_SCHEMA: Schema =
+    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
+        .schema();
+
+pub const KEYFD_SCHEMA: Schema =
+    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
+        .minimum(0)
+        .schema();
+
+pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
+    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
+    .schema();
+
+pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
+    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
+        .minimum(0)
+        .schema();
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum KeySource {
+    DefaultKey,
+    Fd,
+    Path(String),
+}
+
+pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
+    match source {
+        KeySource::DefaultKey => format!("Using default {} key..", key_type),
+        KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
+        KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
+    }
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct KeyWithSource {
+    pub source: KeySource,
+    pub key: Vec<u8>,
+}
+
+impl KeyWithSource {
+    pub fn from_fd(key: Vec<u8>) -> Self {
+        Self {
+            source: KeySource::Fd,
+            key,
+        }
+    }
+
+    pub fn from_default(key: Vec<u8>) -> Self {
+        Self {
+            source: KeySource::DefaultKey,
+            key,
+        }
+    }
+
+    pub fn from_path(path: String, key: Vec<u8>) -> Self {
+        Self {
+            source: KeySource::Path(path),
+            key,
+        }
+    }
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub struct CryptoParams {
+    pub mode: CryptMode,
+    pub enc_key: Option<KeyWithSource>,
+    // FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
+    pub master_pubkey: Option<KeyWithSource>,
+}
+
+pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
+    do_crypto_parameters(param, false)
+}
+
+pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
+    do_crypto_parameters(param, true)
+}
+
+fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
+    let keyfile = match param.get("keyfile") {
+        Some(Value::String(keyfile)) => Some(keyfile),
+        Some(_) => bail!("bad --keyfile parameter type"),
+        None => None,
+    };
+
+    let key_fd = match param.get("keyfd") {
+        Some(Value::Number(key_fd)) => Some(
+            RawFd::try_from(key_fd
+                .as_i64()
+                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+            )
+            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+        ),
+        Some(_) => bail!("bad --keyfd parameter type"),
+        None => None,
+    };
+
+    let master_pubkey_file = match param.get("master-pubkey-file") {
+        Some(Value::String(keyfile)) => Some(keyfile),
+        Some(_) => bail!("bad --master-pubkey-file parameter type"),
+        None => None,
+    };
+
+    let master_pubkey_fd = match param.get("master-pubkey-fd") {
+        Some(Value::Number(key_fd)) => Some(
+            RawFd::try_from(key_fd
+                .as_i64()
+                .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
+            )
+            .map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
+        ),
+        Some(_) => bail!("bad --master-pubkey-fd parameter type"),
+        None => None,
+    };
+
+    let mode: Option<CryptMode> = match param.get("crypt-mode") {
+        Some(mode) => Some(serde_json::from_value(mode.clone())?),
+        None => None,
+    };
+
+    let key = match (keyfile, key_fd) {
+        (None, None) => None,
+        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+        (Some(keyfile), None) => Some(KeyWithSource::from_path(
+            keyfile.clone(),
+            file_get_contents(keyfile)?,
+        )),
+        (None, Some(fd)) => {
+            let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
+            let mut data = Vec::new();
+            let _len: usize = input.read_to_end(&mut data).map_err(|err| {
+                format_err!("error reading encryption key from fd {}: {}", fd, err)
+            })?;
+            if keep_keyfd_open {
+                // don't close fd if requested, and try to reset seek position
+                std::mem::forget(input);
+                unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
+            }
+            Some(KeyWithSource::from_fd(data))
+        }
+    };
+
+    let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
+        (None, None) => None,
+        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+        (Some(keyfile), None) => Some(KeyWithSource::from_path(
+            keyfile.clone(),
+            file_get_contents(keyfile)?,
+        )),
+        (None, Some(fd)) => {
+            let input = unsafe { std::fs::File::from_raw_fd(fd) };
+            let mut data = Vec::new();
+            let _len: usize = { input }
+                .read_to_end(&mut data)
+                .map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
+            Some(KeyWithSource::from_fd(data))
+        }
+    };
+
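+    // Resolution logic, summarized:
+    //  - no crypt-mode: encrypt when any key (explicit or default) is available
+    //  - crypt-mode=none: reject explicit keys, otherwise proceed unencrypted
+    //  - crypt-mode=encrypt/sign-only: an explicit or default key is required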
+    let res = match mode {
+        // no crypt mode, enable encryption if keys are available
+        None => match (key, master_pubkey) {
+            // only default keys if available
+            (None, None) => match read_optional_default_encryption_key()? {
+                None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
+                enc_key => {
+                    let master_pubkey = read_optional_default_master_pubkey()?;
+                    CryptoParams {
+                        mode: CryptMode::Encrypt,
+                        enc_key,
+                        master_pubkey,
+                    }
+                },
+            },
+
+            // explicit master key, default enc key needed
+            (None, master_pubkey) => match read_optional_default_encryption_key()? {
+                None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
+                enc_key => {
+                    CryptoParams {
+                        mode: CryptMode::Encrypt,
+                        enc_key,
+                        master_pubkey,
+                    }
+                },
+            },
+
+            // explicit keyfile, maybe default master key
+            (enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },
+
+            // explicit keyfile and master key
+            (enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
+        },
+
+        // explicitly disabled encryption
+        Some(CryptMode::None) => match (key, master_pubkey) {
+            // no keys => OK, no encryption
+            (None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
+
+            // --keyfile and --crypt-mode=none
+            (Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
+
+            // --master-pubkey-file and --crypt-mode=none
+            (_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
+        },
+
+        // explicitly enabled encryption
+        Some(mode) => match (key, master_pubkey) {
+            // no key, maybe master key
+            (None, master_pubkey) => match read_optional_default_encryption_key()? {
+                None => bail!("--crypt-mode without --keyfile and no default key file available"),
+                enc_key => {
+                    eprintln!("Encrypting with default encryption key!");
+                    let master_pubkey = match master_pubkey {
+                        None => read_optional_default_master_pubkey()?,
+                        master_pubkey => master_pubkey,
+                    };
+
+                    CryptoParams {
+                        mode,
+                        enc_key,
+                        master_pubkey,
+                    }
+                },
+            },
+
+            // --keyfile and --crypt-mode other than none
+            (enc_key, master_pubkey) => {
+                let master_pubkey = match master_pubkey {
+                    None => read_optional_default_master_pubkey()?,
+                    master_pubkey => master_pubkey,
+                };
+
+                CryptoParams { mode, enc_key, master_pubkey }
+            },
+        },
+    };
+
+    Ok(res)
+}
+
+pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
+    super::find_xdg_file(
+        DEFAULT_MASTER_PUBKEY_FILE_NAME,
+        "default master public key file",
+    )
+}
+
+pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
+    super::place_xdg_file(
+        DEFAULT_MASTER_PUBKEY_FILE_NAME,
+        "default master public key file",
+    )
+}
+
+pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
+    super::find_xdg_file(
+        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
+        "default encryption key file",
+    )
+}
+
+pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
+    super::place_xdg_file(
+        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
+        "default encryption key file",
+    )
+}
+
+#[cfg(not(test))]
+pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
+    find_default_encryption_key()?
+        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
+        .transpose()
+}
+
+#[cfg(not(test))]
+pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
+    find_default_master_pubkey()?
+        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
+        .transpose()
+}
+
+#[cfg(test)]
+static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
+
+#[cfg(test)]
+pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
+    // not safe when multiple concurrent test cases end up here!
+    unsafe {
+        match &TEST_DEFAULT_ENCRYPTION_KEY {
+            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
+            Ok(None) => Ok(None),
+            Err(_) => bail!("test error"),
+        }
+    }
+}
+
+#[cfg(test)]
+// not safe when multiple concurrent test cases end up here!
+pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
+    TEST_DEFAULT_ENCRYPTION_KEY = value;
+}
+
+#[cfg(test)]
+static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
+
+#[cfg(test)]
+pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
+    // not safe when multiple concurrent test cases end up here!
+    unsafe {
+        match &TEST_DEFAULT_MASTER_PUBKEY {
+            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
+            Ok(None) => Ok(None),
+            Err(_) => bail!("test error"),
+        }
+    }
+}
+
+#[cfg(test)]
+// not safe when multiple concurrent test cases end up here!
+pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
+    TEST_DEFAULT_MASTER_PUBKEY = value;
+}
+
+pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
+    // fixme: implement other input methods
+
+    use std::env::VarError::*;
+    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
+        Ok(p) => return Ok(p.as_bytes().to_vec()),
+        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
+        Err(NotPresent) => {
+            // Try another method
+        }
+    }
+
+    // If we're on a TTY, query the user for a password
+    if tty::stdin_isatty() {
+        return Ok(tty::read_password("Encryption Key Password: ")?);
+    }
+
+    bail!("no password input mechanism available");
+}
+
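+// Sketch: non-interactive callers can supply the password via the environment
+// (value illustrative):
+//
+//     std::env::set_var("PBS_ENCRYPTION_PASSWORD", "secret");
+//     assert_eq!(get_encryption_key_password()?, b"secret".to_vec());
+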
+#[test]
+// WARNING: there must only be one test for crypto_parameters as the default key handling is not
+// safe w.r.t. concurrency
+fn test_crypto_parameters_handling() -> Result<(), Error> {
+    use serde_json::json;
+    use proxmox::tools::fs::{replace_file, CreateOptions};
+
+    let some_key = vec![1;1];
+    let default_key = vec![2;1];
+
+    let some_master_key = vec![3;1];
+    let default_master_key = vec![4;1];
+
+    let keypath = "./target/testout/keyfile.test";
+    let master_keypath = "./target/testout/masterkeyfile.test";
+    let invalid_keypath = "./target/testout/invalid_keyfile.test";
+
+    let no_key_res = CryptoParams {
+        enc_key: None,
+        master_pubkey: None,
+        mode: CryptMode::None,
+    };
+    let some_key_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_path(
+            keypath.to_string(),
+            some_key.clone(),
+        )),
+        master_pubkey: None,
+        mode: CryptMode::Encrypt,
+    };
+    let some_key_some_master_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_path(
+            keypath.to_string(),
+            some_key.clone(),
+        )),
+        master_pubkey: Some(KeyWithSource::from_path(
+            master_keypath.to_string(),
+            some_master_key.clone(),
+        )),
+        mode: CryptMode::Encrypt,
+    };
+    let some_key_default_master_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_path(
+            keypath.to_string(),
+            some_key.clone(),
+        )),
+        master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
+        mode: CryptMode::Encrypt,
+    };
+
+    let some_key_sign_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_path(
+            keypath.to_string(),
+            some_key.clone(),
+        )),
+        master_pubkey: None,
+        mode: CryptMode::SignOnly,
+    };
+    let default_key_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
+        master_pubkey: None,
+        mode: CryptMode::Encrypt,
+    };
+    let default_key_sign_res = CryptoParams {
+        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
+        master_pubkey: None,
+        mode: CryptMode::SignOnly,
+    };
+
+    replace_file(&keypath, &some_key, CreateOptions::default())?;
+    replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
+
+    // no params, no default key == no key
+    let res = crypto_parameters(&json!({}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // keyfile param == key from keyfile
+    let res = crypto_parameters(&json!({"keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // crypt mode none == no key
+    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
+
+    // crypt mode none with explicit key == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
+
+    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
+    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_sign_res);
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // invalid keyfile parameter always errors
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+
+    // now set a default key
+    unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }
+
+    // and repeat
+
+    // no params but default key == default key
+    let res = crypto_parameters(&json!({}));
+    assert_eq!(res.unwrap(), default_key_res);
+
+    // keyfile param == key from keyfile
+    let res = crypto_parameters(&json!({"keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // crypt mode none == no key
+    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
+    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
+    assert_eq!(res.unwrap(), default_key_sign_res);
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
+    assert_eq!(res.unwrap(), default_key_res);
+
+    // crypt mode none with explicit key == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
+
+    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
+    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_sign_res);
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // invalid keyfile parameter always errors
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+
+    // now make default key retrieval error
+    unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
+
+    // and repeat
+
+    // no params, default key retrieval errors == Error
+    assert!(crypto_parameters(&json!({})).is_err());
+
+    // keyfile param == key from keyfile
+    let res = crypto_parameters(&json!({"keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // crypt mode none == no key
+    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
+
+    // crypt mode none with explicit key == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
+
+    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
+    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_sign_res);
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_res);
+
+    // invalid keyfile parameter always errors
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
+
+    // now remove default key again
+    unsafe { set_test_encryption_key(Ok(None)); }
+    // set a default master key
+    unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
+
+    // explicit master key but no encryption key available == Error
+    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
+    // just a default == no key
+    let res = crypto_parameters(&json!({}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // keyfile param == key from keyfile
+    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
+    assert_eq!(res.unwrap(), some_key_some_master_res);
+    // same with fallback to default master key
+    let res = crypto_parameters(&json!({"keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_default_master_res);
+
+    // crypt mode none == error
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
+    // with just default master key == no key
+    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
+    assert_eq!(res.unwrap(), no_key_res);
+
+    // crypt mode encrypt without enc key == error
+    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
+
+    // crypt mode none with explicit key == Error
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
+
+    // crypt mode encrypt with keyfile == key from keyfile with correct mode
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
+    assert_eq!(res.unwrap(), some_key_some_master_res);
+    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
+    assert_eq!(res.unwrap(), some_key_default_master_res);
+
+    // invalid master keyfile parameter always errors when a key is passed, even with a valid
+    // default master key
+    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
+    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
+
+    Ok(())
+}
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
new file mode 100644 (file)
index 0000000..7b932b6
--- /dev/null
@@ -0,0 +1,374 @@
+//! Shared tools useful for common CLI clients.
+use std::collections::HashMap;
+
+use anyhow::{bail, format_err, Context, Error};
+use serde_json::{json, Value};
+use xdg::BaseDirectories;
+
+use proxmox::{
+    api::schema::*,
+    tools::fs::file_get_json,
+};
+
+use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
+use pbs_datastore::BackupDir;
+use pbs_tools::json::json_object_to_query;
+
+use crate::{BackupRepository, HttpClient, HttpClientOptions};
+
+pub mod key_source;
+
+const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
+const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
+
+pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
+    .format(&BACKUP_REPO_URL)
+    .max_length(256)
+    .schema();
+
+pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
+    .minimum(64)
+    .maximum(4096)
+    .default(4096)
+    .schema();
+
+pub fn get_default_repository() -> Option<String> {
+    std::env::var("PBS_REPOSITORY").ok()
+}
+
+pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
+    let repo_url = param["repository"]
+        .as_str()
+        .map(String::from)
+        .or_else(get_default_repository)
+        .ok_or_else(|| format_err!("unable to get (default) repository"))?;
+
+    let repo: BackupRepository = repo_url.parse()?;
+
+    Ok(repo)
+}
+
+pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> {
+    param
+        .get("repository")
+        .map(String::from)
+        .or_else(get_default_repository)
+        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
+}
+
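+// Sketch: an explicit "repository" parameter wins; otherwise $PBS_REPOSITORY
+// is used (the repository string below is illustrative):
+//
+//     let param = serde_json::json!({ "repository": "user@pbs@backup.example.org:store1" });
+//     let repo = extract_repository_from_value(&param)?;
+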
+pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
+    connect_do(repo.host(), repo.port(), repo.auth_id())
+        .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
+}
+
+fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
+    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
+
+    use std::env::VarError::*;
+    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
+        Ok(p) => Some(p),
+        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
+        Err(NotPresent) => None,
+    };
+
+    let options = HttpClientOptions::new_interactive(password, fingerprint);
+
+    HttpClient::new(server, port, auth_id, options)
+}
+
+/// Like `get`, but simply ignores errors and returns `Value::Null` instead.
+pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
+
+    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
+    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
+
+    // ticket cache, but no questions asked
+    let options = HttpClientOptions::new_interactive(password, fingerprint)
+        .interactive(false);
+
+    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
+        Ok(v) => v,
+        _ => return Value::Null,
+    };
+
+    let mut resp = match client.get(url, None).await {
+        Ok(v) => v,
+        _ => return Value::Null,
+    };
+
+    if let Some(map) = resp.as_object_mut() {
+        if let Some(data) = map.remove("data") {
+            return data;
+        }
+    }
+    Value::Null
+}
+
+pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    pbs_runtime::main(async { complete_backup_group_do(param).await })
+}
+
+pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
+
+    let mut result = vec![];
+
+    let repo = match extract_repository_from_map(param) {
+        Some(v) => v,
+        _ => return result,
+    };
+
+    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
+
+    let data = try_get(&repo, &path).await;
+
+    if let Some(list) = data.as_array() {
+        for item in list {
+            if let (Some(backup_id), Some(backup_type)) =
+                (item["backup-id"].as_str(), item["backup-type"].as_str())
+            {
+                result.push(format!("{}/{}", backup_type, backup_id));
+            }
+        }
+    }
+
+    result
+}
+
+pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
+}
+
+pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+
+    if arg.matches('/').count() < 2 {
+        let groups = complete_backup_group_do(param).await;
+        let mut result = vec![];
+        for group in groups {
+            result.push(group.to_string());
+            result.push(format!("{}/", group));
+        }
+        return result;
+    }
+
+    complete_backup_snapshot_do(param).await
+}
+
+pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
+}
+
+pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
+
+    let mut result = vec![];
+
+    let repo = match extract_repository_from_map(param) {
+        Some(v) => v,
+        _ => return result,
+    };
+
+    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
+
+    let data = try_get(&repo, &path).await;
+
+    if let Some(list) = data.as_array() {
+        for item in list {
+            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
+                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
+            {
+                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
+                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+                }
+            }
+        }
+    }
+
+    result
+}
+
+pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    pbs_runtime::main(async { complete_server_file_name_do(param).await })
+}
+
+pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
+
+    let mut result = vec![];
+
+    let repo = match extract_repository_from_map(param) {
+        Some(v) => v,
+        _ => return result,
+    };
+
+    let snapshot: BackupDir = match param.get("snapshot") {
+        Some(path) => {
+            match path.parse() {
+                Ok(v) => v,
+                _ => return result,
+            }
+        }
+        _ => return result,
+    };
+
+    let query = json_object_to_query(json!({
+        "backup-type": snapshot.group().backup_type(),
+        "backup-id": snapshot.group().backup_id(),
+        "backup-time": snapshot.backup_time(),
+    })).unwrap();
+
+    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
+
+    let data = try_get(&repo, &path).await;
+
+    if let Some(list) = data.as_array() {
+        for item in list {
+            if let Some(filename) = item["filename"].as_str() {
+                result.push(filename.to_owned());
+            }
+        }
+    }
+
+    result
+}
+
+pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    complete_server_file_name(arg, param)
+        .iter()
+        .map(|v| pbs_tools::format::strip_server_file_extension(&v))
+        .collect()
+}
+
+pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    complete_server_file_name(arg, param)
+        .iter()
+        .filter_map(|name| {
+            if name.ends_with(".pxar.didx") {
+                Some(pbs_tools::format::strip_server_file_extension(name))
+            } else {
+                None
+            }
+        })
+        .collect()
+}
+
+pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    complete_server_file_name(arg, param)
+        .iter()
+        .filter_map(|name| {
+            if name.ends_with(".img.fidx") {
+                Some(pbs_tools::format::strip_server_file_extension(name))
+            } else {
+                None
+            }
+        })
+        .collect()
+}
+
+pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+
+    let mut result = vec![];
+
+    let mut size = 64;
+    loop {
+        result.push(size.to_string());
+        size *= 2;
+        if size > 4096 { break; }
+    }
+
+    result
+}
+
+pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    pbs_runtime::main(async { complete_auth_id_do(param).await })
+}
+
+pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
+
+    let mut result = vec![];
+
+    let repo = match extract_repository_from_map(param) {
+        Some(v) => v,
+        _ => return result,
+    };
+
+    let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
+
+    if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
+        for user in parsed {
+            result.push(user.userid.to_string());
+            for token in user.tokens {
+                result.push(token.tokenid.to_string());
+            }
+        }
+    };
+
+    result
+}
+
+pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+    let mut result = vec![];
+
+    let base = match BaseDirectories::with_prefix("proxmox-backup") {
+        Ok(v) => v,
+        _ => return result,
+    };
+
+    // usually $HOME/.cache/proxmox-backup/repo-list
+    let path = match base.place_cache_file("repo-list") {
+        Ok(v) => v,
+        _ => return result,
+    };
+
+    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
+
+    if let Some(map) = data.as_object() {
+        for (repo, _count) in map {
+            result.push(repo.to_owned());
+        }
+    }
+
+    result
+}
+
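+// Sketch: these helpers match the signature of proxmox CLI completion hooks;
+// wiring them up might look roughly like this (the command definition is an
+// assumption for the example):
+//
+//     let cmd = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
+//         .completion_cb("repository", complete_repository)
+//         .completion_cb("snapshot", complete_backup_snapshot);
+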
+pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+    let mut result = vec![];
+
+    let data: Vec<&str> = arg.splitn(2, ':').collect();
+
+    if data.len() != 2 {
+        result.push(String::from("root.pxar:/"));
+        result.push(String::from("etc.pxar:/etc"));
+        return result;
+    }
+
+    let files = pbs_tools::fs::complete_file_name(data[1], param);
+
+    for file in files {
+        result.push(format!("{}:{}", data[0], file));
+    }
+
+    result
+}
+
+pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
+    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
+}
+
+/// Convenience helper for better error messages.
+pub fn find_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<Option<std::path::PathBuf>, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .map(|base| base.find_config_file(file_name))
+        .with_context(|| format!("error searching for {}", description))
+}
+
+pub fn place_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<std::path::PathBuf, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .and_then(|base| base.place_config_file(file_name).map_err(Error::from))
+        .with_context(|| format!("failed to place {} in xdg home", description))
+}
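+
+// Sketch: key_source.rs locates per-user default key files with these helpers:
+//
+//     let key_path = find_xdg_file("encryption-key.json", "default encryption key file")?;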
diff --git a/pbs-client/src/vsock_client.rs b/pbs-client/src/vsock_client.rs
new file mode 100644 (file)
index 0000000..3f0f373
--- /dev/null
@@ -0,0 +1,256 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use anyhow::{bail, format_err, Error};
+use futures::*;
+use http::Uri;
+use http::{Request, Response};
+use hyper::client::connect::{Connected, Connection};
+use hyper::client::Client;
+use hyper::Body;
+use pin_project::pin_project;
+use serde_json::Value;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
+use tokio::net::UnixStream;
+
+use proxmox::api::error::HttpError;
+
+pub const DEFAULT_VSOCK_PORT: u16 = 807;
+
+#[derive(Clone)]
+struct VsockConnector;
+
+#[pin_project]
+/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
+struct UnixConnection {
+    #[pin]
+    stream: UnixStream,
+}
+
+impl tower_service::Service<Uri> for VsockConnector {
+    type Response = UnixConnection;
+    type Error = Error;
+    type Future = Pin<Box<dyn Future<Output = Result<UnixConnection, Error>> + Send>>;
+
+    fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, dst: Uri) -> Self::Future {
+        use nix::sys::socket::*;
+        use std::os::unix::io::FromRawFd;
+
+        // connect can block, so run in blocking task (though in reality it seems to immediately
+        // return with either ENODEV or ETIMEDOUT in case of error)
+        tokio::task::spawn_blocking(move || {
+            if dst.scheme_str().unwrap_or_default() != "vsock" {
+                bail!("invalid URI (scheme) for vsock connector: {}", dst);
+            }
+
+            let cid = match dst.host() {
+                Some(host) => host.parse().map_err(|err| {
+                    format_err!(
+                        "invalid URI (host not a number) for vsock connector: {} ({})",
+                        dst,
+                        err
+                    )
+                })?,
+                None => bail!("invalid URI (no host) for vsock connector: {}", dst),
+            };
+
+            let port = match dst.port_u16() {
+                Some(port) => port,
+                None => bail!("invalid URI (bad port) for vsock connector: {}", dst),
+            };
+
+            let sock_fd = socket(
+                AddressFamily::Vsock,
+                SockType::Stream,
+                SockFlag::empty(),
+                None,
+            )?;
+
+            let sock_addr = VsockAddr::new(cid, port as u32);
+            connect(sock_fd, &SockAddr::Vsock(sock_addr))?;
+
+            // connect sync, but set nonblock after (tokio requires it)
+            let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };
+            std_stream.set_nonblocking(true)?;
+
+            let stream = tokio::net::UnixStream::from_std(std_stream)?;
+            let connection = UnixConnection { stream };
+
+            Ok(connection)
+        })
+        // unravel the thread JoinHandle to a usable future
+        .map(|res| match res {
+            Ok(res) => res,
+            Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
+        })
+        .boxed()
+    }
+}
+
+impl Connection for UnixConnection {
+    fn connected(&self) -> Connected {
+        Connected::new()
+    }
+}
+
+impl AsyncRead for UnixConnection {
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf,
+    ) -> Poll<Result<(), std::io::Error>> {
+        let this = self.project();
+        this.stream.poll_read(cx, buf)
+    }
+}
+
+impl AsyncWrite for UnixConnection {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<tokio::io::Result<usize>> {
+        let this = self.project();
+        this.stream.poll_write(cx, buf)
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
+        let this = self.project();
+        this.stream.poll_flush(cx)
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
+        let this = self.project();
+        this.stream.poll_shutdown(cx)
+    }
+}
+
+/// Slimmed-down version of HttpClient for virtio-vsock connections (file restore daemon)
+pub struct VsockClient {
+    client: Client<VsockConnector>,
+    cid: i32,
+    port: u16,
+    auth: Option<String>,
+}
+
+impl VsockClient {
+    pub fn new(cid: i32, port: u16, auth: Option<String>) -> Self {
+        let conn = VsockConnector {};
+        let client = Client::builder().build::<_, Body>(conn);
+        Self {
+            client,
+            cid,
+            port,
+            auth,
+        }
+    }
+
+    pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+        let req = self.request_builder("GET", path, data)?;
+        self.api_request(req).await
+    }
+
+    pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
+        let req = self.request_builder("POST", path, data)?;
+        self.api_request(req).await
+    }
+
+    pub async fn download(
+        &self,
+        path: &str,
+        data: Option<Value>,
+        output: &mut (dyn AsyncWrite + Send + Unpin),
+    ) -> Result<(), Error> {
+        let req = self.request_builder("GET", path, data)?;
+
+        let client = self.client.clone();
+
+        let resp = client
+            .request(req)
+            .await
+            .map_err(|_| format_err!("vsock download request timed out"))?;
+        let status = resp.status();
+        if !status.is_success() {
+            Self::api_response(resp).await.map(|_| ())?
+        } else {
+            resp.into_body()
+                .map_err(Error::from)
+                .try_fold(output, move |acc, chunk| async move {
+                    acc.write_all(&chunk).await?;
+                    Ok::<_, Error>(acc)
+                })
+                .await?;
+        }
+        Ok(())
+    }
+
+    async fn api_response(response: Response<Body>) -> Result<Value, Error> {
+        let status = response.status();
+        let data = hyper::body::to_bytes(response.into_body()).await?;
+
+        let text = String::from_utf8(data.to_vec()).unwrap();
+        if status.is_success() {
+            if text.is_empty() {
+                Ok(Value::Null)
+            } else {
+                let value: Value = serde_json::from_str(&text)?;
+                Ok(value)
+            }
+        } else {
+            Err(Error::from(HttpError::new(status, text)))
+        }
+    }
+
+    async fn api_request(&self, req: Request<Body>) -> Result<Value, Error> {
+        self.client
+            .request(req)
+            .map_err(Error::from)
+            .and_then(Self::api_response)
+            .await
+    }
+
+    fn request_builder(
+        &self,
+        method: &str,
+        path: &str,
+        data: Option<Value>,
+    ) -> Result<Request<Body>, Error> {
+        let path = path.trim_matches('/');
+        let url: Uri = format!("vsock://{}:{}/{}", self.cid, self.port, path).parse()?;
+
+        let make_builder = |content_type: &str, url: &Uri| {
+            let mut builder = Request::builder()
+                .method(method)
+                .uri(url)
+                .header(hyper::header::CONTENT_TYPE, content_type);
+            if let Some(auth) = &self.auth {
+                builder = builder.header(hyper::header::AUTHORIZATION, auth);
+            }
+            builder
+        };
+
+        if let Some(data) = data {
+            if method == "POST" {
+                let builder = make_builder("application/json", &url);
+                let request = builder.body(Body::from(data.to_string()))?;
+                return Ok(request);
+            } else {
+                let query = pbs_tools::json::json_object_to_query(data)?;
+                let url: Uri =
+                    format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
+                let builder = make_builder("application/x-www-form-urlencoded", &url);
+                let request = builder.body(Body::empty())?;
+                return Ok(request);
+            }
+        }
+
+        let builder = make_builder("application/x-www-form-urlencoded", &url);
+        let request = builder.body(Body::empty())?;
+
+        Ok(request)
+    }
+}
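+
+// Usage sketch (CID and path are illustrative): the file-restore client talks
+// to the restore daemon inside the helper VM roughly like this:
+//
+//     let client = VsockClient::new(cid, DEFAULT_VSOCK_PORT, Some(auth));
+//     let status = client.get("api2/json/status", None).await?;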
diff --git a/pbs-tools/Cargo.toml b/pbs-tools/Cargo.toml
index ab80666c5850cf3184081a2aec4db2c525983188..e149f04830309573f280cff448a52b3572ae6b84 100644 (file)
@@ -9,6 +9,10 @@ description = "common tools used throughout pbs"
 [dependencies]
 anyhow = "1.0"
 base64 = "0.12"
+bytes = "1.0"
+crc32fast = "1"
+endian_trait = { version = "0.6", features = ["arrays"] }
+flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
 lazy_static = "1.4"
@@ -21,9 +25,10 @@ regex = "1.2"
 serde = "1.0"
 serde_json = "1.0"
 # rt-multi-thread is required for block_in_place
-tokio = { version = "1.6", features = [ "rt", "rt-multi-thread", "sync" ] }
+tokio = { version = "1.6", features = [ "fs", "io-util", "rt", "rt-multi-thread", "sync" ] }
 url = "2.1"
+walkdir = "2"
 
-proxmox = { version = "0.11.5", default-features = false, features = [] }
+proxmox = { version = "0.11.5", default-features = false, features = [ "tokio" ] }
 
 pbs-buildcfg = { path = "../pbs-buildcfg" }
diff --git a/pbs-tools/src/acl.rs b/pbs-tools/src/acl.rs
new file mode 100644 (file)
index 0000000..80e2781
--- /dev/null
@@ -0,0 +1,334 @@
+//! Implementation of the calls to handle POSIX access control lists
+
+// see C header file <sys/acl.h> for reference
+extern crate libc;
+
+use std::ffi::CString;
+use std::marker::PhantomData;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::RawFd;
+use std::path::Path;
+use std::ptr;
+
+use libc::{c_char, c_int, c_uint, c_void};
+use nix::errno::Errno;
+use nix::NixPath;
+
+// from: acl/include/acl.h
+pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
+// acl_perm_t values
+pub type ACLPerm = c_uint;
+pub const ACL_READ: ACLPerm     = 0x04;
+pub const ACL_WRITE: ACLPerm    = 0x02;
+pub const ACL_EXECUTE: ACLPerm  = 0x01;
+
+// acl_tag_t values
+pub type ACLTag = c_int;
+pub const ACL_UNDEFINED_TAG: ACLTag = 0x00;
+pub const ACL_USER_OBJ: ACLTag      = 0x01;
+pub const ACL_USER: ACLTag          = 0x02;
+pub const ACL_GROUP_OBJ: ACLTag     = 0x04;
+pub const ACL_GROUP: ACLTag         = 0x08;
+pub const ACL_MASK: ACLTag          = 0x10;
+pub const ACL_OTHER: ACLTag         = 0x20;
+
+// acl_type_t values
+pub type ACLType = c_uint;
+pub const ACL_TYPE_ACCESS: ACLType  = 0x8000;
+pub const ACL_TYPE_DEFAULT: ACLType = 0x4000;
+
+// acl entry constants
+pub const ACL_FIRST_ENTRY: c_int = 0;
+pub const ACL_NEXT_ENTRY: c_int  = 1;
+
+// acl to extended attribute names constants
+// from: acl/include/acl_ea.h
+pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
+pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
+pub const ACL_EA_VERSION: u32 = 0x0002;
+
+#[link(name = "acl")]
+extern "C" {
+    fn acl_get_file(path: *const c_char, acl_type: ACLType) -> *mut c_void;
+    fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
+    fn acl_get_fd(fd: RawFd) -> *mut c_void;
+    fn acl_get_entry(acl: *const c_void, entry_id: c_int, entry: *mut *mut c_void) -> c_int;
+    fn acl_create_entry(acl: *mut *mut c_void, entry: *mut *mut c_void) -> c_int;
+    fn acl_get_tag_type(entry: *mut c_void, tag_type: *mut ACLTag) -> c_int;
+    fn acl_set_tag_type(entry: *mut c_void, tag_type: ACLTag) -> c_int;
+    fn acl_get_permset(entry: *mut c_void, permset: *mut *mut c_void) -> c_int;
+    fn acl_clear_perms(permset: *mut c_void) -> c_int;
+    fn acl_get_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
+    fn acl_add_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
+    fn acl_get_qualifier(entry: *mut c_void) -> *mut c_void;
+    fn acl_set_qualifier(entry: *mut c_void, qualifier: *const c_void) -> c_int;
+    fn acl_init(count: c_int) -> *mut c_void;
+    fn acl_valid(ptr: *const c_void) -> c_int;
+    fn acl_free(ptr: *mut c_void) -> c_int;
+}
+
+#[derive(Debug)]
+pub struct ACL {
+    ptr: *mut c_void,
+}
+
+impl Drop for ACL {
+    fn drop(&mut self) {
+        let ret = unsafe { acl_free(self.ptr) };
+        if ret != 0 {
+            panic!("invalid pointer encountered while dropping ACL - {}", Errno::last());
+        }
+    }
+}
+
+impl ACL {
+    pub fn init(count: usize) -> Result<ACL, nix::errno::Errno> {
+        let ptr = unsafe { acl_init(count as i32 as c_int) };
+        if ptr.is_null() {
+            return Err(Errno::last());
+        }
+
+        Ok(ACL { ptr })
+    }
+
+    pub fn get_file<P: AsRef<Path>>(path: P, acl_type: ACLType) -> Result<ACL, nix::errno::Errno> {
+        let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
+        let ptr = unsafe { acl_get_file(path_cstr.as_ptr(), acl_type) };
+        if ptr.is_null() {
+            return Err(Errno::last());
+        }
+        Ok(ACL { ptr })
+    }
+
+    pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
+        path.with_nix_path(|path| {
+            Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
+        })?
+        .map(drop)
+    }
+
+    pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
+        let ptr = unsafe { acl_get_fd(fd) };
+        if ptr.is_null() {
+            return Err(Errno::last());
+        }
+
+        Ok(ACL { ptr })
+    }
+
+    pub fn create_entry(&mut self) -> Result<ACLEntry, nix::errno::Errno> {
+        let mut ptr = ptr::null_mut() as *mut c_void;
+        let res = unsafe { acl_create_entry(&mut self.ptr, &mut ptr) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        Ok(ACLEntry {
+            ptr,
+            _phantom: PhantomData,
+        })
+    }
+
+    pub fn is_valid(&self) -> bool {
+        let res = unsafe { acl_valid(self.ptr) };
+        if res == 0 {
+            return true;
+        }
+
+        false
+    }
+
+    pub fn entries(self) -> ACLEntriesIterator {
+        ACLEntriesIterator {
+            acl: self,
+            current: ACL_FIRST_ENTRY,
+        }
+    }
+
+    pub fn add_entry_full(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64)
+        -> Result<(), nix::errno::Errno>
+    {
+        let mut entry = self.create_entry()?;
+        entry.set_tag_type(tag)?;
+        if let Some(qualifier) = qualifier {
+            entry.set_qualifier(qualifier)?;
+        }
+        entry.set_permissions(permissions)?;
+
+        Ok(())
+    }
+}
+
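+// Sketch: building a minimal access ACL and applying it (path illustrative):
+//
+//     let mut acl = ACL::init(3)?;
+//     acl.add_entry_full(ACL_USER_OBJ, None, (ACL_READ | ACL_WRITE) as u64)?;
+//     acl.add_entry_full(ACL_GROUP_OBJ, None, ACL_READ as u64)?;
+//     acl.add_entry_full(ACL_OTHER, None, 0)?;
+//     if acl.is_valid() {
+//         acl.set_file("some/file", ACL_TYPE_ACCESS)?;
+//     }
+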
+#[derive(Debug)]
+pub struct ACLEntry<'a> {
+    ptr: *mut c_void,
+    _phantom: PhantomData<&'a mut ()>,
+}
+
+impl<'a> ACLEntry<'a> {
+    pub fn get_tag_type(&self) -> Result<ACLTag, nix::errno::Errno> {
+        let mut tag = ACL_UNDEFINED_TAG;
+        let res = unsafe { acl_get_tag_type(self.ptr, &mut tag as *mut ACLTag) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        Ok(tag)
+    }
+
+    pub fn set_tag_type(&mut self, tag: ACLTag) -> Result<(), nix::errno::Errno> {
+        let res = unsafe { acl_set_tag_type(self.ptr, tag) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        Ok(())
+    }
+
+    pub fn get_permissions(&self) -> Result<u64, nix::errno::Errno> {
+        let mut permissions = 0;
+        let mut permset = ptr::null_mut() as *mut c_void;
+        let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
+            res = unsafe { acl_get_perm(permset, perm) };
+            if res < 0 {
+                return Err(Errno::last());
+            }
+
+            if res == 1 {
+                permissions |= perm as u64;
+            }
+        }
+
+        Ok(permissions)
+    }
+
+    pub fn set_permissions(&mut self, permissions: u64) -> Result<u64, nix::errno::Errno> {
+        let mut permset = ptr::null_mut() as *mut c_void;
+        let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        res = unsafe { acl_clear_perms(permset) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
+            if permissions & perm as u64 == perm as u64 {
+                res = unsafe { acl_add_perm(permset, perm) };
+                if res < 0 {
+                    return Err(Errno::last());
+                }
+            }
+        }
+
+        Ok(permissions)
+    }
+
+    pub fn get_qualifier(&self) -> Result<u64, nix::errno::Errno> {
+        let qualifier = unsafe { acl_get_qualifier(self.ptr) };
+        if qualifier.is_null() {
+            return Err(Errno::last());
+        }
+        let result = unsafe { *(qualifier as *const u32) as u64 };
+        let ret = unsafe { acl_free(qualifier) };
+        if ret != 0 {
+            panic!("invalid pointer encountered while dropping ACL qualifier - {}", Errno::last());
+        }
+
+        Ok(result)
+    }
+
+    pub fn set_qualifier(&mut self, qualifier: u64) -> Result<(), nix::errno::Errno> {
+        let val = qualifier as u32;
+        let val_ptr: *const u32 = &val;
+        let res = unsafe { acl_set_qualifier(self.ptr, val_ptr as *const c_void) };
+        if res < 0 {
+            return Err(Errno::last());
+        }
+
+        Ok(())
+    }
+}
+
+#[derive(Debug)]
+pub struct ACLEntriesIterator {
+    acl: ACL,
+    current: c_int,
+}
+
+impl<'a> Iterator for &'a mut ACLEntriesIterator {
+    type Item = ACLEntry<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let mut entry_ptr = ptr::null_mut();
+        let res = unsafe { acl_get_entry(self.acl.ptr, self.current, &mut entry_ptr) };
+        self.current = ACL_NEXT_ENTRY;
+        if res == 1 {
+            return Some(ACLEntry { ptr: entry_ptr, _phantom: PhantomData });
+        }
+
+        None
+    }
+}
+
+/// Helper to transform a `PxarEntry`'s user mode to ACL permissions.
+pub fn mode_user_to_acl_permissions(mode: u64) -> u64 {
+    (mode >> 6) & 7
+}
+
+/// Helper to transform a `PxarEntry`'s group mode to ACL permissions.
+pub fn mode_group_to_acl_permissions(mode: u64) -> u64 {
+    (mode >> 3) & 7
+}
+
+/// Helper to transform a `PxarEntry`'s other mode to ACL permissions.
+pub fn mode_other_to_acl_permissions(mode: u64) -> u64 {
+    mode & 7
+}
+
+/// Buffer to compose ACLs as extended attribute.
+pub struct ACLXAttrBuffer {
+    buffer: Vec<u8>,
+}
+
+impl ACLXAttrBuffer {
+    /// Create a new buffer to write ACLs as extended attribute.
+    ///
+    /// `version` defines the ACL_EA_VERSION found in acl/include/acl_ea.h
+    pub fn new(version: u32) -> Self {
+        let mut buffer = Vec::new();
+        buffer.extend_from_slice(&version.to_le_bytes());
+        Self { buffer }
+    }
+
+    /// Add ACL entry to buffer.
+    pub fn add_entry(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64) {
+        self.buffer.extend_from_slice(&(tag as u16).to_le_bytes());
+        self.buffer.extend_from_slice(&(permissions as u16).to_le_bytes());
+        match qualifier {
+            Some(qualifier) => self.buffer.extend_from_slice(&(qualifier as u32).to_le_bytes()),
+            None => self.buffer.extend_from_slice(&ACL_UNDEFINED_ID.to_le_bytes()),
+        }
+    }
+
+    /// Length of the buffer in bytes.
+    pub fn len(&self) -> usize {
+        self.buffer.len()
+    }
+
+    /// The buffer always contains at least the version, so it is never empty.
+    pub const fn is_empty(&self) -> bool { false }
+
+    /// Borrow raw buffer as mut slice.
+    pub fn as_mut_slice(&mut self) -> &mut [u8] {
+        self.buffer.as_mut_slice()
+    }
+}
diff --git a/pbs-tools/src/compression.rs b/pbs-tools/src/compression.rs
new file mode 100644 (file)
index 0000000..e2b6d79
--- /dev/null
@@ -0,0 +1,194 @@
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use anyhow::Error;
+use bytes::Bytes;
+use flate2::{Compress, Compression, FlushCompress};
+use futures::ready;
+use futures::stream::Stream;
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+
+use proxmox::io_format_err;
+use proxmox::tools::byte_buffer::ByteBuffer;
+
+const BUFFER_SIZE: usize = 8192;
+
+pub enum Level {
+    Fastest,
+    Best,
+    Default,
+    Precise(u32),
+}
+
+#[derive(Eq, PartialEq)]
+enum EncoderState {
+    Reading,
+    Writing,
+    Flushing,
+    Finished,
+}
+
+pub struct DeflateEncoder<T> {
+    inner: T,
+    compressor: Compress,
+    buffer: ByteBuffer,
+    input_buffer: Bytes,
+    state: EncoderState,
+}
+
+impl<T> DeflateEncoder<T> {
+    pub fn new(inner: T) -> Self {
+        Self::with_quality(inner, Level::Default)
+    }
+
+    pub fn with_quality(inner: T, level: Level) -> Self {
+        let level = match level {
+            Level::Fastest => Compression::fast(),
+            Level::Best => Compression::best(),
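+            // note: a fixed level 3 is used as the default here rather than
+            // flate2's built-in default of 6, trading ratio for speed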
+            Level::Default => Compression::new(3),
+            Level::Precise(val) => Compression::new(val),
+        };
+
+        Self {
+            inner,
+            compressor: Compress::new(level, false),
+            buffer: ByteBuffer::with_capacity(BUFFER_SIZE),
+            input_buffer: Bytes::new(),
+            state: EncoderState::Reading,
+        }
+    }
+
+    pub fn total_in(&self) -> u64 {
+        self.compressor.total_in()
+    }
+
+    pub fn total_out(&self) -> u64 {
+        self.compressor.total_out()
+    }
+
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
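+    /// Feed `inbuf` to the compressor, appending any output to `self.buffer`.
+    /// Returns the number of input bytes consumed and the compressor status.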
+    fn encode(
+        &mut self,
+        inbuf: &[u8],
+        flush: FlushCompress,
+    ) -> Result<(usize, flate2::Status), io::Error> {
+        let old_in = self.compressor.total_in();
+        let old_out = self.compressor.total_out();
+        let res = self
+            .compressor
+            .compress(&inbuf[..], self.buffer.get_free_mut_slice(), flush)?;
+        let new_in = (self.compressor.total_in() - old_in) as usize;
+        let new_out = (self.compressor.total_out() - old_out) as usize;
+        self.buffer.add_size(new_out);
+
+        Ok((new_in, res))
+    }
+}
+
+impl DeflateEncoder<Vec<u8>> {
+    // assume small files
+    pub async fn compress_vec<R>(&mut self, reader: &mut R, size_hint: usize) -> Result<(), Error>
+    where
+        R: AsyncRead + Unpin,
+    {
+        let mut buffer = Vec::with_capacity(size_hint);
+        reader.read_to_end(&mut buffer).await?;
+        self.inner.reserve(size_hint); // should be enough since we expect small files
+        self.compressor.compress_vec(&buffer[..], &mut self.inner, FlushCompress::Finish)?;
+        Ok(())
+    }
+}
+
+impl<T: AsyncWrite + Unpin> DeflateEncoder<T> {
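+    /// Read all data from `reader`, compress it, and write the result to the
+    /// inner writer, flushing the compressor once EOF is reached.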
+    pub async fn compress<R>(&mut self, reader: &mut R) -> Result<(), Error>
+    where
+        R: AsyncRead + Unpin,
+    {
+        let mut buffer = ByteBuffer::with_capacity(BUFFER_SIZE);
+        let mut eof = false;
+        loop {
+            if !eof && !buffer.is_full() {
+                let read = buffer.read_from_async(reader).await?;
+                if read == 0 {
+                    eof = true;
+                }
+            }
+            let (read, _res) = self.encode(&buffer[..], FlushCompress::None)?;
+            buffer.consume(read);
+
+            self.inner.write_all(&self.buffer[..]).await?;
+            self.buffer.clear();
+
+            if buffer.is_empty() && eof {
+                break;
+            }
+        }
+
+        loop {
+            let (_read, res) = self.encode(&[][..], FlushCompress::Finish)?;
+            self.inner.write_all(&self.buffer[..]).await?;
+            self.buffer.clear();
+            if res == flate2::Status::StreamEnd {
+                break;
+            }
+        }
+
+        Ok(())
+    }
+}
+
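+// Stream adapter: `Reading` pulls the next chunk from the inner stream,
+// `Writing` feeds it to the compressor and yields full output buffers,
+// `Flushing` drains the compressor and `Finished` terminates the stream.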
+impl<T, O> Stream for DeflateEncoder<T>
+where
+    T: Stream<Item = Result<O, io::Error>> + Unpin,
+    O: Into<Bytes>
+{
+    type Item = Result<Bytes, io::Error>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let this = self.get_mut();
+
+        loop {
+            match this.state {
+                EncoderState::Reading => {
+                    if let Some(res) = ready!(Pin::new(&mut this.inner).poll_next(cx)) {
+                        let buf = res?;
+                        this.input_buffer = buf.into();
+                        this.state = EncoderState::Writing;
+                    } else {
+                        this.state = EncoderState::Flushing;
+                    }
+                }
+                EncoderState::Writing => {
+                    if this.input_buffer.is_empty() {
+                        return Poll::Ready(Some(Err(io_format_err!("empty input during write"))));
+                    }
+                    let mut buf = this.input_buffer.split_off(0);
+                    let (read, res) = this.encode(&buf[..], FlushCompress::None)?;
+                    this.input_buffer = buf.split_off(read);
+                    if this.input_buffer.is_empty() {
+                        this.state = EncoderState::Reading;
+                    }
+                    if this.buffer.is_full() || res == flate2::Status::BufError {
+                        let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
+                        return Poll::Ready(Some(Ok(bytes.into())));
+                    }
+                }
+                EncoderState::Flushing => {
+                    let (_read, res) = this.encode(&[][..], FlushCompress::Finish)?;
+                    if !this.buffer.is_empty() {
+                        let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
+                        return Poll::Ready(Some(Ok(bytes.into())));
+                    }
+                    if res == flate2::Status::StreamEnd {
+                        this.state = EncoderState::Finished;
+                    }
+                }
+                EncoderState::Finished => return Poll::Ready(None),
+            }
+        }
+    }
+}
index 9f8325e168c951badc3c5ba5defd21919b34d7bc..1d2699cac75f5e696b23d525250d0e6ebf99524b 100644 (file)
@@ -1,13 +1,15 @@
 //! File system helper utilities.
 
 use std::borrow::{Borrow, BorrowMut};
+use std::collections::HashMap;
+use std::hash::BuildHasher;
 use std::ops::{Deref, DerefMut};
 use std::os::unix::io::{AsRawFd, RawFd};
 
 use anyhow::{bail, format_err, Error};
 use nix::dir;
 use nix::dir::Dir;
-use nix::fcntl::OFlag;
+use nix::fcntl::{AtFlags, OFlag};
 use nix::sys::stat::Mode;
 
 use regex::Regex;
@@ -344,3 +346,58 @@ fn do_lock_dir_noblock(
 
     Ok(handle)
 }
+
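+/// Shell completion helper: list the entries of the directory `arg` points
+/// to (or of its parent if `arg` is not a directory), appending a trailing
+/// slash to directories.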
+pub fn complete_file_name<S>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String>
+where
+    S: BuildHasher,
+{
+    let mut result = vec![];
+
+    let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });
+
+    let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
+        Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
+        Err(_) => false,
+    };
+
+    if !is_dir {
+        if let Some(parent) = dirname.parent() {
+            dirname = parent.to_owned();
+        }
+    }
+
+    let mut dir =
+        match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
+            Ok(d) => d,
+            Err(_) => return result,
+        };
+
+    for item in dir.iter() {
+        if let Ok(entry) = item {
+            if let Ok(name) = entry.file_name().to_str() {
+                if name == "." || name == ".." {
+                    continue;
+                }
+                let mut newpath = dirname.clone();
+                newpath.push(name);
+
+                if let Ok(stat) =
+                    nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
+                {
+                    if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
+                        newpath.push("");
+                        if let Some(newpath) = newpath.to_str() {
+                            result.push(newpath.to_owned());
+                        }
+                        continue;
+                    }
+                }
+                if let Some(newpath) = newpath.to_str() {
+                    result.push(newpath.to_owned());
+                }
+            }
+        }
+    }
+
+    result
+}
index c9d95dd9dcd1fdc158699a619780778b525654c3..075b066bf9d697b942a9c8b1ee7c32f55c89dec1 100644 (file)
@@ -1,11 +1,14 @@
+pub mod acl;
 pub mod auth;
 pub mod borrow;
 pub mod broadcast_future;
 pub mod cert;
+pub mod compression;
 pub mod format;
 pub mod fs;
 pub mod json;
 pub mod nom;
+pub mod ops;
 pub mod percent_encoding;
 pub mod process_locker;
 pub mod sha;
@@ -13,6 +16,8 @@ pub mod str;
 pub mod sync;
 pub mod ticket;
 pub mod tokio;
+pub mod xattr;
+pub mod zip;
 
 mod command;
 pub use command::{command_output, command_output_as_string, run_command};
diff --git a/pbs-tools/src/ops.rs b/pbs-tools/src/ops.rs
new file mode 100644 (file)
index 0000000..49d0212
--- /dev/null
@@ -0,0 +1,12 @@
+//! std::ops extensions
+
+/// Modeled after the nightly `std::ops::ControlFlow`.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum ControlFlow<B, C = ()> {
+    Continue(C),
+    Break(B),
+}
+
+impl<B> ControlFlow<B> {
+    pub const CONTINUE: ControlFlow<B, ()> = ControlFlow::Continue(());
+}
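+
+// Typical use: a visitor callback returns ControlFlow::Break(value) to stop
+// iteration early and ControlFlow::CONTINUE to keep going.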
index 9b2d66ef01bdb54f2fd26ebe06204941f47825d7..387fe5fe18e34f17db6102c74536ed246c6ba08d 100644 (file)
@@ -15,3 +15,13 @@ pub fn join<S: Borrow<str>>(data: &[S], sep: char) -> String {
     list
 }
 
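+/// Strip leading and trailing ASCII whitespace from a byte slice,
+/// e.g. b"  foo \n" becomes b"foo".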
+pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
+    let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
+        Some(n) => &line[n..],
+        None => return &[],
+    };
+    match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
+        Some(n) => &line[..(line.len() - n)],
+        None => &[],
+    }
+}
diff --git a/pbs-tools/src/xattr.rs b/pbs-tools/src/xattr.rs
new file mode 100644 (file)
index 0000000..500af32
--- /dev/null
@@ -0,0 +1,240 @@
+//! Wrapper functions for the libc xattr calls
+
+use std::ffi::CStr;
+use std::os::unix::io::RawFd;
+
+use nix::errno::Errno;
+
+use proxmox::c_str;
+use proxmox::tools::vec;
+
+/// `"security.capability"` as a CStr to avoid typos.
+///
+/// This cannot be `const` until `const_cstr_unchecked` is stable.
+#[inline]
+pub fn xattr_name_fcaps() -> &'static CStr {
+    c_str!("security.capability")
+}
+
+/// `"system.posix_acl_access"` as a CStr to avoid typos.
+///
+/// This cannot be `const` until `const_cstr_unchecked` is stable.
+#[inline]
+pub fn xattr_acl_access() -> &'static CStr {
+    c_str!("system.posix_acl_access")
+}
+
+/// `"system.posix_acl_default"` as a CStr to avoid typos.
+///
+/// This cannot be `const` until `const_cstr_unchecked` is stable.
+#[inline]
+pub fn xattr_acl_default() -> &'static CStr {
+    c_str!("system.posix_acl_default")
+}
+
+/// Result of `flistxattr`, allows iterating over the attributes as a list of `&CStr`s.
+///
+/// Listing xattrs produces a zero-separated list of names, inherently making them available as
+/// `&CStr` already, so we make use of this fact and reflect it in the interface.
+pub struct ListXAttr {
+    data: Vec<u8>,
+}
+
+impl ListXAttr {
+    fn new(data: Vec<u8>) -> Self {
+        Self { data }
+    }
+}
+
+impl<'a> IntoIterator for &'a ListXAttr {
+    type Item = &'a CStr;
+    type IntoIter = ListXAttrIter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        ListXAttrIter {
+            data: &self.data,
+            at: 0,
+        }
+    }
+}
+
+/// Iterator over the extended attribute entries in a `ListXAttr`.
+pub struct ListXAttrIter<'a> {
+    data: &'a [u8],
+    at: usize,
+}
+
+impl<'a> Iterator for ListXAttrIter<'a> {
+    type Item = &'a CStr;
+
+    fn next(&mut self) -> Option<&'a CStr> {
+        let data = &self.data[self.at..];
+        let next = data.iter().position(|b| *b == 0)? + 1;
+        self.at += next;
+        Some(unsafe { CStr::from_bytes_with_nul_unchecked(&data[..next]) })
+    }
+}
+
+/// Return a list of extended attributes accessible as an iterator over items of type `&CStr`.
+pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
+    // Initial buffer size for the attribute list, if content does not fit
+    // it gets dynamically increased until big enough.
+    let mut size = 256;
+    let mut buffer = vec::undefined(size);
+    let mut bytes = unsafe {
+        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
+    };
+    while bytes < 0 {
+        let err = Errno::last();
+        match err {
+            Errno::ERANGE => {
+                // Buffer was not big enough to fit the list, retry with double the size
+                size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
+            },
+            _ => return Err(err),
+        }
+        // Retry to read the list with new buffer
+        buffer.resize(size, 0);
+        bytes = unsafe {
+            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
+        };
+    }
+    buffer.truncate(bytes as usize);
+
+    Ok(ListXAttr::new(buffer))
+}
+
+/// Get an extended attribute by name.
+///
+/// Extended attributes may not contain zeroes, which we enforce in the API by using a `&CStr`
+/// type.
+pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
+    let mut size = 256;
+    let mut buffer = vec::undefined(size);
+    let mut bytes = unsafe {
+        libc::fgetxattr(fd, name.as_ptr(), buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
+    };
+    while bytes < 0 {
+        let err = Errno::last();
+        match err {
+            Errno::ERANGE => {
+                // Buffer was not big enough to fit the value, retry with double the size
+                size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
+            },
+            _ => return Err(err),
+        }
+        buffer.resize(size, 0);
+        bytes = unsafe {
+            libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
+        };
+    }
+    buffer.resize(bytes as usize, 0);
+
+    Ok(buffer)
+}
+
+/// Set an extended attribute on a file descriptor.
+pub fn fsetxattr(fd: RawFd, name: &CStr, data: &[u8]) -> Result<(), nix::errno::Errno> {
+    let flags = 0 as libc::c_int;
+    let result = unsafe {
+        libc::fsetxattr(fd, name.as_ptr(), data.as_ptr() as *const libc::c_void, data.len(), flags)
+    };
+    if result < 0 {
+        return Err(Errno::last());
+    }
+
+    Ok(())
+}
+
+pub fn fsetxattr_fcaps(fd: RawFd, fcaps: &[u8]) -> Result<(), nix::errno::Errno> {
+    // TODO casync checks and removes capabilities if they are set
+    fsetxattr(fd, xattr_name_fcaps(), fcaps)
+}
+
+pub fn is_security_capability(name: &CStr) -> bool {
+    name.to_bytes() == xattr_name_fcaps().to_bytes()
+}
+
+pub fn is_acl(name: &CStr) -> bool {
+    name.to_bytes() == xattr_acl_access().to_bytes()
+    || name.to_bytes() == xattr_acl_default().to_bytes()
+}
+
+/// Check if the passed name buffer starts with a valid xattr namespace prefix
+/// and is within the length limit of 255 bytes
+pub fn is_valid_xattr_name(c_name: &CStr) -> bool {
+    let name = c_name.to_bytes();
+    if name.is_empty() || name.len() > 255 {
+        return false;
+    }
+    if name.starts_with(b"user.") || name.starts_with(b"trusted.") {
+        return true;
+    }
+    // samba saves windows ACLs there
+    if name == b"security.NTACL" {
+        return true;
+    }
+    is_security_capability(c_name)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use std::ffi::CString;
+    use std::fs::OpenOptions;
+    use std::os::unix::io::AsRawFd;
+
+    use nix::errno::Errno;
+
+    use proxmox::c_str;
+
+    #[test]
+    fn test_fsetxattr_fgetxattr() {
+        let path = "./tests/xattrs.txt";
+        let file = OpenOptions::new()
+            .write(true)
+            .create(true)
+            .open(&path)
+            .unwrap();
+
+        let fd = file.as_raw_fd();
+
+        let mut name = b"user.".to_vec();
+        for _ in 0..260 {
+            name.push(b'a');
+        }
+
+        let invalid_name = CString::new(name).unwrap();
+
+        assert!(fsetxattr(fd, c_str!("user.attribute0"), b"value0").is_ok());
+        assert!(fsetxattr(fd, c_str!("user.empty"), b"").is_ok());
+
+        if nix::unistd::Uid::current() != nix::unistd::ROOT {
+            assert_eq!(fsetxattr(fd, c_str!("trusted.attribute0"), b"value0"), Err(Errno::EPERM));
+        }
+
+        assert_eq!(fsetxattr(fd, c_str!("garbage.attribute0"), b"value"), Err(Errno::EOPNOTSUPP));
+        assert_eq!(fsetxattr(fd, &invalid_name, b"err"), Err(Errno::ERANGE));
+
+        let v0 = fgetxattr(fd, c_str!("user.attribute0")).unwrap();
+        let v1 = fgetxattr(fd, c_str!("user.empty")).unwrap();
+
+        assert_eq!(v0, b"value0".as_ref());
+        assert_eq!(v1, b"".as_ref());
+        assert_eq!(fgetxattr(fd, c_str!("user.attribute1")), Err(Errno::ENODATA));
+
+        std::fs::remove_file(&path).unwrap();
+    }
+
+    #[test]
+    fn test_is_valid_xattr_name() {
+        let too_long = CString::new(vec![b'a'; 265]).unwrap();
+
+        assert!(!is_valid_xattr_name(&too_long));
+        assert!(!is_valid_xattr_name(c_str!("system.attr")));
+        assert!(is_valid_xattr_name(c_str!("user.attr")));
+        assert!(is_valid_xattr_name(c_str!("trusted.attr")));
+        assert!(is_valid_xattr_name(super::xattr_name_fcaps()));
+    }
+}
diff --git a/pbs-tools/src/zip.rs b/pbs-tools/src/zip.rs
new file mode 100644 (file)
index 0000000..c0bf76c
--- /dev/null
@@ -0,0 +1,671 @@
+//! ZIP Helper
+//!
+//! Provides an interface to create a ZIP file from ZipEntries.
+//! For a more detailed description of the ZIP format, see:
+//! https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
+
+use std::convert::TryInto;
+use std::ffi::OsString;
+use std::io;
+use std::mem::size_of;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Component, Path, PathBuf};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::SystemTime;
+
+use anyhow::{format_err, Error, Result};
+use endian_trait::Endian;
+use futures::ready;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
+
+use crc32fast::Hasher;
+use proxmox::tools::time::gmtime;
+
+use crate::compression::{DeflateEncoder, Level};
+
+const LOCAL_FH_SIG: u32 = 0x04034B50;
+const LOCAL_FF_SIG: u32 = 0x08074B50;
+const CENTRAL_DIRECTORY_FH_SIG: u32 = 0x02014B50;
+const END_OF_CENTRAL_DIR: u32 = 0x06054B50;
+const VERSION_NEEDED: u16 = 0x002d;
+const VERSION_MADE_BY: u16 = 0x032d;
+
+const ZIP64_EOCD_RECORD: u32 = 0x06064B50;
+const ZIP64_EOCD_LOCATOR: u32 = 0x07064B50;
+
+// bits for date:
+// 0-4: day of the month (1-31)
+// 5-8: month (1 = jan, etc.)
+// 9-15: year offset from 1980
+//
+// bits for time:
+// 0-4: second / 2
+// 5-10: minute (0-59)
+// 11-15: hour (0-23)
+//
+// see https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
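+//
+// e.g. 2021-07-19T10:50:18Z maps to date 0x52F3 and time 0x5649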
+fn epoch_to_dos(epoch: i64) -> (u16, u16) {
+    let gmtime = match gmtime(epoch) {
+        Ok(gmtime) => gmtime,
+        Err(_) => return (0, 0),
+    };
+
+    let seconds = (gmtime.tm_sec / 2) & 0b11111;
+    let minutes = gmtime.tm_min & 0b111111;
+    let hours = gmtime.tm_hour & 0b11111;
+    let time: u16 = ((hours << 11) | (minutes << 5) | (seconds)) as u16;
+
+    let date: u16 = if gmtime.tm_year > (2107 - 1900) || gmtime.tm_year < (1980 - 1900) {
+        0
+    } else {
+        let day = gmtime.tm_mday & 0b11111;
+        let month = (gmtime.tm_mon + 1) & 0b1111;
+        let year = (gmtime.tm_year + 1900 - 1980) & 0b1111111;
+        ((year << 9) | (month << 5) | (day)) as u16
+    };
+
+    (date, time)
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct Zip64Field {
+    field_type: u16,
+    field_size: u16,
+    uncompressed_size: u64,
+    compressed_size: u64,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct Zip64FieldWithOffset {
+    field_type: u16,
+    field_size: u16,
+    uncompressed_size: u64,
+    compressed_size: u64,
+    offset: u64,
+    start_disk: u32,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct LocalFileHeader {
+    signature: u32,
+    version_needed: u16,
+    flags: u16,
+    compression: u16,
+    time: u16,
+    date: u16,
+    crc32: u32,
+    compressed_size: u32,
+    uncompressed_size: u32,
+    filename_len: u16,
+    extra_field_len: u16,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct LocalFileFooter {
+    signature: u32,
+    crc32: u32,
+    compressed_size: u64,
+    uncompressed_size: u64,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct CentralDirectoryFileHeader {
+    signature: u32,
+    version_made_by: u16,
+    version_needed: u16,
+    flags: u16,
+    compression: u16,
+    time: u16,
+    date: u16,
+    crc32: u32,
+    compressed_size: u32,
+    uncompressed_size: u32,
+    filename_len: u16,
+    extra_field_len: u16,
+    comment_len: u16,
+    start_disk: u16,
+    internal_flags: u16,
+    external_flags: u32,
+    offset: u32,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct EndOfCentralDir {
+    signature: u32,
+    disk_number: u16,
+    start_disk: u16,
+    disk_record_count: u16,
+    total_record_count: u16,
+    directory_size: u32,
+    directory_offset: u32,
+    comment_len: u16,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct Zip64EOCDRecord {
+    signature: u32,
+    field_size: u64,
+    version_made_by: u16,
+    version_needed: u16,
+    disk_number: u32,
+    disk_number_central_dir: u32,
+    disk_record_count: u64,
+    total_record_count: u64,
+    directory_size: u64,
+    directory_offset: u64,
+}
+
+#[derive(Endian)]
+#[repr(C, packed)]
+struct Zip64EOCDLocator {
+    signature: u32,
+    disk_number: u32,
+    offset: u64,
+    disk_count: u32,
+}
+
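+/// Serialize `data` as its little-endian in-memory representation and write
+/// the raw bytes to `output` (all callers pass `#[repr(C, packed)]` structs).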
+async fn write_struct<E, T>(output: &mut T, data: E) -> io::Result<()>
+where
+    T: AsyncWrite + ?Sized + Unpin,
+    E: Endian,
+{
+    let data = data.to_le();
+
+    let data = unsafe {
+        std::slice::from_raw_parts(
+            &data as *const E as *const u8,
+            core::mem::size_of_val(&data),
+        )
+    };
+    output.write_all(data).await
+}
+
+/// Represents an Entry in a ZIP File
+///
+/// used to add to a ZipEncoder
+pub struct ZipEntry {
+    filename: OsString,
+    mtime: i64,
+    mode: u16,
+    crc32: u32,
+    uncompressed_size: u64,
+    compressed_size: u64,
+    offset: u64,
+    is_file: bool,
+}
+
+impl ZipEntry {
+    /// Creates a new ZipEntry
+    ///
+    /// If is_file is false, the path will contain a trailing separator
+    /// so that the zip file understands that it is a directory.
+    pub fn new<P: AsRef<Path>>(path: P, mtime: i64, mode: u16, is_file: bool) -> Self {
+        let mut relpath = PathBuf::new();
+
+        for comp in path.as_ref().components() {
+            if let Component::Normal(_) = comp {
+                relpath.push(comp);
+            }
+        }
+
+        if !is_file {
+            relpath.push(""); // adds trailing slash
+        }
+
+        Self {
+            filename: relpath.into(),
+            crc32: 0,
+            mtime,
+            mode,
+            uncompressed_size: 0,
+            compressed_size: 0,
+            offset: 0,
+            is_file,
+        }
+    }
+
+    async fn write_local_header<W>(&self, mut buf: &mut W) -> io::Result<usize>
+    where
+        W: AsyncWrite + Unpin + ?Sized,
+    {
+        let filename = self.filename.as_bytes();
+        let filename_len = filename.len();
+        let header_size = size_of::<LocalFileHeader>();
+        let zip_field_size = size_of::<Zip64Field>();
+        let size: usize = header_size + filename_len + zip_field_size;
+
+        let (date, time) = epoch_to_dos(self.mtime);
+
+        write_struct(
+            &mut buf,
+            LocalFileHeader {
+                signature: LOCAL_FH_SIG,
+                version_needed: 0x2d,
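+                // flags bit 3: crc32/sizes follow in the data descriptor;
+                // compression method 0x8 is deflate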
+                flags: 1 << 3,
+                compression: 0x8,
+                time,
+                date,
+                crc32: 0,
+                compressed_size: 0xFFFFFFFF,
+                uncompressed_size: 0xFFFFFFFF,
+                filename_len: filename_len as u16,
+                extra_field_len: zip_field_size as u16,
+            },
+        )
+        .await?;
+
+        buf.write_all(filename).await?;
+
+        write_struct(
+            &mut buf,
+            Zip64Field {
+                field_type: 0x0001,
+                field_size: 2 * 8,
+                uncompressed_size: 0,
+                compressed_size: 0,
+            },
+        )
+        .await?;
+
+        Ok(size)
+    }
+
+    async fn write_data_descriptor<W: AsyncWrite + Unpin + ?Sized>(
+        &self,
+        mut buf: &mut W,
+    ) -> io::Result<usize> {
+        let size = size_of::<LocalFileFooter>();
+
+        write_struct(
+            &mut buf,
+            LocalFileFooter {
+                signature: LOCAL_FF_SIG,
+                crc32: self.crc32,
+                compressed_size: self.compressed_size,
+                uncompressed_size: self.uncompressed_size,
+            },
+        )
+        .await?;
+
+        Ok(size)
+    }
+
+    async fn write_central_directory_header<W: AsyncWrite + Unpin + ?Sized>(
+        &self,
+        mut buf: &mut W,
+    ) -> io::Result<usize> {
+        let filename = self.filename.as_bytes();
+        let filename_len = filename.len();
+        let header_size = size_of::<CentralDirectoryFileHeader>();
+        let zip_field_size = size_of::<Zip64FieldWithOffset>();
+        let mut size: usize = header_size + filename_len;
+
+        let (date, time) = epoch_to_dos(self.mtime);
+
+        let (compressed_size, uncompressed_size, offset, need_zip64) = if self.compressed_size
+            >= (u32::MAX as u64)
+            || self.uncompressed_size >= (u32::MAX as u64)
+            || self.offset >= (u32::MAX as u64)
+        {
+            size += zip_field_size;
+            (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
+        } else {
+            (
+                self.compressed_size as u32,
+                self.uncompressed_size as u32,
+                self.offset as u32,
+                false,
+            )
+        };
+
+        write_struct(
+            &mut buf,
+            CentralDirectoryFileHeader {
+                signature: CENTRAL_DIRECTORY_FH_SIG,
+                version_made_by: VERSION_MADE_BY,
+                version_needed: VERSION_NEEDED,
+                flags: 1 << 3,
+                compression: 0x8,
+                time,
+                date,
+                crc32: self.crc32,
+                compressed_size,
+                uncompressed_size,
+                filename_len: filename_len as u16,
+                extra_field_len: if need_zip64 { zip_field_size as u16 } else { 0 },
+                comment_len: 0,
+                start_disk: 0,
+                internal_flags: 0,
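+                // upper 16 bits: unix mode; bit 4: MS-DOS directory attribute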
+                external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
+                offset,
+            },
+        )
+        .await?;
+
+        buf.write_all(filename).await?;
+
+        if need_zip64 {
+            write_struct(
+                &mut buf,
+                Zip64FieldWithOffset {
+                    field_type: 1,
+                    field_size: 3 * 8 + 4,
+                    uncompressed_size: self.uncompressed_size,
+                    compressed_size: self.compressed_size,
+                    offset: self.offset,
+                    start_disk: 0,
+                },
+            )
+            .await?;
+        }
+
+        Ok(size)
+    }
+}
+
+// wraps an AsyncRead and calculates the crc32 hash of all data read through it
+struct HashWrapper<R> {
+    inner: R,
+    hasher: Hasher,
+}
+
+impl<R> HashWrapper<R> {
+    fn new(inner: R) -> Self {
+        Self {
+            inner,
+            hasher: Hasher::new(),
+        }
+    }
+
+    // consumes self and returns the hash and the reader
+    fn finish(self) -> (u32, R) {
+        let crc32 = self.hasher.finalize();
+        (crc32, self.inner)
+    }
+}
+
+impl<R> AsyncRead for HashWrapper<R>
+where
+    R: AsyncRead + Unpin,
+{
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> Poll<Result<(), io::Error>> {
+        let this = self.get_mut();
+        let old_len = buf.filled().len();
+        ready!(Pin::new(&mut this.inner).poll_read(cx, buf))?;
+        let new_len = buf.filled().len();
+        if new_len > old_len {
+            this.hasher.update(&buf.filled()[old_len..new_len]);
+        }
+        Poll::Ready(Ok(()))
+    }
+}
+
+/// Wraps a writer that implements AsyncWrite for creating a ZIP archive
+///
+/// This will create a ZIP archive on the fly, with files added via
+/// 'add_entry'. To finish the file, call 'finish'.
+/// Example:
+/// ```no_run
+/// use pbs_tools::zip::*;
+/// use tokio::fs::File;
+/// use anyhow::Error;
+///
+/// #[tokio::main]
+/// async fn main() -> Result<(), Error> {
+///     let target = File::create("foo.zip").await?;
+///     let mut source = File::open("foo.txt").await?;
+///
+///     let mut zip = ZipEncoder::new(target);
+///     zip.add_entry(ZipEntry::new(
+///         "foo.txt",
+///         0,
+///         0o100755,
+///         true,
+///     ), Some(source)).await?;
+///
+///     zip.finish().await?;
+///
+///     Ok(())
+/// }
+/// ```
+pub struct ZipEncoder<W>
+where
+    W: AsyncWrite + Unpin,
+{
+    byte_count: usize,
+    files: Vec<ZipEntry>,
+    target: Option<W>,
+}
+
+impl<W: AsyncWrite + Unpin> ZipEncoder<W> {
+    pub fn new(target: W) -> Self {
+        Self {
+            byte_count: 0,
+            files: Vec::new(),
+            target: Some(target),
+        }
+    }
+
+    pub async fn add_entry<R: AsyncRead + Unpin>(
+        &mut self,
+        mut entry: ZipEntry,
+        content: Option<R>,
+    ) -> Result<(), Error> {
+        let mut target = self
+            .target
+            .take()
+            .ok_or_else(|| format_err!("had no target during add entry"))?;
+        entry.offset = self.byte_count.try_into()?;
+        self.byte_count += entry.write_local_header(&mut target).await?;
+        if let Some(content) = content {
+            let mut reader = HashWrapper::new(content);
+            let mut enc = DeflateEncoder::with_quality(target, Level::Fastest);
+
+            enc.compress(&mut reader).await?;
+            let total_in = enc.total_in();
+            let total_out = enc.total_out();
+            target = enc.into_inner();
+
+            let (crc32, _reader) = reader.finish();
+
+            self.byte_count += total_out as usize;
+            entry.compressed_size = total_out;
+            entry.uncompressed_size = total_in;
+
+            entry.crc32 = crc32;
+        }
+        self.byte_count += entry.write_data_descriptor(&mut target).await?;
+        self.target = Some(target);
+
+        self.files.push(entry);
+
+        Ok(())
+    }
+
+    async fn write_eocd(
+        &mut self,
+        central_dir_size: usize,
+        central_dir_offset: usize,
+    ) -> Result<(), Error> {
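+        // when any of the counts/offsets exceed the 16/32-bit EOCD fields,
+        // first write a ZIP64 EOCD record and locator, and store sentinel
+        // values (0xFFFF/0xFFFFFFFF) in the classic EOCD below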
+        let entrycount = self.files.len();
+        let mut target = self
+            .target
+            .take()
+            .ok_or_else(|| format_err!("had no target during write_eocd"))?;
+
+        let mut count = entrycount as u16;
+        let mut directory_size = central_dir_size as u32;
+        let mut directory_offset = central_dir_offset as u32;
+
+        if central_dir_size > u32::MAX as usize
+            || central_dir_offset > u32::MAX as usize
+            || entrycount > u16::MAX as usize
+        {
+            count = 0xFFFF;
+            directory_size = 0xFFFFFFFF;
+            directory_offset = 0xFFFFFFFF;
+
+            write_struct(
+                &mut target,
+                Zip64EOCDRecord {
+                    signature: ZIP64_EOCD_RECORD,
+                    field_size: 44,
+                    version_made_by: VERSION_MADE_BY,
+                    version_needed: VERSION_NEEDED,
+                    disk_number: 0,
+                    disk_number_central_dir: 0,
+                    disk_record_count: entrycount.try_into()?,
+                    total_record_count: entrycount.try_into()?,
+                    directory_size: central_dir_size.try_into()?,
+                    directory_offset: central_dir_offset.try_into()?,
+                },
+            )
+            .await?;
+
+            let locator_offset = central_dir_offset + central_dir_size;
+
+            write_struct(
+                &mut target,
+                Zip64EOCDLocator {
+                    signature: ZIP64_EOCD_LOCATOR,
+                    disk_number: 0,
+                    offset: locator_offset.try_into()?,
+                    disk_count: 1,
+                },
+            )
+            .await?;
+        }
+
+        write_struct(
+            &mut target,
+            EndOfCentralDir {
+                signature: END_OF_CENTRAL_DIR,
+                disk_number: 0,
+                start_disk: 0,
+                disk_record_count: count,
+                total_record_count: count,
+                directory_size,
+                directory_offset,
+                comment_len: 0,
+            },
+        )
+        .await?;
+
+        self.target = Some(target);
+
+        Ok(())
+    }
+
+    pub async fn finish(&mut self) -> Result<(), Error> {
+        let mut target = self
+            .target
+            .take()
+            .ok_or_else(|| format_err!("had no target during finish"))?;
+        let central_dir_offset = self.byte_count;
+        let mut central_dir_size = 0;
+
+        for file in &self.files {
+            central_dir_size += file.write_central_directory_header(&mut target).await?;
+        }
+
+        self.target = Some(target);
+        self.write_eocd(central_dir_size, central_dir_offset)
+            .await?;
+
+        self.target
+            .take()
+            .ok_or_else(|| format_err!("had no target for flush"))?
+            .flush()
+            .await?;
+
+        Ok(())
+    }
+}
+
+/// Zip a local directory and write encoded data to target. "source" has to point to a valid
+/// directory; its name will be the root of the zip file - e.g.:
+/// source:
+///         /foo/bar
+/// zip file:
+///         /bar/file1
+///         /bar/dir1
+///         /bar/dir1/file2
+///         ...
+/// ...except if "source" is the root directory
+pub async fn zip_directory<W>(target: W, source: &Path) -> Result<(), Error>
+where
+    W: AsyncWrite + Unpin + Send,
+{
+    use walkdir::WalkDir;
+    use std::os::unix::fs::MetadataExt;
+
+    let base_path = source.parent().unwrap_or_else(|| Path::new("/"));
+    let mut encoder = ZipEncoder::new(target);
+
+    for entry in WalkDir::new(&source).into_iter() {
+        match entry {
+            Ok(entry) => {
+                let entry_path = entry.path().to_owned();
+                let encoder = &mut encoder;
+
+                if let Err(err) = async move {
+                    let entry_path_no_base = entry.path().strip_prefix(base_path)?;
+                    let metadata = entry.metadata()?;
+                    let mtime = match metadata.modified().unwrap_or_else(|_| SystemTime::now()).duration_since(SystemTime::UNIX_EPOCH) {
+                        Ok(dur) => dur.as_secs() as i64,
+                        Err(time_error) => -(time_error.duration().as_secs() as i64)
+                    };
+                    let mode = metadata.mode() as u16;
+
+                    if entry.file_type().is_file() {
+                        let file = tokio::fs::File::open(entry.path()).await?;
+                        let ze = ZipEntry::new(
+                            &entry_path_no_base,
+                            mtime,
+                            mode,
+                            true,
+                        );
+                        encoder.add_entry(ze, Some(file)).await?;
+                    } else if entry.file_type().is_dir() {
+                        let ze = ZipEntry::new(
+                            &entry_path_no_base,
+                            mtime,
+                            mode,
+                            false,
+                        );
+                        let content: Option<tokio::fs::File> = None;
+                        encoder.add_entry(ze, content).await?;
+                    }
+                    // ignore other file types
+                    let ok: Result<(), Error> = Ok(());
+                    ok
+                }
+                .await
+                {
+                    eprintln!(
+                        "zip: error encoding file or directory '{}': {}",
+                        entry_path.display(),
+                        err
+                    );
+                }
+            }
+            Err(err) => {
+                eprintln!("zip: error reading directory entry: {}", err);
+            }
+        }
+    }
+
+    encoder.finish().await
+}
index e080d57ace392810d1d608981eefccc063154ff4..70481ffb1c5a61d35cf4ee0b290345995362d7e4 100644 (file)
@@ -10,7 +10,11 @@ use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::{Schema, StringSchema};
 use proxmox::tools::fs::open_file_locked;
 
-use crate::api2::types::*;
+use pbs_api_types::{
+    PASSWORD_FORMAT, PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, Authid,
+    Tokenname, UserWithTokens, Userid,
+};
+
 use crate::config::user;
 use crate::config::token_shadow;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
@@ -22,77 +26,16 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
     .max_length(64)
     .schema();
 
-#[api(
-    properties: {
-        userid: {
-            type: Userid,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: user::ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: user::EXPIRE_USER_SCHEMA,
-        },
-        firstname: {
-            optional: true,
-            schema: user::FIRST_NAME_SCHEMA,
-        },
-        lastname: {
-            schema: user::LAST_NAME_SCHEMA,
-            optional: true,
-         },
-        email: {
-            schema: user::EMAIL_SCHEMA,
-            optional: true,
-        },
-        tokens: {
-            type: Array,
-            optional: true,
-            description: "List of user's API tokens.",
-            items: {
-                type: user::ApiToken
-            },
-        },
-    }
-)]
-#[derive(Serialize,Deserialize)]
-/// User properties with added list of ApiTokens
-pub struct UserWithTokens {
-    pub userid: Userid,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub expire: Option<i64>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub firstname: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub lastname: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub email: Option<String>,
-    #[serde(skip_serializing_if="Vec::is_empty", default)]
-    pub tokens: Vec<user::ApiToken>,
-}
-
-impl UserWithTokens {
-    fn new(user: user::User) -> Self {
-        Self {
-            userid: user.userid,
-            comment: user.comment,
-            enable: user.enable,
-            expire: user.expire,
-            firstname: user.firstname,
-            lastname: user.lastname,
-            email: user.email,
-            tokens: Vec::new(),
-        }
+fn new_user_with_tokens(user: user::User) -> UserWithTokens {
+    UserWithTokens {
+        userid: user.userid,
+        comment: user.comment,
+        enable: user.enable,
+        expire: user.expire,
+        firstname: user.firstname,
+        lastname: user.lastname,
+        email: user.email,
+        tokens: Vec::new(),
     }
 }
 
@@ -165,13 +108,13 @@ pub fn list_users(
             });
         iter
             .map(|user: user::User| {
-                let mut user = UserWithTokens::new(user);
+                let mut user = new_user_with_tokens(user);
                 user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
                 user
             })
             .collect()
     } else {
-        iter.map(UserWithTokens::new)
+        iter.map(new_user_with_tokens)
             .collect()
     };
 
index 6094150171e14b3e579425b00bcb865e1238b856..470df9b68ff92a179e2f35f156e4c703c3089351 100644 (file)
@@ -26,13 +26,14 @@ use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
+use pbs_client::pxar::create_zip;
+
 use crate::api2::types::*;
 use crate::api2::node::rrd::create_value_from_rrd;
 use crate::api2::helpers;
 use crate::backup::*;
 use crate::config::datastore;
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::pxar::create_zip;
 
 use crate::server::{jobstate::Job, WorkerTask};
 use crate::tools::{
index d302b2f735400cd227ae8ebde97c660f197da579..eafff189bdc92d0b8dc03395e18779d8071ac343 100644 (file)
@@ -13,6 +13,7 @@ use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 
 use pbs_tools::fs::lock_dir_noblock_shared;
+use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
 
 use crate::tools;
 use crate::server::{WorkerTask, H2Service};
index 446a260413769e3db2e434a31471764efd2d4035..8eacbc85e2470f8e55369dcb536a45684cc02ebc 100644 (file)
@@ -6,8 +6,9 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::http_err;
 use proxmox::tools::fs::open_file_locked;
 
+use pbs_client::{HttpClient, HttpClientOptions};
+
 use crate::api2::types::*;
-use crate::client::{HttpClient, HttpClientOptions};
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::config::remote;
 use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
index 0998e9b8440676cee34aa52d1906013f9dcdea01..4893c9fb3ae3c8e896766e0bae058cc0fc6c3361 100644 (file)
@@ -7,9 +7,10 @@ use futures::{select, future::FutureExt};
 use proxmox::api::api;
 use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
 
+use pbs_client::{HttpClient, BackupRepository};
+
 use crate::server::{WorkerTask, jobstate::Job, pull::pull_store};
 use crate::backup::DataStore;
-use crate::client::{HttpClient, BackupRepository};
 use crate::api2::types::{
     DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, Authid,
 };
index 81d92bf1bba10ec9a9cceb75fccd88e531adfd8d..533bc88c1577b772bfe10a75ea8416a3a9c711e5 100644 (file)
@@ -28,6 +28,7 @@ use proxmox::{
 };
 
 use pbs_tools::fs::lock_dir_noblock_shared;
+use pbs_datastore::PROXMOX_BACKUP_READER_PROTOCOL_ID_V1;
 
 use crate::{
     api2::{
index b8b42a5e92fb697bb42ab5fcbdda47b5f1f7d606..530ce9048e54d2b4196a3f910bccf2d20e798022 100644 (file)
@@ -107,14 +107,6 @@ pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
     .format(&FINGERPRINT_SHA256_FORMAT)
     .schema();
 
-pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
-    "Prevent changes if current configuration file has different \
-    SHA256 digest. This can be used to prevent concurrent \
-    modifications."
-)
-    .format(&PVE_CONFIG_DIGEST_FORMAT) .schema();
-
-
 pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
     .format(&CHUNK_DIGEST_FORMAT)
     .schema();
diff --git a/src/backup/catalog_shell.rs b/src/backup/catalog_shell.rs
deleted file mode 100644 (file)
index b186ac6..0000000
+++ /dev/null
@@ -1,1280 +0,0 @@
-use std::collections::HashMap;
-use std::ffi::{CStr, CString, OsStr, OsString};
-use std::future::Future;
-use std::io::Write;
-use std::mem;
-use std::os::unix::ffi::{OsStrExt, OsStringExt};
-use std::path::{Path, PathBuf};
-use std::pin::Pin;
-
-use anyhow::{bail, format_err, Error};
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
-use proxmox::api::api;
-use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
-use proxmox::tools::fs::{create_path, CreateOptions};
-use pxar::{EntryKind, Metadata};
-
-use crate::backup::catalog::{self, DirEntryAttribute};
-use crate::pxar::fuse::{Accessor, FileEntry};
-use crate::pxar::Flags;
-use pbs_runtime::block_in_place;
-use crate::tools::ControlFlow;
-
-type CatalogReader = crate::backup::CatalogReader<std::fs::File>;
-
-const MAX_SYMLINK_COUNT: usize = 40;
-
-static mut SHELL: Option<usize> = None;
-
-/// This list defines all the shell commands and their properties
-/// using the api schema
-pub fn catalog_shell_cli() -> CommandLineInterface {
-    CommandLineInterface::Nested(
-        CliCommandMap::new()
-            .insert("pwd", CliCommand::new(&API_METHOD_PWD_COMMAND))
-            .insert(
-                "cd",
-                CliCommand::new(&API_METHOD_CD_COMMAND)
-                    .arg_param(&["path"])
-                    .completion_cb("path", complete_path),
-            )
-            .insert(
-                "ls",
-                CliCommand::new(&API_METHOD_LS_COMMAND)
-                    .arg_param(&["path"])
-                    .completion_cb("path", complete_path),
-            )
-            .insert(
-                "stat",
-                CliCommand::new(&API_METHOD_STAT_COMMAND)
-                    .arg_param(&["path"])
-                    .completion_cb("path", complete_path),
-            )
-            .insert(
-                "select",
-                CliCommand::new(&API_METHOD_SELECT_COMMAND)
-                    .arg_param(&["path"])
-                    .completion_cb("path", complete_path),
-            )
-            .insert(
-                "deselect",
-                CliCommand::new(&API_METHOD_DESELECT_COMMAND)
-                    .arg_param(&["path"])
-                    .completion_cb("path", complete_path),
-            )
-            .insert(
-                "clear-selected",
-                CliCommand::new(&API_METHOD_CLEAR_SELECTED_COMMAND),
-            )
-            .insert(
-                "list-selected",
-                CliCommand::new(&API_METHOD_LIST_SELECTED_COMMAND),
-            )
-            .insert(
-                "restore-selected",
-                CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
-                    .arg_param(&["target"])
-                    .completion_cb("target", crate::tools::complete_file_name),
-            )
-            .insert(
-                "restore",
-                CliCommand::new(&API_METHOD_RESTORE_COMMAND)
-                    .arg_param(&["target"])
-                    .completion_cb("target", crate::tools::complete_file_name),
-            )
-            .insert(
-                "find",
-                CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
-            )
-            .insert(
-                "exit",
-                CliCommand::new(&API_METHOD_EXIT),
-            )
-            .insert_help(),
-    )
-}
-
-fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<String> {
-    let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
-    match shell.complete_path(complete_me) {
-        Ok(list) => list,
-        Err(err) => {
-            eprintln!("error during completion: {}", err);
-            Vec::new()
-        }
-    }
-}
-
-// just an empty wrapper so that it is displayed in help/docs; we check
-// for 'exit' in the read loop again and break there
-#[api(input: { properties: {} })]
-/// Exit the shell
-async fn exit() -> Result<(), Error> {
-    Ok(())
-}
-
-#[api(input: { properties: {} })]
-/// List the current working directory.
-async fn pwd_command() -> Result<(), Error> {
-    Shell::with(move |shell| shell.pwd()).await
-}
-
-#[api(
-    input: {
-        properties: {
-            path: {
-                type: String,
-                optional: true,
-                description: "target path."
-            }
-        }
-    }
-)]
-/// Change the current working directory to the new directory
-async fn cd_command(path: Option<String>) -> Result<(), Error> {
-    let path = path.as_ref().map(Path::new);
-    Shell::with(move |shell| shell.cd(path)).await
-}
-
-#[api(
-    input: {
-        properties: {
-            path: {
-                type: String,
-                optional: true,
-                description: "target path."
-            }
-        }
-    }
-)]
-/// List the content of working directory or given path.
-async fn ls_command(path: Option<String>) -> Result<(), Error> {
-    let path = path.as_ref().map(Path::new);
-    Shell::with(move |shell| shell.ls(path)).await
-}
-
-#[api(
-    input: {
-        properties: {
-            path: {
-                type: String,
-                description: "target path."
-            }
-        }
-    }
-)]
-/// Read the metadata for a given directory entry.
-///
-/// This is expensive because the data has to be read from the pxar archive, which means reading
-/// over the network.
-async fn stat_command(path: String) -> Result<(), Error> {
-    Shell::with(move |shell| shell.stat(PathBuf::from(path))).await
-}
-
-#[api(
-    input: {
-        properties: {
-            path: {
-                type: String,
-                description: "target path."
-            }
-        }
-    }
-)]
-/// Select an entry for restore.
-///
-/// This will return an error if the entry is already present in the list or
-/// if an invalid path was provided.
-async fn select_command(path: String) -> Result<(), Error> {
-    Shell::with(move |shell| shell.select(PathBuf::from(path))).await
-}
-
-#[api(
-    input: {
-        properties: {
-            path: {
-                type: String,
-                description: "path to entry to remove from list."
-            }
-        }
-    }
-)]
-/// Deselect an entry for restore.
-///
-/// This will return an error if the entry was not found in the list of entries
-/// selected for restore.
-async fn deselect_command(path: String) -> Result<(), Error> {
-    Shell::with(move |shell| shell.deselect(PathBuf::from(path))).await
-}
-
-#[api( input: { properties: { } })]
-/// Clear the list of files selected for restore.
-async fn clear_selected_command() -> Result<(), Error> {
-    Shell::with(move |shell| shell.deselect_all()).await
-}
-
-#[api(
-    input: {
-        properties: {
-            patterns: {
-                type: Boolean,
-                description: "List match patterns instead of the matching files.",
-                optional: true,
-                default: false,
-            }
-        }
-    }
-)]
-/// List entries currently selected for restore.
-async fn list_selected_command(patterns: bool) -> Result<(), Error> {
-    Shell::with(move |shell| shell.list_selected(patterns)).await
-}
-
-#[api(
-    input: {
-        properties: {
-            pattern: {
-                type: String,
-                description: "Match pattern for matching files in the catalog."
-            },
-            select: {
-                type: bool,
-                optional: true,
-                default: false,
-                description: "Add matching filenames to list for restore."
-            }
-        }
-    }
-)]
-/// Find entries in the catalog matching the given match pattern.
-async fn find_command(pattern: String, select: bool) -> Result<(), Error> {
-    Shell::with(move |shell| shell.find(pattern, select)).await
-}
-
-#[api(
-    input: {
-        properties: {
-            target: {
-                type: String,
-                description: "target path for restore on local filesystem."
-            }
-        }
-    }
-)]
-/// Restore the selected entries to the given target path.
-///
-/// Target must not exist on the clients filesystem.
-async fn restore_selected_command(target: String) -> Result<(), Error> {
-    Shell::with(move |shell| shell.restore_selected(PathBuf::from(target))).await
-}
-
-#[api(
-    input: {
-        properties: {
-            target: {
-                type: String,
-                description: "target path for restore on local filesystem."
-            },
-            pattern: {
-                type: String,
-                optional: true,
-                description: "match pattern to limit files for restore."
-            }
-        }
-    }
-)]
-/// Restore the sub-archive given by the current working directory to target.
-///
-/// By further providing a pattern, the restore can be limited to a narrower
-/// subset of this sub-archive.
-/// If pattern is not present or empty, the full archive is restored to target.
-async fn restore_command(target: String, pattern: Option<String>) -> Result<(), Error> {
-    Shell::with(move |shell| shell.restore(PathBuf::from(target), pattern)).await
-}
-
-/// TODO: Should we use this to fix `step()`? Make path resolution behave more like described in
-/// the path_resolution(7) man page.
-///
-/// The `Path` type's component iterator does not tell us anything about trailing slashes or
-/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
-/// here:
-enum PathComponent<'a> {
-    Root,
-    CurDir,
-    ParentDir,
-    Normal(&'a OsStr),
-    TrailingSlash,
-}
-
-struct PathComponentIter<'a> {
-    path: &'a [u8],
-    state: u8, // 0=beginning, 1=ongoing, 2=trailing, 3=finished (fused)
-}
-
-impl std::iter::FusedIterator for PathComponentIter<'_> {}
-
-impl<'a> Iterator for PathComponentIter<'a> {
-    type Item = PathComponent<'a>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.path.is_empty() {
-            return None;
-        }
-
-        if self.state == 0 {
-            self.state = 1;
-            if self.path[0] == b'/' {
-                // absolute path
-                self.path = &self.path[1..];
-                return Some(PathComponent::Root);
-            }
-        }
-
-        // skip slashes
-        let had_slashes = self.path[0] == b'/';
-        while self.path.get(0).copied() == Some(b'/') {
-            self.path = &self.path[1..];
-        }
-
-        Some(match self.path {
-            [] if had_slashes => PathComponent::TrailingSlash,
-            [] => return None,
-            [b'.'] | [b'.', b'/', ..] => {
-                self.path = &self.path[1..];
-                PathComponent::CurDir
-            }
-            [b'.', b'.'] | [b'.', b'.', b'/', ..] => {
-                self.path = &self.path[2..];
-                PathComponent::ParentDir
-            }
-            _ => {
-                let end = self
-                    .path
-                    .iter()
-                    .position(|&b| b == b'/')
-                    .unwrap_or(self.path.len());
-                let (out, rest) = self.path.split_at(end);
-                self.path = rest;
-                PathComponent::Normal(OsStr::from_bytes(out))
-            }
-        })
-    }
-}
-
-pub struct Shell {
-    /// Readline instance handling input and callbacks
-    rl: rustyline::Editor<CliHelper>,
-
-    /// Interactive prompt.
-    prompt: String,
-
-    /// Catalog reader instance to navigate
-    catalog: CatalogReader,
-
-    /// List of selected paths for restore
-    selected: HashMap<OsString, MatchEntry>,
-
-    /// pxar accessor instance for the current pxar archive
-    accessor: Accessor,
-
-    /// The current position in the archive.
-    position: Vec<PathStackEntry>,
-}
-
-#[derive(Clone)]
-struct PathStackEntry {
-    /// This is always available. We mainly navigate through the catalog.
-    catalog: catalog::DirEntry,
-
-    /// Whenever we need something from the actual archive we fill this out. This is cached along
-    /// the entire path.
-    pxar: Option<FileEntry>,
-}
-
-impl PathStackEntry {
-    fn new(dir_entry: catalog::DirEntry) -> Self {
-        Self {
-            pxar: None,
-            catalog: dir_entry,
-        }
-    }
-}
-
-impl Shell {
-    /// Create a new shell for the given catalog and pxar archive.
-    pub async fn new(
-        mut catalog: CatalogReader,
-        archive_name: &str,
-        archive: Accessor,
-    ) -> Result<Self, Error> {
-        let cli_helper = CliHelper::new(catalog_shell_cli());
-        let mut rl = rustyline::Editor::<CliHelper>::new();
-        rl.set_helper(Some(cli_helper));
-
-        let catalog_root = catalog.root()?;
-        let archive_root = catalog
-            .lookup(&catalog_root, archive_name.as_bytes())?
-            .ok_or_else(|| format_err!("archive not found in catalog"))?;
-        let position = vec![PathStackEntry::new(archive_root)];
-
-        let mut this = Self {
-            rl,
-            prompt: String::new(),
-            catalog,
-            selected: HashMap::new(),
-            accessor: archive,
-            position,
-        };
-        this.update_prompt();
-        Ok(this)
-    }
-
-    async fn with<'a, Fut, R, F>(call: F) -> Result<R, Error>
-    where
-        F: FnOnce(&'a mut Shell) -> Fut,
-        Fut: Future<Output = Result<R, Error>>,
-        F: 'a,
-        Fut: 'a,
-        R: 'static,
-    {
-        let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
-        call(&mut *shell).await
-    }
-
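The `usize` round-trip through the `SHELL` static above is how the free-standing `#[api]` command handlers reach the one live `Shell` while `shell()` runs. A self-contained sketch of that pattern with hypothetical names; like the original, it is only sound while exactly one instance exists and no callback outlives the event loop:

    static mut INSTANCE: Option<usize> = None;

    struct State {
        counter: u32,
    }

    impl State {
        fn with<R>(call: impl FnOnce(&mut State) -> R) -> R {
            // SAFETY: only reachable from handlers invoked while `run()` is live
            let state = unsafe { &mut *(INSTANCE.unwrap() as *mut State) };
            call(state)
        }

        fn run(&mut self) {
            unsafe { INSTANCE = Some(self as *mut State as usize) };
            // ...event loop; handlers use State::with() to reach `self`...
            State::with(|state| state.counter += 1);
            unsafe { INSTANCE = None };
        }
    }

    fn main() {
        let mut state = State { counter: 0 };
        state.run();
        assert_eq!(state.counter, 1);
    }
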
-    pub async fn shell(mut self) -> Result<(), Error> {
-        let this = &mut self;
-        unsafe {
-            SHELL = Some(this as *mut Shell as usize);
-        }
-        while let Ok(line) = this.rl.readline(&this.prompt) {
-            if line == "exit" {
-                break;
-            }
-            let helper = this.rl.helper().unwrap();
-            let args = match cli::shellword_split(&line) {
-                Ok(args) => args,
-                Err(err) => {
-                    println!("Error: {}", err);
-                    continue;
-                }
-            };
-
-            let _ =
-                cli::handle_command_future(helper.cmd_def(), "", args, cli::CliEnvironment::new())
-                    .await;
-            this.rl.add_history_entry(line);
-            this.update_prompt();
-        }
-        Ok(())
-    }
-
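For orientation, the read/dispatch loop above is the usual rustyline shape; only the dispatch through `cli::handle_command_future` is proxmox-specific. A minimal sketch (assuming rustyline < 10, where `Editor::new()` is infallible, matching the call in `Shell::new` above), with dispatch reduced to a println:

    fn main() {
        let mut rl = rustyline::Editor::<()>::new();
        while let Ok(line) = rl.readline("pxar:/ > ") {
            if line == "exit" {
                break;
            }
            rl.add_history_entry(&line);
            // a real shell would shellword-split and dispatch the command here
            println!("unhandled command: {}", line);
        }
    }
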
-    fn update_prompt(&mut self) {
-        self.prompt = "pxar:".to_string();
-        if self.position.len() <= 1 {
-            self.prompt.push('/');
-        } else {
-            for p in self.position.iter().skip(1) {
-                if !p.catalog.name.starts_with(b"/") {
-                    self.prompt.push('/');
-                }
-                match std::str::from_utf8(&p.catalog.name) {
-                    Ok(entry) => self.prompt.push_str(entry),
-                    Err(_) => self.prompt.push_str("<non-utf8-dir>"),
-                }
-            }
-        }
-        self.prompt.push_str(" > ");
-    }
-
-    async fn pwd(&mut self) -> Result<(), Error> {
-        let stack = Self::lookup(
-            &self.position,
-            &mut self.catalog,
-            &self.accessor,
-            None,
-            &mut Some(0),
-        )
-        .await?;
-        let path = Self::format_path_stack(&stack);
-        println!("{:?}", path);
-        Ok(())
-    }
-
-    fn new_path_stack(&self) -> Vec<PathStackEntry> {
-        self.position[..1].to_vec()
-    }
-
-    async fn resolve_symlink(
-        stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
-        accessor: &Accessor,
-        follow_symlinks: &mut Option<usize>,
-    ) -> Result<(), Error> {
-        if let Some(ref mut symlink_count) = follow_symlinks {
-            *symlink_count += 1;
-            if *symlink_count > MAX_SYMLINK_COUNT {
-                bail!("too many levels of symbolic links");
-            }
-
-            let file = Self::walk_pxar_archive(accessor, &mut stack[..]).await?;
-
-            let path = match file.entry().kind() {
-                EntryKind::Symlink(symlink) => Path::new(symlink.as_os_str()),
-                _ => bail!("symlink in the catalog was not a symlink in the archive"),
-            };
-
-            let new_stack =
-                Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
-
-            *stack = new_stack;
-
-            Ok(())
-        } else {
-            bail!("target is a symlink");
-        }
-    }
-
-    /// Walk a path and add it to the path stack.
-    ///
-    /// If a symlink count is provided, symlinks are followed until we hit the cap, at which
-    /// point we error out.
-    async fn step(
-        stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
-        accessor: &Accessor,
-        component: std::path::Component<'_>,
-        follow_symlinks: &mut Option<usize>,
-    ) -> Result<(), Error> {
-        use std::path::Component;
-        match component {
-            Component::Prefix(_) => bail!("invalid path component (prefix)"),
-            Component::RootDir => stack.truncate(1),
-            Component::CurDir => {
-                if stack.last().unwrap().catalog.is_symlink() {
-                    Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
-                }
-            }
-            Component::ParentDir => drop(stack.pop()),
-            Component::Normal(entry) => {
-                if stack.last().unwrap().catalog.is_symlink() {
-                    Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
-                }
-                match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
-                    Some(dir) => stack.push(PathStackEntry::new(dir)),
-                    None => bail!("no such file or directory: {:?}", entry),
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    fn step_nofollow(
-        stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
-        component: std::path::Component<'_>,
-    ) -> Result<(), Error> {
-        use std::path::Component;
-        match component {
-            Component::Prefix(_) => bail!("invalid path component (prefix)"),
-            Component::RootDir => stack.truncate(1),
-            Component::CurDir => {
-                if stack.last().unwrap().catalog.is_symlink() {
-                    bail!("target is a symlink");
-                }
-            }
-            Component::ParentDir => drop(stack.pop()),
-            Component::Normal(entry) => {
-                if stack.last().unwrap().catalog.is_symlink() {
-                    bail!("target is a symlink");
-                } else {
-                    match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
-                        Some(dir) => stack.push(PathStackEntry::new(dir)),
-                        None => bail!("no such file or directory: {:?}", entry),
-                    }
-                }
-            }
-        }
-        Ok(())
-    }
-
-    /// The pxar accessor is required to resolve symbolic links.
-    async fn walk_catalog(
-        stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
-        accessor: &Accessor,
-        path: &Path,
-        follow_symlinks: &mut Option<usize>,
-    ) -> Result<(), Error> {
-        for c in path.components() {
-            Self::step(stack, catalog, accessor, c, follow_symlinks).await?;
-        }
-        Ok(())
-    }
-
-    /// Non-async version cannot follow symlinks.
-    fn walk_catalog_nofollow(
-        stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
-        path: &Path,
-    ) -> Result<(), Error> {
-        for c in path.components() {
-            Self::step_nofollow(stack, catalog, c)?;
-        }
-        Ok(())
-    }
-
-    /// This assumes that there are no more symlinks in the path stack.
-    async fn walk_pxar_archive(
-        accessor: &Accessor,
-        mut stack: &mut [PathStackEntry],
-    ) -> Result<FileEntry, Error> {
-        if stack[0].pxar.is_none() {
-            stack[0].pxar = Some(accessor.open_root().await?.lookup_self().await?);
-        }
-
-        // Now walk the directory stack:
-        let mut at = 1;
-        while at < stack.len() {
-            if stack[at].pxar.is_some() {
-                at += 1;
-                continue;
-            }
-
-            let parent = stack[at - 1].pxar.as_ref().unwrap();
-            let dir = parent.enter_directory().await?;
-            let name = Path::new(OsStr::from_bytes(&stack[at].catalog.name));
-            stack[at].pxar = Some(
-                dir.lookup(name)
-                    .await?
-                    .ok_or_else(|| format_err!("no such entry in pxar file: {:?}", name))?,
-            );
-
-            at += 1;
-        }
-
-        Ok(stack.last().unwrap().pxar.clone().unwrap())
-    }
-
-    fn complete_path(&mut self, input: &str) -> Result<Vec<String>, Error> {
-        let mut tmp_stack;
-        let (parent, base, part) = match input.rfind('/') {
-            Some(ind) => {
-                let (base, part) = input.split_at(ind + 1);
-                let path = PathBuf::from(base);
-                if path.is_absolute() {
-                    tmp_stack = self.new_path_stack();
-                } else {
-                    tmp_stack = self.position.clone();
-                }
-                Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
-                (&tmp_stack.last().unwrap().catalog, base, part)
-            }
-            None => (&self.position.last().unwrap().catalog, "", input),
-        };
-
-        let entries = self.catalog.read_dir(parent)?;
-
-        let mut out = Vec::new();
-        for entry in entries {
-            let mut name = base.to_string();
-            if entry.name.starts_with(part.as_bytes()) {
-                name.push_str(std::str::from_utf8(&entry.name)?);
-                if entry.is_directory() {
-                    name.push('/');
-                }
-                out.push(name);
-            }
-        }
-
-        Ok(out)
-    }
-
-    // Break async recursion here: lookup -> walk_catalog -> step -> lookup
-    fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
-        stack: &'s [PathStackEntry],
-        catalog: &'c mut CatalogReader,
-        accessor: &'a Accessor,
-        path: Option<&'p Path>,
-        follow_symlinks: &'y mut Option<usize>,
-    ) -> Pin<Box<dyn Future<Output = Result<Vec<PathStackEntry>, Error>> + Send + 'future>>
-    where
-        's: 'future,
-        'c: 'future,
-        'a: 'future,
-        'p: 'future,
-        'y: 'future,
-    {
-        Box::pin(async move {
-            Ok(match path {
-                None => stack.to_vec(),
-                Some(path) => {
-                    let mut stack = if path.is_absolute() {
-                        stack[..1].to_vec()
-                    } else {
-                        stack.to_vec()
-                    };
-                    Self::walk_catalog(&mut stack, catalog, accessor, path, follow_symlinks)
-                        .await?;
-                    stack
-                }
-            })
-        })
-    }
-
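The hand-written `Pin<Box<dyn Future>>` signature above is the standard trick for breaking async recursion: an `async fn` that awaited itself would have an infinitely sized future type, so the recursive arm returns a boxed, type-erased future instead. A minimal sketch of the same technique:

    use std::future::Future;
    use std::pin::Pin;

    fn countdown(n: u64) -> Pin<Box<dyn Future<Output = u64> + Send>> {
        Box::pin(async move {
            if n == 0 {
                0
            } else {
                // the recursive call is just another boxed future to await
                countdown(n - 1).await + 1
            }
        })
    }

    fn main() {
        let fut = countdown(3);
        // drive with any executor, e.g. futures::executor::block_on(fut) == 3
        drop(fut);
    }
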
-    async fn ls(&mut self, path: Option<&Path>) -> Result<(), Error> {
-        let stack = Self::lookup(
-            &self.position,
-            &mut self.catalog,
-            &self.accessor,
-            path,
-            &mut Some(0),
-        )
-        .await?;
-
-        let last = stack.last().unwrap();
-        if last.catalog.is_directory() {
-            let items = self.catalog.read_dir(&stack.last().unwrap().catalog)?;
-            let mut out = std::io::stdout();
-            // FIXME: columnize
-            for item in items {
-                out.write_all(&item.name)?;
-                out.write_all(b"\n")?;
-            }
-        } else {
-            let mut out = std::io::stdout();
-            out.write_all(&last.catalog.name)?;
-            out.write_all(b"\n")?;
-        }
-        Ok(())
-    }
-
-    async fn stat(&mut self, path: PathBuf) -> Result<(), Error> {
-        let mut stack = Self::lookup(
-            &self.position,
-            &mut self.catalog,
-            &self.accessor,
-            Some(&path),
-            &mut Some(0),
-        )
-        .await?;
-
-        let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
-        std::io::stdout()
-            .write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
-        Ok(())
-    }
-
-    async fn cd(&mut self, path: Option<&Path>) -> Result<(), Error> {
-        match path {
-            Some(path) => {
-                let new_position = Self::lookup(
-                    &self.position,
-                    &mut self.catalog,
-                    &self.accessor,
-                    Some(path),
-                    &mut None,
-                )
-                .await?;
-                if !new_position.last().unwrap().catalog.is_directory() {
-                    bail!("not a directory");
-                }
-                self.position = new_position;
-            }
-            None => self.position.truncate(1),
-        }
-        self.update_prompt();
-        Ok(())
-    }
-
-    /// This stack must have been canonicalized already!
-    fn format_path_stack(stack: &[PathStackEntry]) -> OsString {
-        if stack.len() <= 1 {
-            return OsString::from("/");
-        }
-
-        let mut out = OsString::new();
-        for c in stack.iter().skip(1) {
-            out.push("/");
-            out.push(OsStr::from_bytes(&c.catalog.name));
-        }
-
-        out
-    }
-
-    async fn select(&mut self, path: PathBuf) -> Result<(), Error> {
-        let stack = Self::lookup(
-            &self.position,
-            &mut self.catalog,
-            &self.accessor,
-            Some(&path),
-            &mut Some(0),
-        )
-        .await?;
-
-        let path = Self::format_path_stack(&stack);
-        let entry = MatchEntry::include(MatchPattern::Literal(path.as_bytes().to_vec()));
-        if self.selected.insert(path.clone(), entry).is_some() {
-            println!("path already selected: {:?}", path);
-        } else {
-            println!("added path: {:?}", path);
-        }
-
-        Ok(())
-    }
-
-    async fn deselect(&mut self, path: PathBuf) -> Result<(), Error> {
-        let stack = Self::lookup(
-            &self.position,
-            &mut self.catalog,
-            &self.accessor,
-            Some(&path),
-            &mut Some(0),
-        )
-        .await?;
-
-        let path = Self::format_path_stack(&stack);
-
-        if self.selected.remove(&path).is_some() {
-            println!("removed path from selection: {:?}", path);
-        } else {
-            println!("path not selected: {:?}", path);
-        }
-
-        Ok(())
-    }
-
-    async fn deselect_all(&mut self) -> Result<(), Error> {
-        self.selected.clear();
-        println!("cleared selection");
-        Ok(())
-    }
-
-    async fn list_selected(&mut self, patterns: bool) -> Result<(), Error> {
-        if patterns {
-            self.list_selected_patterns().await
-        } else {
-            self.list_matching_files().await
-        }
-    }
-
-    async fn list_selected_patterns(&self) -> Result<(), Error> {
-        for entry in self.selected.keys() {
-            println!("{:?}", entry);
-        }
-        Ok(())
-    }
-
-    fn build_match_list(&self) -> Vec<MatchEntry> {
-        let mut list = Vec::with_capacity(self.selected.len());
-        for entry in self.selected.values() {
-            list.push(entry.clone());
-        }
-        list
-    }
-
-    async fn list_matching_files(&mut self) -> Result<(), Error> {
-        let matches = self.build_match_list();
-
-        self.catalog.find(
-            &self.position[0].catalog,
-            &mut Vec::new(),
-            &matches,
-            &mut |path: &[u8]| -> Result<(), Error> {
-                let mut out = std::io::stdout();
-                out.write_all(path)?;
-                out.write_all(b"\n")?;
-                Ok(())
-            },
-        )?;
-
-        Ok(())
-    }
-
-    async fn find(&mut self, pattern: String, select: bool) -> Result<(), Error> {
-        let pattern_os = OsString::from(pattern.clone());
-        let pattern_entry =
-            MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;
-
-        let mut found_some = false;
-        self.catalog.find(
-            &self.position[0].catalog,
-            &mut Vec::new(),
-            &[&pattern_entry],
-            &mut |path: &[u8]| -> Result<(), Error> {
-                found_some = true;
-                let mut out = std::io::stdout();
-                out.write_all(path)?;
-                out.write_all(b"\n")?;
-                Ok(())
-            },
-        )?;
-
-        if found_some && select {
-            self.selected.insert(pattern_os, pattern_entry);
-        }
-
-        Ok(())
-    }
-
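`find` and `select` both lean on the pathpatterns crate for the actual matching. A small sketch of the parse/match cycle, assuming pathpatterns' `MatchList` trait for `&[MatchEntry]` as used in `handle_entry` below:

    use pathpatterns::{MatchEntry, MatchList, MatchType, PatternFlag};

    fn main() -> Result<(), anyhow::Error> {
        let entry = MatchEntry::parse_pattern("*.log", PatternFlag::PATH_NAME, MatchType::Include)?;
        let list: &[MatchEntry] = &[entry];
        // paths are matched as raw bytes; the second argument is an optional file mode
        assert_eq!(list.matches("/var/log/syslog.log", None), Some(MatchType::Include));
        assert_eq!(list.matches("/var/log/syslog.txt", None), None);
        Ok(())
    }
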
-    async fn restore_selected(&mut self, destination: PathBuf) -> Result<(), Error> {
-        if self.selected.is_empty() {
-            bail!("no entries selected");
-        }
-
-        let match_list = self.build_match_list();
-
-        self.restore_with_match_list(destination, &match_list).await
-    }
-
-    async fn restore(
-        &mut self,
-        destination: PathBuf,
-        pattern: Option<String>,
-    ) -> Result<(), Error> {
-        let tmp;
-        let match_list: &[MatchEntry] = match pattern {
-            None => &[],
-            Some(pattern) => {
-                tmp = [MatchEntry::parse_pattern(
-                    pattern,
-                    PatternFlag::PATH_NAME,
-                    MatchType::Include,
-                )?];
-                &tmp
-            }
-        };
-
-        self.restore_with_match_list(destination, match_list).await
-    }
-
-    async fn restore_with_match_list(
-        &mut self,
-        destination: PathBuf,
-        match_list: &[MatchEntry],
-    ) -> Result<(), Error> {
-        create_path(
-            &destination,
-            None,
-            Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
-        )
-        .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
-
-        let rootdir = Dir::open(
-            &destination,
-            OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
-            Mode::empty(),
-        )
-        .map_err(|err| {
-            format_err!("unable to open target directory {:?}: {}", destination, err)
-        })?;
-
-        let mut dir_stack = self.new_path_stack();
-        Self::walk_pxar_archive(&self.accessor, &mut dir_stack).await?;
-        let root_meta = dir_stack
-            .last()
-            .unwrap()
-            .pxar
-            .as_ref()
-            .unwrap()
-            .entry()
-            .metadata()
-            .clone();
-
-        let extractor = crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
-
-        let mut extractor = ExtractorState::new(
-            &mut self.catalog,
-            dir_stack,
-            extractor,
-            &match_list,
-            &self.accessor,
-        )?;
-
-        extractor.extract().await
-    }
-}
-
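One detail of `restore_with_match_list` above worth noting: the destination is opened once as a directory file descriptor, so the extractor is anchored to an fd rather than a path that could be re-resolved (or swapped) underneath it. A minimal sketch of that step with the same nix calls:

    use nix::dir::Dir;
    use nix::fcntl::OFlag;
    use nix::sys::stat::Mode;

    fn open_restore_root(path: &str) -> nix::Result<Dir> {
        // O_DIRECTORY fails early if `path` is not a directory
        Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_CLOEXEC, Mode::empty())
    }

    fn main() {
        match open_restore_root("/tmp") {
            Ok(_dir) => println!("opened"),
            Err(err) => eprintln!("failed: {}", err),
        }
    }
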
-struct ExtractorState<'a> {
-    path: Vec<u8>,
-    path_len: usize,
-    path_len_stack: Vec<usize>,
-
-    dir_stack: Vec<PathStackEntry>,
-
-    matches: bool,
-    matches_stack: Vec<bool>,
-
-    read_dir: <Vec<catalog::DirEntry> as IntoIterator>::IntoIter,
-    read_dir_stack: Vec<<Vec<catalog::DirEntry> as IntoIterator>::IntoIter>,
-
-    extractor: crate::pxar::extract::Extractor,
-
-    catalog: &'a mut CatalogReader,
-    match_list: &'a [MatchEntry],
-    accessor: &'a Accessor,
-}
-
-impl<'a> ExtractorState<'a> {
-    pub fn new(
-        catalog: &'a mut CatalogReader,
-        dir_stack: Vec<PathStackEntry>,
-        extractor: crate::pxar::extract::Extractor,
-        match_list: &'a [MatchEntry],
-        accessor: &'a Accessor,
-    ) -> Result<Self, Error> {
-        let read_dir = catalog
-            .read_dir(&dir_stack.last().unwrap().catalog)?
-            .into_iter();
-        Ok(Self {
-            path: Vec::new(),
-            path_len: 0,
-            path_len_stack: Vec::new(),
-
-            dir_stack,
-
-            matches: match_list.is_empty(),
-            matches_stack: Vec::new(),
-
-            read_dir,
-            read_dir_stack: Vec::new(),
-
-            extractor,
-
-            catalog,
-            match_list,
-            accessor,
-        })
-    }
-
-    pub async fn extract(&mut self) -> Result<(), Error> {
-        loop {
-            let entry = match self.read_dir.next() {
-                Some(entry) => entry,
-                None => match self.handle_end_of_directory()? {
-                    ControlFlow::Break(()) => break, // done with root directory
-                    ControlFlow::Continue(()) => continue,
-                },
-            };
-
-            self.path.truncate(self.path_len);
-            if !entry.name.starts_with(b"/") {
-                self.path.reserve(entry.name.len() + 1);
-                self.path.push(b'/');
-            }
-            self.path.extend(&entry.name);
-
-            self.extractor.set_path(OsString::from_vec(self.path.clone()));
-            self.handle_entry(entry).await?;
-        }
-
-        Ok(())
-    }
-
-    fn handle_end_of_directory(&mut self) -> Result<ControlFlow<()>, Error> {
-        // go up a directory:
-        self.read_dir = match self.read_dir_stack.pop() {
-            Some(r) => r,
-            None => return Ok(ControlFlow::Break(())), // out of root directory
-        };
-
-        self.matches = self
-            .matches_stack
-            .pop()
-            .ok_or_else(|| format_err!("internal iterator error (matches_stack)"))?;
-
-        self.dir_stack
-            .pop()
-            .ok_or_else(|| format_err!("internal iterator error (dir_stack)"))?;
-
-        self.path_len = self
-            .path_len_stack
-            .pop()
-            .ok_or_else(|| format_err!("internal iterator error (path_len_stack)"))?;
-
-        self.extractor.leave_directory()?;
-
-        Ok(ControlFlow::Continue(()))
-    }
-
-    async fn handle_new_directory(
-        &mut self,
-        entry: catalog::DirEntry,
-        match_result: Option<MatchType>,
-    ) -> Result<(), Error> {
-        // enter a new directory:
-        self.read_dir_stack.push(mem::replace(
-            &mut self.read_dir,
-            self.catalog.read_dir(&entry)?.into_iter(),
-        ));
-        self.matches_stack.push(self.matches);
-        self.dir_stack.push(PathStackEntry::new(entry));
-        self.path_len_stack.push(self.path_len);
-        self.path_len = self.path.len();
-
-        Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
-        let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
-        let dir_meta = dir_pxar.entry().metadata().clone();
-        let create = self.matches && match_result != Some(MatchType::Exclude);
-        self.extractor.enter_directory(dir_pxar.file_name().to_os_string(), dir_meta, create)?;
-
-        Ok(())
-    }
-
-    pub async fn handle_entry(&mut self, entry: catalog::DirEntry) -> Result<(), Error> {
-        let match_result = self.match_list.matches(&self.path, entry.get_file_mode());
-        let did_match = match match_result {
-            Some(MatchType::Include) => true,
-            Some(MatchType::Exclude) => false,
-            None => self.matches,
-        };
-
-        match (did_match, &entry.attr) {
-            (_, DirEntryAttribute::Directory { .. }) => {
-                self.handle_new_directory(entry, match_result).await?;
-            }
-            (true, DirEntryAttribute::File { .. }) => {
-                self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
-                self.extract_file(file).await?;
-                self.dir_stack.pop();
-            }
-            (true, DirEntryAttribute::Symlink)
-            | (true, DirEntryAttribute::BlockDevice)
-            | (true, DirEntryAttribute::CharDevice)
-            | (true, DirEntryAttribute::Fifo)
-            | (true, DirEntryAttribute::Socket)
-            | (true, DirEntryAttribute::Hardlink) => {
-                let attr = entry.attr.clone();
-                self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
-                self.extract_special(file, attr).await?;
-                self.dir_stack.pop();
-            }
-            (false, _) => (), // skip
-        }
-
-        Ok(())
-    }
-
-    fn path(&self) -> &OsStr {
-        OsStr::from_bytes(&self.path)
-    }
-
-    async fn extract_file(&mut self, entry: FileEntry) -> Result<(), Error> {
-        match entry.kind() {
-            pxar::EntryKind::File { size, .. } => {
-                let file_name = CString::new(entry.file_name().as_bytes())?;
-                let mut contents = entry.contents().await?;
-                self.extractor.async_extract_file(
-                    &file_name,
-                    entry.metadata(),
-                    *size,
-                    &mut contents,
-                )
-                .await
-            }
-            _ => {
-                bail!(
-                    "catalog file {:?} not a regular file in the archive",
-                    self.path()
-                );
-            }
-        }
-    }
-
-    async fn extract_special(
-        &mut self,
-        entry: FileEntry,
-        catalog_attr: DirEntryAttribute,
-    ) -> Result<(), Error> {
-        let file_name = CString::new(entry.file_name().as_bytes())?;
-        match (catalog_attr, entry.kind()) {
-            (DirEntryAttribute::Symlink, pxar::EntryKind::Symlink(symlink)) => {
-                block_in_place(|| self.extractor.extract_symlink(
-                    &file_name,
-                    entry.metadata(),
-                    symlink.as_os_str(),
-                ))
-            }
-            (DirEntryAttribute::Symlink, _) => {
-                bail!(
-                    "catalog symlink {:?} not a symlink in the archive",
-                    self.path()
-                );
-            }
-
-            (DirEntryAttribute::Hardlink, pxar::EntryKind::Hardlink(hardlink)) => {
-                block_in_place(|| self.extractor.extract_hardlink(&file_name, hardlink.as_os_str()))
-            }
-            (DirEntryAttribute::Hardlink, _) => {
-                bail!(
-                    "catalog hardlink {:?} not a hardlink in the archive",
-                    self.path()
-                );
-            }
-
-            (ref attr, pxar::EntryKind::Device(device)) => {
-                self.extract_device(attr.clone(), &file_name, device, entry.metadata())
-            }
-
-            (DirEntryAttribute::Fifo, pxar::EntryKind::Fifo) => {
-                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
-            }
-            (DirEntryAttribute::Fifo, _) => {
-                bail!("catalog fifo {:?} not a fifo in the archive", self.path());
-            }
-
-            (DirEntryAttribute::Socket, pxar::EntryKind::Socket) => {
-                block_in_place(|| self.extractor.extract_special(&file_name, entry.metadata(), 0))
-            }
-            (DirEntryAttribute::Socket, _) => {
-                bail!(
-                    "catalog socket {:?} not a socket in the archive",
-                    self.path()
-                );
-            }
-
-            attr => bail!("unhandled file type {:?} for {:?}", attr, self.path()),
-        }
-    }
-
-    fn extract_device(
-        &mut self,
-        attr: DirEntryAttribute,
-        file_name: &CStr,
-        device: &pxar::format::Device,
-        metadata: &Metadata,
-    ) -> Result<(), Error> {
-        match attr {
-            DirEntryAttribute::BlockDevice => {
-                if !metadata.stat.is_blockdev() {
-                    bail!(
-                        "catalog block device {:?} is not a block device in the archive",
-                        self.path(),
-                    );
-                }
-            }
-            DirEntryAttribute::CharDevice => {
-                if !metadata.stat.is_chardev() {
-                    bail!(
-                        "catalog character device {:?} is not a character device in the archive",
-                        self.path(),
-                    );
-                }
-            }
-            _ => {
-                bail!(
-                    "unexpected file type for {:?} in the catalog, \
-                     which is a device special file in the archive",
-                    self.path(),
-                );
-            }
-        }
-        block_in_place(|| self.extractor.extract_special(file_name, metadata, device.to_dev_t()))
-    }
-}
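Stepping back, the `ExtractorState` loop above replaces recursion with an explicit stack: descending into a directory parks the current iterator on `read_dir_stack`, and `ControlFlow` signals when the walk leaves the root. A reduced sketch of that shape:

    use std::ops::ControlFlow;

    enum Node {
        File(&'static str),
        Dir(&'static str, Vec<Node>),
    }

    fn ascend(
        cur: &mut std::vec::IntoIter<Node>,
        stack: &mut Vec<std::vec::IntoIter<Node>>,
    ) -> ControlFlow<()> {
        match stack.pop() {
            Some(parent) => {
                *cur = parent;
                ControlFlow::Continue(())
            }
            None => ControlFlow::Break(()), // left the root directory
        }
    }

    fn main() {
        let root = vec![
            Node::File("a"),
            Node::Dir("d", vec![Node::File("d/b")]),
            Node::File("c"),
        ];
        let mut cur = root.into_iter();
        let mut stack = Vec::new();
        loop {
            match cur.next() {
                Some(Node::File(name)) => println!("file {}", name),
                Some(Node::Dir(name, children)) => {
                    println!("dir  {}", name);
                    // descend: park the current iterator, continue in the child
                    stack.push(std::mem::replace(&mut cur, children.into_iter()));
                }
                None => match ascend(&mut cur, &mut stack) {
                    ControlFlow::Break(()) => break,
                    ControlFlow::Continue(()) => continue,
                },
            }
        }
    }
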
index 4161b402a22362392cbb96bf5a1cbd1c2a386f67..2f2c84262b65046e149e9afd4c4d2a814a59ea21 100644 (file)
@@ -5,20 +5,6 @@ use anyhow::{bail, Error};
 // Note: .pcat1 => Proxmox Catalog Format version 1
 pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
 
-#[macro_export]
-macro_rules! PROXMOX_BACKUP_PROTOCOL_ID_V1 {
-    () => {
-        "proxmox-backup-protocol-v1"
-    };
-}
-
-#[macro_export]
-macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {
-    () => {
-        "proxmox-backup-reader-protocol-v1"
-    };
-}
-
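(The protocol IDs removed above are macros rather than `const &str`s so they can expand to string literals inside `concat!`, which accepts only literals. A sketch with a hypothetical ID:)

    macro_rules! PROTO_ID {
        () => {
            "example-protocol-v1"
        };
    }

    fn main() {
        const UPGRADE_HEADER: &str = concat!("UPGRADE: ", PROTO_ID!());
        assert_eq!(UPGRADE_HEADER, "UPGRADE: example-protocol-v1");
    }
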
 /// Unix system user used by proxmox-backup-proxy
 pub const BACKUP_USER_NAME: &str = "backup";
 /// Unix system group used by proxmox-backup-proxy
@@ -102,9 +88,5 @@ pub use datastore::*;
 mod verify;
 pub use verify::*;
 
-// Move to client
-mod catalog_shell;
-pub use catalog_shell::*;
-
 mod cached_chunk_reader;
 pub use cached_chunk_reader::*;
index 7dcb9dc16504d5c863f363ac9901ddb0584aa319..37ce36ea9f794640fae9e39288c15dadafd71b82 100644 (file)
@@ -3,7 +3,7 @@ use anyhow::{Error};
 use proxmox::api::format::*;
 use proxmox::api::cli::*;
 
-use proxmox_backup::backup::catalog_shell_cli;
+use pbs_client::catalog_shell::catalog_shell_cli;
 
 fn main() -> Result<(), Error> {
 
index 3a8a42a092ca40cf2f796d18544c00fe8af3fc56..11ad5417dcd581a8dfccb4536731e08de193b153 100644 (file)
@@ -27,38 +27,59 @@ use proxmox::{
 };
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
+use pbs_api_types::CryptMode;
+use pbs_client::{
+    BACKUP_SOURCE_SCHEMA,
+    BackupReader,
+    BackupRepository,
+    BackupSpecificationType,
+    BackupStats,
+    BackupWriter,
+    HttpClient,
+    PxarBackupStream,
+    RemoteChunkReader,
+    UploadOptions,
+    delete_ticket_info,
+    parse_backup_specification,
+    view_task_result,
+};
+use pbs_client::catalog_shell::Shell;
+use pbs_client::tools::{
+    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
+    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
+    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
+    extract_repository_from_value,
+    key_source::{
+        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
+        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
+    },
+    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
+};
+use pbs_datastore::CryptConfig;
+use pbs_datastore::backup_info::{BackupDir, BackupGroup};
 use pbs_datastore::catalog::BackupCatalogWriter;
+use pbs_datastore::dynamic_index::DynamicIndexReader;
+use pbs_datastore::fixed_index::FixedIndexReader;
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::manifest::{MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, archive_type};
+use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_tools::sync::StdChannelWriter;
 use pbs_tools::tokio::TokioWriterAdapter;
 
 use proxmox_backup::api2::types::*;
 use proxmox_backup::api2::version;
-use proxmox_backup::client::*;
 use proxmox_backup::backup::{
-    archive_type,
     decrypt_key,
     rsa_encrypt_key_config,
     verify_chunk_size,
-    ArchiveType,
-    AsyncReadChunk,
-    BackupDir,
-    BackupGroup,
-    BackupManifest,
     BufferedDynamicReader,
     CATALOG_NAME,
     CatalogReader,
     CatalogWriter,
     ChunkStream,
-    CryptConfig,
-    CryptMode,
-    DynamicIndexReader,
     ENCRYPTED_KEY_BLOB_NAME,
     FixedChunkStream,
-    FixedIndexReader,
     KeyConfig,
-    IndexFile,
-    MANIFEST_BLOB_NAME,
-    Shell,
     PruneOptions,
 };
 use proxmox_backup::tools;
@@ -66,19 +87,6 @@ use proxmox_backup::tools;
 mod proxmox_backup_client;
 use proxmox_backup_client::*;
 
-pub mod proxmox_client_tools;
-use proxmox_client_tools::{
-    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
-    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
-    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
-    extract_repository_from_value,
-    key_source::{
-        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
-        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
-    },
-    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
-};
-
 fn record_repository(repo: &BackupRepository) {
 
     let base = match BaseDirectories::with_prefix("proxmox-backup") {
@@ -172,7 +180,7 @@ async fn backup_directory<P: AsRef<Path>>(
     archive_name: &str,
     chunk_size: Option<usize>,
     catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
-    pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
+    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
     upload_options: UploadOptions,
 ) -> Result<BackupStats, Error> {
 
@@ -589,7 +597,7 @@ fn spawn_catalog_upload(
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
-               default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
+               default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
@@ -633,7 +641,7 @@ async fn create_backup(
     let include_dev = param["include-dev"].as_array();
 
     let entries_max = param["entries-max"].as_u64()
-        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
+        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);
 
     let empty = Vec::new();
     let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
@@ -856,7 +864,7 @@ async fn create_backup(
                 println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
 
-                let pxar_options = proxmox_backup::pxar::PxarCreateOptions {
+                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                     device_set: devices.clone(),
                     patterns: pattern_list.clone(),
                     entries_max: entries_max as usize,
@@ -1168,7 +1176,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
 
         let mut reader = BufferedDynamicReader::new(index, chunk_reader);
 
-        let options = proxmox_backup::pxar::PxarExtractOptions {
+        let options = pbs_client::pxar::PxarExtractOptions {
             match_list: &[],
             extract_match_default: true,
             allow_existing_dirs,
@@ -1176,10 +1184,10 @@ async fn restore(param: Value) -> Result<Value, Error> {
         };
 
         if let Some(target) = target {
-            proxmox_backup::pxar::extract_archive(
+            pbs_client::pxar::extract_archive(
                 pxar::decoder::Decoder::from_std(reader)?,
                 Path::new(target),
-                proxmox_backup::pxar::Flags::DEFAULT,
+                pbs_client::pxar::Flags::DEFAULT,
                 |path| {
                     if verbose {
                         println!("{:?}", path);
@@ -1377,7 +1385,6 @@ async fn status(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }
 
-use proxmox_backup::client::RemoteChunkReader;
 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
 /// async use!
 ///
@@ -1424,13 +1431,13 @@ fn main() {
         .arg_param(&["backupspec"])
         .completion_cb("repository", complete_repository)
         .completion_cb("backupspec", complete_backup_source)
-        .completion_cb("keyfile", tools::complete_file_name)
-        .completion_cb("master-pubkey-file", tools::complete_file_name)
+        .completion_cb("keyfile", pbs_tools::fs::complete_file_name)
+        .completion_cb("master-pubkey-file", pbs_tools::fs::complete_file_name)
         .completion_cb("chunk-size", complete_chunk_size);
 
     let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
         .completion_cb("repository", complete_repository)
-        .completion_cb("keyfile", tools::complete_file_name);
+        .completion_cb("keyfile", pbs_tools::fs::complete_file_name);
 
     let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
         .completion_cb("repository", complete_repository);
@@ -1443,7 +1450,7 @@ fn main() {
         .completion_cb("repository", complete_repository)
         .completion_cb("snapshot", complete_group_or_snapshot)
         .completion_cb("archive-name", complete_archive_name)
-        .completion_cb("target", tools::complete_file_name);
+        .completion_cb("target", pbs_tools::fs::complete_file_name);
 
     let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
         .arg_param(&["group"])
index 5c70d8c7816e05312ab03c44a9d39b1b1093402d..ca3ac1ef517eea5bb3371f6bcaf326e4f8768d72 100644 (file)
@@ -6,12 +6,12 @@ use serde_json::{json, Value};
 
 use proxmox::api::{api, cli::*, RpcEnvironment};
 
+use pbs_client::{connect_to_localhost, display_task_log, view_task_result};
 use pbs_tools::percent_encoding::percent_encode_component;
 
 use proxmox_backup::tools;
 use proxmox_backup::config;
 use proxmox_backup::api2::{self, types::* };
-use proxmox_backup::client::*;
 use proxmox_backup::server::wait_for_local_worker;
 
 mod proxmox_backup_manager;
index 17033cf565e1d8f5fab913cea064e48c583c7a72..17c1506a3906c817cf66532910c051c91c6dcb58 100644 (file)
@@ -16,18 +16,10 @@ use proxmox::api::{
 use pxar::accessor::aio::Accessor;
 use pxar::decoder::aio::Decoder;
 
-use proxmox_backup::api2::{helpers, types::ArchiveEntry};
-use proxmox_backup::backup::{
-    decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
-    DirEntryAttribute, IndexFile, LocalDynamicReadAt, CATALOG_NAME,
-};
-use proxmox_backup::client::{BackupReader, RemoteChunkReader};
-use proxmox_backup::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
-use proxmox_backup::tools;
-
-// use "pub" so rust doesn't complain about "unused" functions in the module
-pub mod proxmox_client_tools;
-use proxmox_client_tools::{
+use pbs_datastore::index::IndexFile;
+use pbs_client::{BackupReader, RemoteChunkReader};
+use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
+use pbs_client::tools::{
     complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
     key_source::{
         crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
@@ -36,6 +28,13 @@ use proxmox_client_tools::{
     REPO_URL_SCHEMA,
 };
 
+use proxmox_backup::api2::{helpers, types::ArchiveEntry};
+use proxmox_backup::backup::{
+    decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
+    DirEntryAttribute, LocalDynamicReadAt, CATALOG_NAME,
+};
+use proxmox_backup::tools;
+
 mod proxmox_file_restore;
 use proxmox_file_restore::*;
 
@@ -456,7 +455,7 @@ fn main() {
         .arg_param(&["snapshot", "path", "target"])
         .completion_cb("repository", complete_repository)
         .completion_cb("snapshot", complete_group_or_snapshot)
-        .completion_cb("target", tools::complete_file_name);
+        .completion_cb("target", pbs_tools::fs::complete_file_name);
 
     let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
     let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
@@ -476,3 +475,15 @@ fn main() {
         Some(|future| pbs_runtime::main(future)),
     );
 }
+
+/// Returns a runtime dir owned by the current user.
+/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
+/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
+pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
+    let uid = nix::unistd::Uid::current();
+    let mut path: std::path::PathBuf = pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
+    path.push(uid.to_string());
+    tools::create_run_dir()?;
+    std::fs::create_dir_all(&path)?;
+    Ok(path)
+}
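(A hypothetical caller of the helper added above, to show the intended shape: per-user runtime files live below the returned directory.)

    /// Hypothetical usage sketch, not part of the commit.
    fn control_socket_path() -> Result<std::path::PathBuf, Error> {
        let mut path = get_user_run_dir()?;
        path.push("restore.sock");
        Ok(path)
    }
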
index 3e59ccfad18dab134a79713ac2d8c8ee1ddb789d..ebe8cef52568c022610cccaf0cb8ff892eac68dd 100644 (file)
@@ -13,8 +13,9 @@ use std::sync::{Arc, Mutex};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 
+use pbs_client::DEFAULT_VSOCK_PORT;
+
 use proxmox::api::RpcEnvironmentType;
-use proxmox_backup::client::DEFAULT_VSOCK_PORT;
 use proxmox_backup::server::{rest::*, ApiConfig};
 
 mod proxmox_restore_daemon;
index e2e887815e4123996d2ca4bb420e813a3b0ac919..ae3fb4f6d5e2ffdd07771c95268fdd40ef135d99 100644 (file)
@@ -14,6 +14,7 @@ use proxmox::{
     },
 };
 
+use pbs_client::{connect_to_localhost, view_task_result};
 use pbs_tools::format::{
     HumanByte,
     render_epoch,
@@ -21,10 +22,6 @@ use pbs_tools::format::{
 };
 
 use proxmox_backup::{
-    client::{
-        connect_to_localhost,
-        view_task_result,
-    },
     api2::{
         self,
         types::{
index c1673701d1d04a68f3c7b07e025a566c9c4220e2..228464d27dbb5cf9a17380acc2f37643eed4b7a5 100644 (file)
@@ -18,6 +18,9 @@ use proxmox::api::{
     router::ReturnType,
 };
 
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_client::{BackupRepository, BackupWriter};
+
 use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
@@ -25,8 +28,6 @@ use proxmox_backup::backup::{
     DataChunkBuilder,
 };
 
-use proxmox_backup::client::*;
-
 use crate::{
     KEYFILE_SCHEMA, REPO_URL_SCHEMA,
     extract_repository_from_value,
@@ -34,8 +35,6 @@ use crate::{
     connect,
 };
 
-use crate::proxmox_client_tools::key_source::get_encryption_key_password;
-
 #[api()]
 #[derive(Copy, Clone, Serialize)]
 /// Speed test result
index f4b0a1d566873351933837a786e3a286a3529dfa..ce0fe80996e5f48ec8c29c5bd87439294c56d8eb 100644 (file)
@@ -7,9 +7,10 @@ use serde_json::Value;
 
 use proxmox::api::{api, cli::*};
 
-use proxmox_backup::tools;
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_client::{BackupReader, RemoteChunkReader};
 
-use proxmox_backup::client::*;
+use proxmox_backup::tools;
 
 use crate::{
     REPO_URL_SCHEMA,
@@ -37,8 +38,6 @@ use crate::{
     Shell,
 };
 
-use crate::proxmox_client_tools::key_source::get_encryption_key_password;
-
 #[api(
    input: {
         properties: {
@@ -219,9 +218,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
     let archive_size = reader.archive_size();
-    let reader: proxmox_backup::pxar::fuse::Reader =
+    let reader: pbs_client::pxar::fuse::Reader =
         Arc::new(BufferedDynamicReadAt::new(reader));
-    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
+    let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
 
     client.download(CATALOG_NAME, &mut tmpfile).await?;
     let index = DynamicIndexReader::new(tmpfile)
index 49afcccb8854873c0f746bc9e6cfdcffd23e389f..7ca028bc230c7454c71bbe684710b6d42483bbe6 100644 (file)
@@ -14,19 +14,18 @@ use proxmox::sys::linux::tty;
 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
 
 use pbs_datastore::{KeyInfo, Kdf};
+use pbs_client::tools::key_source::{
+    find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
+    place_default_encryption_key, place_default_master_pubkey,
+};
 
 use proxmox_backup::{
     api2::types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA},
     backup::{rsa_decrypt_key_config, KeyConfig},
-    tools,
     tools::paperkey::{generate_paper_key, PaperkeyFormat},
 };
 
-use crate::proxmox_client_tools::key_source::{
-    find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
-    place_default_encryption_key, place_default_master_pubkey,
-};
-
 #[api(
     input: {
         properties: {
@@ -458,35 +457,35 @@ fn paper_key(
 pub fn cli() -> CliCommandMap {
     let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     let key_import_with_master_key_cmd_def = CliCommand::new(&API_METHOD_IMPORT_WITH_MASTER_KEY)
         .arg_param(&["master-keyfile"])
-        .completion_cb("master-keyfile", tools::complete_file_name)
+        .completion_cb("master-keyfile", pbs_tools::fs::complete_file_name)
         .arg_param(&["encrypted-keyfile"])
-        .completion_cb("encrypted-keyfile", tools::complete_file_name)
+        .completion_cb("encrypted-keyfile", pbs_tools::fs::complete_file_name)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
     let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
     let key_show_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_SHOW_MASTER_PUBKEY)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     let key_show_cmd_def = CliCommand::new(&API_METHOD_SHOW_KEY)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
         .arg_param(&["path"])
-        .completion_cb("path", tools::complete_file_name);
+        .completion_cb("path", pbs_tools::fs::complete_file_name);
 
     CliCommandMap::new()
         .insert("create", key_create_cmd_def)
index 21f78c32474f459183cabcaf9f8c287678adb540..44ba09a89cc9b58425ba165abacf1b48755a3d8b 100644 (file)
@@ -17,6 +17,9 @@ use proxmox::{sortable, identity};
 use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
 use proxmox::tools::fd::Fd;
 
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_client::{BackupReader, RemoteChunkReader};
+
 use proxmox_backup::tools;
 use proxmox_backup::backup::{
     load_and_decrypt_key,
@@ -28,8 +31,6 @@ use proxmox_backup::backup::{
     CachedChunkReader,
 };
 
-use proxmox_backup::client::*;
-
 use crate::{
     REPO_URL_SCHEMA,
     extract_repository_from_value,
@@ -43,8 +44,6 @@ use crate::{
     BufferedDynamicReadAt,
 };
 
-use crate::proxmox_client_tools::key_source::get_encryption_key_password;
-
 #[sortable]
 const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
     &ApiHandler::Sync(&mount),
@@ -98,7 +97,7 @@ pub fn mount_cmd_def() -> CliCommand {
         .completion_cb("repository", complete_repository)
         .completion_cb("snapshot", complete_group_or_snapshot)
         .completion_cb("archive-name", complete_pxar_archive_name)
-        .completion_cb("target", tools::complete_file_name)
+        .completion_cb("target", pbs_tools::fs::complete_file_name)
 }
 
 pub fn map_cmd_def() -> CliCommand {
@@ -257,11 +256,11 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
         let archive_size = reader.archive_size();
-        let reader: proxmox_backup::pxar::fuse::Reader =
+        let reader: pbs_client::pxar::fuse::Reader =
             Arc::new(BufferedDynamicReadAt::new(reader));
-        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
+        let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
 
-        let session = proxmox_backup::pxar::fuse::Session::mount(
+        let session = pbs_client::pxar::fuse::Session::mount(
             decoder,
             &options,
             false,
index 7deb664755f990346a7a61831e09ef8ca82e5b4a..ae92e68865defe7c6caa08c08fae91e40b2470a3 100644 (file)
@@ -8,6 +8,8 @@ use proxmox::{
     tools::fs::file_get_contents,
 };
 
+use pbs_client::tools::key_source::get_encryption_key_password;
+
 use proxmox_backup::{
     tools,
     api2::types::*,
@@ -35,8 +37,6 @@ use crate::{
     record_repository,
 };
 
-use crate::proxmox_client_tools::key_source::get_encryption_key_password;
-
 #[api(
    input: {
         properties: {
@@ -412,8 +412,8 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
             CliCommand::new(&API_METHOD_UPLOAD_LOG)
                 .arg_param(&["snapshot", "logfile"])
                 .completion_cb("snapshot", complete_backup_snapshot)
-                .completion_cb("logfile", tools::complete_file_name)
-                .completion_cb("keyfile", tools::complete_file_name)
+                .completion_cb("logfile", pbs_tools::fs::complete_file_name)
+                .completion_cb("keyfile", pbs_tools::fs::complete_file_name)
                 .completion_cb("repository", complete_repository)
         )
 }
index e6fcc74ea94657d417fcd39135ba6d0998191109..a65d5a3ba646a082c283f1726671a5f15e657edc 100644 (file)
@@ -4,10 +4,10 @@ use serde_json::{json, Value};
 use proxmox::api::{api, cli::*};
 
 use pbs_tools::percent_encoding::percent_encode_component;
+use pbs_client::display_task_log;
 
 use proxmox_backup::tools;
 
-use proxmox_backup::client::*;
 use proxmox_backup::api2::types::UPID_SCHEMA;
 
 use crate::{
index 7cbd88059d931dbcaaa4e01614281b0b0d1e5fea..a2436786909cbf032beb5595fbe765f26ec25c1c 100644 (file)
@@ -3,12 +3,10 @@ use serde_json::Value;
 
 use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
 
+use pbs_client::{connect_to_localhost, view_task_result};
+
 use proxmox_backup::config;
 use proxmox_backup::api2::{self, types::* };
-use proxmox_backup::client::{
-    connect_to_localhost,
-    view_task_result,
-};
 use proxmox_backup::config::datastore::DIR_NAME_SCHEMA;
 
 #[api(
diff --git a/src/bin/proxmox_client_tools/key_source.rs b/src/bin/proxmox_client_tools/key_source.rs
deleted file mode 100644 (file)
index fee0072..0000000
+++ /dev/null
@@ -1,585 +0,0 @@
-use std::convert::TryFrom;
-use std::path::PathBuf;
-use std::os::unix::io::{FromRawFd, RawFd};
-use std::io::Read;
-
-use anyhow::{bail, format_err, Error};
-use serde_json::Value;
-
-use proxmox::api::schema::*;
-use proxmox::sys::linux::tty;
-use proxmox::tools::fs::file_get_contents;
-
-use proxmox_backup::backup::CryptMode;
-
-pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
-pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
-
-pub const KEYFILE_SCHEMA: Schema =
-    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
-        .schema();
-
-pub const KEYFD_SCHEMA: Schema =
-    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
-        .minimum(0)
-        .schema();
-
-pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
-    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
-    .schema();
-
-pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
-    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
-        .minimum(0)
-        .schema();
-
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum KeySource {
-    DefaultKey,
-    Fd,
-    Path(String),
-}
-
-pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
-    match source {
-        KeySource::DefaultKey => format!("Using default {} key..", key_type),
-        KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
-        KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
-    }
-}
-
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct KeyWithSource {
-    pub source: KeySource,
-    pub key: Vec<u8>,
-}
-
-impl KeyWithSource {
-    pub fn from_fd(key: Vec<u8>) -> Self {
-        Self {
-            source: KeySource::Fd,
-            key,
-        }
-    }
-
-    pub fn from_default(key: Vec<u8>) -> Self {
-        Self {
-            source: KeySource::DefaultKey,
-            key,
-        }
-    }
-
-    pub fn from_path(path: String, key: Vec<u8>) -> Self {
-        Self {
-            source: KeySource::Path(path),
-            key,
-        }
-    }
-}
-
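(A tiny usage sketch of the pair defined above, with made-up data: the source travels with the raw key bytes so later messages can state where a key came from.)

    fn main() {
        let key = KeyWithSource::from_path("/etc/pbs/enc.key".to_string(), vec![0u8; 32]);
        println!("{}", format_key_source(&key.source, "encryption"));
        // prints: Using encryption key from '/etc/pbs/enc.key'..
    }
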
-#[derive(Debug, Eq, PartialEq)]
-pub struct CryptoParams {
-    pub mode: CryptMode,
-    pub enc_key: Option<KeyWithSource>,
-    // FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
-    pub master_pubkey: Option<KeyWithSource>,
-}
-
-pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
-    do_crypto_parameters(param, false)
-}
-
-pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
-    do_crypto_parameters(param, true)
-}
-
-fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
-    let keyfile = match param.get("keyfile") {
-        Some(Value::String(keyfile)) => Some(keyfile),
-        Some(_) => bail!("bad --keyfile parameter type"),
-        None => None,
-    };
-
-    let key_fd = match param.get("keyfd") {
-        Some(Value::Number(key_fd)) => Some(
-            RawFd::try_from(key_fd
-                .as_i64()
-                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
-            )
-            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
-        ),
-        Some(_) => bail!("bad --keyfd parameter type"),
-        None => None,
-    };
-
-    let master_pubkey_file = match param.get("master-pubkey-file") {
-        Some(Value::String(keyfile)) => Some(keyfile),
-        Some(_) => bail!("bad --master-pubkey-file parameter type"),
-        None => None,
-    };
-
-    let master_pubkey_fd = match param.get("master-pubkey-fd") {
-        Some(Value::Number(key_fd)) => Some(
-            RawFd::try_from(key_fd
-                .as_i64()
-                .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
-            )
-            .map_err(|err| format_err!("bad master public key fd: {:?}: {}", key_fd, err))?
-        ),
-        Some(_) => bail!("bad --master-pubkey-fd parameter type"),
-        None => None,
-    };
-
-    let mode: Option<CryptMode> = match param.get("crypt-mode") {
-        Some(mode) => Some(serde_json::from_value(mode.clone())?),
-        None => None,
-    };
-
-    let key = match (keyfile, key_fd) {
-        (None, None) => None,
-        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
-        (Some(keyfile), None) => Some(KeyWithSource::from_path(
-            keyfile.clone(),
-            file_get_contents(keyfile)?,
-        )),
-        (None, Some(fd)) => {
-            let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
-            let mut data = Vec::new();
-            let _len: usize = input.read_to_end(&mut data).map_err(|err| {
-                format_err!("error reading encryption key from fd {}: {}", fd, err)
-            })?;
-            if keep_keyfd_open {
-                // don't close fd if requested, and try to reset seek position
-                std::mem::forget(input);
-                unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
-            }
-            Some(KeyWithSource::from_fd(data))
-        }
-    };
-
-    let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
-        (None, None) => None,
-        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
-        (Some(keyfile), None) => Some(KeyWithSource::from_path(
-            keyfile.clone(),
-            file_get_contents(keyfile)?,
-        )),
-        (None, Some(fd)) => {
-            let input = unsafe { std::fs::File::from_raw_fd(fd) };
-            let mut data = Vec::new();
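-            // move the file into a temporary so it is dropped (closing the fd) right after reading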
-            let _len: usize = { input }
-                .read_to_end(&mut data)
-                .map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
-            Some(KeyWithSource::from_fd(data))
-        }
-    };
-
-    let res = match mode {
-        // no crypt mode, enable encryption if keys are available
-        None => match (key, master_pubkey) {
-            // only default keys if available
-            (None, None) => match read_optional_default_encryption_key()? {
-                None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
-                enc_key => {
-                    let master_pubkey = read_optional_default_master_pubkey()?;
-                    CryptoParams {
-                        mode: CryptMode::Encrypt,
-                        enc_key,
-                        master_pubkey,
-                    }
-                },
-            },
-
-            // explicit master key, default enc key needed
-            (None, master_pubkey) => match read_optional_default_encryption_key()? {
-                None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
-                enc_key => {
-                    CryptoParams {
-                        mode: CryptMode::Encrypt,
-                        enc_key,
-                        master_pubkey,
-                    }
-                },
-            },
-
-            // explicit keyfile, maybe default master key
-            (enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },
-
-            // explicit keyfile and master key
-            (enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
-        },
-
-        // explicitly disabled encryption
-        Some(CryptMode::None) => match (key, master_pubkey) {
-            // no keys => OK, no encryption
-            (None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
-
-            // --keyfile and --crypt-mode=none
-            (Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
-
-            // --master-pubkey-file and --crypt-mode=none
-            (_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
-        },
-
-        // explicitly enabled encryption
-        Some(mode) => match (key, master_pubkey) {
-            // no key, maybe master key
-            (None, master_pubkey) => match read_optional_default_encryption_key()? {
-                None => bail!("--crypt-mode without --keyfile and no default key file available"),
-                enc_key => {
-                    eprintln!("Encrypting with default encryption key!");
-                    let master_pubkey = match master_pubkey {
-                        None => read_optional_default_master_pubkey()?,
-                        master_pubkey => master_pubkey,
-                    };
-
-                    CryptoParams {
-                        mode,
-                        enc_key,
-                        master_pubkey,
-                    }
-                },
-            },
-
-            // --keyfile and --crypt-mode other than none
-            (enc_key, master_pubkey) => {
-                let master_pubkey = match master_pubkey {
-                    None => read_optional_default_master_pubkey()?,
-                    master_pubkey => master_pubkey,
-                };
-
-                CryptoParams { mode, enc_key, master_pubkey }
-            },
-        },
-    };
-
-    Ok(res)
-}
-
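Seen from the caller's side, the decision table above boils down to a handful of invocations; a sketch of the explicit-keyfile case, assuming this module's imports (the path is illustrative):

    fn sketch() -> Result<(), Error> {
        // hypothetical CLI parameters: --crypt-mode=encrypt --keyfile=/root/enc.key
        let params = crypto_parameters(&serde_json::json!({
            "crypt-mode": "encrypt",
            "keyfile": "/root/enc.key",
        }))?;
        assert_eq!(params.mode, CryptMode::Encrypt);
        assert!(params.enc_key.is_some());
        Ok(())
    }
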
-pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
-    super::find_xdg_file(
-        DEFAULT_MASTER_PUBKEY_FILE_NAME,
-        "default master public key file",
-    )
-}
-
-pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
-    super::place_xdg_file(
-        DEFAULT_MASTER_PUBKEY_FILE_NAME,
-        "default master public key file",
-    )
-}
-
-pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
-    super::find_xdg_file(
-        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
-        "default encryption key file",
-    )
-}
-
-pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
-    super::place_xdg_file(
-        DEFAULT_ENCRYPTION_KEY_FILE_NAME,
-        "default encryption key file",
-    )
-}
-
-#[cfg(not(test))]
-pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
-    find_default_encryption_key()?
-        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
-        .transpose()
-}
-
-#[cfg(not(test))]
-pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
-    find_default_master_pubkey()?
-        .map(|path| file_get_contents(path).map(KeyWithSource::from_default))
-        .transpose()
-}
-
-#[cfg(test)]
-static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
-
-#[cfg(test)]
-pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
-    // not safe when multiple concurrent test cases end up here!
-    unsafe {
-        match &TEST_DEFAULT_ENCRYPTION_KEY {
-            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
-            Ok(None) => Ok(None),
-            Err(_) => bail!("test error"),
-        }
-    }
-}
-
-#[cfg(test)]
-// not safe when multiple concurrent test cases end up here!
-pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
-    TEST_DEFAULT_ENCRYPTION_KEY = value;
-}
-
-#[cfg(test)]
-static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
-
-#[cfg(test)]
-pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
-    // not safe when multiple concurrent test cases end up here!
-    unsafe {
-        match &TEST_DEFAULT_MASTER_PUBKEY {
-            Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
-            Ok(None) => Ok(None),
-            Err(_) => bail!("test error"),
-        }
-    }
-}
-
-#[cfg(test)]
-// not safe when multiple concurrent test cases end up here!
-pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
-    TEST_DEFAULT_MASTER_PUBKEY = value;
-}
-
-pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
-    // fixme: implement other input methods
-
-    use std::env::VarError::*;
-    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
-        Ok(p) => return Ok(p.as_bytes().to_vec()),
-        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
-        Err(NotPresent) => {
-            // Try another method
-        }
-    }
-
-    // If we're on a TTY, query the user for a password
-    if tty::stdin_isatty() {
-        return Ok(tty::read_password("Encryption Key Password: ")?);
-    }
-
-    bail!("no password input mechanism available");
-}
-
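For non-interactive callers the environment variable is the only input mechanism; a sketch, with an example password value:

    fn sketch() -> Result<(), Error> {
        // real callers export PBS_ENCRYPTION_PASSWORD before invoking the client
        std::env::set_var("PBS_ENCRYPTION_PASSWORD", "hunter2");
        assert_eq!(get_encryption_key_password()?, b"hunter2".to_vec());
        Ok(())
    }
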
-#[test]
-// WARNING: there must only be one test for crypto_parameters as the default key handling is not
-// safe w.r.t. concurrency
-fn test_crypto_parameters_handling() -> Result<(), Error> {
-    use serde_json::json;
-    use proxmox::tools::fs::{replace_file, CreateOptions};
-
-    let some_key = vec![1;1];
-    let default_key = vec![2;1];
-
-    let some_master_key = vec![3;1];
-    let default_master_key = vec![4;1];
-
-    let keypath = "./target/testout/keyfile.test";
-    let master_keypath = "./target/testout/masterkeyfile.test";
-    let invalid_keypath = "./target/testout/invalid_keyfile.test";
-
-    let no_key_res = CryptoParams {
-        enc_key: None,
-        master_pubkey: None,
-        mode: CryptMode::None,
-    };
-    let some_key_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: None,
-        mode: CryptMode::Encrypt,
-    };
-    let some_key_some_master_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: Some(KeyWithSource::from_path(
-            master_keypath.to_string(),
-            some_master_key.clone(),
-        )),
-        mode: CryptMode::Encrypt,
-    };
-    let some_key_default_master_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
-        mode: CryptMode::Encrypt,
-    };
-
-    let some_key_sign_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_path(
-            keypath.to_string(),
-            some_key.clone(),
-        )),
-        master_pubkey: None,
-        mode: CryptMode::SignOnly,
-    };
-    let default_key_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
-        master_pubkey: None,
-        mode: CryptMode::Encrypt,
-    };
-    let default_key_sign_res = CryptoParams {
-        enc_key: Some(KeyWithSource::from_default(default_key.clone())),
-        master_pubkey: None,
-        mode: CryptMode::SignOnly,
-    };
-
-    replace_file(&keypath, &some_key, CreateOptions::default())?;
-    replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
-
-    // no params, no default key == no key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now set a default key
-    unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }
-
-    // and repeat
-
-    // no params but default key == default key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), default_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
-    assert_eq!(res.unwrap(), default_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
-    assert_eq!(res.unwrap(), default_key_res);
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now make default key retrieval error
-    unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
-
-    // and repeat
-
-    // no params, default key retrieval errors == Error
-    assert!(crypto_parameters(&json!({})).is_err());
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // crypt mode none == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_sign_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_res);
-
-    // invalid keyfile parameter always errors
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
-
-    // now remove default key again
-    unsafe { set_test_encryption_key(Ok(None)); }
-    // set a default master key
-    unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
-
-    // an explicit master key without an encryption key == Error
-    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
-    // just a default master key == no key
-    let res = crypto_parameters(&json!({}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // keyfile param == key from keyfile
-    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
-    assert_eq!(res.unwrap(), some_key_some_master_res);
-    // same with fallback to default master key
-    let res = crypto_parameters(&json!({"keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_default_master_res);
-
-    // crypt mode none with an explicit master key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
-    // with just default master key == no key
-    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
-    assert_eq!(res.unwrap(), no_key_res);
-
-    // crypt mode encrypt without enc key == error
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
-
-    // crypt mode none with explicit key == Error
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
-
-    // crypt mode encrypt with keyfile == key from keyfile with correct mode
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
-    assert_eq!(res.unwrap(), some_key_some_master_res);
-    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
-    assert_eq!(res.unwrap(), some_key_default_master_res);
-
-    // invalid master keyfile parameter always errors when a key is passed, even with a valid
-    // default master key
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
-    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
-
-    Ok(())
-}
diff --git a/src/bin/proxmox_client_tools/mod.rs b/src/bin/proxmox_client_tools/mod.rs
deleted file mode 100644 (file)
index a54abe0..0000000
+++ /dev/null
@@ -1,389 +0,0 @@
-//! Shared tools useful for common CLI clients.
-use std::collections::HashMap;
-
-use anyhow::{bail, format_err, Context, Error};
-use serde_json::{json, Value};
-use xdg::BaseDirectories;
-
-use proxmox::{
-    api::schema::*,
-    tools::fs::file_get_json,
-};
-
-use pbs_api_types::{BACKUP_REPO_URL, Authid};
-use pbs_buildcfg;
-use pbs_datastore::BackupDir;
-use pbs_tools::json::json_object_to_query;
-
-use proxmox_backup::api2::access::user::UserWithTokens;
-use proxmox_backup::client::{BackupRepository, HttpClient, HttpClientOptions};
-use proxmox_backup::tools;
-
-pub mod key_source;
-
-const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
-const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
-
-pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
-    .format(&BACKUP_REPO_URL)
-    .max_length(256)
-    .schema();
-
-pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
-    .minimum(64)
-    .maximum(4096)
-    .default(4096)
-    .schema();
-
-pub fn get_default_repository() -> Option<String> {
-    std::env::var("PBS_REPOSITORY").ok()
-}
-
-pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
-    let repo_url = param["repository"]
-        .as_str()
-        .map(String::from)
-        .or_else(get_default_repository)
-        .ok_or_else(|| format_err!("unable to get (default) repository"))?;
-
-    let repo: BackupRepository = repo_url.parse()?;
-
-    Ok(repo)
-}
-
-pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> {
-    param
-        .get("repository")
-        .map(String::from)
-        .or_else(get_default_repository)
-        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
-}
-
-pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
-    connect_do(repo.host(), repo.port(), repo.auth_id())
-        .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
-}
-
-fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
-    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
-
-    use std::env::VarError::*;
-    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
-        Ok(p) => Some(p),
-        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
-        Err(NotPresent) => None,
-    };
-
-    let options = HttpClientOptions::new_interactive(password, fingerprint);
-
-    HttpClient::new(server, port, auth_id, options)
-}
-
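A sketch of the usual call chain from CLI parameter to client, assuming this module's imports (the repository string is an example):

    fn sketch() -> Result<(), Error> {
        let repo = extract_repository_from_value(&json!({
            "repository": "admin@pbs@backup.example.com:store1",
        }))?;
        let _client = connect(&repo)?; // honors PBS_PASSWORD / PBS_FINGERPRINT
        Ok(())
    }
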
-/// Like `get`, but simply ignores errors and returns `Value::Null` instead.
-pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
-
-    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
-    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
-
-    // ticket cache, but no questions asked
-    let options = HttpClientOptions::new_interactive(password, fingerprint)
-        .interactive(false);
-
-    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
-        Ok(v) => v,
-        _ => return Value::Null,
-    };
-
-    let mut resp = match client.get(url, None).await {
-        Ok(v) => v,
-        _ => return Value::Null,
-    };
-
-    if let Some(map) = resp.as_object_mut() {
-        if let Some(data) = map.remove("data") {
-            return data;
-        }
-    }
-    Value::Null
-}
-
-pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_backup_group_do(param).await })
-}
-
-pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
-
-    let mut result = vec![];
-
-    let repo = match extract_repository_from_map(param) {
-        Some(v) => v,
-        _ => return result,
-    };
-
-    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
-
-    let data = try_get(&repo, &path).await;
-
-    if let Some(list) = data.as_array() {
-        for item in list {
-            if let (Some(backup_id), Some(backup_type)) =
-                (item["backup-id"].as_str(), item["backup-type"].as_str())
-            {
-                result.push(format!("{}/{}", backup_type, backup_id));
-            }
-        }
-    }
-
-    result
-}
-
-pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
-}
-
-pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-
-    if arg.matches('/').count() < 2 {
-        let groups = complete_backup_group_do(param).await;
-        let mut result = vec![];
-        for group in groups {
-            result.push(group.to_string());
-            result.push(format!("{}/", group));
-        }
-        return result;
-    }
-
-    complete_backup_snapshot_do(param).await
-}
-
-pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
-}
-
-pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
-
-    let mut result = vec![];
-
-    let repo = match extract_repository_from_map(param) {
-        Some(v) => v,
-        _ => return result,
-    };
-
-    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
-
-    let data = try_get(&repo, &path).await;
-
-    if let Some(list) = data.as_array() {
-        for item in list {
-            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
-                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
-            {
-                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
-                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
-                }
-            }
-        }
-    }
-
-    result
-}
-
-pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_server_file_name_do(param).await })
-}
-
-pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
-
-    let mut result = vec![];
-
-    let repo = match extract_repository_from_map(param) {
-        Some(v) => v,
-        _ => return result,
-    };
-
-    let snapshot: BackupDir = match param.get("snapshot") {
-        Some(path) => {
-            match path.parse() {
-                Ok(v) => v,
-                _ => return result,
-            }
-        }
-        _ => return result,
-    };
-
-    let query = json_object_to_query(json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
-    })).unwrap();
-
-    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
-
-    let data = try_get(&repo, &path).await;
-
-    if let Some(list) = data.as_array() {
-        for item in list {
-            if let Some(filename) = item["filename"].as_str() {
-                result.push(filename.to_owned());
-            }
-        }
-    }
-
-    result
-}
-
-pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    complete_server_file_name(arg, param)
-        .iter()
-        .map(|v| pbs_tools::format::strip_server_file_extension(&v))
-        .collect()
-}
-
-pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    complete_server_file_name(arg, param)
-        .iter()
-        .filter_map(|name| {
-            if name.ends_with(".pxar.didx") {
-                Some(pbs_tools::format::strip_server_file_extension(name))
-            } else {
-                None
-            }
-        })
-        .collect()
-}
-
-pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    complete_server_file_name(arg, param)
-        .iter()
-        .filter_map(|name| {
-            if name.ends_with(".img.fidx") {
-                Some(pbs_tools::format::strip_server_file_extension(name))
-            } else {
-                None
-            }
-        })
-        .collect()
-}
-
-pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-
-    let mut result = vec![];
-
-    let mut size = 64;
-    loop {
-        result.push(size.to_string());
-        size *= 2;
-        if size > 4096 { break; }
-    }
-
-    result
-}
-
-pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    pbs_runtime::main(async { complete_auth_id_do(param).await })
-}
-
-pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
-
-    let mut result = vec![];
-
-    let repo = match extract_repository_from_map(param) {
-        Some(v) => v,
-        _ => return result,
-    };
-
-    let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
-
-    if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
-        for user in parsed {
-            result.push(user.userid.to_string());
-            for token in user.tokens {
-                result.push(token.tokenid.to_string());
-            }
-        }
-    };
-
-    result
-}
-
-pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-    let mut result = vec![];
-
-    let base = match BaseDirectories::with_prefix("proxmox-backup") {
-        Ok(v) => v,
-        _ => return result,
-    };
-
-    // usually $HOME/.cache/proxmox-backup/repo-list
-    let path = match base.place_cache_file("repo-list") {
-        Ok(v) => v,
-        _ => return result,
-    };
-
-    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
-
-    if let Some(map) = data.as_object() {
-        for (repo, _count) in map {
-            result.push(repo.to_owned());
-        }
-    }
-
-    result
-}
-
-pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-    let mut result = vec![];
-
-    let data: Vec<&str> = arg.splitn(2, ':').collect();
-
-    if data.len() != 2 {
-        result.push(String::from("root.pxar:/"));
-        result.push(String::from("etc.pxar:/etc"));
-        return result;
-    }
-
-    let files = tools::complete_file_name(data[1], param);
-
-    for file in files {
-        result.push(format!("{}:{}", data[0], file));
-    }
-
-    result
-}
-
-pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
-    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
-}
-
-/// Convenience helper for better error messages:
-pub fn find_xdg_file(
-    file_name: impl AsRef<std::path::Path>,
-    description: &'static str,
-) -> Result<Option<std::path::PathBuf>, Error> {
-    let file_name = file_name.as_ref();
-    base_directories()
-        .map(|base| base.find_config_file(file_name))
-        .with_context(|| format!("error searching for {}", description))
-}
-
-pub fn place_xdg_file(
-    file_name: impl AsRef<std::path::Path>,
-    description: &'static str,
-) -> Result<std::path::PathBuf, Error> {
-    let file_name = file_name.as_ref();
-    base_directories()
-        .and_then(|base| base.place_config_file(file_name).map_err(Error::from))
-        .with_context(|| format!("failed to place {} in xdg home", description))
-}
-
-/// Returns a runtime dir owned by the current user.
-/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
-/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
-pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
-    let uid = nix::unistd::Uid::current();
-    let mut path: std::path::PathBuf = pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
-    path.push(uid.to_string());
-    tools::create_run_dir()?;
-    std::fs::create_dir_all(&path)?;
-    Ok(path)
-}
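
A sketch of the XDG helpers above; the file name here is a placeholder, real callers pass their own constants:

    fn sketch() -> Result<(), Error> {
        // "encryption-key.json" is illustrative only
        if let Some(path) = find_xdg_file("encryption-key.json", "default encryption key file")? {
            println!("default key at {:?}", path);
        }
        Ok(())
    }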
index ba9794e3732cf59575fa52745713ee03767a0d4c..700c681eed7cd5db843ec95e68e9da95e9165c36 100644 (file)
@@ -1,19 +1,20 @@
 //! Abstraction layer over different methods of accessing a block backup
-use anyhow::{bail, Error};
-use serde::{Deserialize, Serialize};
-use serde_json::{json, Value};
-
 use std::collections::HashMap;
 use std::future::Future;
 use std::hash::BuildHasher;
 use std::pin::Pin;
 
-use proxmox_backup::backup::{BackupDir, BackupManifest};
-use proxmox_backup::api2::types::ArchiveEntry;
-use proxmox_backup::client::BackupRepository;
+use anyhow::{bail, Error};
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
 
 use proxmox::api::{api, cli::*};
 
+use pbs_client::BackupRepository;
+
+use proxmox_backup::backup::{BackupDir, BackupManifest};
+use proxmox_backup::api2::types::ArchiveEntry;
+
 use super::block_driver_qemu::QemuBlockDriver;
 
 /// Contains details about a snapshot that is to be accessed by block file restore
index 46d91198016a632d6fd2208d4dfadf766ceee44f..be1476bd764de2564ca3ce2f64ebc93ef86b181e 100644 (file)
@@ -1,21 +1,23 @@
 //! Block file access via a small QEMU restore VM using the PBS block driver in QEMU
+use std::collections::HashMap;
+use std::fs::{File, OpenOptions};
+use std::io::{prelude::*, SeekFrom};
+
 use anyhow::{bail, Error};
 use futures::FutureExt;
 use serde::{Deserialize, Serialize};
 use serde_json::json;
 
-use std::collections::HashMap;
-use std::fs::{File, OpenOptions};
-use std::io::{prelude::*, SeekFrom};
-
 use proxmox::tools::fs::lock_file;
+
+use pbs_client::{DEFAULT_VSOCK_PORT, BackupRepository, VsockClient};
+
 use proxmox_backup::api2::types::ArchiveEntry;
 use proxmox_backup::backup::BackupDir;
-use proxmox_backup::client::*;
 use proxmox_backup::tools;
 
 use super::block_driver::*;
-use crate::proxmox_client_tools::get_user_run_dir;
+use crate::get_user_run_dir;
 
 const RESTORE_VM_MAP: &str = "restore-vm-map.json";
 
index 83e772cbe07af0655cbed4afe4bf373bac454776..80e891024f16c8a5edd254590bf436ce98dec264 100644 (file)
@@ -13,8 +13,9 @@ use nix::unistd::Pid;
 
 use proxmox::tools::fs::{create_path, file_read_string, make_tmp_file, CreateOptions};
 
+use pbs_client::{VsockClient, DEFAULT_VSOCK_PORT};
+
 use proxmox_backup::backup::backup_user;
-use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
 use proxmox_backup::tools;
 
 use super::SnapRestoreDetails;
index b3721160e3cde86544b1bd3e9f3c6e915b1a49df..fbbda13cc4ea0c9dc7349123ee753695bd901ac3 100644 (file)
@@ -19,12 +19,13 @@ use proxmox::api::{
 };
 use proxmox::{identity, list_subdirs_api_method, sortable};
 
+use pbs_client::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
 use pbs_tools::fs::read_subdir;
+use pbs_tools::zip::zip_directory;
 
 use proxmox_backup::api2::types::*;
 use proxmox_backup::backup::DirEntryAttribute;
-use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
-use proxmox_backup::tools::{self, zip::zip_directory};
+use proxmox_backup::tools;
 
 use pxar::encoder::aio::TokioWriter;
 
index 6046c3d29af6420ec81f7285f98fff6d39df7648..d9b25f2041b3cab39440c2541a50a6105aeb6710 100644 (file)
@@ -3,12 +3,10 @@ use serde_json::Value;
 
 use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
 
+use pbs_client::{connect_to_localhost, view_task_result};
+
 use proxmox_backup::{
     config,
-    client::{
-        connect_to_localhost,
-        view_task_result,
-    },
     api2::{
         self,
         types::*,
index 34cae30cf56cd54d652435287043f32fb9094f9d..e4eac0d7707d5c8c46965adf7c202d72adb0e622 100644 (file)
@@ -12,13 +12,11 @@ use futures::select;
 use tokio::signal::unix::{signal, SignalKind};
 
 use pathpatterns::{MatchEntry, MatchType, PatternFlag};
+use pbs_client::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags, PxarExtractOptions};
 
 use proxmox::api::cli::*;
 use proxmox::api::api;
 
-use proxmox_backup::tools;
-use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags, PxarExtractOptions};
-
 fn extract_archive_from_reader<R: std::io::Read>(
     reader: &mut R,
     target: &str,
@@ -26,8 +24,7 @@ fn extract_archive_from_reader<R: std::io::Read>(
     verbose: bool,
     options: PxarExtractOptions,
 ) -> Result<(), Error> {
-
-    proxmox_backup::pxar::extract_archive(
+    pbs_client::pxar::extract_archive(
         pxar::decoder::Decoder::from_std(reader)?,
         Path::new(target),
         feature_flags,
@@ -327,7 +324,7 @@ async fn create_archive(
         Some(HashSet::new())
     };
 
-    let options = proxmox_backup::pxar::PxarCreateOptions {
+    let options = pbs_client::pxar::PxarCreateOptions {
         entries_max: entries_max as usize,
         device_set,
         patterns,
@@ -372,7 +369,7 @@ async fn create_archive(
     }
 
     let writer = pxar::encoder::sync::StandardWriter::new(writer);
-    proxmox_backup::pxar::create_archive(
+    pbs_client::pxar::create_archive(
         dir,
         writer,
         feature_flags,
@@ -464,29 +461,29 @@ fn main() {
             "create",
             CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
                 .arg_param(&["archive", "source"])
-                .completion_cb("archive", tools::complete_file_name)
-                .completion_cb("source", tools::complete_file_name),
+                .completion_cb("archive", pbs_tools::fs::complete_file_name)
+                .completion_cb("source", pbs_tools::fs::complete_file_name),
         )
         .insert(
             "extract",
             CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
                 .arg_param(&["archive", "target"])
-                .completion_cb("archive", tools::complete_file_name)
-                .completion_cb("target", tools::complete_file_name)
-                .completion_cb("files-from", tools::complete_file_name),
+                .completion_cb("archive", pbs_tools::fs::complete_file_name)
+                .completion_cb("target", pbs_tools::fs::complete_file_name)
+                .completion_cb("files-from", pbs_tools::fs::complete_file_name),
         )
         .insert(
             "mount",
             CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
                 .arg_param(&["archive", "mountpoint"])
-                .completion_cb("archive", tools::complete_file_name)
-                .completion_cb("mountpoint", tools::complete_file_name),
+                .completion_cb("archive", pbs_tools::fs::complete_file_name)
+                .completion_cb("mountpoint", pbs_tools::fs::complete_file_name),
         )
         .insert(
             "list",
             CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
                 .arg_param(&["archive"])
-                .completion_cb("archive", tools::complete_file_name),
+                .completion_cb("archive", pbs_tools::fs::complete_file_name),
         );
 
     let rpcenv = CliEnvironment::new();
diff --git a/src/client/backup_reader.rs b/src/client/backup_reader.rs
deleted file mode 100644 (file)
index e04494d..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-use anyhow::{format_err, Error};
-use std::io::{Write, Seek, SeekFrom};
-use std::fs::File;
-use std::sync::Arc;
-use std::os::unix::fs::OpenOptionsExt;
-
-use futures::future::AbortHandle;
-use serde_json::{json, Value};
-
-use proxmox::tools::digest_to_hex;
-
-use pbs_datastore::{CryptConfig, BackupManifest};
-use pbs_datastore::data_blob::DataBlob;
-use pbs_datastore::data_blob_reader::DataBlobReader;
-use pbs_datastore::dynamic_index::DynamicIndexReader;
-use pbs_datastore::fixed_index::FixedIndexReader;
-use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
-use pbs_tools::sha::sha256;
-
-use super::{HttpClient, H2Client};
-
-/// Backup Reader
-pub struct BackupReader {
-    h2: H2Client,
-    abort: AbortHandle,
-    crypt_config: Option<Arc<CryptConfig>>,
-}
-
-impl Drop for BackupReader {
-
-    fn drop(&mut self) {
-        self.abort.abort();
-    }
-}
-
-impl BackupReader {
-
-    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
-        Arc::new(Self { h2, abort, crypt_config})
-    }
-
-    /// Create a new instance by upgrading the connection at '/api2/json/reader'
-    pub async fn start(
-        client: HttpClient,
-        crypt_config: Option<Arc<CryptConfig>>,
-        datastore: &str,
-        backup_type: &str,
-        backup_id: &str,
-        backup_time: i64,
-        debug: bool,
-    ) -> Result<Arc<BackupReader>, Error> {
-
-        let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
-            "store": datastore,
-            "debug": debug,
-        });
-        let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
-
-        let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
-
-        Ok(BackupReader::new(h2, abort, crypt_config))
-    }
-
-    /// Execute a GET request
-    pub async fn get(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
-        self.h2.get(path, param).await
-    }
-
-    /// Execute a PUT request
-    pub async fn put(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
-        self.h2.put(path, param).await
-    }
-
-    /// Execute a POST request
-    pub async fn post(
-        &self,
-        path: &str,
-        param: Option<Value>,
-    ) -> Result<Value, Error> {
-        self.h2.post(path, param).await
-    }
-
-    /// Execute a GET request and send output to a writer
-    pub async fn download<W: Write + Send>(
-        &self,
-        file_name: &str,
-        output: W,
-    ) -> Result<(), Error> {
-        let path = "download";
-        let param = json!({ "file-name": file_name });
-        self.h2.download(path, Some(param), output).await
-    }
-
-    /// Execute a special GET request and send output to a writer
-    ///
-    /// This writes random data, and is only useful to test download speed.
-    pub async fn speedtest<W: Write + Send>(
-        &self,
-        output: W,
-    ) -> Result<(), Error> {
-        self.h2.download("speedtest", None, output).await
-    }
-
-    /// Download a specific chunk
-    pub async fn download_chunk<W: Write + Send>(
-        &self,
-        digest: &[u8; 32],
-        output: W,
-    ) -> Result<(), Error> {
-        let path = "chunk";
-        let param = json!({ "digest": digest_to_hex(digest) });
-        self.h2.download(path, Some(param), output).await
-    }
-
-    pub fn force_close(self) {
-        self.abort.abort();
-    }
-
-    /// Download backup manifest (index.json)
-    ///
-    /// The manifest signature is verified if we have a crypt_config.
-    pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
-
-        let mut raw_data = Vec::with_capacity(64 * 1024);
-        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
-        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
-        // no expected digest available
-        let data = blob.decode(None, None)?;
-
-        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
-
-        Ok((manifest, data))
-    }
-
-    /// Download a .blob file
-    ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
-    /// the provided manifest.
-    pub async fn download_blob(
-        &self,
-        manifest: &BackupManifest,
-        name: &str,
-    ) -> Result<DataBlobReader<'_, File>, Error> {
-
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
-
-        self.download(name, &mut tmpfile).await?;
-
-        tmpfile.seek(SeekFrom::Start(0))?;
-        let (csum, size) = sha256(&mut tmpfile)?;
-        manifest.verify_file(name, &csum, size)?;
-
-        tmpfile.seek(SeekFrom::Start(0))?;
-
-        DataBlobReader::new(tmpfile, self.crypt_config.clone())
-    }
-
-    /// Download dynamic index file
-    ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
-    /// the provided manifest.
-    pub async fn download_dynamic_index(
-        &self,
-        manifest: &BackupManifest,
-        name: &str,
-    ) -> Result<DynamicIndexReader, Error> {
-
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
-
-        self.download(name, &mut tmpfile).await?;
-
-        let index = DynamicIndexReader::new(tmpfile)
-            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
-
-        // Note: do not use values stored in the index (not trusted) - instead, compute them again
-        let (csum, size) = index.compute_csum();
-        manifest.verify_file(name, &csum, size)?;
-
-        Ok(index)
-    }
-
-    /// Download fixed index file
-    ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
-    /// the provided manifest.
-    pub async fn download_fixed_index(
-        &self,
-        manifest: &BackupManifest,
-        name: &str,
-    ) -> Result<FixedIndexReader, Error> {
-
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
-
-        self.download(name, &mut tmpfile).await?;
-
-        let index = FixedIndexReader::new(tmpfile)
-            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
-
-        // Note: do not use values stored in the index (not trusted) - instead, compute them again
-        let (csum, size) = index.compute_csum();
-        manifest.verify_file(name, &csum, size)?;
-
-        Ok(index)
-    }
-}
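
A sketch of the reader handshake and manifest download; `client` is assumed to be an authenticated HttpClient, and the datastore/snapshot values are examples:

    async fn sketch(client: HttpClient, backup_time: i64) -> Result<(), Error> {
        let reader = BackupReader::start(
            client,
            None,        // no CryptConfig: the manifest signature is not verified
            "store1",    // datastore (example)
            "vm",        // backup type (example)
            "100",       // backup id (example)
            backup_time, // snapshot epoch
            false,       // debug
        ).await?;
        let (_manifest, _raw) = reader.download_manifest().await?;
        Ok(())
    }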
diff --git a/src/client/backup_repo.rs b/src/client/backup_repo.rs
deleted file mode 100644 (file)
index dc9b8ec..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-use std::convert::TryFrom;
-use std::fmt;
-
-use anyhow::{format_err, Error};
-
-use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
-
-/// Reference to a remote backup location.
-#[derive(Debug)]
-pub struct BackupRepository {
-    /// The user name used for Authentication
-    auth_id: Option<Authid>,
-    /// The host name or IP address
-    host: Option<String>,
-    /// The port
-    port: Option<u16>,
-    /// The name of the datastore
-    store: String,
-}
-
-impl BackupRepository {
-
-    pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
-        let host = match host {
-            Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
-                Some(format!("[{}]", host))
-            },
-            other => other,
-        };
-        Self { auth_id, host, port, store }
-    }
-
-    pub fn auth_id(&self) -> &Authid {
-        if let Some(ref auth_id) = self.auth_id {
-            return auth_id;
-        }
-
-        Authid::root_auth_id()
-    }
-
-    pub fn user(&self) -> &Userid {
-        if let Some(auth_id) = &self.auth_id {
-            return auth_id.user();
-        }
-
-        Userid::root_userid()
-    }
-
-    pub fn host(&self) -> &str {
-        if let Some(ref host) = self.host {
-            return host;
-        }
-        "localhost"
-    }
-
-    pub fn port(&self) -> u16 {
-        if let Some(port) = self.port {
-            return port;
-        }
-        8007
-    }
-
-    pub fn store(&self) -> &str {
-        &self.store
-    }
-}
-
-impl fmt::Display for BackupRepository {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match (&self.auth_id, &self.host, self.port) {
-            (Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
-            (None, Some(host), None) => write!(f, "{}:{}", host, self.store),
-            (None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
-            (None, None, None) => write!(f, "{}", self.store),
-        }
-    }
-}
-
-impl std::str::FromStr for BackupRepository {
-    type Err = Error;
-
-    /// Parse a repository URL.
-    ///
-    /// This parses strings like `user@host:datastore`. The `user` and
-    /// `host` parts are optional, where `host` defaults to the local
-    /// host, and `user` defaults to `root@pam`.
-    fn from_str(url: &str) -> Result<Self, Self::Err> {
-
-        let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
-            .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
-
-        Ok(Self {
-            auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
-            host: cap.get(2).map(|m| m.as_str().to_owned()),
-            port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
-            store: cap[4].to_owned(),
-        })
-    }
-}
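
A round-trip sketch for the repository syntax accepted by `from_str` (values are examples):

    fn sketch() -> Result<(), Error> {
        let repo: BackupRepository = "root@pam@192.168.1.2:store1".parse()?;
        assert_eq!(repo.host(), "192.168.1.2");
        assert_eq!(repo.port(), 8007); // default when the URL has no port
        assert_eq!(repo.store(), "store1");
        Ok(())
    }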
diff --git a/src/client/backup_specification.rs b/src/client/backup_specification.rs
deleted file mode 100644 (file)
index 627c183..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-use anyhow::{bail, Error};
-
-use proxmox::api::schema::*;
-
-proxmox::const_regex! {
-    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
-}
-
-pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
-    "Backup source specification ([<label>:<path>]).")
-    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
-    .schema();
-
-pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
-
-pub struct BackupSpecification {
-    pub archive_name: String, // left part
-    pub config_string: String, // right part
-    pub spec_type: BackupSpecificationType,
-}
-
-pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
-
-    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
-        let archive_name = caps.get(1).unwrap().as_str().into();
-        let extension = caps.get(2).unwrap().as_str();
-        let config_string =  caps.get(3).unwrap().as_str().into();
-        let spec_type = match extension {
-            "pxar" => BackupSpecificationType::PXAR,
-            "img" => BackupSpecificationType::IMAGE,
-            "conf" => BackupSpecificationType::CONFIG,
-            "log" => BackupSpecificationType::LOGFILE,
-            _ => bail!("unknown backup source type '{}'", extension),
-        };
-        return Ok(BackupSpecification { archive_name, config_string, spec_type });
-    }
-
-    bail!("unable to parse backup source specification '{}'", value);
-}
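
A sketch of the accepted `<label>.<ext>:<path>` form:

    fn sketch() -> Result<(), Error> {
        let spec = parse_backup_specification("root.pxar:/")?;
        assert_eq!(spec.archive_name, "root.pxar");
        assert_eq!(spec.config_string, "/");
        assert!(matches!(spec.spec_type, BackupSpecificationType::PXAR));
        Ok(())
    }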
diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs
deleted file mode 100644 (file)
index 8b3ddef..0000000
+++ /dev/null
@@ -1,842 +0,0 @@
-use std::collections::HashSet;
-use std::future::Future;
-use std::os::unix::fs::OpenOptionsExt;
-use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
-use std::sync::{Arc, Mutex};
-
-use anyhow::{bail, format_err, Error};
-use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
-use futures::stream::{Stream, StreamExt, TryStreamExt};
-use serde_json::{json, Value};
-use tokio::io::AsyncReadExt;
-use tokio::sync::{mpsc, oneshot};
-use tokio_stream::wrappers::ReceiverStream;
-
-use proxmox::tools::digest_to_hex;
-
-use pbs_datastore::{CATALOG_NAME, CryptConfig};
-use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
-use pbs_datastore::dynamic_index::DynamicIndexReader;
-use pbs_datastore::fixed_index::FixedIndexReader;
-use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
-use pbs_tools::format::HumanByte;
-
-use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
-
-use super::{H2Client, HttpClient};
-
-pub struct BackupWriter {
-    h2: H2Client,
-    abort: AbortHandle,
-    verbose: bool,
-    crypt_config: Option<Arc<CryptConfig>>,
-}
-
-impl Drop for BackupWriter {
-    fn drop(&mut self) {
-        self.abort.abort();
-    }
-}
-
-pub struct BackupStats {
-    pub size: u64,
-    pub csum: [u8; 32],
-}
-
-/// Options for uploading blobs/streams to the server
-#[derive(Default, Clone)]
-pub struct UploadOptions {
-    pub previous_manifest: Option<Arc<BackupManifest>>,
-    pub compress: bool,
-    pub encrypt: bool,
-    pub fixed_size: Option<u64>,
-}
-
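A sketch of typical options for an encrypted, compressed dynamic-index upload:

    fn sketch() -> UploadOptions {
        UploadOptions {
            previous_manifest: None, // no previous snapshot to reuse known chunks from
            compress: true,
            encrypt: true,
            fixed_size: None,        // None selects a dynamic index
        }
    }
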
-struct UploadStats {
-    chunk_count: usize,
-    chunk_reused: usize,
-    size: usize,
-    size_reused: usize,
-    size_compressed: usize,
-    duration: std::time::Duration,
-    csum: [u8; 32],
-}
-
-type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
-type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
-
-impl BackupWriter {
-    fn new(
-        h2: H2Client,
-        abort: AbortHandle,
-        crypt_config: Option<Arc<CryptConfig>>,
-        verbose: bool,
-    ) -> Arc<Self> {
-        Arc::new(Self {
-            h2,
-            abort,
-            crypt_config,
-            verbose,
-        })
-    }
-
-    // FIXME: extract into (flattened) parameter struct?
-    #[allow(clippy::too_many_arguments)]
-    pub async fn start(
-        client: HttpClient,
-        crypt_config: Option<Arc<CryptConfig>>,
-        datastore: &str,
-        backup_type: &str,
-        backup_id: &str,
-        backup_time: i64,
-        debug: bool,
-        benchmark: bool,
-    ) -> Result<Arc<BackupWriter>, Error> {
-        let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
-            "store": datastore,
-            "debug": debug,
-            "benchmark": benchmark
-        });
-
-        let req = HttpClient::request_builder(
-            client.server(),
-            client.port(),
-            "GET",
-            "/api2/json/backup",
-            Some(param),
-        )
-        .unwrap();
-
-        let (h2, abort) = client
-            .start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
-            .await?;
-
-        Ok(BackupWriter::new(h2, abort, crypt_config, debug))
-    }
-
-    pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
-        self.h2.get(path, param).await
-    }
-
-    pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
-        self.h2.put(path, param).await
-    }
-
-    pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
-        self.h2.post(path, param).await
-    }
-
-    pub async fn upload_post(
-        &self,
-        path: &str,
-        param: Option<Value>,
-        content_type: &str,
-        data: Vec<u8>,
-    ) -> Result<Value, Error> {
-        self.h2
-            .upload("POST", path, param, content_type, data)
-            .await
-    }
-
-    pub async fn send_upload_request(
-        &self,
-        method: &str,
-        path: &str,
-        param: Option<Value>,
-        content_type: &str,
-        data: Vec<u8>,
-    ) -> Result<h2::client::ResponseFuture, Error> {
-        let request =
-            H2Client::request_builder("localhost", method, path, param, Some(content_type))
-                .unwrap();
-        let response_future = self
-            .h2
-            .send_request(request, Some(bytes::Bytes::from(data.clone())))
-            .await?;
-        Ok(response_future)
-    }
-
-    pub async fn upload_put(
-        &self,
-        path: &str,
-        param: Option<Value>,
-        content_type: &str,
-        data: Vec<u8>,
-    ) -> Result<Value, Error> {
-        self.h2.upload("PUT", path, param, content_type, data).await
-    }
-
-    pub async fn finish(self: Arc<Self>) -> Result<(), Error> {
-        let h2 = self.h2.clone();
-
-        h2.post("finish", None)
-            .map_ok(move |_| {
-                self.abort.abort();
-            })
-            .await
-    }
-
-    pub fn cancel(&self) {
-        self.abort.abort();
-    }
-
-    pub async fn upload_blob<R: std::io::Read>(
-        &self,
-        mut reader: R,
-        file_name: &str,
-    ) -> Result<BackupStats, Error> {
-        let mut raw_data = Vec::new();
-        // fixme: avoid loading into memory
-        reader.read_to_end(&mut raw_data)?;
-
-        let csum = openssl::sha::sha256(&raw_data);
-        let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
-        let size = raw_data.len() as u64;
-        let _value = self
-            .h2
-            .upload(
-                "POST",
-                "blob",
-                Some(param),
-                "application/octet-stream",
-                raw_data,
-            )
-            .await?;
-        Ok(BackupStats { size, csum })
-    }
-
-    pub async fn upload_blob_from_data(
-        &self,
-        data: Vec<u8>,
-        file_name: &str,
-        options: UploadOptions,
-    ) -> Result<BackupStats, Error> {
-        let blob = match (options.encrypt, &self.crypt_config) {
-            (false, _) => DataBlob::encode(&data, None, options.compress)?,
-            (true, None) => bail!("requested encryption without a crypt config"),
-            (true, Some(crypt_config)) => {
-                DataBlob::encode(&data, Some(crypt_config), options.compress)?
-            }
-        };
-
-        let raw_data = blob.into_inner();
-        let size = raw_data.len() as u64;
-
-        let csum = openssl::sha::sha256(&raw_data);
-        let param = json!({"encoded-size": size, "file-name": file_name });
-        let _value = self
-            .h2
-            .upload(
-                "POST",
-                "blob",
-                Some(param),
-                "application/octet-stream",
-                raw_data,
-            )
-            .await?;
-        Ok(BackupStats { size, csum })
-    }
-
-    pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
-        &self,
-        src_path: P,
-        file_name: &str,
-        options: UploadOptions,
-    ) -> Result<BackupStats, Error> {
-        let src_path = src_path.as_ref();
-
-        let mut file = tokio::fs::File::open(src_path)
-            .await
-            .map_err(|err| format_err!("unable to open file {:?} - {}", src_path, err))?;
-
-        let mut contents = Vec::new();
-
-        file.read_to_end(&mut contents)
-            .await
-            .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
-
-        self.upload_blob_from_data(contents, file_name, options)
-            .await
-    }
-
-    pub async fn upload_stream(
-        &self,
-        archive_name: &str,
-        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
-        options: UploadOptions,
-    ) -> Result<BackupStats, Error> {
-        let known_chunks = Arc::new(Mutex::new(HashSet::new()));
-
-        let mut param = json!({ "archive-name": archive_name });
-        let prefix = if let Some(size) = options.fixed_size {
-            param["size"] = size.into();
-            "fixed"
-        } else {
-            "dynamic"
-        };
-
-        if options.encrypt && self.crypt_config.is_none() {
-            bail!("requested encryption without a crypt config");
-        }
-
-        let index_path = format!("{}_index", prefix);
-        let close_path = format!("{}_close", prefix);
-
-        if let Some(manifest) = options.previous_manifest {
-            // try, but ignore errors
-            match ArchiveType::from_path(archive_name) {
-                Ok(ArchiveType::FixedIndex) => {
-                    let _ = self
-                        .download_previous_fixed_index(
-                            archive_name,
-                            &manifest,
-                            known_chunks.clone(),
-                        )
-                        .await;
-                }
-                Ok(ArchiveType::DynamicIndex) => {
-                    let _ = self
-                        .download_previous_dynamic_index(
-                            archive_name,
-                            &manifest,
-                            known_chunks.clone(),
-                        )
-                        .await;
-                }
-                _ => { /* do nothing */ }
-            }
-        }
-
-        let wid = self
-            .h2
-            .post(&index_path, Some(param))
-            .await?
-            .as_u64()
-            .unwrap();
-
-        let upload_stats = Self::upload_chunk_info_stream(
-            self.h2.clone(),
-            wid,
-            stream,
-            &prefix,
-            known_chunks.clone(),
-            if options.encrypt {
-                self.crypt_config.clone()
-            } else {
-                None
-            },
-            options.compress,
-            self.verbose,
-        )
-        .await?;
-
-        let size_dirty = upload_stats.size - upload_stats.size_reused;
-        let size: HumanByte = upload_stats.size.into();
-        let archive = if self.verbose {
-            archive_name.to_string()
-        } else {
-            pbs_tools::format::strip_server_file_extension(archive_name)
-        };
-        if archive_name != CATALOG_NAME {
-            let speed: HumanByte =
-                ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
-            let size_dirty: HumanByte = size_dirty.into();
-            let size_compressed: HumanByte = upload_stats.size_compressed.into();
-            println!(
-                "{}: had to backup {} of {} (compressed {}) in {:.2}s",
-                archive,
-                size_dirty,
-                size,
-                size_compressed,
-                upload_stats.duration.as_secs_f64()
-            );
-            println!("{}: average backup speed: {}/s", archive, speed);
-        } else {
-            println!("Uploaded backup catalog ({})", size);
-        }
-
-        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
-            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
-            let reused: HumanByte = upload_stats.size_reused.into();
-            println!(
-                "{}: backup was done incrementally, reused {} ({:.1}%)",
-                archive, reused, reused_percent
-            );
-        }
-        if self.verbose && upload_stats.chunk_count > 0 {
-            println!(
-                "{}: Reused {} from {} chunks.",
-                archive, upload_stats.chunk_reused, upload_stats.chunk_count
-            );
-            println!(
-                "{}: Average chunk size was {}.",
-                archive,
-                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
-            );
-            println!(
-                "{}: Average time per request: {} microseconds.",
-                archive,
-                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
-            );
-        }
-
-        let param = json!({
-            "wid": wid ,
-            "chunk-count": upload_stats.chunk_count,
-            "size": upload_stats.size,
-            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
-        });
-        let _value = self.h2.post(&close_path, Some(param)).await?;
-        Ok(BackupStats {
-            size: upload_stats.size as u64,
-            csum: upload_stats.csum,
-        })
-    }
-
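-    // Creates a bounded queue for pipelined h2 responses; a background task
-    // awaits each response and reports the overall result via a oneshot channel.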
-    fn response_queue(
-        verbose: bool,
-    ) -> (
-        mpsc::Sender<h2::client::ResponseFuture>,
-        oneshot::Receiver<Result<(), Error>>,
-    ) {
-        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
-        let (verify_result_tx, verify_result_rx) = oneshot::channel();
-
-        // FIXME: check if this works as expected as replacement for the combinator below?
-        // tokio::spawn(async move {
-        //     let result: Result<(), Error> = (async move {
-        //         while let Some(response) = verify_queue_rx.recv().await {
-        //             match H2Client::h2api_response(response.await?).await {
-        //                 Ok(result) => println!("RESPONSE: {:?}", result),
-        //                 Err(err) => bail!("pipelined request failed: {}", err),
-        //             }
-        //         }
-        //         Ok(())
-        //     }).await;
-        //     let _ignore_closed_channel = verify_result_tx.send(result);
-        // });
-        // Old combinator-based implementation, kept until the async block above is verified:
-        tokio::spawn(
-            ReceiverStream::new(verify_queue_rx)
-                .map(Ok::<_, Error>)
-                .try_for_each(move |response: h2::client::ResponseFuture| {
-                    response
-                        .map_err(Error::from)
-                        .and_then(H2Client::h2api_response)
-                        .map_ok(move |result| {
-                            if verbose {
-                                println!("RESPONSE: {:?}", result)
-                            }
-                        })
-                        .map_err(|err| format_err!("pipelined request failed: {}", err))
-                })
-                .map(|result| {
-                    let _ignore_closed_channel = verify_result_tx.send(result);
-                }),
-        );
-
-        (verify_queue_tx, verify_result_rx)
-    }
-
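-    // Spawns a background task that awaits upload confirmations, merges runs
-    // of known chunks, and appends them to the index via batched PUT requests.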
-    fn append_chunk_queue(
-        h2: H2Client,
-        wid: u64,
-        path: String,
-        verbose: bool,
-    ) -> (UploadQueueSender, UploadResultReceiver) {
-        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
-        let (verify_result_tx, verify_result_rx) = oneshot::channel();
-
-        // FIXME: async-block-ify this code!
-        tokio::spawn(
-            ReceiverStream::new(verify_queue_rx)
-                .map(Ok::<_, Error>)
-                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
-                    match (response, merged_chunk_info) {
-                        (Some(response), MergedChunkInfo::Known(list)) => {
-                            Either::Left(
-                                response
-                                    .map_err(Error::from)
-                                    .and_then(H2Client::h2api_response)
-                                    .and_then(move |_result| {
-                                        future::ok(MergedChunkInfo::Known(list))
-                                    })
-                            )
-                        }
-                        (None, MergedChunkInfo::Known(list)) => {
-                            Either::Right(future::ok(MergedChunkInfo::Known(list)))
-                        }
-                        _ => unreachable!(),
-                    }
-                })
-                .merge_known_chunks()
-                .and_then(move |merged_chunk_info| {
-                    match merged_chunk_info {
-                        MergedChunkInfo::Known(chunk_list) => {
-                            let mut digest_list = vec![];
-                            let mut offset_list = vec![];
-                            for (offset, digest) in chunk_list {
-                                digest_list.push(digest_to_hex(&digest));
-                                offset_list.push(offset);
-                            }
-                            if verbose { println!("append chunks list len ({})", digest_list.len()); }
-                            let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
-                            let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
-                            let param_data = bytes::Bytes::from(param.to_string().into_bytes());
-                            let upload_data = Some(param_data);
-                            h2.send_request(request, upload_data)
-                                .and_then(move |response| {
-                                    response
-                                        .map_err(Error::from)
-                                        .and_then(H2Client::h2api_response)
-                                        .map_ok(|_| ())
-                                })
-                                .map_err(|err| format_err!("pipelined request failed: {}", err))
-                        }
-                        _ => unreachable!(),
-                    }
-                })
-                .try_for_each(|_| future::ok(()))
-                .map(|result| {
-                      let _ignore_closed_channel = verify_result_tx.send(result);
-                })
-        );
-
-        (verify_queue_tx, verify_result_rx)
-    }
-
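-    /// Download the fixed index of the previous backup snapshot for this
-    /// archive, verify it against the manifest, and register its chunk
-    /// digests as known (reusable) chunks.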
-    pub async fn download_previous_fixed_index(
-        &self,
-        archive_name: &str,
-        manifest: &BackupManifest,
-        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-    ) -> Result<FixedIndexReader, Error> {
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
-
-        let param = json!({ "archive-name": archive_name });
-        self.h2
-            .download("previous", Some(param), &mut tmpfile)
-            .await?;
-
-        let index = FixedIndexReader::new(tmpfile).map_err(|err| {
-            format_err!("unable to read fixed index '{}' - {}", archive_name, err)
-        })?;
-        // Note: do not use values stored in index (not trusted) - instead, compute them again
-        let (csum, size) = index.compute_csum();
-        manifest.verify_file(archive_name, &csum, size)?;
-
-        // add index chunks to known chunks
-        let mut known_chunks = known_chunks.lock().unwrap();
-        for i in 0..index.index_count() {
-            known_chunks.insert(*index.index_digest(i).unwrap());
-        }
-
-        if self.verbose {
-            println!(
-                "{}: known chunks list length is {}",
-                archive_name,
-                index.index_count()
-            );
-        }
-
-        Ok(index)
-    }
-
-    pub async fn download_previous_dynamic_index(
-        &self,
-        archive_name: &str,
-        manifest: &BackupManifest,
-        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-    ) -> Result<DynamicIndexReader, Error> {
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
-
-        let param = json!({ "archive-name": archive_name });
-        self.h2
-            .download("previous", Some(param), &mut tmpfile)
-            .await?;
-
-        let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
-            format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
-        })?;
-        // Note: do not use values stored in index (not trusted) - instead, compute them again
-        let (csum, size) = index.compute_csum();
-        manifest.verify_file(archive_name, &csum, size)?;
-
-        // add index chunks to known chunks
-        let mut known_chunks = known_chunks.lock().unwrap();
-        for i in 0..index.index_count() {
-            known_chunks.insert(*index.index_digest(i).unwrap());
-        }
-
-        if self.verbose {
-            println!(
-                "{}: known chunks list length is {}",
-                archive_name,
-                index.index_count()
-            );
-        }
-
-        Ok(index)
-    }
-
-    /// Retrieve backup time of last backup
-    pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
-        let data = self.h2.get("previous_backup_time", None).await?;
-        serde_json::from_value(data).map_err(|err| {
-            format_err!(
-                "Failed to parse backup time value returned by server - {}",
-                err
-            )
-        })
-    }
-
-    /// Download backup manifest (index.json) of last backup
-    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
-        let mut raw_data = Vec::with_capacity(64 * 1024);
-
-        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
-        self.h2
-            .download("previous", Some(param), &mut raw_data)
-            .await?;
-
-        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
-        // no expected digest available
-        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
-
-        let manifest =
-            BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
-
-        Ok(manifest)
-    }
-
-    // We have no `self` here for `h2` and `verbose`; the only other argument
-    // shared with another function on this path is `wid`. Those three could be
-    // grouped into a struct, but there is no real benefit since this is a private method.
-    #[allow(clippy::too_many_arguments)]
-    fn upload_chunk_info_stream(
-        h2: H2Client,
-        wid: u64,
-        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
-        prefix: &str,
-        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-        crypt_config: Option<Arc<CryptConfig>>,
-        compress: bool,
-        verbose: bool,
-    ) -> impl Future<Output = Result<UploadStats, Error>> {
-        let total_chunks = Arc::new(AtomicUsize::new(0));
-        let total_chunks2 = total_chunks.clone();
-        let known_chunk_count = Arc::new(AtomicUsize::new(0));
-        let known_chunk_count2 = known_chunk_count.clone();
-
-        let stream_len = Arc::new(AtomicUsize::new(0));
-        let stream_len2 = stream_len.clone();
-        let compressed_stream_len = Arc::new(AtomicU64::new(0));
-        let compressed_stream_len2 = compressed_stream_len.clone();
-        let reused_len = Arc::new(AtomicUsize::new(0));
-        let reused_len2 = reused_len.clone();
-
-        let append_chunk_path = format!("{}_index", prefix);
-        let upload_chunk_path = format!("{}_chunk", prefix);
-        let is_fixed_chunk_size = prefix == "fixed";
-
-        let (upload_queue, upload_result) =
-            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);
-
-        let start_time = std::time::Instant::now();
-
-        let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
-        let index_csum_2 = index_csum.clone();
-
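-        // Pipeline: hash each chunk and deduplicate against the known-chunk
-        // set, merge runs of known chunks, then upload new chunks while
-        // queueing confirmations for the index append task.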
-        stream
-            .and_then(move |data| {
-                let chunk_len = data.len();
-
-                total_chunks.fetch_add(1, Ordering::SeqCst);
-                let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
-
-                let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
-
-                if let Some(ref crypt_config) = crypt_config {
-                    chunk_builder = chunk_builder.crypt_config(crypt_config);
-                }
-
-                let mut known_chunks = known_chunks.lock().unwrap();
-                let digest = chunk_builder.digest();
-
-                let mut guard = index_csum.lock().unwrap();
-                let csum = guard.as_mut().unwrap();
-
-                let chunk_end = offset + chunk_len as u64;
-
-                if !is_fixed_chunk_size {
-                    csum.update(&chunk_end.to_le_bytes());
-                }
-                csum.update(digest);
-
-                let chunk_is_known = known_chunks.contains(digest);
-                if chunk_is_known {
-                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
-                    reused_len.fetch_add(chunk_len, Ordering::SeqCst);
-                    future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
-                } else {
-                    let compressed_stream_len2 = compressed_stream_len.clone();
-                    known_chunks.insert(*digest);
-                    future::ready(chunk_builder.build().map(move |(chunk, digest)| {
-                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
-                        MergedChunkInfo::New(ChunkInfo {
-                            chunk,
-                            digest,
-                            chunk_len: chunk_len as u64,
-                            offset,
-                        })
-                    }))
-                }
-            })
-            .merge_known_chunks()
-            .try_for_each(move |merged_chunk_info| {
-                let upload_queue = upload_queue.clone();
-
-                if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
-                    let offset = chunk_info.offset;
-                    let digest = chunk_info.digest;
-                    let digest_str = digest_to_hex(&digest);
-
-                    /* too verbose, needs finer verbosity setting granularity
-                    if verbose {
-                        println!("upload new chunk {} ({} bytes, offset {})", digest_str,
-                                 chunk_info.chunk_len, offset);
-                    }
-                    */
-
-                    let chunk_data = chunk_info.chunk.into_inner();
-                    let param = json!({
-                        "wid": wid,
-                        "digest": digest_str,
-                        "size": chunk_info.chunk_len,
-                        "encoded-size": chunk_data.len(),
-                    });
-
-                    let ct = "application/octet-stream";
-                    let request = H2Client::request_builder(
-                        "localhost",
-                        "POST",
-                        &upload_chunk_path,
-                        Some(param),
-                        Some(ct),
-                    )
-                    .unwrap();
-                    let upload_data = Some(bytes::Bytes::from(chunk_data));
-
-                    let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
-
-                    Either::Left(h2.send_request(request, upload_data).and_then(
-                        move |response| async move {
-                            upload_queue
-                                .send((new_info, Some(response)))
-                                .await
-                                .map_err(|err| {
-                                    format_err!("failed to send to upload queue: {}", err)
-                                })
-                        },
-                    ))
-                } else {
-                    Either::Right(async move {
-                        upload_queue
-                            .send((merged_chunk_info, None))
-                            .await
-                            .map_err(|err| format_err!("failed to send to upload queue: {}", err))
-                    })
-                }
-            })
-            .then(move |result| async move { upload_result.await?.and(result) }.boxed())
-            .and_then(move |_| {
-                let duration = start_time.elapsed();
-                let chunk_count = total_chunks2.load(Ordering::SeqCst);
-                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
-                let size = stream_len2.load(Ordering::SeqCst);
-                let size_reused = reused_len2.load(Ordering::SeqCst);
-                let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
-
-                let mut guard = index_csum_2.lock().unwrap();
-                let csum = guard.take().unwrap().finish();
-
-                futures::future::ok(UploadStats {
-                    chunk_count,
-                    chunk_reused,
-                    size,
-                    size_reused,
-                    size_compressed,
-                    duration,
-                    csum,
-                })
-            })
-    }
-
-    /// Upload speed test - prints result to stderr
-    pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
-        let mut data = vec![];
-        // generate a 4 MiB deterministic test pattern (each u32 counter value as little-endian bytes)
-        for i in 0..1024 * 1024 {
-            for j in 0..4 {
-                let byte = ((i >> (j << 3)) & 0xff) as u8;
-                data.push(byte);
-            }
-        }
-
-        let item_len = data.len();
-
-        let mut repeat = 0;
-
-        let (upload_queue, upload_result) = Self::response_queue(verbose);
-
-        let start_time = std::time::Instant::now();
-
-        loop {
-            repeat += 1;
-            if start_time.elapsed().as_secs() >= 5 {
-                break;
-            }
-
-            if verbose {
-                eprintln!("send test data ({} bytes)", data.len());
-            }
-            let request =
-                H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
-            let request_future = self
-                .h2
-                .send_request(request, Some(bytes::Bytes::from(data.clone())))
-                .await?;
-
-            upload_queue.send(request_future).await?;
-        }
-
-        drop(upload_queue); // close queue
-
-        let _ = upload_result.await?;
-
-        eprintln!(
-            "Uploaded {} chunks in {} seconds.",
-            repeat,
-            start_time.elapsed().as_secs()
-        );
-        let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
-        eprintln!(
-            "Time per request: {} microseconds.",
-            (start_time.elapsed().as_micros()) / (repeat as u128)
-        );
-
-        Ok(speed)
-    }
-}
diff --git a/src/client/http_client.rs b/src/client/http_client.rs
deleted file mode 100644 (file)
index a83b8d3..0000000
+++ /dev/null
@@ -1,1013 +0,0 @@
-use std::io::Write;
-use std::sync::{Arc, Mutex, RwLock};
-use std::time::Duration;
-
-use anyhow::{bail, format_err, Error};
-use futures::*;
-use http::Uri;
-use http::header::HeaderValue;
-use http::{Request, Response};
-use hyper::Body;
-use hyper::client::{Client, HttpConnector};
-use openssl::{ssl::{SslConnector, SslMethod}, x509::X509StoreContextRef};
-use serde_json::{json, Value};
-use percent_encoding::percent_encode;
-use xdg::BaseDirectories;
-
-use proxmox::{
-    api::error::HttpError,
-    sys::linux::tty,
-    tools::fs::{file_get_json, replace_file, CreateOptions},
-};
-
-use proxmox_http::client::HttpsConnector;
-use proxmox_http::uri::build_authority;
-
-use pbs_api_types::{Authid, Userid};
-use pbs_tools::broadcast_future::BroadcastFuture;
-use pbs_tools::json::json_object_to_query;
-use pbs_tools::ticket;
-use pbs_tools::percent_encoding::DEFAULT_ENCODE_SET;
-
-use super::pipe_to_stream::PipeToSendStream;
-use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
-
-/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
-/// certain error conditions. Keep it generous to avoid false positives under high load.
-const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
-
-#[derive(Clone)]
-pub struct AuthInfo {
-    pub auth_id: Authid,
-    pub ticket: String,
-    pub token: String,
-}
-
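-/// Builder-style options controlling `HttpClient` construction: credentials,
-/// certificate fingerprint handling and ticket/fingerprint caching.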
-pub struct HttpClientOptions {
-    prefix: Option<String>,
-    password: Option<String>,
-    fingerprint: Option<String>,
-    interactive: bool,
-    ticket_cache: bool,
-    fingerprint_cache: bool,
-    verify_cert: bool,
-}
-
-impl HttpClientOptions {
-
-    pub fn new_interactive(password: Option<String>, fingerprint: Option<String>) -> Self {
-        Self {
-            password,
-            fingerprint,
-            fingerprint_cache: true,
-            ticket_cache: true,
-            interactive: true,
-            prefix: Some("proxmox-backup".to_string()),
-            ..Self::default()
-        }
-    }
-
-    pub fn new_non_interactive(password: String, fingerprint: Option<String>) -> Self {
-        Self {
-            password: Some(password),
-            fingerprint,
-            ..Self::default()
-        }
-    }
-
-    pub fn prefix(mut self, prefix: Option<String>) -> Self {
-        self.prefix = prefix;
-        self
-    }
-
-    pub fn password(mut self, password: Option<String>) -> Self {
-        self.password = password;
-        self
-    }
-
-    pub fn fingerprint(mut self, fingerprint: Option<String>) -> Self {
-        self.fingerprint = fingerprint;
-        self
-    }
-
-    pub fn interactive(mut self, interactive: bool) -> Self {
-        self.interactive = interactive;
-        self
-    }
-
-    pub fn ticket_cache(mut self, ticket_cache: bool) -> Self {
-        self.ticket_cache = ticket_cache;
-        self
-    }
-
-    pub fn fingerprint_cache(mut self, fingerprint_cache: bool) -> Self {
-        self.fingerprint_cache = fingerprint_cache;
-        self
-    }
-
-    pub fn verify_cert(mut self, verify_cert: bool) -> Self {
-        self.verify_cert = verify_cert;
-        self
-    }
-}
-
-impl Default for HttpClientOptions {
-    fn default() -> Self {
-        Self {
-            prefix: None,
-            password: None,
-            fingerprint: None,
-            interactive: false,
-            ticket_cache: false,
-            fingerprint_cache: false,
-            verify_cert: true,
-        }
-    }
-}
-
-/// HTTP(S) API client
-pub struct HttpClient {
-    client: Client<HttpsConnector>,
-    server: String,
-    port: u16,
-    fingerprint: Arc<Mutex<Option<String>>>,
-    first_auth: Option<BroadcastFuture<()>>,
-    auth: Arc<RwLock<AuthInfo>>,
-    ticket_abort: futures::future::AbortHandle,
-    _options: HttpClientOptions,
-}
-
-/// Delete stored ticket data (logout)
-pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {
-
-    let base = BaseDirectories::with_prefix(prefix)?;
-
-    // usually /run/user/<uid>/...
-    let path = base.place_runtime_file("tickets")?;
-
-    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
-
-    let mut data = file_get_json(&path, Some(json!({})))?;
-
-    if let Some(map) = data[server].as_object_mut() {
-        map.remove(username.as_str());
-    }
-
-    replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
-
-    Ok(())
-}
-
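-/// Persist the accepted certificate fingerprint for `server` in the per-user
-/// fingerprint cache file, replacing any previous entry.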
-fn store_fingerprint(prefix: &str, server: &str, fingerprint: &str) -> Result<(), Error> {
-
-    let base = BaseDirectories::with_prefix(prefix)?;
-
-    // usually ~/.config/<prefix>/fingerprints
-    let path = base.place_config_file("fingerprints")?;
-
-    let raw = match std::fs::read_to_string(&path) {
-        Ok(v) => v,
-        Err(err) => {
-            if err.kind() == std::io::ErrorKind::NotFound {
-                String::new()
-            } else {
-                bail!("unable to read fingerprints from {:?} - {}", path, err);
-            }
-        }
-    };
-
-    let mut result = String::new();
-
-    raw.split('\n').for_each(|line| {
-        let items: Vec<String> = line.split_whitespace().map(String::from).collect();
-        if items.len() == 2 {
-            if items[0] == server {
-                // found, add later with new fingerprint
-            } else {
-                result.push_str(line);
-                result.push('\n');
-            }
-        }
-    });
-
-    result.push_str(server);
-    result.push(' ');
-    result.push_str(fingerprint);
-    result.push('\n');
-
-    replace_file(path, result.as_bytes(), CreateOptions::new())?;
-
-    Ok(())
-}
-
-fn load_fingerprint(prefix: &str, server: &str) -> Option<String> {
-
-    let base = BaseDirectories::with_prefix(prefix).ok()?;
-
-    // usually ~/.config/<prefix>/fingerprints
-    let path = base.place_config_file("fingerprints").ok()?;
-
-    let raw = std::fs::read_to_string(&path).ok()?;
-
-    for line in raw.split('\n') {
-        let items: Vec<String> = line.split_whitespace().map(String::from).collect();
-        if items.len() == 2 && items[0] == server {
-            return Some(items[1].clone());
-        }
-    }
-
-    None
-}
-
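-/// Cache the authentication ticket and CSRF token for `server`/`username`,
-/// pruning cached entries older than the ticket lifetime.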
-fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> {
-
-    let base = BaseDirectories::with_prefix(prefix)?;
-
-    // usually /run/user/<uid>/...
-    let path = base.place_runtime_file("tickets")?;
-
-    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
-
-    let mut data = file_get_json(&path, Some(json!({})))?;
-
-    let now = proxmox::tools::time::epoch_i64();
-
-    data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
-
-    let mut new_data = json!({});
-
-    let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
-
-    let empty = serde_json::map::Map::new();
-    for (server, info) in data.as_object().unwrap_or(&empty) {
-        for (user, uinfo) in info.as_object().unwrap_or(&empty) {
-            if let Some(timestamp) = uinfo["timestamp"].as_i64() {
-                let age = now - timestamp;
-                if age < ticket_lifetime {
-                    new_data[server][user] = uinfo.clone();
-                }
-            }
-        }
-    }
-
-    replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
-
-    Ok(())
-}
-
-fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(String, String)> {
-    let base = BaseDirectories::with_prefix(prefix).ok()?;
-
-    // usually /run/user/<uid>/...
-    let path = base.place_runtime_file("tickets").ok()?;
-    let data = file_get_json(&path, None).ok()?;
-    let now = proxmox::tools::time::epoch_i64();
-    let ticket_lifetime = ticket::TICKET_LIFETIME - 60;
-    let uinfo = data[server][userid.as_str()].as_object()?;
-    let timestamp = uinfo["timestamp"].as_i64()?;
-    let age = now - timestamp;
-
-    if age < ticket_lifetime {
-        let ticket = uinfo["ticket"].as_str()?;
-        let token = uinfo["token"].as_str()?;
-        Some((ticket.to_owned(), token.to_owned()))
-    } else {
-        None
-    }
-}
-
-fn build_uri(server: &str, port: u16, path: &str, query: Option<String>) -> Result<Uri, Error> {
-    Uri::builder()
-        .scheme("https")
-        .authority(build_authority(server, port)?)
-        .path_and_query(match query {
-            Some(query) => format!("/{}?{}", path, query),
-            None => format!("/{}", path),
-        })
-        .build()
-        .map_err(|err| format_err!("error building uri - {}", err))
-}
-
-impl HttpClient {
-    pub fn new(
-        server: &str,
-        port: u16,
-        auth_id: &Authid,
-        mut options: HttpClientOptions,
-    ) -> Result<Self, Error> {
-
-        let verified_fingerprint = Arc::new(Mutex::new(None));
-
-        let mut expected_fingerprint = options.fingerprint.take();
-
-        if expected_fingerprint.is_some() {
-            // do not store fingerprints passed via options in cache
-            options.fingerprint_cache = false;
-        } else if options.fingerprint_cache && options.prefix.is_some() {
-            expected_fingerprint = load_fingerprint(options.prefix.as_ref().unwrap(), server);
-        }
-
-        let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
-
-        if options.verify_cert {
-            let server = server.to_string();
-            let verified_fingerprint = verified_fingerprint.clone();
-            let interactive = options.interactive;
-            let fingerprint_cache = options.fingerprint_cache;
-            let prefix = options.prefix.clone();
-            ssl_connector_builder.set_verify_callback(openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| {
-                match Self::verify_callback(valid, ctx, expected_fingerprint.as_ref(), interactive) {
-                    Ok(None) => true,
-                    Ok(Some(fingerprint)) => {
-                        if fingerprint_cache && prefix.is_some() {
-                            if let Err(err) = store_fingerprint(
-                                prefix.as_ref().unwrap(), &server, &fingerprint) {
-                                eprintln!("{}", err);
-                            }
-                        }
-                        *verified_fingerprint.lock().unwrap() = Some(fingerprint);
-                        true
-                    },
-                    Err(err) => {
-                        eprintln!("certificate validation failed - {}", err);
-                        false
-                    },
-                }
-            });
-        } else {
-            ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
-        }
-
-        let mut httpc = HttpConnector::new();
-        httpc.set_nodelay(true); // important for h2 download performance!
-        httpc.enforce_http(false); // we want https...
-
-        httpc.set_connect_timeout(Some(std::time::Duration::new(10, 0)));
-        let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
-
-        let client = Client::builder()
-        //.http2_initial_stream_window_size( (1 << 31) - 2)
-        //.http2_initial_connection_window_size( (1 << 31) - 2)
-            .build::<_, Body>(https);
-
-        let password = options.password.take();
-        let use_ticket_cache = options.ticket_cache && options.prefix.is_some();
-
-        let password = if let Some(password) = password {
-            password
-        } else {
-            let userid = if auth_id.is_token() {
-                bail!("API token secret must be provided!");
-            } else {
-                auth_id.user()
-            };
-            let mut ticket_info = None;
-            if use_ticket_cache {
-                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
-            }
-            if let Some((ticket, _token)) = ticket_info {
-                ticket
-            } else {
-                Self::get_password(userid, options.interactive)?
-            }
-        };
-
-        let auth = Arc::new(RwLock::new(AuthInfo {
-            auth_id: auth_id.clone(),
-            ticket: password.clone(),
-            token: "".to_string(),
-        }));
-
-        let server2 = server.to_string();
-        let client2 = client.clone();
-        let auth2 = auth.clone();
-        let prefix2 = options.prefix.clone();
-
-        let renewal_future = async move {
-            loop {
-                tokio::time::sleep(Duration::new(60 * 15, 0)).await; // 15 minutes
-                let (auth_id, ticket) = {
-                    let authinfo = auth2.read().unwrap().clone();
-                    (authinfo.auth_id, authinfo.ticket)
-                };
-                match Self::credentials(client2.clone(), server2.clone(), port, auth_id.user().clone(), ticket).await {
-                    Ok(auth) => {
-                        if use_ticket_cache && prefix2.is_some() {
-                            let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
-                        }
-                        *auth2.write().unwrap() = auth;
-                    },
-                    Err(err) => {
-                        eprintln!("re-authentication failed: {}", err);
-                        return;
-                    }
-                }
-            }
-        };
-
-        let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
-
-        let login_future = Self::credentials(
-            client.clone(),
-            server.to_owned(),
-            port,
-            auth_id.user().clone(),
-            password,
-        ).map_ok({
-            let server = server.to_string();
-            let prefix = options.prefix.clone();
-            let authinfo = auth.clone();
-
-            move |auth| {
-                if use_ticket_cache && prefix.is_some() {
-                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.auth_id.to_string(), &auth.ticket, &auth.token);
-                }
-                *authinfo.write().unwrap() = auth;
-                tokio::spawn(renewal_future);
-            }
-        });
-
-        let first_auth = if auth_id.is_token() {
-            // TODO check access here?
-            None
-        } else {
-            Some(BroadcastFuture::new(Box::new(login_future)))
-        };
-
-        Ok(Self {
-            client,
-            server: String::from(server),
-            port,
-            fingerprint: verified_fingerprint,
-            auth,
-            ticket_abort,
-            first_auth,
-            _options: options,
-        })
-    }
-
-    /// Login
-    ///
-    /// Login is done on demand, so this is only required if you need
-    /// access to authentication data in 'AuthInfo'.
-    ///
-    /// Note: tickets are periodically renewed, so this can also be
-    /// used to query the renewed ticket.
-    pub async fn login(&self) -> Result<AuthInfo, Error> {
-        if let Some(future) = &self.first_auth {
-            future.listen().await?;
-        }
-
-        let authinfo = self.auth.read().unwrap();
-        Ok(authinfo.clone())
-    }
-
-    /// Returns the certificate fingerprint that was verified (and accepted) during the TLS handshake, if any.
-    pub fn fingerprint(&self) -> Option<String> {
-        (*self.fingerprint.lock().unwrap()).clone()
-    }
-
-    fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
-        // If we're on a TTY, query the user for a password
-        if interactive && tty::stdin_isatty() {
-            let msg = format!("Password for \"{}\": ", username);
-            return Ok(String::from_utf8(tty::read_password(&msg)?)?);
-        }
-
-        bail!("no password input mechanism available");
-    }
-
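-    // Invoked from the openssl verify callback. Returns Ok(None) when openssl
-    // already validated the chain; otherwise the peer is accepted only on a
-    // fingerprint match or interactive confirmation, returning the accepted
-    // fingerprint.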
-    fn verify_callback(
-        openssl_valid: bool,
-        ctx: &mut X509StoreContextRef,
-        expected_fingerprint: Option<&String>,
-        interactive: bool,
-    ) -> Result<Option<String>, Error> {
-
-        if openssl_valid {
-            return Ok(None);
-        }
-
-        let cert = match ctx.current_cert() {
-            Some(cert) => cert,
-            None => bail!("context lacks current certificate."),
-        };
-
-        let depth = ctx.error_depth();
-        if depth != 0 { bail!("context depth != 0") }
-
-        let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
-            Ok(fp) => fp,
-            Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
-        };
-        let fp_string = proxmox::tools::digest_to_hex(&fp);
-        let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
-            .collect::<Vec<&str>>().join(":");
-
-        if let Some(expected_fingerprint) = expected_fingerprint {
-            let expected_fingerprint = expected_fingerprint.to_lowercase();
-            if expected_fingerprint == fp_string {
-                return Ok(Some(fp_string));
-            } else {
-                eprintln!("WARNING: certificate fingerprint does not match expected fingerprint!");
-                eprintln!("expected:    {}", expected_fingerprint);
-            }
-        }
-
-        // If we're on a TTY, query the user
-        if interactive && tty::stdin_isatty() {
-            eprintln!("fingerprint: {}", fp_string);
-            loop {
-                eprint!("Are you sure you want to continue connecting? (y/n): ");
-                let _ = std::io::stdout().flush();
-                use std::io::{BufRead, BufReader};
-                let mut line = String::new();
-                match BufReader::new(std::io::stdin()).read_line(&mut line) {
-                    Ok(_) => {
-                        let trimmed = line.trim();
-                        if trimmed == "y" || trimmed == "Y" {
-                            return Ok(Some(fp_string));
-                        } else if trimmed == "n" || trimmed == "N" {
-                            bail!("Certificate fingerprint was not confirmed.");
-                        } else {
-                            continue;
-                        }
-                    }
-                    Err(err) => bail!("Certificate fingerprint was not confirmed - {}.", err),
-                }
-            }
-        }
-
-        bail!("Certificate fingerprint was not confirmed.");
-    }
-
-    pub async fn request(&self, mut req: Request<Body>) -> Result<Value, Error> {
-
-        let client = self.client.clone();
-
-        let auth =  self.login().await?;
-        if auth.auth_id.is_token() {
-            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
-        } else {
-            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
-        }
-
-        Self::api_request(client, req).await
-    }
-
-    pub async fn get(
-        &self,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
-        self.request(req).await
-    }
-
-    pub async fn delete(
-        &mut self,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
-        self.request(req).await
-    }
-
-    pub async fn post(
-        &mut self,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
-        self.request(req).await
-    }
-
-    pub async fn put(
-        &mut self,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder(&self.server, self.port, "PUT", path, data)?;
-        self.request(req).await
-    }
-
-    pub async fn download(
-        &mut self,
-        path: &str,
-        output: &mut (dyn Write + Send),
-    ) -> Result<(), Error> {
-        let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;
-
-        let client = self.client.clone();
-
-        let auth = self.login().await?;
-
-        let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-        req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-
-        let resp = tokio::time::timeout(
-            HTTP_TIMEOUT,
-            client.request(req)
-        )
-            .await
-            .map_err(|_| format_err!("http download request timed out"))??;
-        let status = resp.status();
-        if !status.is_success() {
-            HttpClient::api_response(resp)
-                .map(|_| Err(format_err!("unknown error")))
-                .await?
-        } else {
-            resp.into_body()
-                .map_err(Error::from)
-                .try_fold(output, move |acc, chunk| async move {
-                    acc.write_all(&chunk)?;
-                    Ok::<_, Error>(acc)
-                })
-                .await?;
-        }
-        Ok(())
-    }
-
-    pub async fn upload(
-        &mut self,
-        content_type: &str,
-        body: Body,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Value, Error> {
-
-        let query = match data {
-            Some(data) => Some(json_object_to_query(data)?),
-            None => None,
-        };
-        let url = build_uri(&self.server, self.port, path, query)?;
-
-        let req = Request::builder()
-            .method("POST")
-            .uri(url)
-            .header("User-Agent", "proxmox-backup-client/1.0")
-            .header("Content-Type", content_type)
-            .body(body).unwrap();
-
-        self.request(req).await
-    }
-
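-    /// Send an HTTP UPGRADE request for `protocol_name` and, on success,
-    /// drive an HTTP/2 session over the upgraded connection. Returns the
-    /// `H2Client` plus an abort handle for the spawned connection task.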
-    pub async fn start_h2_connection(
-        &self,
-        mut req: Request<Body>,
-        protocol_name: String,
-    ) -> Result<(H2Client, futures::future::AbortHandle), Error> {
-
-        let client = self.client.clone();
-        let auth =  self.login().await?;
-
-        if auth.auth_id.is_token() {
-            let enc_api_token = format!("PBSAPIToken {}:{}", auth.auth_id, percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-            req.headers_mut().insert("Authorization", HeaderValue::from_str(&enc_api_token).unwrap());
-        } else {
-            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
-            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
-            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());
-        }
-
-        req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());
-
-        let resp = tokio::time::timeout(
-            HTTP_TIMEOUT,
-            client.request(req)
-        )
-            .await
-            .map_err(|_| format_err!("http upgrade request timed out"))??;
-        let status = resp.status();
-
-        if status != http::StatusCode::SWITCHING_PROTOCOLS {
-            Self::api_response(resp).await?;
-            bail!("unknown error");
-        }
-
-        let upgraded = hyper::upgrade::on(resp).await?;
-
-        let max_window_size = (1 << 31) - 2;
-
-        let (h2, connection) = h2::client::Builder::new()
-            .initial_connection_window_size(max_window_size)
-            .initial_window_size(max_window_size)
-            .max_frame_size(4*1024*1024)
-            .handshake(upgraded)
-            .await?;
-
-        let connection = connection
-            .map_err(|_| eprintln!("HTTP/2.0 connection failed"));
-
-        let (connection, abort) = futures::future::abortable(connection);
-        // An abortable future resolves to a Result that is Err(Aborted) when
-        // cancelled; since we don't care about the output, map it away:
-        let connection = connection.map(|_| ());
-
-        // Spawn a new task to drive the connection state
-        tokio::spawn(connection);
-
-        // Wait until the `SendRequest` handle has available capacity.
-        let c = h2.ready().await?;
-        Ok((H2Client::new(c), abort))
-    }
-
-    async fn credentials(
-        client: Client<HttpsConnector>,
-        server: String,
-        port: u16,
-        username: Userid,
-        password: String,
-    ) -> Result<AuthInfo, Error> {
-        let data = json!({ "username": username, "password": password });
-        let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
-        let cred = Self::api_request(client, req).await?;
-        let auth = AuthInfo {
-            auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
-            ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
-            token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
-        };
-
-        Ok(auth)
-    }
-
-    async fn api_response(response: Response<Body>) -> Result<Value, Error> {
-        let status = response.status();
-        let data = hyper::body::to_bytes(response.into_body()).await?;
-
-        let text = String::from_utf8(data.to_vec()).unwrap();
-        if status.is_success() {
-            if text.is_empty() {
-                Ok(Value::Null)
-            } else {
-                let value: Value = serde_json::from_str(&text)?;
-                Ok(value)
-            }
-        } else {
-            Err(Error::from(HttpError::new(status, text)))
-        }
-    }
-
-    async fn api_request(
-        client: Client<HttpsConnector>,
-        req: Request<Body>
-    ) -> Result<Value, Error> {
-
-        Self::api_response(
-            tokio::time::timeout(
-                HTTP_TIMEOUT,
-                client.request(req)
-            )
-                .await
-                .map_err(|_| format_err!("http request timed out"))??
-        ).await
-    }
-
-    // Read-only access to server property
-    pub fn server(&self) -> &str {
-        &self.server
-    }
-
-    pub fn port(&self) -> u16 {
-        self.port
-    }
-
-    pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
-        if let Some(data) = data {
-            if method == "POST" {
-                let url = build_uri(server, port, path, None)?;
-                let request = Request::builder()
-                    .method(method)
-                    .uri(url)
-                    .header("User-Agent", "proxmox-backup-client/1.0")
-                    .header(hyper::header::CONTENT_TYPE, "application/json")
-                    .body(Body::from(data.to_string()))?;
-                Ok(request)
-            } else {
-                let query = json_object_to_query(data)?;
-                let url = build_uri(server, port, path, Some(query))?;
-                let request = Request::builder()
-                    .method(method)
-                    .uri(url)
-                    .header("User-Agent", "proxmox-backup-client/1.0")
-                    .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
-                    .body(Body::empty())?;
-                Ok(request)
-            }
-        } else {
-            let url = build_uri(server, port, path, None)?;
-            let request = Request::builder()
-                .method(method)
-                .uri(url)
-                .header("User-Agent", "proxmox-backup-client/1.0")
-                .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
-                .body(Body::empty())?;
-
-            Ok(request)
-        }
-    }
-}
-
-impl Drop for HttpClient {
-    fn drop(&mut self) {
-        self.ticket_abort.abort();
-    }
-}
-
-
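-/// Thin, cloneable wrapper around an established `h2::client::SendRequest`
-/// handle, providing REST-style helpers plus streaming upload/download.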
-#[derive(Clone)]
-pub struct H2Client {
-    h2: h2::client::SendRequest<bytes::Bytes>,
-}
-
-impl H2Client {
-
-    pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
-        Self { h2 }
-    }
-
-    pub async fn get(
-        &self,
-        path: &str,
-        param: Option<Value>
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder("localhost", "GET", path, param, None).unwrap();
-        self.request(req).await
-    }
-
-    pub async fn put(
-        &self,
-        path: &str,
-        param: Option<Value>
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder("localhost", "PUT", path, param, None).unwrap();
-        self.request(req).await
-    }
-
-    pub async fn post(
-        &self,
-        path: &str,
-        param: Option<Value>
-    ) -> Result<Value, Error> {
-        let req = Self::request_builder("localhost", "POST", path, param, None).unwrap();
-        self.request(req).await
-    }
-
-    pub async fn download<W: Write + Send>(
-        &self,
-        path: &str,
-        param: Option<Value>,
-        mut output: W,
-    ) -> Result<(), Error> {
-        let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();
-
-        let response_future = self.send_request(request, None).await?;
-
-        let resp = response_future.await?;
-
-        let status = resp.status();
-        if !status.is_success() {
-            H2Client::h2api_response(resp).await?; // raise error
-            unreachable!();
-        }
-
-        let mut body = resp.into_body();
-        while let Some(chunk) = body.data().await {
-            let chunk = chunk?;
-            body.flow_control().release_capacity(chunk.len())?;
-            output.write_all(&chunk)?;
-        }
-
-        Ok(())
-    }
-
-    pub async fn upload(
-        &self,
-        method: &str, // POST or PUT
-        path: &str,
-        param: Option<Value>,
-        content_type: &str,
-        data: Vec<u8>,
-    ) -> Result<Value, Error> {
-        let request = Self::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
-
-        let mut send_request = self.h2.clone().ready().await?;
-
-        let (response, stream) = send_request.send_request(request, false).unwrap();
-
-        PipeToSendStream::new(bytes::Bytes::from(data), stream).await?;
-
-        response
-            .map_err(Error::from)
-            .and_then(Self::h2api_response)
-            .await
-    }
-
-    async fn request(
-        &self,
-        request: Request<()>,
-    ) -> Result<Value, Error> {
-
-        self.send_request(request, None)
-            .and_then(move |response| {
-                response
-                    .map_err(Error::from)
-                    .and_then(Self::h2api_response)
-            })
-            .await
-    }
-
-    pub fn send_request(
-        &self,
-        request: Request<()>,
-        data: Option<bytes::Bytes>,
-    ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
-
-        self.h2.clone()
-            .ready()
-            .map_err(Error::from)
-            .and_then(move |mut send_request| async move {
-                if let Some(data) = data {
-                    let (response, stream) = send_request.send_request(request, false).unwrap();
-                    PipeToSendStream::new(data, stream).await?;
-                    Ok(response)
-                } else {
-                    let (response, _stream) = send_request.send_request(request, true).unwrap();
-                    Ok(response)
-                }
-            })
-    }
-
-    pub async fn h2api_response(
-        response: Response<h2::RecvStream>,
-    ) -> Result<Value, Error> {
-        let status = response.status();
-
-        let (_head, mut body) = response.into_parts();
-
-        let mut data = Vec::new();
-        while let Some(chunk) = body.data().await {
-            let chunk = chunk?;
-            // Whenever data is received, the caller is responsible for
-            // releasing capacity back to the server once it has freed
-            // the data from memory.
-            // Let the server send more data.
-            body.flow_control().release_capacity(chunk.len())?;
-            data.extend(chunk);
-        }
-
-        let text = String::from_utf8(data)?;
-        if status.is_success() {
-            if text.is_empty() {
-                Ok(Value::Null)
-            } else {
-                let mut value: Value = serde_json::from_str(&text)?;
-                if let Some(map) = value.as_object_mut() {
-                    if let Some(data) = map.remove("data") {
-                        return Ok(data);
-                    }
-                }
-                bail!("got result without data property");
-            }
-        } else {
-            Err(Error::from(HttpError::new(status, text)))
-        }
-    }
-
-    // Note: We always encode parameters in the URL
-    pub fn request_builder(
-        server: &str,
-        method: &str,
-        path: &str,
-        param: Option<Value>,
-        content_type: Option<&str>,
-    ) -> Result<Request<()>, Error> {
-        let path = path.trim_matches('/');
-
-        let content_type = content_type.unwrap_or("application/x-www-form-urlencoded");
-        let query = match param {
-            Some(param) => {
-                let query = json_object_to_query(param)?;
-                // We ran into problems with hyper at around 6000 characters - so we stay well below that to be safe
-                if query.len() > 4096 {
-                    bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len());
-                }
-                Some(query)
-            }
-            None => None,
-        };
-
-        let url = build_uri(server, 8007, path, query)?;
-        let request = Request::builder()
-            .method(method)
-            .uri(url)
-            .header("User-Agent", "proxmox-backup-client/1.0")
-            .header(hyper::header::CONTENT_TYPE, content_type)
-            .body(())?;
-        Ok(request)
-    }
-}
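
For reference, `h2api_response` above implements the PBS API convention of wrapping the actual payload in a top-level "data" property. A minimal, self-contained sketch of just that unwrapping step (assuming only serde_json; `extract_data` is an illustrative helper, not part of this codebase):

use serde_json::{json, Value};

// Extract the payload from a PBS-style API reply: { "data": ... }.
// h2api_response bails when the property is missing; this sketch
// returns None instead.
fn extract_data(mut value: Value) -> Option<Value> {
    value.as_object_mut()?.remove("data")
}

fn main() {
    let reply = json!({ "data": { "status": "OK" } });
    assert_eq!(extract_data(reply), Some(json!({ "status": "OK" })));
}
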
diff --git a/src/client/merge_known_chunks.rs b/src/client/merge_known_chunks.rs
deleted file mode 100644 (file)
index ef7a8f9..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use anyhow::Error;
-use futures::{ready, Stream};
-use pin_project::pin_project;
-
-use pbs_datastore::data_blob::ChunkInfo;
-
-pub enum MergedChunkInfo {
-    Known(Vec<(u64, [u8; 32])>),
-    New(ChunkInfo),
-}
-
-pub trait MergeKnownChunks: Sized {
-    fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
-}
-
-#[pin_project]
-pub struct MergeKnownChunksQueue<S> {
-    #[pin]
-    input: S,
-    buffer: Option<MergedChunkInfo>,
-}
-
-impl<S> MergeKnownChunks for S
-where
-    S: Stream<Item = Result<MergedChunkInfo, Error>>,
-{
-    fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self> {
-        MergeKnownChunksQueue {
-            input: self,
-            buffer: None,
-        }
-    }
-}
-
-impl<S> Stream for MergeKnownChunksQueue<S>
-where
-    S: Stream<Item = Result<MergedChunkInfo, Error>>,
-{
-    type Item = Result<MergedChunkInfo, Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-        let mut this = self.project();
-
-        loop {
-            match ready!(this.input.as_mut().poll_next(cx)) {
-                Some(Err(err)) => return Poll::Ready(Some(Err(err))),
-                None => {
-                    if let Some(last) = this.buffer.take() {
-                        return Poll::Ready(Some(Ok(last)));
-                    } else {
-                        return Poll::Ready(None);
-                    }
-                }
-                Some(Ok(merged_chunk_info)) => {
-                    match merged_chunk_info {
-                        MergedChunkInfo::Known(list) => {
-                            let last = this.buffer.take();
-
-                            match last {
-                                None => {
-                                    *this.buffer = Some(MergedChunkInfo::Known(list));
-                                    // continue
-                                }
-                                Some(MergedChunkInfo::Known(mut last_list)) => {
-                                    last_list.extend_from_slice(&list);
-                                    let len = last_list.len();
-                                    *this.buffer = Some(MergedChunkInfo::Known(last_list));
-
-                                    if len >= 64 {
-                                        return Poll::Ready(this.buffer.take().map(Ok));
-                                    }
-                                    // continue
-                                }
-                                Some(MergedChunkInfo::New(_)) => {
-                                    *this.buffer = Some(MergedChunkInfo::Known(list));
-                                    return Poll::Ready(last.map(Ok));
-                                }
-                            }
-                        }
-                        MergedChunkInfo::New(chunk_info) => {
-                            let new = MergedChunkInfo::New(chunk_info);
-                            if let Some(last) = this.buffer.take() {
-                                *this.buffer = Some(new);
-                                return Poll::Ready(Some(Ok(last)));
-                            } else {
-                                return Poll::Ready(Some(Ok(new)));
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-}
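
The stream above coalesces consecutive `Known` entries into one batch and flushes either when 64 entries have accumulated or when a `New` chunk interrupts the run. The same batching rule as a simplified, synchronous sketch (plain `Vec` and bare `u64` offsets instead of a `Stream` of `(u64, [u8; 32])` pairs):

enum Item {
    Known(u64),
    New(u64),
}

enum Merged {
    Known(Vec<u64>),
    New(u64),
}

fn merge(input: impl IntoIterator<Item = Item>) -> Vec<Merged> {
    let mut out = Vec::new();
    let mut buf: Vec<u64> = Vec::new();
    for item in input {
        match item {
            Item::Known(offset) => {
                buf.push(offset);
                if buf.len() >= 64 {
                    // batch full -> flush, like the 64-entry check above
                    out.push(Merged::Known(std::mem::take(&mut buf)));
                }
            }
            Item::New(offset) => {
                // a new chunk ends the current run of known chunks
                if !buf.is_empty() {
                    out.push(Merged::Known(std::mem::take(&mut buf)));
                }
                out.push(Merged::New(offset));
            }
        }
    }
    if !buf.is_empty() {
        out.push(Merged::Known(buf)); // flush the remainder at end of input
    }
    out
}

fn main() {
    let input = (0u64..65).map(Item::Known).chain([Item::New(99)]);
    let merged = merge(input);
    // 64 known offsets flushed as one batch, the 65th flushed by New(99)
    assert!(matches!(merged[0], Merged::Known(ref v) if v.len() == 64));
    assert!(matches!(merged[1], Merged::Known(ref v) if v.len() == 1));
    assert!(matches!(merged[2], Merged::New(99)));
}
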
diff --git a/src/client/mod.rs b/src/client/mod.rs
deleted file mode 100644 (file)
index 8ff0001..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-//! Client-side interface to the Proxmox Backup Server
-//!
-//! This library implements the client side to access the backup
-//! server using HTTPS.
-
-use anyhow::Error;
-
-use pbs_api_types::{Authid, Userid};
-use pbs_tools::ticket::Ticket;
-use pbs_tools::cert::CertInfo;
-use pbs_tools::auth::private_auth_key;
-
-mod merge_known_chunks;
-pub mod pipe_to_stream;
-
-mod http_client;
-pub use http_client::*;
-
-mod vsock_client;
-pub use vsock_client::*;
-
-mod task_log;
-pub use task_log::*;
-
-mod backup_reader;
-pub use backup_reader::*;
-
-mod backup_writer;
-pub use backup_writer::*;
-
-mod remote_chunk_reader;
-pub use remote_chunk_reader::*;
-
-mod pxar_backup_stream;
-pub use pxar_backup_stream::*;
-
-mod backup_repo;
-pub use backup_repo::*;
-
-mod backup_specification;
-pub use backup_specification::*;
-
-pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
-
-/// Connect to localhost:8007 as root@pam
-///
-/// This automatically creates a ticket if run as the 'root' user.
-pub fn connect_to_localhost() -> Result<HttpClient, Error> {
-
-    let uid = nix::unistd::Uid::current();
-
-    let client = if uid.is_root() {
-        let ticket = Ticket::new("PBS", Userid::root_userid())?
-            .sign(private_auth_key(), None)?;
-        let fingerprint = CertInfo::new()?.fingerprint()?;
-        let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
-
-        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
-    } else {
-        let options = HttpClientOptions::new_interactive(None, None);
-
-        HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
-    };
-
-    Ok(client)
-}
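
A hypothetical usage sketch for `connect_to_localhost()`; the `version` endpoint is only an example, and `HttpClient::get` is the plain JSON GET helper used elsewhere in this crate:

use anyhow::Error;

async fn print_server_version() -> Result<(), Error> {
    let client = connect_to_localhost()?;
    let result = client.get("api2/json/version", None).await?;
    println!("{}", result);
    Ok(())
}
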
diff --git a/src/client/pipe_to_stream.rs b/src/client/pipe_to_stream.rs
deleted file mode 100644 (file)
index d461b1d..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Implement simple flow control for h2 client
-//
-// See also: hyper/src/proto/h2/mod.rs
-
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use anyhow::{format_err, Error};
-use bytes::Bytes;
-use futures::{ready, Future};
-use h2::SendStream;
-
-pub struct PipeToSendStream {
-    body_tx: SendStream<Bytes>,
-    data: Option<Bytes>,
-}
-
-impl PipeToSendStream {
-    pub fn new(data: Bytes, tx: SendStream<Bytes>) -> PipeToSendStream {
-        PipeToSendStream {
-            body_tx: tx,
-            data: Some(data),
-        }
-    }
-}
-
-impl Future for PipeToSendStream {
-    type Output = Result<(), Error>;
-
-    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-        let this = self.get_mut();
-
-        if this.data.is_some() {
-            // just reserve 1 byte to make sure there's some
-            // capacity available. h2 will handle the capacity
-            // management for the actual body chunk.
-            this.body_tx.reserve_capacity(1);
-
-            if this.body_tx.capacity() == 0 {
-                loop {
-                    match ready!(this.body_tx.poll_capacity(cx)) {
-                        Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
-                        Some(Ok(0)) => {}
-                        Some(Ok(_)) => break,
-                        None => return Poll::Ready(Err(format_err!("protocol canceled"))),
-                    }
-                }
-            } else if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
-                return Poll::Ready(Err(match reset {
-                    Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
-                    Err(err) => Error::from(err),
-                }));
-            }
-
-            this.body_tx
-                .send_data(this.data.take().unwrap(), true)
-                .map_err(Error::from)?;
-
-            Poll::Ready(Ok(()))
-        } else {
-            if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
-                return Poll::Ready(Err(match reset {
-                    Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
-                    Err(err) => Error::from(err),
-                }));
-            }
-            Poll::Ready(Ok(()))
-        }
-    }
-}
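
Usage mirrors `upload()` in the deleted http_client.rs above: `send_request` with `end_of_stream = false` returns the response future plus the `SendStream`, and `PipeToSendStream` then pushes the whole body while honoring h2 window updates. A hedged sketch (the helper name is illustrative):

use anyhow::Error;
use bytes::Bytes;

async fn send_with_body(
    mut send_request: h2::client::SendRequest<Bytes>,
    request: http::Request<()>,
    body: Bytes,
) -> Result<http::Response<h2::RecvStream>, Error> {
    // end_of_stream = false keeps the stream open for the body
    let (response, stream) = send_request.send_request(request, false)?;
    PipeToSendStream::new(body, stream).await?;
    Ok(response.await?)
}
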
diff --git a/src/client/pxar_backup_stream.rs b/src/client/pxar_backup_stream.rs
deleted file mode 100644 (file)
index d39eb6c..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-use std::io::Write;
-//use std::os::unix::io::FromRawFd;
-use std::path::Path;
-use std::pin::Pin;
-use std::sync::{Arc, Mutex};
-use std::task::{Context, Poll};
-
-use anyhow::{format_err, Error};
-use futures::stream::Stream;
-use futures::future::{Abortable, AbortHandle};
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use pbs_datastore::catalog::CatalogWriter;
-use pbs_tools::sync::StdChannelWriter;
-use pbs_tools::tokio::TokioWriterAdapter;
-
-/// Stream implementation to encode and upload .pxar archives.
-///
-/// The hyper client needs an async Stream for file upload, so we
-/// spawn an extra thread to encode the .pxar data and pipe it to the
-/// consumer.
-pub struct PxarBackupStream {
-    rx: Option<std::sync::mpsc::Receiver<Result<Vec<u8>, Error>>>,
-    handle: Option<AbortHandle>,
-    error: Arc<Mutex<Option<String>>>,
-}
-
-impl Drop for PxarBackupStream {
-    fn drop(&mut self) {
-        self.rx = None;
-        self.handle.take().unwrap().abort();
-    }
-}
-
-impl PxarBackupStream {
-    pub fn new<W: Write + Send + 'static>(
-        dir: Dir,
-        catalog: Arc<Mutex<CatalogWriter<W>>>,
-        options: crate::pxar::PxarCreateOptions,
-    ) -> Result<Self, Error> {
-        let (tx, rx) = std::sync::mpsc::sync_channel(10);
-
-        let buffer_size = 256 * 1024;
-
-        let error = Arc::new(Mutex::new(None));
-        let error2 = Arc::clone(&error);
-        let handler = async move {
-            let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
-                buffer_size,
-                StdChannelWriter::new(tx),
-            ));
-
-            let verbose = options.verbose;
-
-            let writer = pxar::encoder::sync::StandardWriter::new(writer);
-            if let Err(err) = crate::pxar::create_archive(
-                dir,
-                writer,
-                crate::pxar::Flags::DEFAULT,
-                move |path| {
-                    if verbose {
-                        println!("{:?}", path);
-                    }
-                    Ok(())
-                },
-                Some(catalog),
-                options,
-            ).await {
-                let mut error = error2.lock().unwrap();
-                *error = Some(err.to_string());
-            }
-        };
-
-        let (handle, registration) = AbortHandle::new_pair();
-        let future = Abortable::new(handler, registration);
-        tokio::spawn(future);
-
-        Ok(Self {
-            rx: Some(rx),
-            handle: Some(handle),
-            error,
-        })
-    }
-
-    pub fn open<W: Write + Send + 'static>(
-        dirname: &Path,
-        catalog: Arc<Mutex<CatalogWriter<W>>>,
-        options: crate::pxar::PxarCreateOptions,
-    ) -> Result<Self, Error> {
-        let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
-
-        Self::new(
-            dir,
-            catalog,
-            options,
-        )
-    }
-}
-
-impl Stream for PxarBackupStream {
-    type Item = Result<Vec<u8>, Error>;
-
-    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
-        {
-            // limit lock scope
-            let error = self.error.lock().unwrap();
-            if let Some(ref msg) = *error {
-                return Poll::Ready(Some(Err(format_err!("{}", msg))));
-            }
-        }
-
-        match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
-            Ok(data) => Poll::Ready(Some(data)),
-            Err(_) => {
-                let error = self.error.lock().unwrap();
-                if let Some(ref msg) = *error {
-                    return Poll::Ready(Some(Err(format_err!("{}", msg))));
-                }
-                Poll::Ready(None) // channel closed, no error
-            }
-        }
-    }
-}
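
The pattern underneath `PxarBackupStream`, stripped to its core: a bounded `std::sync::mpsc` channel gives the blocking encoder backpressure against the consumer. A minimal stand-alone sketch (plain thread and iterator instead of tokio and `Stream`):

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // capacity 10, like the sync_channel(10) in PxarBackupStream::new
    let (tx, rx) = sync_channel::<Vec<u8>>(10);

    thread::spawn(move || {
        for i in 0..3u8 {
            // stand-in for the pxar encoder emitting archive chunks;
            // send() blocks once 10 chunks are in flight (backpressure)
            tx.send(vec![i; 4]).unwrap();
        }
        // dropping tx closes the channel -> consumer sees end of stream
    });

    for chunk in rx {
        println!("got {} bytes", chunk.len());
    }
}
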
diff --git a/src/client/remote_chunk_reader.rs b/src/client/remote_chunk_reader.rs
deleted file mode 100644 (file)
index 61b6fb0..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-use std::future::Future;
-use std::collections::HashMap;
-use std::pin::Pin;
-use std::sync::{Arc, Mutex};
-
-use anyhow::{bail, Error};
-
-use pbs_datastore::{CryptConfig, CryptMode};
-use pbs_datastore::data_blob::DataBlob;
-use pbs_datastore::read_chunk::ReadChunk;
-use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_runtime::block_on;
-
-use super::BackupReader;
-
-/// Read chunks from a remote host using `BackupReader`
-#[derive(Clone)]
-pub struct RemoteChunkReader {
-    client: Arc<BackupReader>,
-    crypt_config: Option<Arc<CryptConfig>>,
-    crypt_mode: CryptMode,
-    cache_hint: Arc<HashMap<[u8; 32], usize>>,
-    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
-}
-
-impl RemoteChunkReader {
-    /// Create a new instance.
-    ///
-    /// Chunks listed in `cache_hint` are cached and kept in RAM.
-    pub fn new(
-        client: Arc<BackupReader>,
-        crypt_config: Option<Arc<CryptConfig>>,
-        crypt_mode: CryptMode,
-        cache_hint: HashMap<[u8; 32], usize>,
-    ) -> Self {
-        Self {
-            client,
-            crypt_config,
-            crypt_mode,
-            cache_hint: Arc::new(cache_hint),
-            cache: Arc::new(Mutex::new(HashMap::new())),
-        }
-    }
-
-    /// Downloads a raw chunk. This only verifies the (untrusted) CRC32; use
-    /// `DataBlob::verify_unencrypted` or `DataBlob::decode` before storing/processing it further.
-    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
-        let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
-
-        self.client
-            .download_chunk(&digest, &mut chunk_data)
-            .await?;
-
-        let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
-
-        match self.crypt_mode {
-            CryptMode::Encrypt => {
-                match chunk.crypt_mode()? {
-                    CryptMode::Encrypt => Ok(chunk),
-                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
-                }
-            },
-            CryptMode::SignOnly | CryptMode::None => {
-                match chunk.crypt_mode()? {
-                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
-                    CryptMode::SignOnly | CryptMode::None => Ok(chunk),
-                }
-            },
-        }
-    }
-}
-
-impl ReadChunk for RemoteChunkReader {
-    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
-        block_on(Self::read_raw_chunk(self, digest))
-    }
-
-    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
-        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
-            return Ok(raw_data.to_vec());
-        }
-
-        let chunk = ReadChunk::read_raw_chunk(self, digest)?;
-
-        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
-
-        let use_cache = self.cache_hint.contains_key(digest);
-        if use_cache {
-            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
-        }
-
-        Ok(raw_data)
-    }
-}
-
-impl AsyncReadChunk for RemoteChunkReader {
-    fn read_raw_chunk<'a>(
-        &'a self,
-        digest: &'a [u8; 32],
-    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
-        Box::pin(Self::read_raw_chunk(self, digest))
-    }
-
-    fn read_chunk<'a>(
-        &'a self,
-        digest: &'a [u8; 32],
-    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
-        Box::pin(async move {
-            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
-                return Ok(raw_data.to_vec());
-            }
-
-            let chunk = Self::read_raw_chunk(self, digest).await?;
-
-            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
-
-            let use_cache = self.cache_hint.contains_key(digest);
-            if use_cache {
-                (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
-            }
-
-            Ok(raw_data)
-        })
-    }
-}
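
The caching policy above in isolation: only digests listed in `cache_hint` are kept in RAM after decoding, everything else is fetched again on the next read. A simplified sketch (a `u32` stands in for the `[u8; 32]` digest, and `fetch` stands in for the download-and-decode step):

use std::collections::HashMap;
use std::sync::Mutex;

struct CachedReader {
    cache_hint: HashMap<u32, usize>,
    cache: Mutex<HashMap<u32, Vec<u8>>>,
}

impl CachedReader {
    fn read_chunk(&self, digest: u32, fetch: impl Fn(u32) -> Vec<u8>) -> Vec<u8> {
        // fast path: chunk was marked cacheable and already fetched
        if let Some(data) = self.cache.lock().unwrap().get(&digest) {
            return data.clone();
        }
        let data = fetch(digest);
        // only remember chunks the caller hinted will be needed again
        if self.cache_hint.contains_key(&digest) {
            self.cache.lock().unwrap().insert(digest, data.clone());
        }
        data
    }
}

fn main() {
    let reader = CachedReader {
        cache_hint: [(1u32, 0usize)].into_iter().collect(),
        cache: Mutex::new(HashMap::new()),
    };
    let data = reader.read_chunk(1, |d| vec![d as u8; 8]);
    assert_eq!(data.len(), 8);
}
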
diff --git a/src/client/task_log.rs b/src/client/task_log.rs
deleted file mode 100644 (file)
index 1d1af14..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
-
-use anyhow::{bail, Error};
-use serde_json::{json, Value};
-use tokio::signal::unix::{signal, SignalKind};
-use futures::*;
-
-use proxmox::api::cli::format_and_print_result;
-
-use pbs_tools::percent_encoding::percent_encode_component;
-
-use super::HttpClient;
-
-/// Display task log on console
-///
-/// This polls the task API and prints the log to the console. It also
-/// catches interrupt signals and sends an abort request to the task if
-/// the user presses CTRL-C. Two interrupts cause an immediate end of
-/// the loop. The task may still run in that case.
-pub async fn display_task_log(
-    client: &mut HttpClient,
-    upid_str: &str,
-    strip_date: bool,
-) -> Result<(), Error> {
-
-    let mut signal_stream = signal(SignalKind::interrupt())?;
-    let abort_count = Arc::new(AtomicUsize::new(0));
-    let abort_count2 = Arc::clone(&abort_count);
-
-    let abort_future = async move {
-        while signal_stream.recv().await.is_some() {
-            println!("got shutdown request (SIGINT)");
-            let prev_count = abort_count2.fetch_add(1, Ordering::SeqCst);
-            if prev_count >= 1 {
-                println!("forced exit (task still running)");
-                break;
-            }
-        }
-        Ok::<_, Error>(())
-    };
-
-    let request_future = async move {
-
-        let mut start = 1;
-        let limit = 500;
-
-        loop {
-
-            let abort = abort_count.load(Ordering::Relaxed);
-            if abort > 0 {
-                let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
-                let _ = client.delete(&path, None).await?;
-            }
-
-            let param = json!({ "start": start, "limit": limit, "test-status": true });
-
-            let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
-            let result = client.get(&path, Some(param)).await?;
-
-            let active = result["active"].as_bool().unwrap();
-            let total = result["total"].as_u64().unwrap();
-            let data = result["data"].as_array().unwrap();
-
-            let lines = data.len();
-
-            for item in data {
-                let n = item["n"].as_u64().unwrap();
-                let t = item["t"].as_str().unwrap();
-                if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
-                if strip_date && t.len() > 27 && &t[25..27] == ": " {
-                    let line = &t[27..];
-                    println!("{}", line);
-                } else {
-                    println!("{}", t);
-                }
-                start += 1;
-            }
-
-            if start > total {
-                if active {
-                    tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
-                } else {
-                    break;
-                }
-            } else if lines != limit {
-                bail!("got wrong number of lines from server ({} != {})", lines, limit);
-            }
-        }
-
-        Ok(())
-    };
-
-    futures::select!{
-        request = request_future.fuse() => request?,
-        abort = abort_future.fuse() => abort?,
-    };
-
-    Ok(())
-}
-
-/// Display the task result (UPID), or view the task log, depending on the output format
-pub async fn view_task_result(
-    client: &mut HttpClient,
-    result: Value,
-    output_format: &str,
-) -> Result<(), Error> {
-    let data = &result["data"];
-    if output_format == "text" {
-        if let Some(upid) = data.as_str() {
-            display_task_log(client, upid, true).await?;
-        }
-    } else {
-        format_and_print_result(&data, &output_format);
-    }
-
-    Ok(())
-}
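
The paging loop in `display_task_log` requests at most `limit` lines starting at `start` and stops once `start > total` while the task is no longer active. A small self-contained sketch of the ranges that loop walks through:

// Which (start, end) line ranges the loop above requests for a finished
// task with `total` log lines and a page size of `limit`.
fn page_ranges(total: u64, limit: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    let mut start = 1;
    while start <= total {
        let end = (start + limit - 1).min(total);
        ranges.push((start, end));
        start = end + 1;
    }
    ranges
}

fn main() {
    assert_eq!(
        page_ranges(1200, 500),
        vec![(1, 500), (501, 1000), (1001, 1200)]
    );
}
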
diff --git a/src/client/vsock_client.rs b/src/client/vsock_client.rs
deleted file mode 100644 (file)
index 3f0f373..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use anyhow::{bail, format_err, Error};
-use futures::*;
-use http::Uri;
-use http::{Request, Response};
-use hyper::client::connect::{Connected, Connection};
-use hyper::client::Client;
-use hyper::Body;
-use pin_project::pin_project;
-use serde_json::Value;
-use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
-use tokio::net::UnixStream;
-
-use proxmox::api::error::HttpError;
-
-pub const DEFAULT_VSOCK_PORT: u16 = 807;
-
-#[derive(Clone)]
-struct VsockConnector;
-
-#[pin_project]
-/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
-struct UnixConnection {
-    #[pin]
-    stream: UnixStream,
-}
-
-impl tower_service::Service<Uri> for VsockConnector {
-    type Response = UnixConnection;
-    type Error = Error;
-    type Future = Pin<Box<dyn Future<Output = Result<UnixConnection, Error>> + Send>>;
-
-    fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
-        Poll::Ready(Ok(()))
-    }
-
-    fn call(&mut self, dst: Uri) -> Self::Future {
-        use nix::sys::socket::*;
-        use std::os::unix::io::FromRawFd;
-
-        // connect can block, so run in blocking task (though in reality it seems to immediately
-        // return with either ENODEV or ETIMEDOUT in case of error)
-        tokio::task::spawn_blocking(move || {
-            if dst.scheme_str().unwrap_or_default() != "vsock" {
-                bail!("invalid URI (scheme) for vsock connector: {}", dst);
-            }
-
-            let cid = match dst.host() {
-                Some(host) => host.parse().map_err(|err| {
-                    format_err!(
-                        "invalid URI (host not a number) for vsock connector: {} ({})",
-                        dst,
-                        err
-                    )
-                })?,
-                None => bail!("invalid URI (no host) for vsock connector: {}", dst),
-            };
-
-            let port = match dst.port_u16() {
-                Some(port) => port,
-                None => bail!("invalid URI (bad port) for vsock connector: {}", dst),
-            };
-
-            let sock_fd = socket(
-                AddressFamily::Vsock,
-                SockType::Stream,
-                SockFlag::empty(),
-                None,
-            )?;
-
-            let sock_addr = VsockAddr::new(cid, port as u32);
-            connect(sock_fd, &SockAddr::Vsock(sock_addr))?;
-
-            // connect sync, but set nonblock after (tokio requires it)
-            let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };
-            std_stream.set_nonblocking(true)?;
-
-            let stream = tokio::net::UnixStream::from_std(std_stream)?;
-            let connection = UnixConnection { stream };
-
-            Ok(connection)
-        })
-        // unravel the thread JoinHandle to a usable future
-        .map(|res| match res {
-            Ok(res) => res,
-            Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
-        })
-        .boxed()
-    }
-}
-
-impl Connection for UnixConnection {
-    fn connected(&self) -> Connected {
-        Connected::new()
-    }
-}
-
-impl AsyncRead for UnixConnection {
-    fn poll_read(
-        self: Pin<&mut Self>,
-        cx: &mut Context<'_>,
-        buf: &mut ReadBuf,
-    ) -> Poll<Result<(), std::io::Error>> {
-        let this = self.project();
-        this.stream.poll_read(cx, buf)
-    }
-}
-
-impl AsyncWrite for UnixConnection {
-    fn poll_write(
-        self: Pin<&mut Self>,
-        cx: &mut Context<'_>,
-        buf: &[u8],
-    ) -> Poll<tokio::io::Result<usize>> {
-        let this = self.project();
-        this.stream.poll_write(cx, buf)
-    }
-
-    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
-        let this = self.project();
-        this.stream.poll_flush(cx)
-    }
-
-    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
-        let this = self.project();
-        this.stream.poll_shutdown(cx)
-    }
-}
-
-/// Slimmed-down version of HttpClient for virtio-vsock connections (the file restore daemon)
-pub struct VsockClient {
-    client: Client<VsockConnector>,
-    cid: i32,
-    port: u16,
-    auth: Option<String>,
-}
-
-impl VsockClient {
-    pub fn new(cid: i32, port: u16, auth: Option<String>) -> Self {
-        let conn = VsockConnector {};
-        let client = Client::builder().build::<_, Body>(conn);
-        Self {
-            client,
-            cid,
-            port,
-            auth,
-        }
-    }
-
-    pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
-        let req = self.request_builder("GET", path, data)?;
-        self.api_request(req).await
-    }
-
-    pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
-        let req = self.request_builder("POST", path, data)?;
-        self.api_request(req).await
-    }
-
-    pub async fn download(
-        &self,
-        path: &str,
-        data: Option<Value>,
-        output: &mut (dyn AsyncWrite + Send + Unpin),
-    ) -> Result<(), Error> {
-        let req = self.request_builder("GET", path, data)?;
-
-        let client = self.client.clone();
-
-        let resp = client
-            .request(req)
-            .await
-            .map_err(|err| format_err!("vsock download request failed: {}", err))?;
-        let status = resp.status();
-        if !status.is_success() {
-            Self::api_response(resp).await.map(|_| ())?
-        } else {
-            resp.into_body()
-                .map_err(Error::from)
-                .try_fold(output, move |acc, chunk| async move {
-                    acc.write_all(&chunk).await?;
-                    Ok::<_, Error>(acc)
-                })
-                .await?;
-        }
-        Ok(())
-    }
-
-    async fn api_response(response: Response<Body>) -> Result<Value, Error> {
-        let status = response.status();
-        let data = hyper::body::to_bytes(response.into_body()).await?;
-
-        let text = String::from_utf8(data.to_vec())?;
-        if status.is_success() {
-            if text.is_empty() {
-                Ok(Value::Null)
-            } else {
-                let value: Value = serde_json::from_str(&text)?;
-                Ok(value)
-            }
-        } else {
-            Err(Error::from(HttpError::new(status, text)))
-        }
-    }
-
-    async fn api_request(&self, req: Request<Body>) -> Result<Value, Error> {
-        self.client
-            .request(req)
-            .map_err(Error::from)
-            .and_then(Self::api_response)
-            .await
-    }
-
-    fn request_builder(
-        &self,
-        method: &str,
-        path: &str,
-        data: Option<Value>,
-    ) -> Result<Request<Body>, Error> {
-        let path = path.trim_matches('/');
-        let url: Uri = format!("vsock://{}:{}/{}", self.cid, self.port, path).parse()?;
-
-        let make_builder = |content_type: &str, url: &Uri| {
-            let mut builder = Request::builder()
-                .method(method)
-                .uri(url)
-                .header(hyper::header::CONTENT_TYPE, content_type);
-            if let Some(auth) = &self.auth {
-                builder = builder.header(hyper::header::AUTHORIZATION, auth);
-            }
-            builder
-        };
-
-        if let Some(data) = data {
-            if method == "POST" {
-                let builder = make_builder("application/json", &url);
-                let request = builder.body(Body::from(data.to_string()))?;
-                return Ok(request);
-            } else {
-                let query = pbs_tools::json::json_object_to_query(data)?;
-                let url: Uri =
-                    format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
-                let builder = make_builder("application/x-www-form-urlencoded", &url);
-                let request = builder.body(Body::empty())?;
-                return Ok(request);
-            }
-        }
-
-        let builder = make_builder("application/x-www-form-urlencoded", &url);
-        let request = builder.body(Body::empty())?;
-
-        Ok(request)
-    }
-}
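
The connector above only accepts URIs of the form `vsock://<cid>:<port>/<path>`; `http::Uri` parses custom schemes fine, so the CID and port come straight out of the host and port parts. A small sketch (CID 42 and the path are arbitrary examples; 807 is `DEFAULT_VSOCK_PORT` from the module above):

use http::Uri;

fn main() {
    let uri: Uri = "vsock://42:807/api2/json/status".parse().unwrap();
    assert_eq!(uri.scheme_str(), Some("vsock"));
    assert_eq!(uri.host(), Some("42")); // the guest CID
    assert_eq!(uri.port_u16(), Some(807)); // DEFAULT_VSOCK_PORT
}
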
index bdec5fc11ac845d26ab829bddb08aae3c21a1dfb..1406e386a107d8479d99877f2af6c3b589ccb41c 100644 (file)
@@ -3,10 +3,8 @@ use std::sync::{Arc, RwLock};
 
 use anyhow::{bail, Error};
 use lazy_static::lazy_static;
-use serde::{Serialize, Deserialize};
 
 use proxmox::api::{
-    api,
     schema::*,
     section_config::{
         SectionConfig,
@@ -17,154 +15,18 @@ use proxmox::api::{
 
 use proxmox::tools::{fs::replace_file, fs::CreateOptions};
 
-use crate::api2::types::*;
+use pbs_api_types::{Authid, Userid};
+pub use pbs_api_types::{ApiToken, User};
+pub use pbs_api_types::{
+    EMAIL_SCHEMA, ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
+};
+
 use crate::tools::Memcom;
 
 lazy_static! {
     pub static ref CONFIG: SectionConfig = init();
 }
 
-pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
-    "Enable the account (default). You can set this to '0' to disable the account.")
-    .default(true)
-    .schema();
-
-pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
-    "Account expiration date (seconds since epoch). '0' means no expiration date.")
-    .default(0)
-    .minimum(0)
-    .schema();
-
-pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-#[api(
-    properties: {
-        tokenid: {
-            schema: PROXMOX_TOKEN_ID_SCHEMA,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: EXPIRE_USER_SCHEMA,
-        },
-    }
-)]
-#[derive(Serialize,Deserialize)]
-/// ApiToken properties.
-pub struct ApiToken {
-    pub tokenid: Authid,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub expire: Option<i64>,
-}
-
-impl ApiToken {
-
-    pub fn is_active(&self) -> bool {
-        if !self.enable.unwrap_or(true) {
-            return false;
-        }
-        if let Some(expire) = self.expire {
-            let now = proxmox::tools::time::epoch_i64();
-            if expire > 0 && expire <= now {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-#[api(
-    properties: {
-        userid: {
-            type: Userid,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: EXPIRE_USER_SCHEMA,
-        },
-        firstname: {
-            optional: true,
-            schema: FIRST_NAME_SCHEMA,
-        },
-        lastname: {
-            schema: LAST_NAME_SCHEMA,
-            optional: true,
-         },
-        email: {
-            schema: EMAIL_SCHEMA,
-            optional: true,
-        },
-    }
-)]
-#[derive(Serialize,Deserialize)]
-/// User properties.
-pub struct User {
-    pub userid: Userid,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub expire: Option<i64>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub firstname: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub lastname: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub email: Option<String>,
-}
-
-impl User {
-
-    pub fn is_active(&self) -> bool {
-        if !self.enable.unwrap_or(true) {
-            return false;
-        }
-        if let Some(expire) = self.expire {
-            let now = proxmox::tools::time::epoch_i64();
-            if expire > 0 && expire <= now {
-                return false;
-            }
-        }
-        true
-    }
-}
-
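
The expiry rule from the removed `is_active()` implementations, in isolation: `expire == 0` means no expiration date, and any positive epoch at or before "now" disables the account. As a stand-alone sketch:

// expire semantics shared by ApiToken::is_active and User::is_active
fn is_expired(expire: i64, now: i64) -> bool {
    expire > 0 && expire <= now
}

fn main() {
    let now = 1_700_000_000;
    assert!(!is_expired(0, now));       // 0 = no expiration date
    assert!(is_expired(now - 1, now));  // already in the past
    assert!(!is_expired(now + 1, now)); // still in the future
}
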
 fn init() -> SectionConfig {
     let mut config = SectionConfig::new(&Authid::API_SCHEMA);
 
index 4815c4145957d3d0f814911befddcd63b44c1f19..fcbc2e18e66f1c7408e9b6968832f742dba16e12 100644 (file)
@@ -9,8 +9,6 @@ pub mod tools;
 #[macro_use]
 pub mod server;
 
-pub mod pxar;
-
 #[macro_use]
 pub mod backup;
 
@@ -18,8 +16,6 @@ pub mod config;
 
 pub mod api2;
 
-pub mod client;
-
 pub mod auth_helpers;
 
 pub mod auth;
diff --git a/src/pxar/create.rs b/src/pxar/create.rs
deleted file mode 100644 (file)
index a3ce356..0000000
+++ /dev/null
@@ -1,1078 +0,0 @@
-use std::collections::{HashSet, HashMap};
-use std::ffi::{CStr, CString, OsStr};
-use std::fmt;
-use std::io::{self, Read, Write};
-use std::os::unix::ffi::OsStrExt;
-use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
-use std::path::{Path, PathBuf};
-use std::sync::{Arc, Mutex};
-
-use anyhow::{bail, format_err, Error};
-use nix::dir::Dir;
-use nix::errno::Errno;
-use nix::fcntl::OFlag;
-use nix::sys::stat::{FileStat, Mode};
-use futures::future::BoxFuture;
-use futures::FutureExt;
-
-use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
-use pxar::Metadata;
-use pxar::encoder::{SeqWrite, LinkOffset};
-
-use proxmox::c_str;
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::RawFdNum;
-use proxmox::tools::vec;
-
-use pbs_datastore::catalog::BackupCatalogWriter;
-use pbs_tools::fs;
-
-use crate::pxar::metadata::errno_is_unsupported;
-use crate::pxar::Flags;
-use crate::pxar::tools::assert_single_path_component;
-use crate::tools::{acl, xattr, Fd};
-
-/// Pxar options for creating a pxar archive/stream
-#[derive(Default, Clone)]
-pub struct PxarCreateOptions {
-    /// Device/mountpoint st_dev numbers that should be included. None for no limitation.
-    pub device_set: Option<HashSet<u64>>,
-    /// Exclusion patterns
-    pub patterns: Vec<MatchEntry>,
-    /// Maximum number of entries to hold in memory
-    pub entries_max: usize,
-    /// Skip lost+found directory
-    pub skip_lost_and_found: bool,
-    /// Verbose output
-    pub verbose: bool,
-}
-
-
-fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
-    let mut fs_stat = std::mem::MaybeUninit::uninit();
-    let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
-    Errno::result(res)?;
-    let fs_stat = unsafe { fs_stat.assume_init() };
-
-    Ok(fs_stat.f_type)
-}
-
-#[rustfmt::skip]
-pub fn is_virtual_file_system(magic: i64) -> bool {
-    use proxmox::sys::linux::magic::*;
-
-    matches!(magic, BINFMTFS_MAGIC |
-        CGROUP2_SUPER_MAGIC |
-        CGROUP_SUPER_MAGIC |
-        CONFIGFS_MAGIC |
-        DEBUGFS_MAGIC |
-        DEVPTS_SUPER_MAGIC |
-        EFIVARFS_MAGIC |
-        FUSE_CTL_SUPER_MAGIC |
-        HUGETLBFS_MAGIC |
-        MQUEUE_MAGIC |
-        NFSD_MAGIC |
-        PROC_SUPER_MAGIC |
-        PSTOREFS_MAGIC |
-        RPCAUTH_GSSMAGIC |
-        SECURITYFS_MAGIC |
-        SELINUX_MAGIC |
-        SMACK_MAGIC |
-        SYSFS_MAGIC)
-}
-
-#[derive(Debug)]
-struct ArchiveError {
-    path: PathBuf,
-    error: Error,
-}
-
-impl ArchiveError {
-    fn new(path: PathBuf, error: Error) -> Self {
-        Self { path, error }
-    }
-}
-
-impl std::error::Error for ArchiveError {}
-
-impl fmt::Display for ArchiveError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "error at {:?}: {}", self.path, self.error)
-    }
-}
-
-#[derive(Eq, PartialEq, Hash)]
-struct HardLinkInfo {
-    st_dev: u64,
-    st_ino: u64,
-}
-
-/// TODO: make a builder for the create_archive call for fewer parameters and add a method to add a
-/// logger which does not write to stderr.
-struct Logger;
-
-impl std::io::Write for Logger {
-    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
-        std::io::stderr().write(data)
-    }
-
-    fn flush(&mut self) -> io::Result<()> {
-        std::io::stderr().flush()
-    }
-}
-
-/// Counterpart of `Logger` for error and warning output (currently also stderr).
-struct ErrorReporter;
-
-impl std::io::Write for ErrorReporter {
-    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
-        std::io::stderr().write(data)
-    }
-
-    fn flush(&mut self) -> io::Result<()> {
-        std::io::stderr().flush()
-    }
-}
-
-struct Archiver {
-    feature_flags: Flags,
-    fs_feature_flags: Flags,
-    fs_magic: i64,
-    patterns: Vec<MatchEntry>,
-    callback: Box<dyn FnMut(&Path) -> Result<(), Error> + Send>,
-    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
-    path: PathBuf,
-    entry_counter: usize,
-    entry_limit: usize,
-    current_st_dev: libc::dev_t,
-    device_set: Option<HashSet<u64>>,
-    hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
-    errors: ErrorReporter,
-    logger: Logger,
-    file_copy_buffer: Vec<u8>,
-}
-
-type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;
-
-pub async fn create_archive<T, F>(
-    source_dir: Dir,
-    mut writer: T,
-    feature_flags: Flags,
-    callback: F,
-    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
-    options: PxarCreateOptions,
-) -> Result<(), Error>
-where
-    T: SeqWrite + Send,
-    F: FnMut(&Path) -> Result<(), Error> + Send + 'static,
-{
-    let fs_magic = detect_fs_type(source_dir.as_raw_fd())?;
-    if is_virtual_file_system(fs_magic) {
-        bail!("refusing to backup a virtual file system");
-    }
-
-    let mut fs_feature_flags = Flags::from_magic(fs_magic);
-
-    let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?;
-    let metadata = get_metadata(
-        source_dir.as_raw_fd(),
-        &stat,
-        feature_flags & fs_feature_flags,
-        fs_magic,
-        &mut fs_feature_flags,
-    )
-    .map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;
-
-    let mut device_set = options.device_set.clone();
-    if let Some(ref mut set) = device_set {
-        set.insert(stat.st_dev);
-    }
-
-    let mut encoder = Encoder::new(&mut writer, &metadata).await?;
-
-    let mut patterns = options.patterns;
-
-    if options.skip_lost_and_found {
-        patterns.push(MatchEntry::parse_pattern(
-            "lost+found",
-            PatternFlag::PATH_NAME,
-            MatchType::Exclude,
-        )?);
-    }
-
-    let mut archiver = Archiver {
-        feature_flags,
-        fs_feature_flags,
-        fs_magic,
-        callback: Box::new(callback),
-        patterns,
-        catalog,
-        path: PathBuf::new(),
-        entry_counter: 0,
-        entry_limit: options.entries_max,
-        current_st_dev: stat.st_dev,
-        device_set,
-        hardlinks: HashMap::new(),
-        errors: ErrorReporter,
-        logger: Logger,
-        file_copy_buffer: vec::undefined(4 * 1024 * 1024),
-    };
-
-    archiver.archive_dir_contents(&mut encoder, source_dir, true).await?;
-    encoder.finish().await?;
-    Ok(())
-}
-
-struct FileListEntry {
-    name: CString,
-    path: PathBuf,
-    stat: FileStat,
-}
-
-impl Archiver {
-    /// Get the currently effective feature flags. (Requested flags masked by the file system
-    /// feature flags).
-    fn flags(&self) -> Flags {
-        self.feature_flags & self.fs_feature_flags
-    }
-
-    fn wrap_err(&self, err: Error) -> Error {
-        if err.downcast_ref::<ArchiveError>().is_some() {
-            err
-        } else {
-            ArchiveError::new(self.path.clone(), err).into()
-        }
-    }
-
-    fn archive_dir_contents<'a, 'b, T: SeqWrite + Send>(
-        &'a mut self,
-        encoder: &'a mut Encoder<'b, T>,
-        mut dir: Dir,
-        is_root: bool,
-    ) -> BoxFuture<'a, Result<(), Error>> {
-        async move {
-            let entry_counter = self.entry_counter;
-
-            let old_patterns_count = self.patterns.len();
-            self.read_pxar_excludes(dir.as_raw_fd())?;
-
-            let mut file_list = self.generate_directory_file_list(&mut dir, is_root)?;
-
-            if is_root && old_patterns_count > 0 {
-                file_list.push(FileListEntry {
-                    name: CString::new(".pxarexclude-cli").unwrap(),
-                    path: PathBuf::new(),
-                    stat: unsafe { std::mem::zeroed() },
-                });
-            }
-
-            let dir_fd = dir.as_raw_fd();
-
-            let old_path = std::mem::take(&mut self.path);
-
-            for file_entry in file_list {
-                let file_name = file_entry.name.to_bytes();
-
-                if is_root && file_name == b".pxarexclude-cli" {
-                    self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count).await?;
-                    continue;
-                }
-
-                (self.callback)(&file_entry.path)?;
-                self.path = file_entry.path;
-                self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat).await
-                    .map_err(|err| self.wrap_err(err))?;
-            }
-            self.path = old_path;
-            self.entry_counter = entry_counter;
-            self.patterns.truncate(old_patterns_count);
-
-            Ok(())
-        }.boxed()
-    }
-
-    /// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
-    ///
-    /// The `existed` flag is set when iterating through a directory to note that we know the file
-    /// is supposed to exist and we should warn if it doesn't.
-    fn open_file(
-        &mut self,
-        parent: RawFd,
-        file_name: &CStr,
-        oflags: OFlag,
-        existed: bool,
-    ) -> Result<Option<Fd>, Error> {
-        // common flags we always want to use:
-        let oflags = oflags | OFlag::O_CLOEXEC | OFlag::O_NOCTTY;
-
-        let mut noatime = OFlag::O_NOATIME;
-        loop {
-            return match Fd::openat(
-                &unsafe { RawFdNum::from_raw_fd(parent) },
-                file_name,
-                oflags | noatime,
-                Mode::empty(),
-            ) {
-                Ok(fd) => Ok(Some(fd)),
-                Err(nix::Error::Sys(Errno::ENOENT)) => {
-                    if existed {
-                        self.report_vanished_file()?;
-                    }
-                    Ok(None)
-                }
-                Err(nix::Error::Sys(Errno::EACCES)) => {
-                    writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
-                    Ok(None)
-                }
-                Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
-                    // Retry without O_NOATIME:
-                    noatime = OFlag::empty();
-                    continue;
-                }
-                Err(other) => Err(Error::from(other)),
-            }
-        }
-    }
-
-    fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
-        let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? {
-            Some(fd) => fd,
-            None => return Ok(()),
-        };
-
-        let old_pattern_count = self.patterns.len();
-
-        let path_bytes = self.path.as_os_str().as_bytes();
-
-        let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
-
-        use io::BufRead;
-        for line in io::BufReader::new(file).split(b'\n') {
-            let line = match line {
-                Ok(line) => line,
-                Err(err) => {
-                    let _ = writeln!(
-                        self.errors,
-                        "ignoring .pxarexclude after read error in {:?}: {}",
-                        self.path,
-                        err,
-                    );
-                    self.patterns.truncate(old_pattern_count);
-                    return Ok(());
-                }
-            };
-
-            let line = crate::tools::strip_ascii_whitespace(&line);
-
-            if line.is_empty() || line[0] == b'#' {
-                continue;
-            }
-
-            let mut buf;
-            let (line, mode, anchored) = if line[0] == b'/' {
-                buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
-                buf.extend(path_bytes);
-                buf.extend(line);
-                (&buf[..], MatchType::Exclude, true)
-            } else if line.starts_with(b"!/") {
-                // inverted case with absolute path
-                buf = Vec::with_capacity(path_bytes.len() + line.len());
-                buf.extend(path_bytes);
-                buf.extend(&line[1..]); // without the '!'
-                (&buf[..], MatchType::Include, true)
-            } else if line.starts_with(b"!") {
-                (&line[1..], MatchType::Include, false)
-            } else {
-                (line, MatchType::Exclude, false)
-            };
-
-            match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
-                Ok(pattern) => {
-                    if anchored {
-                        self.patterns.push(pattern.add_flags(MatchFlag::ANCHORED));
-                    } else {
-                        self.patterns.push(pattern);
-                    }
-                }
-                Err(err) => {
-                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
-                }
-            }
-        }
-
-        Ok(())
-    }
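
Summary of the `.pxarexclude` line rules parsed above: a leading `/` anchors the pattern (the real code additionally prepends the current directory path), a leading `!` turns it into an include, and everything else is an unanchored exclude. As a stand-alone sketch (the `classify` helper is illustrative only):

enum MatchKind {
    Include,
    Exclude,
}

// classify one non-empty, non-comment .pxarexclude line
fn classify(line: &str) -> (String, MatchKind, bool /* anchored */) {
    if let Some(rest) = line.strip_prefix("!/") {
        (format!("/{}", rest), MatchKind::Include, true)
    } else if let Some(rest) = line.strip_prefix('!') {
        (rest.to_string(), MatchKind::Include, false)
    } else if line.starts_with('/') {
        (line.to_string(), MatchKind::Exclude, true)
    } else {
        (line.to_string(), MatchKind::Exclude, false)
    }
}

fn main() {
    let (pattern, _kind, anchored) = classify("!/keep-this");
    assert_eq!(pattern, "/keep-this");
    assert!(anchored);
}
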
-
-    async fn encode_pxarexclude_cli<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        file_name: &CStr,
-        patterns_count: usize,
-    ) -> Result<(), Error> {
-        let content = generate_pxar_excludes_cli(&self.patterns[..patterns_count]);
-        if let Some(ref catalog) = self.catalog {
-            catalog.lock().unwrap().add_file(file_name, content.len() as u64, 0)?;
-        }
-
-        let mut metadata = Metadata::default();
-        metadata.stat.mode = pxar::format::mode::IFREG | 0o600;
-
-        let mut file = encoder.create_file(&metadata, ".pxarexclude-cli", content.len() as u64).await?;
-        file.write_all(&content).await?;
-
-        Ok(())
-    }
-
-    fn generate_directory_file_list(
-        &mut self,
-        dir: &mut Dir,
-        is_root: bool,
-    ) -> Result<Vec<FileListEntry>, Error> {
-        let dir_fd = dir.as_raw_fd();
-
-        let mut file_list = Vec::new();
-
-        for file in dir.iter() {
-            let file = file?;
-
-            let file_name = file.file_name().to_owned();
-            let file_name_bytes = file_name.to_bytes();
-            if file_name_bytes == b"." || file_name_bytes == b".." {
-                continue;
-            }
-
-            if is_root && file_name_bytes == b".pxarexclude-cli" {
-                continue;
-            }
-
-            let os_file_name = OsStr::from_bytes(file_name_bytes);
-            assert_single_path_component(os_file_name)?;
-            let full_path = self.path.join(os_file_name);
-
-            let stat = match nix::sys::stat::fstatat(
-                dir_fd,
-                file_name.as_c_str(),
-                nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
-            ) {
-                Ok(stat) => stat,
-                Err(ref err) if err.not_found() => continue,
-                Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
-            };
-
-            let match_path = PathBuf::from("/").join(full_path.clone());
-            if self
-                .patterns
-                .matches(match_path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
-                == Some(MatchType::Exclude)
-            {
-                continue;
-            }
-
-            self.entry_counter += 1;
-            if self.entry_counter > self.entry_limit {
-                bail!("exceeded allowed number of file entries (> {})",self.entry_limit);
-            }
-
-            file_list.push(FileListEntry {
-                name: file_name,
-                path: full_path,
-                stat
-            });
-        }
-
-        file_list.sort_unstable_by(|a, b| a.name.cmp(&b.name));
-
-        Ok(file_list)
-    }
-
-    fn report_vanished_file(&mut self) -> Result<(), Error> {
-        writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
-        Ok(())
-    }
-
-    fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
-        writeln!(
-            self.errors,
-            "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
-            self.path,
-        )?;
-        Ok(())
-    }
-
-    fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
-        writeln!(
-            self.errors,
-            "warning: file size increased while reading: {:?}, file will be truncated!",
-            self.path,
-        )?;
-        Ok(())
-    }
-
-    async fn add_entry<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        parent: RawFd,
-        c_file_name: &CStr,
-        stat: &FileStat,
-    ) -> Result<(), Error> {
-        use pxar::format::mode;
-
-        let file_mode = stat.st_mode & libc::S_IFMT;
-        let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
-            OFlag::empty()
-        } else {
-            OFlag::O_PATH
-        };
-
-        let fd = self.open_file(
-            parent,
-            c_file_name,
-            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW,
-            true,
-        )?;
-
-        let fd = match fd {
-            Some(fd) => fd,
-            None => return Ok(()),
-        };
-
-        let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
-
-        if self
-            .patterns
-            .matches(self.path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
-            == Some(MatchType::Exclude)
-        {
-            return Ok(());
-        }
-
-        let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
-        match metadata.file_type() {
-            mode::IFREG => {
-                let link_info = HardLinkInfo {
-                    st_dev: stat.st_dev,
-                    st_ino: stat.st_ino,
-                };
-
-                if stat.st_nlink > 1 {
-                    if let Some((path, offset)) = self.hardlinks.get(&link_info) {
-                        if let Some(ref catalog) = self.catalog {
-                            catalog.lock().unwrap().add_hardlink(c_file_name)?;
-                        }
-
-                        encoder.add_hardlink(file_name, path, *offset).await?;
-
-                        return Ok(());
-                    }
-                }
-
-                let file_size = stat.st_size as u64;
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_file(c_file_name, file_size, stat.st_mtime)?;
-                }
-
-                let offset: LinkOffset =
-                    self.add_regular_file(encoder, fd, file_name, &metadata, file_size).await?;
-
-                if stat.st_nlink > 1 {
-                    self.hardlinks.insert(link_info, (self.path.clone(), offset));
-                }
-
-                Ok(())
-            }
-            mode::IFDIR => {
-                let dir = Dir::from_fd(fd.into_raw_fd())?;
-
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().start_directory(c_file_name)?;
-                }
-                let result = self.add_directory(encoder, dir, c_file_name, &metadata, stat).await;
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().end_directory()?;
-                }
-                result
-            }
-            mode::IFSOCK => {
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_socket(c_file_name)?;
-                }
-
-                Ok(encoder.add_socket(&metadata, file_name).await?)
-            }
-            mode::IFIFO => {
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_fifo(c_file_name)?;
-                }
-
-                Ok(encoder.add_fifo(&metadata, file_name).await?)
-            }
-            mode::IFLNK => {
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_symlink(c_file_name)?;
-                }
-
-                self.add_symlink(encoder, fd, file_name, &metadata).await
-            }
-            mode::IFBLK => {
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_block_device(c_file_name)?;
-                }
-
-                self.add_device(encoder, file_name, &metadata, &stat).await
-            }
-            mode::IFCHR => {
-                if let Some(ref catalog) = self.catalog {
-                    catalog.lock().unwrap().add_char_device(c_file_name)?;
-                }
-
-                self.add_device(encoder, file_name, &metadata, &stat).await
-            }
-            other => bail!(
-                "encountered unknown file type: 0x{:x} (0o{:o})",
-                other,
-                other
-            ),
-        }
-    }
-
-    async fn add_directory<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        dir: Dir,
-        dir_name: &CStr,
-        metadata: &Metadata,
-        stat: &FileStat,
-    ) -> Result<(), Error> {
-        let dir_name = OsStr::from_bytes(dir_name.to_bytes());
-
-        let mut encoder = encoder.create_directory(dir_name, &metadata).await?;
-
-        let old_fs_magic = self.fs_magic;
-        let old_fs_feature_flags = self.fs_feature_flags;
-        let old_st_dev = self.current_st_dev;
-
-        let mut skip_contents = false;
-        if old_st_dev != stat.st_dev {
-            self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
-            self.fs_feature_flags = Flags::from_magic(self.fs_magic);
-            self.current_st_dev = stat.st_dev;
-
-            if is_virtual_file_system(self.fs_magic) {
-                skip_contents = true;
-            } else if let Some(set) = &self.device_set {
-                skip_contents = !set.contains(&stat.st_dev);
-            }
-        }
-
-        let result = if skip_contents {
-            writeln!(self.logger, "skipping mount point: {:?}", self.path)?;
-            Ok(())
-        } else {
-            self.archive_dir_contents(&mut encoder, dir, false).await
-        };
-
-        self.fs_magic = old_fs_magic;
-        self.fs_feature_flags = old_fs_feature_flags;
-        self.current_st_dev = old_st_dev;
-
-        encoder.finish().await?;
-        result
-    }
-
-    async fn add_regular_file<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        fd: Fd,
-        file_name: &Path,
-        metadata: &Metadata,
-        file_size: u64,
-    ) -> Result<LinkOffset, Error> {
-        let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
-        let mut remaining = file_size;
-        let mut out = encoder.create_file(metadata, file_name, file_size).await?;
-        while remaining != 0 {
-            let mut got = match file.read(&mut self.file_copy_buffer[..]) {
-                Ok(0) => break,
-                Ok(got) => got,
-                Err(err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
-                Err(err) => bail!(err),
-            };
-            if got as u64 > remaining {
-                self.report_file_grew_while_reading()?;
-                got = remaining as usize;
-            }
-            out.write_all(&self.file_copy_buffer[..got]).await?;
-            remaining -= got as u64;
-        }
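-        // The file shrank while we were reading it: the encoder was already
-        // told the original size, so pad the entry with zeroes up to that size.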
-        if remaining > 0 {
-            self.report_file_shrunk_while_reading()?;
-            let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
-            vec::clear(&mut self.file_copy_buffer[..to_zero]);
-            while remaining != 0 {
-                let fill = remaining.min(self.file_copy_buffer.len() as u64) as usize;
-                out.write_all(&self.file_copy_buffer[..fill]).await?;
-                remaining -= fill as u64;
-            }
-        }
-
-        Ok(out.file_offset())
-    }
-
-    async fn add_symlink<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        fd: Fd,
-        file_name: &Path,
-        metadata: &Metadata,
-    ) -> Result<(), Error> {
-        let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
-        encoder.add_symlink(metadata, file_name, dest).await?;
-        Ok(())
-    }
-
-    async fn add_device<T: SeqWrite + Send>(
-        &mut self,
-        encoder: &mut Encoder<'_, T>,
-        file_name: &Path,
-        metadata: &Metadata,
-        stat: &FileStat,
-    ) -> Result<(), Error> {
-        Ok(encoder.add_device(
-            metadata,
-            file_name,
-            pxar::format::Device::from_dev_t(stat.st_rdev),
-        ).await?)
-    }
-}
-
-fn get_metadata(
-    fd: RawFd,
-    stat: &FileStat,
-    flags: Flags,
-    fs_magic: i64,
-    fs_feature_flags: &mut Flags,
-) -> Result<Metadata, Error> {
-    // some of the helpers below need a path rather than a raw fd
-    let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
-
-    let mut meta = Metadata {
-        stat: pxar::Stat {
-            mode: u64::from(stat.st_mode),
-            flags: 0,
-            uid: stat.st_uid,
-            gid: stat.st_gid,
-            mtime: pxar::format::StatxTimestamp::new(stat.st_mtime, stat.st_mtime_nsec as u32),
-        },
-        ..Default::default()
-    };
-
-    get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags, fs_feature_flags)?;
-    get_chattr(&mut meta, fd)?;
-    get_fat_attr(&mut meta, fd, fs_magic)?;
-    get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
-    Ok(meta)
-}
-
-fn get_fcaps(
-    meta: &mut Metadata,
-    fd: RawFd,
-    flags: Flags,
-    fs_feature_flags: &mut Flags,
-) -> Result<(), Error> {
-    if !flags.contains(Flags::WITH_FCAPS) {
-        return Ok(());
-    }
-
-    match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) {
-        Ok(data) => {
-            meta.fcaps = Some(pxar::format::FCaps { data });
-            Ok(())
-        }
-        Err(Errno::ENODATA) => Ok(()),
-        Err(Errno::EOPNOTSUPP) => {
-            fs_feature_flags.remove(Flags::WITH_FCAPS);
-            Ok(())
-        }
-        Err(Errno::EBADF) => Ok(()), // symlinks
-        Err(err) => bail!("failed to read file capabilities: {}", err),
-    }
-}
-
-fn get_xattr_fcaps_acl(
-    meta: &mut Metadata,
-    fd: RawFd,
-    proc_path: &Path,
-    flags: Flags,
-    fs_feature_flags: &mut Flags,
-) -> Result<(), Error> {
-    if !flags.contains(Flags::WITH_XATTRS) {
-        return Ok(());
-    }
-
-    let xattrs = match xattr::flistxattr(fd) {
-        Ok(names) => names,
-        Err(Errno::EOPNOTSUPP) => {
-            fs_feature_flags.remove(Flags::WITH_XATTRS);
-            return Ok(());
-        },
-        Err(Errno::EBADF) => return Ok(()), // symlinks
-        Err(err) => bail!("failed to read xattrs: {}", err),
-    };
-
-    for attr in &xattrs {
-        if xattr::is_security_capability(&attr) {
-            get_fcaps(meta, fd, flags, fs_feature_flags)?;
-            continue;
-        }
-
-        if xattr::is_acl(&attr) {
-            get_acl(meta, proc_path, flags, fs_feature_flags)?;
-            continue;
-        }
-
-        if !xattr::is_valid_xattr_name(&attr) {
-            continue;
-        }
-
-        match xattr::fgetxattr(fd, attr) {
-            Ok(data) => meta
-                .xattrs
-                .push(pxar::format::XAttr::new(attr.to_bytes(), data)),
-            Err(Errno::ENODATA) => (), // it got removed while we were iterating...
-            Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
-            Err(Errno::EBADF) => (),   // symlinks, shouldn't be able to reach this either
-            Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
-        }
-    }
-
-    Ok(())
-}
-
-fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
-    let mut attr: libc::c_long = 0;
-
-    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
-        Ok(_) => (),
-        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
-            return Ok(());
-        }
-        Err(err) => bail!("failed to read file attributes: {}", err),
-    }
-
-    metadata.stat.flags |= Flags::from_chattr(attr).bits();
-
-    Ok(())
-}
-
-fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
-    use proxmox::sys::linux::magic::*;
-
-    if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
-        return Ok(());
-    }
-
-    let mut attr: u32 = 0;
-
-    match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
-        Ok(_) => (),
-        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
-            return Ok(());
-        }
-        Err(err) => bail!("failed to read fat attributes: {}", err),
-    }
-
-    metadata.stat.flags |= Flags::from_fat_attr(attr).bits();
-
-    Ok(())
-}
-
-/// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
-fn get_quota_project_id(
-    metadata: &mut Metadata,
-    fd: RawFd,
-    flags: Flags,
-    magic: i64,
-) -> Result<(), Error> {
-    if !(metadata.is_dir() || metadata.is_regular_file()) {
-        return Ok(());
-    }
-
-    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
-        return Ok(());
-    }
-
-    use proxmox::sys::linux::magic::*;
-
-    match magic {
-        EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
-        _ => return Ok(()),
-    }
-
-    let mut fsxattr = fs::FSXAttr::default();
-    let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
-
-    // On some FUSE filesystems the FSGETXATTR ioctl may not be supported.
-    // In that case the projid stays 0 and the error is ignored.
-    if let Err(err) = res {
-        let errno = err
-            .as_errno()
-            .ok_or_else(|| format_err!("error while reading quota project id"))?;
-        if errno_is_unsupported(errno) {
-            return Ok(());
-        } else {
-            bail!("error while reading quota project id ({})", errno);
-        }
-    }
-
-    let projid = fsxattr.fsx_projid as u64;
-    if projid != 0 {
-        metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
-    }
-    Ok(())
-}
-
-fn get_acl(
-    metadata: &mut Metadata,
-    proc_path: &Path,
-    flags: Flags,
-    fs_feature_flags: &mut Flags,
-) -> Result<(), Error> {
-    if !flags.contains(Flags::WITH_ACL) {
-        return Ok(());
-    }
-
-    if metadata.is_symlink() {
-        return Ok(());
-    }
-
-    get_acl_do(metadata, proc_path, acl::ACL_TYPE_ACCESS, fs_feature_flags)?;
-
-    if metadata.is_dir() {
-        get_acl_do(metadata, proc_path, acl::ACL_TYPE_DEFAULT, fs_feature_flags)?;
-    }
-
-    Ok(())
-}
-
-fn get_acl_do(
-    metadata: &mut Metadata,
-    proc_path: &Path,
-    acl_type: acl::ACLType,
-    fs_feature_flags: &mut Flags,
-) -> Result<(), Error> {
-    // In order to be able to get ACLs with type ACL_TYPE_DEFAULT, we have
-    // to pass a path to acl_get_file(); acl_get_fd() can only retrieve
-    // ACL_TYPE_ACCESS attributes.
-    let acl = match acl::ACL::get_file(&proc_path, acl_type) {
-        Ok(acl) => acl,
-        // Don't bail if underlying endpoint does not support acls
-        Err(Errno::EOPNOTSUPP) => {
-            fs_feature_flags.remove(Flags::WITH_ACL);
-            return Ok(());
-        }
-        // Don't bail if the endpoint cannot carry acls
-        Err(Errno::EBADF) => return Ok(()),
-        // Don't bail if there is no data
-        Err(Errno::ENODATA) => return Ok(()),
-        Err(err) => bail!("error while reading ACL - {}", err),
-    };
-
-    process_acl(metadata, acl, acl_type)
-}
-
-fn process_acl(
-    metadata: &mut Metadata,
-    acl: acl::ACL,
-    acl_type: acl::ACLType,
-) -> Result<(), Error> {
-    use pxar::format::acl as pxar_acl;
-    use pxar::format::acl::{Group, GroupObject, Permissions, User};
-
-    let mut acl_user = Vec::new();
-    let mut acl_group = Vec::new();
-    let mut acl_group_obj = None;
-    let mut acl_default = None;
-    let mut user_obj_permissions = None;
-    let mut group_obj_permissions = None;
-    let mut other_permissions = None;
-    let mut mask_permissions = None;
-
-    for entry in &mut acl.entries() {
-        let tag = entry.get_tag_type()?;
-        let permissions = entry.get_permissions()?;
-        match tag {
-            acl::ACL_USER_OBJ => user_obj_permissions = Some(Permissions(permissions)),
-            acl::ACL_GROUP_OBJ => group_obj_permissions = Some(Permissions(permissions)),
-            acl::ACL_OTHER => other_permissions = Some(Permissions(permissions)),
-            acl::ACL_MASK => mask_permissions = Some(Permissions(permissions)),
-            acl::ACL_USER => {
-                acl_user.push(User {
-                    uid: entry.get_qualifier()?,
-                    permissions: Permissions(permissions),
-                });
-            }
-            acl::ACL_GROUP => {
-                acl_group.push(Group {
-                    gid: entry.get_qualifier()?,
-                    permissions: Permissions(permissions),
-                });
-            }
-            _ => bail!("Unexpected ACL tag encountered!"),
-        }
-    }
-
-    acl_user.sort();
-    acl_group.sort();
-
-    match acl_type {
-        acl::ACL_TYPE_ACCESS => {
-            // The mask permissions are mapped to the stat group permissions
-            // if ACL group permissions were set. Only in that case do we need
-            // to store the group permissions separately; otherwise they are
-            // identical to the stat group permissions.
-            if let (Some(gop), true) = (group_obj_permissions, mask_permissions.is_some()) {
-                acl_group_obj = Some(GroupObject { permissions: gop });
-            }
-
-            metadata.acl.users = acl_user;
-            metadata.acl.groups = acl_group;
-            metadata.acl.group_obj = acl_group_obj;
-        }
-        acl::ACL_TYPE_DEFAULT => {
-            if user_obj_permissions.is_some()
-                || group_obj_permissions.is_some()
-                || other_permissions.is_some()
-                || mask_permissions.is_some()
-            {
-                acl_default = Some(pxar_acl::Default {
-                    // The value is set to UINT64_MAX as placeholder if one
-                    // of the permissions is not set
-                    user_obj_permissions: user_obj_permissions.unwrap_or(Permissions::NO_MASK),
-                    group_obj_permissions: group_obj_permissions.unwrap_or(Permissions::NO_MASK),
-                    other_permissions: other_permissions.unwrap_or(Permissions::NO_MASK),
-                    mask_permissions: mask_permissions.unwrap_or(Permissions::NO_MASK),
-                });
-            }
-
-            metadata.acl.default_users = acl_user;
-            metadata.acl.default_groups = acl_group;
-            metadata.acl.default = acl_default;
-        }
-        _ => bail!("Unexpected ACL type encountered"),
-    }
-
-    Ok(())
-}
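-// Worked example for the ACL_TYPE_ACCESS branch above (illustrative, not part
-// of the original file): for an access ACL such as
-// `user::rw-,user:1000:rw-,group::rw-,mask::r--,other::---`, stat's group
-// bits carry the mask (r--), so the real group permissions (rw-) must be kept
-// separately in `metadata.acl.group_obj`; without a mask entry they are left
-// out, since they equal the stat group permissions.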
-
-/// Note that our pattern lists are "positive". `MatchType::Include` means the file is included.
-/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
-/// prefix.
-fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
-    use pathpatterns::MatchPattern;
-
-    let mut content = Vec::new();
-
-    for pattern in patterns {
-        match pattern.match_type() {
-            MatchType::Include => content.push(b'!'),
-            MatchType::Exclude => (),
-        }
-
-        match pattern.pattern() {
-            MatchPattern::Literal(lit) => content.extend(lit),
-            MatchPattern::Pattern(pat) => content.extend(pat.pattern().to_bytes()),
-        }
-
-        if pattern.match_flags() == MatchFlag::MATCH_DIRECTORIES && content.last() != Some(&b'/') {
-            content.push(b'/');
-        }
-
-        content.push(b'\n');
-    }
-
-    content
-}
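-// Illustration (not from the original file): for an include pattern `usr/**`
-// and an exclude pattern `**/*.tmp`, the generated exclude list inverts the
-// include with a `!` prefix. A minimal sketch, assuming
-// `MatchEntry::parse_pattern` from the `pathpatterns` crate:
-//
-// let patterns = vec![
-//     MatchEntry::parse_pattern("usr/**", PatternFlag::PATH_NAME, MatchType::Include)?,
-//     MatchEntry::parse_pattern("**/*.tmp", PatternFlag::PATH_NAME, MatchType::Exclude)?,
-// ];
-// assert_eq!(generate_pxar_excludes_cli(&patterns), b"!usr/**\n**/*.tmp\n");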
diff --git a/src/pxar/dir_stack.rs b/src/pxar/dir_stack.rs
deleted file mode 100644 (file)
index 86740ff..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-use std::ffi::OsString;
-use std::os::unix::io::{AsRawFd, RawFd};
-use std::path::{Path, PathBuf};
-
-use anyhow::{bail, format_err, Error};
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::{mkdirat, Mode};
-
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::BorrowedFd;
-use pxar::Metadata;
-
-use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
-
-pub struct PxarDir {
-    file_name: OsString,
-    metadata: Metadata,
-    dir: Option<Dir>,
-}
-
-impl PxarDir {
-    pub fn new(file_name: OsString, metadata: Metadata) -> Self {
-        Self {
-            file_name,
-            metadata,
-            dir: None,
-        }
-    }
-
-    pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
-        Self {
-            file_name: OsString::from("."),
-            metadata,
-            dir: Some(dir),
-        }
-    }
-
-    fn create_dir(
-        &mut self,
-        parent: RawFd,
-        allow_existing_dirs: bool,
-    ) -> Result<BorrowedFd, Error> {
-        match mkdirat(
-            parent,
-            self.file_name.as_os_str(),
-            perms_from_metadata(&self.metadata)?,
-        ) {
-            Ok(()) => (),
-            Err(err) => {
-                if !(allow_existing_dirs && err.already_exists()) {
-                    return Err(err.into());
-                }
-            }
-        }
-
-        self.open_dir(parent)
-    }
-
-    fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
-        let dir = Dir::openat(
-            parent,
-            self.file_name.as_os_str(),
-            OFlag::O_DIRECTORY,
-            Mode::empty(),
-        )?;
-
-        let fd = BorrowedFd::new(&dir);
-        self.dir = Some(dir);
-
-        Ok(fd)
-    }
-
-    pub fn try_as_borrowed_fd(&self) -> Option<BorrowedFd> {
-        self.dir.as_ref().map(BorrowedFd::new)
-    }
-
-    pub fn metadata(&self) -> &Metadata {
-        &self.metadata
-    }
-}
-
-pub struct PxarDirStack {
-    dirs: Vec<PxarDir>,
-    path: PathBuf,
-    created: usize,
-}
-
-impl PxarDirStack {
-    pub fn new(root: Dir, metadata: Metadata) -> Self {
-        Self {
-            dirs: vec![PxarDir::with_dir(root, metadata)],
-            path: PathBuf::from("/"),
-            created: 1, // the root directory exists
-        }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.dirs.is_empty()
-    }
-
-    pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
-        assert_single_path_component(&file_name)?;
-        self.path.push(&file_name);
-        self.dirs.push(PxarDir::new(file_name, metadata));
-        Ok(())
-    }
-
-    pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
-        let out = self.dirs.pop();
-        if !self.path.pop() {
-            if self.path.as_os_str() == "/" {
-                // we just finished the root directory, make sure this can only happen once:
-                self.path = PathBuf::new();
-            } else {
-                bail!("lost track of path");
-            }
-        }
-        self.created = self.created.min(self.dirs.len());
-        Ok(out)
-    }
-
-    pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<BorrowedFd, Error> {
-        // should not be possible given the way we use it:
-        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
-
-        let dirs_len = self.dirs.len();
-        let mut fd = self.dirs[self.created - 1]
-            .try_as_borrowed_fd()
-            .ok_or_else(|| format_err!("lost track of directory file descriptors"))?
-            .as_raw_fd();
-
-        while self.created < dirs_len {
-            fd = self.dirs[self.created]
-                .create_dir(fd, allow_existing_dirs)?
-                .as_raw_fd();
-            self.created += 1;
-        }
-
-        self.dirs[self.created - 1]
-            .try_as_borrowed_fd()
-            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
-    }
-
-    pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
-        let _: BorrowedFd = self.last_dir_fd(allow_existing_dirs)?;
-        Ok(())
-    }
-
-    pub fn root_dir_fd(&self) -> Result<BorrowedFd, Error> {
-        // should not be possible given the way we use it:
-        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
-
-        self.dirs[0]
-            .try_as_borrowed_fd()
-            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
-    }
-
-    pub fn path(&self) -> &Path {
-        &self.path
-    }
-}
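-// Usage sketch (illustrative only): the extractor pushes one entry per
-// directory it enters and pops it again at the matching goodbye table.
-// Directories are only created lazily, the first time `last_dir_fd()` has
-// to hand out a file descriptor below them:
-//
-// let mut stack = PxarDirStack::new(root_dir, root_metadata);
-// stack.push(OsString::from("etc"), etc_metadata)?;
-// let fd = stack.last_dir_fd(true)?; // mkdirat() happens here
-// let dir = stack.pop()?.expect("stack underrun");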
diff --git a/src/pxar/extract.rs b/src/pxar/extract.rs
deleted file mode 100644 (file)
index 16b2b49..0000000
+++ /dev/null
@@ -1,864 +0,0 @@
-//! Code for extraction of pxar contents onto the file system.
-
-use std::convert::TryFrom;
-use std::ffi::{CStr, CString, OsStr, OsString};
-use std::io;
-use std::os::unix::ffi::OsStrExt;
-use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
-use std::path::{Path, PathBuf};
-use std::sync::{Arc, Mutex};
-use std::pin::Pin;
-
-use futures::future::Future;
-use anyhow::{bail, format_err, Error};
-use nix::dir::Dir;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use pathpatterns::{MatchEntry, MatchList, MatchType};
-use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
-use pxar::decoder::aio::Decoder;
-use pxar::format::Device;
-use pxar::{Entry, EntryKind, Metadata};
-
-use proxmox::c_result;
-use proxmox::tools::{
-    fs::{create_path, CreateOptions},
-    io::{sparse_copy, sparse_copy_async},
-};
-
-use crate::pxar::dir_stack::PxarDirStack;
-use crate::pxar::metadata;
-use crate::pxar::Flags;
-
-use crate::tools::zip::{ZipEncoder, ZipEntry};
-
-pub struct PxarExtractOptions<'a> {
-    pub match_list: &'a[MatchEntry],
-    pub extract_match_default: bool,
-    pub allow_existing_dirs: bool,
-    pub on_error: Option<ErrorHandler>,
-}
-
-pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
-
-pub fn extract_archive<T, F>(
-    mut decoder: pxar::decoder::Decoder<T>,
-    destination: &Path,
-    feature_flags: Flags,
-    mut callback: F,
-    options: PxarExtractOptions,
-) -> Result<(), Error>
-where
-    T: pxar::decoder::SeqRead,
-    F: FnMut(&Path),
-{
-    // we use the goodbye entries to keep track of our directory traversal
-    decoder.enable_goodbye_entries(true);
-
-    let root = decoder
-        .next()
-        .ok_or_else(|| format_err!("found empty pxar archive"))?
-        .map_err(|err| format_err!("error reading pxar archive: {}", err))?;
-
-    if !root.is_dir() {
-        bail!("pxar archive does not start with a directory entry!");
-    }
-
-    create_path(
-        &destination,
-        None,
-        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
-    )
-    .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
-
-    let dir = Dir::open(
-        destination,
-        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
-        Mode::empty(),
-    )
-    .map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
-
-    let mut extractor = Extractor::new(
-        dir,
-        root.metadata().clone(),
-        options.allow_existing_dirs,
-        feature_flags,
-    );
-
-    if let Some(on_error) = options.on_error {
-        extractor.on_error(on_error);
-    }
-
-    let mut match_stack = Vec::new();
-    let mut err_path_stack = vec![OsString::from("/")];
-    let mut current_match = options.extract_match_default;
-    while let Some(entry) = decoder.next() {
-        let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
-
-        let file_name_os = entry.file_name();
-
-        // safety check: a file entry in an archive must never contain slashes:
-        if file_name_os.as_bytes().contains(&b'/') {
-            bail!("archive file entry contains slashes, which is invalid and a security concern");
-        }
-
-        let file_name = CString::new(file_name_os.as_bytes())
-            .map_err(|_| format_err!("encountered file name with null-bytes"))?;
-
-        let metadata = entry.metadata();
-
-        extractor.set_path(entry.path().as_os_str().to_owned());
-
-        let match_result = options.match_list.matches(
-            entry.path().as_os_str().as_bytes(),
-            Some(metadata.file_type() as u32),
-        );
-
-        let did_match = match match_result {
-            Some(MatchType::Include) => true,
-            Some(MatchType::Exclude) => false,
-            None => current_match,
-        };
-        match (did_match, entry.kind()) {
-            (_, EntryKind::Directory) => {
-                callback(entry.path());
-
-                let create = current_match && match_result != Some(MatchType::Exclude);
-                extractor
-                    .enter_directory(file_name_os.to_owned(), metadata.clone(), create)
-                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
-
-                // We're starting a new directory, push our old matching state and replace it with
-                // our new one:
-                match_stack.push(current_match);
-                current_match = did_match;
-
-                // When we hit the goodbye table we'll try to apply metadata to the directory, but
-                // the Goodbye entry will not contain the path, so push it to our path stack for
-                // error messages:
-                err_path_stack.push(extractor.clone_path());
-
-                Ok(())
-            }
-            (_, EntryKind::GoodbyeTable) => {
-                // go up a directory
-
-                extractor.set_path(err_path_stack.pop().ok_or_else(|| {
-                    format_err!(
-                        "error at entry {:?}: unexpected end of directory",
-                        file_name_os
-                    )
-                })?);
-
-                extractor
-                    .leave_directory()
-                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
-
-                // We left a directory, also get back our previous matching state. This is in sync
-                // with `dir_stack` so this should never be empty except for the final goodbye
-                // table, in which case we get back to the default of `true`.
-                current_match = match_stack.pop().unwrap_or(true);
-
-                Ok(())
-            }
-            (true, EntryKind::Symlink(link)) => {
-                callback(entry.path());
-                extractor.extract_symlink(&file_name, metadata, link.as_ref())
-            }
-            (true, EntryKind::Hardlink(link)) => {
-                callback(entry.path());
-                extractor.extract_hardlink(&file_name, link.as_os_str())
-            }
-            (true, EntryKind::Device(dev)) => {
-                if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
-                    callback(entry.path());
-                    extractor.extract_device(&file_name, metadata, dev)
-                } else {
-                    Ok(())
-                }
-            }
-            (true, EntryKind::Fifo) => {
-                if extractor.contains_flags(Flags::WITH_FIFOS) {
-                    callback(entry.path());
-                    extractor.extract_special(&file_name, metadata, 0)
-                } else {
-                    Ok(())
-                }
-            }
-            (true, EntryKind::Socket) => {
-                if extractor.contains_flags(Flags::WITH_SOCKETS) {
-                    callback(entry.path());
-                    extractor.extract_special(&file_name, metadata, 0)
-                } else {
-                    Ok(())
-                }
-            }
-            (true, EntryKind::File { size, .. }) => extractor.extract_file(
-                &file_name,
-                metadata,
-                *size,
-                &mut decoder.contents().ok_or_else(|| {
-                    format_err!("found regular file entry without contents in archive")
-                })?,
-            ),
-            (false, _) => Ok(()), // skip this
-        }
-        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
-    }
-
-    if !extractor.dir_stack.is_empty() {
-        bail!("unexpected eof while decoding pxar archive");
-    }
-
-    Ok(())
-}
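-// Call sketch (hypothetical caller, not part of the original file): extract a
-// complete archive from a file into `/target` with default feature flags:
-//
-// let file = std::fs::File::open("backup.pxar")?;
-// let options = PxarExtractOptions {
-//     match_list: &[],
-//     extract_match_default: true,
-//     allow_existing_dirs: false,
-//     on_error: None,
-// };
-// extract_archive(
-//     pxar::decoder::Decoder::from_std(file)?,
-//     Path::new("/target"),
-//     Flags::DEFAULT,
-//     |path| println!("{:?}", path),
-//     options,
-// )?;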
-
-/// Common state for file extraction.
-pub(crate) struct Extractor {
-    feature_flags: Flags,
-    allow_existing_dirs: bool,
-    dir_stack: PxarDirStack,
-
-    /// For better error output we need to track the current path in the Extractor state.
-    current_path: Arc<Mutex<OsString>>,
-
-    /// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
-    /// continue extracting or the passed error as `Err` to bail out.
-    on_error: ErrorHandler,
-}
-
-impl Extractor {
-    /// Create a new extractor state for a target directory.
-    pub fn new(
-        root_dir: Dir,
-        metadata: Metadata,
-        allow_existing_dirs: bool,
-        feature_flags: Flags,
-    ) -> Self {
-        Self {
-            dir_stack: PxarDirStack::new(root_dir, metadata),
-            allow_existing_dirs,
-            feature_flags,
-            current_path: Arc::new(Mutex::new(OsString::new())),
-            on_error: Box::new(Err),
-        }
-    }
-
-    /// We call this on errors. The error will be reformatted to include `current_path`. The
-    /// callback should decide whether this error was fatal (simply return it) to bail out early,
-    /// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
-    /// extracting.
-    pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
-        let path = Arc::clone(&self.current_path);
-        self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
-            on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
-        });
-    }
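-    // For example (illustrative), a caller that merely logs errors and keeps
-    // extracting could install:
-    //
-    // extractor.on_error(Box::new(|err| {
-    //     eprintln!("{}", err);
-    //     Ok(())
-    // }));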
-
-    pub fn set_path(&mut self, path: OsString) {
-        *self.current_path.lock().unwrap() = path;
-    }
-
-    pub fn clone_path(&self) -> OsString {
-        self.current_path.lock().unwrap().clone()
-    }
-
-    /// When encountering a directory during extraction, this is used to keep track of it. If
-    /// `create` is true it is immediately created and its metadata will be updated once we leave
-    /// it. If `create` is false it will only be created if it is going to have any actual content.
-    pub fn enter_directory(
-        &mut self,
-        file_name: OsString,
-        metadata: Metadata,
-        create: bool,
-    ) -> Result<(), Error> {
-        self.dir_stack.push(file_name, metadata)?;
-
-        if create {
-            self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
-        }
-
-        Ok(())
-    }
-
-    /// When done with a directory we can apply its metadata if it has been created.
-    pub fn leave_directory(&mut self) -> Result<(), Error> {
-        let path_info = self.dir_stack.path().to_owned();
-
-        let dir = self
-            .dir_stack
-            .pop()
-            .map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
-            .ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
-
-        if let Some(fd) = dir.try_as_borrowed_fd() {
-            metadata::apply(
-                self.feature_flags,
-                dir.metadata(),
-                fd.as_raw_fd(),
-                &path_info,
-                &mut self.on_error,
-            )
-            .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
-        }
-
-        Ok(())
-    }
-
-    fn contains_flags(&self, flag: Flags) -> bool {
-        self.feature_flags.contains(flag)
-    }
-
-    fn parent_fd(&mut self) -> Result<RawFd, Error> {
-        self.dir_stack
-            .last_dir_fd(self.allow_existing_dirs)
-            .map(|d| d.as_raw_fd())
-            .map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
-    }
-
-    pub fn extract_symlink(
-        &mut self,
-        file_name: &CStr,
-        metadata: &Metadata,
-        link: &OsStr,
-    ) -> Result<(), Error> {
-        let parent = self.parent_fd()?;
-        nix::unistd::symlinkat(link, Some(parent), file_name)?;
-        metadata::apply_at(
-            self.feature_flags,
-            metadata,
-            parent,
-            file_name,
-            self.dir_stack.path(),
-            &mut self.on_error,
-        )
-    }
-
-    pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
-        crate::pxar::tools::assert_relative_path(link)?;
-
-        let parent = self.parent_fd()?;
-        let root = self.dir_stack.root_dir_fd()?;
-        let target = CString::new(link.as_bytes())?;
-        nix::unistd::linkat(
-            Some(root.as_raw_fd()),
-            target.as_c_str(),
-            Some(parent),
-            file_name,
-            nix::unistd::LinkatFlags::NoSymlinkFollow,
-        )?;
-
-        Ok(())
-    }
-
-    pub fn extract_device(
-        &mut self,
-        file_name: &CStr,
-        metadata: &Metadata,
-        device: &Device,
-    ) -> Result<(), Error> {
-        self.extract_special(file_name, metadata, device.to_dev_t())
-    }
-
-    pub fn extract_special(
-        &mut self,
-        file_name: &CStr,
-        metadata: &Metadata,
-        device: libc::dev_t,
-    ) -> Result<(), Error> {
-        let mode = metadata.stat.mode;
-        let mode = u32::try_from(mode).map_err(|_| {
-            format_err!(
-                "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
-                mode,
-                mode,
-            )
-        })?;
-        let parent = self.parent_fd()?;
-        unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
-            .map_err(|err| format_err!("failed to create device node: {}", err))?;
-
-        metadata::apply_at(
-            self.feature_flags,
-            metadata,
-            parent,
-            file_name,
-            self.dir_stack.path(),
-            &mut self.on_error,
-        )
-    }
-
-    pub fn extract_file(
-        &mut self,
-        file_name: &CStr,
-        metadata: &Metadata,
-        size: u64,
-        contents: &mut dyn io::Read,
-    ) -> Result<(), Error> {
-        let parent = self.parent_fd()?;
-        let mut file = unsafe {
-            std::fs::File::from_raw_fd(
-                nix::fcntl::openat(
-                    parent,
-                    file_name,
-                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
-                    Mode::from_bits(0o600).unwrap(),
-                )
-                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
-            )
-        };
-
-        metadata::apply_initial_flags(
-            self.feature_flags,
-            metadata,
-            file.as_raw_fd(),
-            &mut self.on_error,
-        )
-        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
-
-        let result = sparse_copy(&mut *contents, &mut file)
-            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
-
-        if size != result.written {
-            bail!(
-                "extracted {} bytes of a file of {} bytes",
-                result.written,
-                size
-            );
-        }
-
-        if result.seeked_last {
-            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
-                Ok(_) => false,
-                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
-                Err(err) => bail!("error setting file size: {}", err),
-            } {}
-        }
-
-        metadata::apply(
-            self.feature_flags,
-            metadata,
-            file.as_raw_fd(),
-            self.dir_stack.path(),
-            &mut self.on_error,
-        )
-    }
-
-    pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
-        &mut self,
-        file_name: &CStr,
-        metadata: &Metadata,
-        size: u64,
-        contents: &mut T,
-    ) -> Result<(), Error> {
-        let parent = self.parent_fd()?;
-        let mut file = tokio::fs::File::from_std(unsafe {
-            std::fs::File::from_raw_fd(
-                nix::fcntl::openat(
-                    parent,
-                    file_name,
-                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
-                    Mode::from_bits(0o600).unwrap(),
-                )
-                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
-            )
-        });
-
-        metadata::apply_initial_flags(
-            self.feature_flags,
-            metadata,
-            file.as_raw_fd(),
-            &mut self.on_error,
-        )
-        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
-
-        let result = sparse_copy_async(&mut *contents, &mut file)
-            .await
-            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
-
-        if size != result.written {
-            bail!(
-                "extracted {} bytes of a file of {} bytes",
-                result.written,
-                size
-            );
-        }
-
-        if result.seeked_last {
-            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
-                Ok(_) => false,
-                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
-                Err(err) => bail!("error setting file size: {}", err),
-            } {}
-        }
-
-        metadata::apply(
-            self.feature_flags,
-            metadata,
-            file.as_raw_fd(),
-            self.dir_stack.path(),
-            &mut self.on_error,
-        )
-    }
-}
-
-pub async fn create_zip<T, W, P>(
-    output: W,
-    decoder: Accessor<T>,
-    path: P,
-    verbose: bool,
-) -> Result<(), Error>
-where
-    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
-    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
-    P: AsRef<Path>,
-{
-    let root = decoder.open_root().await?;
-    let file = root
-        .lookup(&path)
-        .await?
-        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
-
-    let mut prefix = PathBuf::new();
-    let mut components = file.entry().path().components();
-    components.next_back(); // discard the last component (the file itself)
-    for comp in components {
-        prefix.push(comp);
-    }
-
-    let mut zipencoder = ZipEncoder::new(output);
-    let mut decoder = decoder;
-    recurse_files_zip(&mut zipencoder, &mut decoder, &prefix, file, verbose)
-        .await
-        .map_err(|err| {
-            eprintln!("error during creating of zip: {}", err);
-            err
-        })?;
-
-    zipencoder
-        .finish()
-        .await
-        .map_err(|err| {
-            eprintln!("error during finishing of zip: {}", err);
-            err
-        })
-}
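-// Call sketch (hypothetical): stream the subdirectory "root" of an archive
-// `Accessor` into a ZIP file written via tokio:
-//
-// let output = tokio::fs::File::create("out.zip").await?;
-// create_zip(output, accessor, "root", true).await?;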
-
-fn recurse_files_zip<'a, T, W>(
-    zip: &'a mut ZipEncoder<W>,
-    decoder: &'a mut Accessor<T>,
-    prefix: &'a Path,
-    file: FileEntry<T>,
-    verbose: bool,
-) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
-where
-    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
-    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
-{
-    Box::pin(async move {
-        let metadata = file.entry().metadata();
-        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
-
-        match file.kind() {
-            EntryKind::File { .. } => {
-                if verbose {
-                    eprintln!("adding '{}' to zip", path.display());
-                }
-                let entry = ZipEntry::new(
-                    path,
-                    metadata.stat.mtime.secs,
-                    metadata.stat.mode as u16,
-                    true,
-                );
-                zip.add_entry(entry, Some(file.contents().await?))
-                   .await
-                   .map_err(|err| format_err!("could not send file entry: {}", err))?;
-            }
-            EntryKind::Hardlink(_) => {
-                let realfile = decoder.follow_hardlink(&file).await?;
-                if verbose {
-                    eprintln!("adding '{}' to zip", path.display());
-                }
-                let entry = ZipEntry::new(
-                    path,
-                    metadata.stat.mtime.secs,
-                    metadata.stat.mode as u16,
-                    true,
-                );
-                zip.add_entry(entry, Some(realfile.contents().await?))
-                   .await
-                   .map_err(|err| format_err!("could not send file entry: {}", err))?;
-            }
-            EntryKind::Directory => {
-                let dir = file.enter_directory().await?;
-                let mut readdir = dir.read_dir();
-                if verbose {
-                    eprintln!("adding '{}' to zip", path.display());
-                }
-                let entry = ZipEntry::new(
-                    path,
-                    metadata.stat.mtime.secs,
-                    metadata.stat.mode as u16,
-                    false,
-                );
-                zip.add_entry::<FileContents<T>>(entry, None).await?;
-                while let Some(entry) = readdir.next().await {
-                    let entry = entry?.decode_entry().await?;
-                    recurse_files_zip(zip, decoder, prefix, entry, verbose).await?;
-                }
-            }
-            _ => {} // ignore all else
-        };
-
-        Ok(())
-    })
-}
-
-fn get_extractor<DEST>(destination: DEST, metadata: Metadata) -> Result<Extractor, Error>
-where
-    DEST: AsRef<Path>,
-{
-    create_path(
-        &destination,
-        None,
-        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
-    )
-    .map_err(|err| {
-        format_err!(
-            "error creating directory {:?}: {}",
-            destination.as_ref(),
-            err
-        )
-    })?;
-
-    let dir = Dir::open(
-        destination.as_ref(),
-        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
-        Mode::empty(),
-    )
-    .map_err(|err| {
-        format_err!(
-            "unable to open target directory {:?}: {}",
-            destination.as_ref(),
-            err,
-        )
-    })?;
-
-    Ok(Extractor::new(dir, metadata, false, Flags::DEFAULT))
-}
-
-pub async fn extract_sub_dir<T, DEST, PATH>(
-    destination: DEST,
-    decoder: Accessor<T>,
-    path: PATH,
-    verbose: bool,
-) -> Result<(), Error>
-where
-    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
-    DEST: AsRef<Path>,
-    PATH: AsRef<Path>,
-{
-    let root = decoder.open_root().await?;
-
-    let mut extractor = get_extractor(
-        destination,
-        root.lookup_self().await?.entry().metadata().clone(),
-    )?;
-
-    let file = root
-        .lookup(&path)
-        .await?
-        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
-
-    recurse_files_extractor(&mut extractor, file, verbose).await
-}
-
-pub async fn extract_sub_dir_seq<S, DEST>(
-    destination: DEST,
-    mut decoder: Decoder<S>,
-    verbose: bool,
-) -> Result<(), Error>
-where
-    S: pxar::decoder::SeqRead + Unpin + Send + 'static,
-    DEST: AsRef<Path>,
-{
-    decoder.enable_goodbye_entries(true);
-    let root = match decoder.next().await {
-        Some(Ok(root)) => root,
-        Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
-        None => bail!("cannot extract empty archive"),
-    };
-
-    let mut extractor = get_extractor(destination, root.metadata().clone())?;
-
-    if let Err(err) = seq_files_extractor(&mut extractor, decoder, verbose).await {
-        eprintln!("error extracting pxar archive: {}", err);
-    }
-
-    Ok(())
-}
-
-fn extract_special(
-    extractor: &mut Extractor,
-    entry: &Entry,
-    file_name: &CStr,
-) -> Result<(), Error> {
-    let metadata = entry.metadata();
-    match entry.kind() {
-        EntryKind::Symlink(link) => {
-            extractor.extract_symlink(file_name, metadata, link.as_ref())?;
-        }
-        EntryKind::Hardlink(link) => {
-            extractor.extract_hardlink(file_name, link.as_os_str())?;
-        }
-        EntryKind::Device(dev) => {
-            if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
-                extractor.extract_device(file_name, metadata, dev)?;
-            }
-        }
-        EntryKind::Fifo => {
-            if extractor.contains_flags(Flags::WITH_FIFOS) {
-                extractor.extract_special(file_name, metadata, 0)?;
-            }
-        }
-        EntryKind::Socket => {
-            if extractor.contains_flags(Flags::WITH_SOCKETS) {
-                extractor.extract_special(file_name, metadata, 0)?;
-            }
-        }
-        _ => bail!("extract_special used with unsupported entry kind"),
-    }
-    Ok(())
-}
-
-fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
-    let file_name_os = entry.file_name().to_owned();
-
-    // safety check: a file entry in an archive must never contain slashes:
-    if file_name_os.as_bytes().contains(&b'/') {
-        bail!("archive file entry contains slashes, which is invalid and a security concern");
-    }
-
-    let file_name = CString::new(file_name_os.as_bytes())
-        .map_err(|_| format_err!("encountered file name with null-bytes"))?;
-
-    Ok((file_name_os, file_name))
-}
-
-async fn recurse_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
-    file: FileEntry<T>,
-    verbose: bool,
-) -> Result<(), Error>
-where
-    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
-{
-    let entry = file.entry();
-    let metadata = entry.metadata();
-    let (file_name_os, file_name) = get_filename(entry)?;
-
-    if verbose {
-        eprintln!("extracting: {}", file.path().display());
-    }
-
-    match file.kind() {
-        EntryKind::Directory => {
-            extractor
-                .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
-                .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
-
-            let dir = file.enter_directory().await?;
-            let mut seq_decoder = dir.decode_full().await?;
-            seq_decoder.enable_goodbye_entries(true);
-            seq_files_extractor(extractor, seq_decoder, verbose).await?;
-            extractor.leave_directory()?;
-        }
-        EntryKind::File { size, .. } => {
-            extractor
-                .async_extract_file(
-                    &file_name,
-                    metadata,
-                    *size,
-                    &mut file.contents().await.map_err(|_| {
-                        format_err!("found regular file entry without contents in archive")
-                    })?,
-                )
-                .await?
-        }
-        EntryKind::GoodbyeTable => {} // ignore
-        _ => extract_special(extractor, entry, &file_name)?,
-    }
-    Ok(())
-}
-
-async fn seq_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
-    mut decoder: pxar::decoder::aio::Decoder<T>,
-    verbose: bool,
-) -> Result<(), Error>
-where
-    T: pxar::decoder::SeqRead,
-{
-    let mut dir_level = 0;
-    loop {
-        let entry = match decoder.next().await {
-            Some(entry) => entry?,
-            None => return Ok(()),
-        };
-
-        let metadata = entry.metadata();
-        let (file_name_os, file_name) = get_filename(&entry)?;
-
-        if verbose && !matches!(entry.kind(), EntryKind::GoodbyeTable) {
-            eprintln!("extracting: {}", entry.path().display());
-        }
-
-        if let Err(err) = async {
-            match entry.kind() {
-                EntryKind::Directory => {
-                    dir_level += 1;
-                    extractor
-                        .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
-                        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
-                }
-                EntryKind::File { size, .. } => {
-                    extractor
-                        .async_extract_file(
-                            &file_name,
-                            metadata,
-                            *size,
-                            &mut decoder.contents().ok_or_else(|| {
-                                format_err!("found regular file entry without contents in archive")
-                            })?,
-                        )
-                        .await?
-                }
-                EntryKind::GoodbyeTable => {
-                    dir_level -= 1;
-                    extractor.leave_directory()?;
-                }
-                _ => extract_special(extractor, &entry, &file_name)?,
-            }
-            Ok(()) as Result<(), Error>
-        }
-        .await
-        {
-            let display = entry.path().display().to_string();
-            eprintln!(
-                "error extracting {}: {}",
-                if matches!(entry.kind(), EntryKind::GoodbyeTable) {
-                    "<directory>"
-                } else {
-                    &display
-                },
-                err
-            );
-        }
-
-        if dir_level < 0 {
-            // we've encountered one more goodbye table than directory entries,
-            // meaning we've left the dir we started in - exit early, otherwise
-            // the extractor might panic
-            return Ok(());
-        }
-    }
-}
diff --git a/src/pxar/flags.rs b/src/pxar/flags.rs
deleted file mode 100644 (file)
index eca5ee9..0000000
+++ /dev/null
@@ -1,378 +0,0 @@
-//! Feature flags for *pxar* allow controlling what is stored/restored
-//! in/from the archive.
-//! Flags for the known supported features of a given filesystem can be
-//! derived from the superblock's magic number.
-
-use libc::c_long;
-
-use bitflags::bitflags;
-
-bitflags! {
-    pub struct Flags: u64 {
-        /// FAT-style 2s time granularity
-        const WITH_2SEC_TIME                   = 0x40;
-        /// Preserve read only flag of files
-        const WITH_READ_ONLY                   = 0x80;
-        /// Preserve unix permissions
-        const WITH_PERMISSIONS                 = 0x100;
-        /// Include symbolic links
-        const WITH_SYMLINKS                    = 0x200;
-        /// Include device nodes
-        const WITH_DEVICE_NODES                = 0x400;
-        /// Include FIFOs
-        const WITH_FIFOS                       = 0x800;
-        /// Include Sockets
-        const WITH_SOCKETS                     = 0x1000;
-
-        /// Preserve DOS file flag `HIDDEN`
-        const WITH_FLAG_HIDDEN                 = 0x2000;
-        /// Preserve DOS file flag `SYSTEM`
-        const WITH_FLAG_SYSTEM                 = 0x4000;
-        /// Preserve DOS file flag `ARCHIVE`
-        const WITH_FLAG_ARCHIVE                = 0x8000;
-
-        // chattr() flags
-        /// Linux file attribute `APPEND`
-        const WITH_FLAG_APPEND                 = 0x10000;
-        /// Linux file attribute `NOATIME`
-        const WITH_FLAG_NOATIME                = 0x20000;
-        /// Linux file attribute `COMPR`
-        const WITH_FLAG_COMPR                  = 0x40000;
-        /// Linux file attribute `NOCOW`
-        const WITH_FLAG_NOCOW                  = 0x80000;
-        /// Linux file attribute `NODUMP`
-        const WITH_FLAG_NODUMP                 = 0x0010_0000;
-        /// Linux file attribute `DIRSYNC`
-        const WITH_FLAG_DIRSYNC                = 0x0020_0000;
-        /// Linux file attribute `IMMUTABLE`
-        const WITH_FLAG_IMMUTABLE              = 0x0040_0000;
-        /// Linux file attribute `SYNC`
-        const WITH_FLAG_SYNC                   = 0x0080_0000;
-        /// Linux file attribute `NOCOMP`
-        const WITH_FLAG_NOCOMP                 = 0x0100_0000;
-        /// Linux file attribute `PROJINHERIT`
-        const WITH_FLAG_PROJINHERIT            = 0x0200_0000;
-
-
-        /// Preserve BTRFS subvolume flag
-        const WITH_SUBVOLUME                   = 0x0400_0000;
-        /// Preserve BTRFS read-only subvolume flag
-        const WITH_SUBVOLUME_RO                = 0x0800_0000;
-
-        /// Preserve Extended Attribute metadata
-        const WITH_XATTRS                      = 0x1000_0000;
-        /// Preserve Access Control List metadata
-        const WITH_ACL                         = 0x2000_0000;
-        /// Preserve SELinux security context
-        const WITH_SELINUX                     = 0x4000_0000;
-        /// Preserve "security.capability" xattr
-        const WITH_FCAPS                       = 0x8000_0000;
-
-        /// Preserve XFS/ext4/ZFS project quota ID
-        const WITH_QUOTA_PROJID                = 0x0001_0000_0000;
-
-        /// Support ".pxarexclude" files
-        const EXCLUDE_FILE                     = 0x1000_0000_0000_0000;
-        /// Exclude submounts
-        const EXCLUDE_SUBMOUNTS                = 0x4000_0000_0000_0000;
-        /// Exclude entries with chattr flag NODUMP
-        const EXCLUDE_NODUMP                   = 0x8000_0000_0000_0000;
-
-        // Definitions of typical feature flag sets for the *pxar* encoder/decoder.
-        // This avoids expensive syscalls for features the filesystem does not support.
-
-        /// All chattr file attributes
-        const WITH_CHATTR =
-            Flags::WITH_FLAG_APPEND.bits() |
-            Flags::WITH_FLAG_NOATIME.bits() |
-            Flags::WITH_FLAG_COMPR.bits() |
-            Flags::WITH_FLAG_NOCOW.bits() |
-            Flags::WITH_FLAG_NODUMP.bits() |
-            Flags::WITH_FLAG_DIRSYNC.bits() |
-            Flags::WITH_FLAG_IMMUTABLE.bits() |
-            Flags::WITH_FLAG_SYNC.bits() |
-            Flags::WITH_FLAG_NOCOMP.bits() |
-            Flags::WITH_FLAG_PROJINHERIT.bits();
-
-        /// All FAT file attributes
-        const WITH_FAT_ATTRS =
-            Flags::WITH_FLAG_HIDDEN.bits() |
-            Flags::WITH_FLAG_SYSTEM.bits() |
-            Flags::WITH_FLAG_ARCHIVE.bits();
-
-        /// All bits that may also be exposed via fuse
-        const WITH_FUSE =
-            Flags::WITH_2SEC_TIME.bits() |
-            Flags::WITH_READ_ONLY.bits() |
-            Flags::WITH_PERMISSIONS.bits() |
-            Flags::WITH_SYMLINKS.bits() |
-            Flags::WITH_DEVICE_NODES.bits() |
-            Flags::WITH_FIFOS.bits() |
-            Flags::WITH_SOCKETS.bits() |
-            Flags::WITH_FAT_ATTRS.bits() |
-            Flags::WITH_CHATTR.bits() |
-            Flags::WITH_XATTRS.bits();
-
-
-        /// Default feature flags for encoder/decoder
-        const DEFAULT =
-            Flags::WITH_SYMLINKS.bits() |
-            Flags::WITH_DEVICE_NODES.bits() |
-            Flags::WITH_FIFOS.bits() |
-            Flags::WITH_SOCKETS.bits() |
-            Flags::WITH_FLAG_HIDDEN.bits() |
-            Flags::WITH_FLAG_SYSTEM.bits() |
-            Flags::WITH_FLAG_ARCHIVE.bits() |
-            Flags::WITH_FLAG_APPEND.bits() |
-            Flags::WITH_FLAG_NOATIME.bits() |
-            Flags::WITH_FLAG_COMPR.bits() |
-            Flags::WITH_FLAG_NOCOW.bits() |
-            //WITH_FLAG_NODUMP.bits() |
-            Flags::WITH_FLAG_DIRSYNC.bits() |
-            Flags::WITH_FLAG_IMMUTABLE.bits() |
-            Flags::WITH_FLAG_SYNC.bits() |
-            Flags::WITH_FLAG_NOCOMP.bits() |
-            Flags::WITH_FLAG_PROJINHERIT.bits() |
-            Flags::WITH_SUBVOLUME.bits() |
-            Flags::WITH_SUBVOLUME_RO.bits() |
-            Flags::WITH_XATTRS.bits() |
-            Flags::WITH_ACL.bits() |
-            Flags::WITH_SELINUX.bits() |
-            Flags::WITH_FCAPS.bits() |
-            Flags::WITH_QUOTA_PROJID.bits() |
-            Flags::EXCLUDE_NODUMP.bits() |
-            Flags::EXCLUDE_FILE.bits();
-    }
-}
-
-impl Default for Flags {
-    fn default() -> Flags {
-        Flags::DEFAULT
-    }
-}
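-// Illustration (not part of the original file): flag sets combine with the
-// usual bitflags operators, and the composed sets above contain their parts:
-//
-// let flags = Flags::WITH_FUSE | Flags::WITH_ACL;
-// assert!(flags.contains(Flags::WITH_XATTRS)); // included via WITH_FUSE
-// assert!(!Flags::DEFAULT.contains(Flags::WITH_FLAG_NODUMP)); // commented out above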
-
-// from /usr/include/linux/fs.h
-const FS_APPEND_FL: c_long =      0x0000_0020;
-const FS_NOATIME_FL: c_long =     0x0000_0080;
-const FS_COMPR_FL: c_long =       0x0000_0004;
-const FS_NOCOW_FL: c_long =       0x0080_0000;
-const FS_NODUMP_FL: c_long =      0x0000_0040;
-const FS_DIRSYNC_FL: c_long =     0x0001_0000;
-const FS_IMMUTABLE_FL: c_long =   0x0000_0010;
-const FS_SYNC_FL: c_long =        0x0000_0008;
-const FS_NOCOMP_FL: c_long =      0x0000_0400;
-const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
-
-pub(crate) const INITIAL_FS_FLAGS: c_long =
-    FS_NOATIME_FL
-    | FS_COMPR_FL
-    | FS_NOCOW_FL
-    | FS_NOCOMP_FL
-    | FS_PROJINHERIT_FL;
-
-#[rustfmt::skip]
-const CHATTR_MAP: [(Flags, c_long); 10] = [
-    ( Flags::WITH_FLAG_APPEND,      FS_APPEND_FL      ),
-    ( Flags::WITH_FLAG_NOATIME,     FS_NOATIME_FL     ),
-    ( Flags::WITH_FLAG_COMPR,       FS_COMPR_FL       ),
-    ( Flags::WITH_FLAG_NOCOW,       FS_NOCOW_FL       ),
-    ( Flags::WITH_FLAG_NODUMP,      FS_NODUMP_FL      ),
-    ( Flags::WITH_FLAG_DIRSYNC,     FS_DIRSYNC_FL     ),
-    ( Flags::WITH_FLAG_IMMUTABLE,   FS_IMMUTABLE_FL   ),
-    ( Flags::WITH_FLAG_SYNC,        FS_SYNC_FL        ),
-    ( Flags::WITH_FLAG_NOCOMP,      FS_NOCOMP_FL      ),
-    ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
-];
-
-// from /usr/include/linux/msdos_fs.h
-const ATTR_HIDDEN: u32 =      2;
-const ATTR_SYS: u32 =         4;
-const ATTR_ARCH: u32 =       32;
-
-#[rustfmt::skip]
-const FAT_ATTR_MAP: [(Flags, u32); 3] = [
-    ( Flags::WITH_FLAG_HIDDEN,  ATTR_HIDDEN ),
-    ( Flags::WITH_FLAG_SYSTEM,  ATTR_SYS    ),
-    ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH   ),
-];
-
-impl Flags {
-    /// Get a set of feature flags from file attributes.
-    pub fn from_chattr(attr: c_long) -> Flags {
-        let mut flags = Flags::empty();
-
-        for (fe_flag, fs_flag) in &CHATTR_MAP {
-            if (attr & fs_flag) != 0 {
-                flags |= *fe_flag;
-            }
-        }
-
-        flags
-    }
-
-    /// Get the chattr bit representation of these feature flags.
-    pub fn to_chattr(self) -> c_long {
-        let mut flags: c_long = 0;
-
-        for (fe_flag, fs_flag) in &CHATTR_MAP {
-            if self.contains(*fe_flag) {
-                flags |= *fs_flag;
-            }
-        }
-
-        flags
-    }
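-    // Example round-trip:
-    //   Flags::from_chattr(FS_APPEND_FL | FS_SYNC_FL).to_chattr()
-    //       == FS_APPEND_FL | FS_SYNC_FL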
-
-    pub fn to_initial_chattr(self) -> c_long {
-        self.to_chattr() & INITIAL_FS_FLAGS
-    }
-
-    /// Get a set of feature flags from FAT attributes.
-    pub fn from_fat_attr(attr: u32) -> Flags {
-        let mut flags = Flags::empty();
-
-        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
-            if (attr & fs_flag) != 0 {
-                flags |= *fe_flag;
-            }
-        }
-
-        flags
-    }
-
-    /// Get the fat attribute bit representation of these feature flags.
-    pub fn to_fat_attr(self) -> u32 {
-        let mut flags = 0u32;
-
-        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
-            if self.contains(*fe_flag) {
-                flags |= *fs_flag;
-            }
-        }
-
-        flags
-    }
-
-    /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
-    pub fn from_magic(magic: i64) -> Flags {
-        use proxmox::sys::linux::magic::*;
-        match magic {
-            MSDOS_SUPER_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_FAT_ATTRS
-            },
-            EXT4_SUPER_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_FLAG_APPEND |
-                Flags::WITH_FLAG_NOATIME |
-                Flags::WITH_FLAG_NODUMP |
-                Flags::WITH_FLAG_DIRSYNC |
-                Flags::WITH_FLAG_IMMUTABLE |
-                Flags::WITH_FLAG_SYNC |
-                Flags::WITH_XATTRS |
-                Flags::WITH_ACL |
-                Flags::WITH_SELINUX |
-                Flags::WITH_FCAPS |
-                Flags::WITH_QUOTA_PROJID
-            },
-            XFS_SUPER_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_FLAG_APPEND |
-                Flags::WITH_FLAG_NOATIME |
-                Flags::WITH_FLAG_NODUMP |
-                Flags::WITH_FLAG_IMMUTABLE |
-                Flags::WITH_FLAG_SYNC |
-                Flags::WITH_XATTRS |
-                Flags::WITH_ACL |
-                Flags::WITH_SELINUX |
-                Flags::WITH_FCAPS |
-                Flags::WITH_QUOTA_PROJID
-            },
-            ZFS_SUPER_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_FLAG_APPEND |
-                Flags::WITH_FLAG_NOATIME |
-                Flags::WITH_FLAG_NODUMP |
-                Flags::WITH_FLAG_DIRSYNC |
-                Flags::WITH_FLAG_IMMUTABLE |
-                Flags::WITH_FLAG_SYNC |
-                Flags::WITH_XATTRS |
-                Flags::WITH_ACL |
-                Flags::WITH_SELINUX |
-                Flags::WITH_FCAPS |
-                Flags::WITH_QUOTA_PROJID
-            },
-            BTRFS_SUPER_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_FLAG_APPEND |
-                Flags::WITH_FLAG_NOATIME |
-                Flags::WITH_FLAG_COMPR |
-                Flags::WITH_FLAG_NOCOW |
-                Flags::WITH_FLAG_NODUMP |
-                Flags::WITH_FLAG_DIRSYNC |
-                Flags::WITH_FLAG_IMMUTABLE |
-                Flags::WITH_FLAG_SYNC |
-                Flags::WITH_FLAG_NOCOMP |
-                Flags::WITH_XATTRS |
-                Flags::WITH_ACL |
-                Flags::WITH_SELINUX |
-                Flags::WITH_SUBVOLUME |
-                Flags::WITH_SUBVOLUME_RO |
-                Flags::WITH_FCAPS
-            },
-            TMPFS_MAGIC => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_ACL |
-                Flags::WITH_SELINUX
-            },
-            // FUSE mounts are special as the supported feature set
-            // is not clear a priori.
-            FUSE_SUPER_MAGIC => {
-                Flags::WITH_FUSE
-            },
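-            // Fall back to a conservative feature set for all other
-            // (unknown) filesystems.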
-            _ => {
-                Flags::WITH_2SEC_TIME |
-                Flags::WITH_READ_ONLY |
-                Flags::WITH_PERMISSIONS |
-                Flags::WITH_SYMLINKS |
-                Flags::WITH_DEVICE_NODES |
-                Flags::WITH_FIFOS |
-                Flags::WITH_SOCKETS |
-                Flags::WITH_XATTRS |
-                Flags::WITH_ACL |
-                Flags::WITH_FCAPS
-            },
-        }
-    }
-}
diff --git a/src/pxar/fuse.rs b/src/pxar/fuse.rs
deleted file mode 100644 (file)
index a5001cb..0000000
+++ /dev/null
@@ -1,690 +0,0 @@
-//! Asynchronous fuse implementation.
-
-use std::collections::BTreeMap;
-use std::convert::TryFrom;
-use std::ffi::{OsStr, OsString};
-use std::future::Future;
-use std::io;
-use std::mem;
-use std::ops::Range;
-use std::os::unix::ffi::OsStrExt;
-use std::path::Path;
-use std::pin::Pin;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{Arc, RwLock};
-use std::task::{Context, Poll};
-
-use anyhow::{format_err, Error};
-use futures::channel::mpsc::UnboundedSender;
-use futures::select;
-use futures::sink::SinkExt;
-use futures::stream::{StreamExt, TryStreamExt};
-
-use proxmox::tools::vec;
-use pxar::accessor::{self, EntryRangeInfo, ReadAt};
-
-use proxmox_fuse::requests::{self, FuseRequest};
-use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
-
-use crate::tools::xattr;
-
-/// We mark inodes for regular files this way so we know how to access them.
-const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
-
-#[inline]
-fn is_dir_inode(inode: u64) -> bool {
-    0 == (inode & NON_DIRECTORY_INODE)
-}
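-
-// Directory inodes are derived from the *end* offset of their archive entry,
-// all other inodes from the *start* offset with the high bit set (see
-// `to_inode()` below), keeping the two inode spaces disjoint.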
-
-/// Our reader type instance used for accessors.
-pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;
-
-/// Our Accessor type instance.
-pub type Accessor = accessor::aio::Accessor<Reader>;
-
-/// Our Directory type instance.
-pub type Directory = accessor::aio::Directory<Reader>;
-
-/// Our FileEntry type instance.
-pub type FileEntry = accessor::aio::FileEntry<Reader>;
-
-/// Our FileContents type instance.
-pub type FileContents = accessor::aio::FileContents<Reader>;
-
-pub struct Session {
-    fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
-}
-
-impl Session {
-    /// Create a fuse session for an archive.
-    pub async fn mount_path(
-        archive_path: &Path,
-        options: &OsStr,
-        verbose: bool,
-        mountpoint: &Path,
-    ) -> Result<Self, Error> {
-        // TODO: Add a buffered/caching ReadAt layer?
-        let file = std::fs::File::open(archive_path)?;
-        let file_size = file.metadata()?.len();
-        let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
-        let accessor = Accessor::new(reader, file_size).await?;
-        Self::mount(accessor, options, verbose, mountpoint)
-    }
-
-    /// Create a new fuse session for the given pxar `Accessor`.
-    pub fn mount(
-        accessor: Accessor,
-        options: &OsStr,
-        verbose: bool,
-        path: &Path,
-    ) -> Result<Self, Error> {
-        let fuse = Fuse::builder("pxar-mount")?
-            .debug()
-            .options_os(options)?
-            .enable_readdirplus()
-            .enable_read()
-            .enable_readlink()
-            .enable_read_xattr()
-            .build()?
-            .mount(path)?;
-
-        let session = SessionImpl::new(accessor, verbose);
-
-        Ok(Self {
-            fut: Box::pin(session.main(fuse)),
-        })
-    }
-}
-
-impl Future for Session {
-    type Output = Result<(), Error>;
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-        Pin::new(&mut self.fut).poll(cx)
-    }
-}
-
-/// We use this to return an errno value back to the kernel.
-macro_rules! io_return {
-    ($errno:expr) => {
-        return Err(::std::io::Error::from_raw_os_error($errno).into());
-    };
-}
-
-/// Format an "other" error, see `io_bail` below for details.
-macro_rules! io_format_err {
-    ($($fmt:tt)*) => {
-        ::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
-    }
-}
-
-/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
-/// request to be answered with a generic `EIO` error code. The error message contained in here
-/// will be printed to stderr if the verbose flag is used, otherwise silently dropped.
-macro_rules! io_bail {
-    ($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
-}
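-
-// Example: a missing entry is answered with `io_return!(libc::ENOENT);`,
-// which produces an ENOENT reply instead of tearing down the fuse main loop.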
-
-/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
-/// accessed.
-struct Lookup {
-    refs: AtomicUsize,
-
-    inode: u64,
-    parent: u64,
-    entry_range_info: EntryRangeInfo,
-    content_range: Option<Range<u64>>,
-}
-
-impl Lookup {
-    fn new(
-        inode: u64,
-        parent: u64,
-        entry_range_info: EntryRangeInfo,
-        content_range: Option<Range<u64>>,
-    ) -> Box<Lookup> {
-        Box::new(Self {
-            refs: AtomicUsize::new(1),
-            inode,
-            parent,
-            entry_range_info,
-            content_range,
-        })
-    }
-
-    /// Decrease the reference count by `count`. Note that this must not include the reference held
-    /// by `self` itself, so this must never decrease the count below 1.
-    fn forget(&self, count: usize) -> Result<(), Error> {
-        loop {
-            let old = self.refs.load(Ordering::Acquire);
-            if count >= old {
-                io_bail!("reference count underflow");
-            }
-            let new = old - count;
-            match self
-                .refs
-                .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
-            {
-                Ok(_) => break Ok(()),
-                Err(_) => continue,
-            }
-        }
-    }
-
-    fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
-        if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
-            panic!("atomic refcount increased from 0 to 1");
-        }
-
-        LookupRef {
-            session,
-            lookup: self as *const Lookup,
-        }
-    }
-}
-
-struct LookupRef<'a> {
-    session: &'a SessionImpl,
-    lookup: *const Lookup,
-}
-
-unsafe impl<'a> Send for LookupRef<'a> {}
-unsafe impl<'a> Sync for LookupRef<'a> {}
-
-impl<'a> Clone for LookupRef<'a> {
-    fn clone(&self) -> Self {
-        self.get_ref(self.session)
-    }
-}
-
-impl<'a> std::ops::Deref for LookupRef<'a> {
-    type Target = Lookup;
-
-    fn deref(&self) -> &Self::Target {
-        unsafe { &*self.lookup }
-    }
-}
-
-impl<'a> Drop for LookupRef<'a> {
-    fn drop(&mut self) {
-        if self.lookup.is_null() {
-            return;
-        }
-
-        if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
-            let inode = self.inode;
-            drop(self.session.lookups.write().unwrap().remove(&inode));
-        }
-    }
-}
-
-impl<'a> LookupRef<'a> {
-    fn leak(mut self) -> &'a Lookup {
-        unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
-    }
-}
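-
-// `LookupRef::leak()` is used after an entry was successfully sent to the
-// kernel (lookup or readdirplus): the kernel then owns that reference and
-// hands it back later via a FORGET request (see `SessionImpl::forget()`).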
-
-struct SessionImpl {
-    accessor: Accessor,
-    verbose: bool,
-    lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
-}
-
-impl SessionImpl {
-    fn new(accessor: Accessor, verbose: bool) -> Self {
-        let root = Lookup::new(
-            ROOT_ID,
-            ROOT_ID,
-            EntryRangeInfo::toplevel(0..accessor.size()),
-            None,
-        );
-
-        let mut tree = BTreeMap::new();
-        tree.insert(ROOT_ID, root);
-
-        Self {
-            accessor,
-            verbose,
-            lookups: RwLock::new(tree),
-        }
-    }
-
-    /// Here's how we deal with errors:
-    ///
-    /// Any error will be printed if the verbose flag was set, otherwise the message will be
-    /// silently dropped.
-    ///
-    /// Opaque errors will cause the fuse main loop to bail out with that error.
-    ///
-    /// `io::Error`s will cause the fuse request to be responded to with the given `io::Error`. An
-    /// `io::ErrorKind::Other` translates to a generic `EIO`.
-    async fn handle_err(
-        &self,
-        request: impl FuseRequest,
-        err: Error,
-        mut sender: UnboundedSender<Error>,
-    ) {
-        let final_result = match err.downcast::<io::Error>() {
-            Ok(err) => {
-                if err.kind() == io::ErrorKind::Other && self.verbose {
-                    eprintln!("an IO error occurred: {}", err);
-                }
-
-                // fail the request
-                request.io_fail(err).map_err(Error::from)
-            }
-            Err(err) => {
-                // `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
-                if self.verbose {
-                    eprintln!("internal error: {}, bailing out", err);
-                }
-                Err(err)
-            }
-        };
-        if let Err(err) = final_result {
-            // either we failed to send the error code to fuse, or the above was not an
-            // `io::Error`, so in this case notify the main loop:
-            sender
-                .send(err)
-                .await
-                .expect("failed to propagate error to main loop");
-        }
-    }
-
-    async fn main(self, fuse: Fuse) -> Result<(), Error> {
-        Arc::new(self).main_do(fuse).await
-    }
-
-    async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
-        let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
-        let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
-        loop {
-            select! {
-                request = fuse.try_next() => match request? {
-                    Some(request) => {
-                        tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
-                    }
-                    None => break,
-                },
-                err = err_recv.next() => match err {
-                    Some(err) => if self.verbose {
-                        eprintln!("cancelling fuse main loop due to error: {}", err);
-                        return Err(err);
-                    },
-                    None => panic!("error channel was closed unexpectedly"),
-                },
-            }
-        }
-        Ok(())
-    }
-
-    async fn handle_request(
-        self: Arc<Self>,
-        request: Request,
-        mut err_sender: UnboundedSender<Error>,
-    ) {
-        let result: Result<(), Error> = match request {
-            Request::Lookup(request) => {
-                match self.lookup(request.parent, &request.file_name).await {
-                    Ok((entry, lookup)) => match request.reply(&entry) {
-                        Ok(()) => {
-                            lookup.leak();
-                            Ok(())
-                        }
-                        Err(err) => Err(Error::from(err)),
-                    },
-                    Err(err) => return self.handle_err(request, err, err_sender).await,
-                }
-            }
-            Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
-                Ok(()) => {
-                    request.reply();
-                    Ok(())
-                }
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::Getattr(request) => match self.getattr(request.inode).await {
-                Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
-                Ok(lookups) => match request.reply() {
-                    Ok(()) => {
-                        for i in lookups {
-                            i.leak();
-                        }
-                        Ok(())
-                    }
-                    Err(err) => Err(Error::from(err)),
-                },
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::Read(request) => {
-                match self.read(request.inode, request.size, request.offset).await {
-                    Ok(data) => request.reply(&data).map_err(Error::from),
-                    Err(err) => return self.handle_err(request, err, err_sender).await,
-                }
-            }
-            Request::Readlink(request) => match self.readlink(request.inode).await {
-                Ok(data) => request.reply(&data).map_err(Error::from),
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
-                Ok(data) => request
-                    .reply(
-                        data.into_iter()
-                            .fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
-                    )
-                    .map_err(Error::from),
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
-                Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
-                Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
-                Err(err) => return self.handle_err(request, err, err_sender).await,
-            },
-            Request::GetXAttrSize(request) => {
-                match self.getxattr(request.inode, &request.attr_name).await {
-                    Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
-                    Err(err) => return self.handle_err(request, err, err_sender).await,
-                }
-            }
-            Request::GetXAttr(request) => {
-                match self.getxattr(request.inode, &request.attr_name).await {
-                    Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
-                    Err(err) => return self.handle_err(request, err, err_sender).await,
-                }
-            }
-            other => {
-                if self.verbose {
-                    eprintln!("Received unexpected fuse request");
-                }
-                other.fail(libc::ENOSYS).map_err(Error::from)
-            }
-        };
-
-        if let Err(err) = result {
-            err_sender
-                .send(err)
-                .await
-                .expect("failed to propagate error to main loop");
-        }
-    }
-
-    fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
-        let lookups = self.lookups.read().unwrap();
-        if let Some(lookup) = lookups.get(&inode) {
-            return Ok(lookup.get_ref(self));
-        }
-        io_return!(libc::ENOENT);
-    }
-
-    async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
-        if inode == ROOT_ID {
-            Ok(self.accessor.open_root().await?)
-        } else if !is_dir_inode(inode) {
-            io_return!(libc::ENOTDIR);
-        } else {
-            Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
-        }
-    }
-
-    async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
-        unsafe {
-            self.accessor
-                .open_file_at_range(&lookup.entry_range_info)
-                .await
-        }
-    }
-
-    fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
-        if is_dir_inode(lookup.inode) {
-            io_return!(libc::EISDIR);
-        }
-
-        match lookup.content_range.clone() {
-            Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
-            None => io_return!(libc::EBADF),
-        }
-    }
-
-    fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
-        let lookups = self.lookups.read().unwrap();
-        if let Some(lookup) = lookups.get(&inode) {
-            return Ok(lookup.get_ref(self));
-        }
-        drop(lookups);
-
-        let entry = Lookup::new(
-            inode,
-            parent,
-            entry.entry_range_info().clone(),
-            entry.content_range()?,
-        );
-        let reference = entry.get_ref(self);
-        entry.refs.store(1, Ordering::Release);
-
-        let mut lookups = self.lookups.write().unwrap();
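-        // Re-check: another task may have inserted this inode while no lock
-        // was held.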
-        if let Some(lookup) = lookups.get(&inode) {
-            return Ok(lookup.get_ref(self));
-        }
-
-        lookups.insert(inode, entry);
-        drop(lookups);
-        Ok(reference)
-    }
-
-    fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
-        let node = self.get_lookup(inode)?;
-        node.forget(count)?;
-        Ok(())
-    }
-
-    async fn lookup(
-        &'_ self,
-        parent: u64,
-        file_name: &OsStr,
-    ) -> Result<(EntryParam, LookupRef<'_>), Error> {
-        let dir = self.open_dir(parent).await?;
-
-        let entry = match { dir }.lookup(file_name).await? {
-            Some(entry) => entry,
-            None => io_return!(libc::ENOENT),
-        };
-
-        let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
-            // we don't know the file's end-offset, so we'll just allow the decoder to decode the
-            // entire rest of the archive until we figure out something better...
-            let entry = self.accessor.follow_hardlink(&entry).await?;
-
-            if let pxar::EntryKind::Hardlink(_) = entry.kind() {
-                // hardlinks must not point to other hardlinks...
-                io_return!(libc::ELOOP);
-            }
-
-            entry
-        } else {
-            entry
-        };
-
-        let response = to_entry(&entry)?;
-        let inode = response.inode;
-        Ok((response, self.make_lookup(parent, inode, &entry)?))
-    }
-
-    async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
-        let entry = unsafe {
-            self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
-        };
-        to_stat(inode, &entry)
-    }
-
-    async fn readdirplus(
-        &'_ self,
-        request: &mut requests::ReaddirPlus,
-    ) -> Result<Vec<LookupRef<'_>>, Error> {
-        let mut lookups = Vec::new();
-        let offset = usize::try_from(request.offset)
-            .map_err(|_| io_format_err!("directory offset out of range"))?;
-
-        let dir = self.open_dir(request.inode).await?;
-        let dir_lookup = self.get_lookup(request.inode)?;
-
-        let entry_count = dir.read_dir().count() as isize;
-
-        let mut next = offset as isize;
-        let mut iter = dir.read_dir().skip(offset);
-        while let Some(file) = iter.next().await {
-            next += 1;
-            let file = file?.decode_entry().await?;
-            let stat = to_stat(to_inode(&file), &file)?;
-            let name = file.file_name();
-            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
-                ReplyBufState::Ok => (),
-                ReplyBufState::Full => return Ok(lookups),
-            }
-            lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
-        }
-
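-        // Synthesize the "." and ".." entries after the regular directory
-        // entries.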
-        if next == entry_count {
-            next += 1;
-            let file = dir.lookup_self().await?;
-            let stat = to_stat(to_inode(&file), &file)?;
-            let name = OsStr::new(".");
-            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
-                ReplyBufState::Ok => (),
-                ReplyBufState::Full => return Ok(lookups),
-            }
-            lookups.push(LookupRef::clone(&dir_lookup));
-        }
-
-        if next == entry_count + 1 {
-            next += 1;
-            let lookup = self.get_lookup(dir_lookup.parent)?;
-            let parent_dir = self.open_dir(lookup.inode).await?;
-            let file = parent_dir.lookup_self().await?;
-            let stat = to_stat(to_inode(&file), &file)?;
-            let name = OsStr::new("..");
-            match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
-                ReplyBufState::Ok => (),
-                ReplyBufState::Full => return Ok(lookups),
-            }
-            lookups.push(lookup);
-        }
-
-        Ok(lookups)
-    }
-
-    async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
-        let file = self.get_lookup(inode)?;
-        let content = self.open_content(&file)?;
-        let mut buf = vec::undefined(len);
-        let got = content.read_at(&mut buf, offset).await?;
-        buf.truncate(got);
-        Ok(buf)
-    }
-
-    async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
-        let lookup = self.get_lookup(inode)?;
-        let file = self.open_entry(&lookup).await?;
-        match file.get_symlink() {
-            None => io_return!(libc::EINVAL),
-            Some(link) => Ok(link.to_owned()),
-        }
-    }
-
-    async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
-        let lookup = self.get_lookup(inode)?;
-        let metadata = self
-            .open_entry(&lookup)
-            .await?
-            .into_entry()
-            .into_metadata();
-
-        let mut xattrs = metadata.xattrs;
-
-        use pxar::format::XAttr;
-
-        if let Some(fcaps) = metadata.fcaps {
-            xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data));
-        }
-
-        // TODO: Special cases:
-        //     b"system.posix_acl_access
-        //     b"system.posix_acl_default
-        //
-        // For these we need to be able to create posix acl format entries, at that point we could
-        // just ditch libacl as well...
-
-        Ok(xattrs)
-    }
-
-    async fn listxattrs_into(
-        &self,
-        request: &mut requests::ListXAttr,
-    ) -> Result<ReplyBufState, Error> {
-        let xattrs = self.listxattrs(request.inode).await?;
-
-        for entry in xattrs {
-            match request.add_c_string(entry.name()) {
-                ReplyBufState::Ok => (),
-                ReplyBufState::Full => return Ok(ReplyBufState::Full),
-            }
-        }
-
-        Ok(ReplyBufState::Ok)
-    }
-
-    async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
-        // TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
-        // xattr for an entry...
-        let xattrs = self.listxattrs(inode).await?;
-        for entry in xattrs {
-            if entry.name().to_bytes() == xattr.as_bytes() {
-                return Ok(entry);
-            }
-        }
-        io_return!(libc::ENODATA);
-    }
-}
-
-#[inline]
-fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
-    to_entry_param(to_inode(&entry), &entry)
-}
-
-#[inline]
-fn to_inode(entry: &FileEntry) -> u64 {
-    if entry.is_dir() {
-        entry.entry_range_info().entry_range.end
-    } else {
-        entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
-    }
-}
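-
-// Example: a directory whose entry spans archive bytes 0x100..0x200 gets
-// inode 0x200, while a regular file starting at byte 0x100 gets inode
-// NON_DIRECTORY_INODE | 0x100 == 0x8000_0000_0000_0100.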
-
-fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
-    Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
-}
-
-fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
-    let nlink = if entry.is_dir() { 2 } else { 1 };
-
-    let metadata = entry.metadata();
-
-    let mut stat: libc::stat = unsafe { mem::zeroed() };
-    stat.st_ino = inode;
-    stat.st_nlink = nlink;
-    stat.st_mode = u32::try_from(metadata.stat.mode)
-        .map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
-    stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
-        .map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
-    stat.st_uid = metadata.stat.uid;
-    stat.st_gid = metadata.stat.gid;
-    stat.st_atime = metadata.stat.mtime.secs;
-    stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
-    stat.st_mtime = metadata.stat.mtime.secs;
-    stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
-    stat.st_ctime = metadata.stat.mtime.secs;
-    stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
-    Ok(stat)
-}
diff --git a/src/pxar/metadata.rs b/src/pxar/metadata.rs
deleted file mode 100644 (file)
index e399c63..0000000
+++ /dev/null
@@ -1,408 +0,0 @@
-use std::ffi::{CStr, CString};
-use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
-use std::path::Path;
-
-use anyhow::{bail, format_err, Error};
-use nix::errno::Errno;
-use nix::fcntl::OFlag;
-use nix::sys::stat::Mode;
-
-use pxar::Metadata;
-
-use proxmox::c_result;
-use proxmox::sys::error::SysError;
-use proxmox::tools::fd::RawFdNum;
-
-use pbs_tools::fs;
-
-use crate::pxar::tools::perms_from_metadata;
-use crate::pxar::Flags;
-use crate::tools::{acl, xattr};
-
-//
-// utility functions
-//
-
-fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
-    if err.is_errno(Errno::EOPNOTSUPP) {
-        Ok(())
-    } else {
-        Err(err)
-    }
-}
-
-fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
-    if err.is_errno(Errno::EOPNOTSUPP) {
-        *not_supp = true;
-        Ok(())
-    } else {
-        Err(err)
-    }
-}
-
-fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
-    // restore mtime
-    const UTIME_OMIT: i64 = (1 << 30) - 2;
-
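-    // utimensat(2) takes [atime, mtime]; atime is left untouched via
-    // UTIME_OMIT, only mtime is restored.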
-    [
-        libc::timespec {
-            tv_sec: 0,
-            tv_nsec: UTIME_OMIT,
-        },
-        libc::timespec {
-            tv_sec: mtime.secs,
-            tv_nsec: mtime.nanos as _,
-        },
-    ]
-}
-
-//
-// metadata application:
-//
-
-pub fn apply_at(
-    flags: Flags,
-    metadata: &Metadata,
-    parent: RawFd,
-    file_name: &CStr,
-    path_info: &Path,
-    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
-) -> Result<(), Error> {
-    let fd = proxmox::tools::fd::Fd::openat(
-        &unsafe { RawFdNum::from_raw_fd(parent) },
-        file_name,
-        OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
-        Mode::empty(),
-    )?;
-
-    apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
-}
-
-pub fn apply_initial_flags(
-    flags: Flags,
-    metadata: &Metadata,
-    fd: RawFd,
-    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
-) -> Result<(), Error> {
-    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
-    apply_chattr(
-        fd,
-        entry_flags.to_initial_chattr(),
-        flags.to_initial_chattr(),
-    )
-    .or_else(on_error)?;
-    Ok(())
-}
-
-pub fn apply(
-    flags: Flags,
-    metadata: &Metadata,
-    fd: RawFd,
-    path_info: &Path,
-    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
-) -> Result<(), Error> {
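-    // `fd` may be a mere O_PATH descriptor (see `apply_at()`), which most
-    // syscalls cannot operate on directly, so the file is addressed through
-    // its /proc/self/fd/ path instead.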
-    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
-
-    unsafe {
-        // UID and GID first, as this fails if we lose access anyway.
-        c_result!(libc::chown(
-            c_proc_path.as_ptr(),
-            metadata.stat.uid,
-            metadata.stat.gid
-        ))
-        .map(drop)
-        .or_else(allow_notsupp)
-        .map_err(|err| format_err!("failed to set ownership: {}", err))
-        .or_else(&mut *on_error)?;
-    }
-
-    let mut skip_xattrs = false;
-    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
-        .or_else(&mut *on_error)?;
-    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
-    apply_acls(flags, &c_proc_path, metadata, path_info)
-        .map_err(|err| format_err!("failed to apply acls: {}", err))
-        .or_else(&mut *on_error)?;
-    apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;
-
-    // Finally mode and time. We may lose access with mode, but changing the mode also
-    // affects times.
-    if !metadata.is_symlink() {
-        c_result!(unsafe {
-            libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
-        })
-        .map(drop)
-        .or_else(allow_notsupp)
-        .map_err(|err| format_err!("failed to change file mode: {}", err))
-        .or_else(&mut *on_error)?;
-    }
-
-    let res = c_result!(unsafe {
-        libc::utimensat(
-            libc::AT_FDCWD,
-            c_proc_path.as_ptr(),
-            timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
-            0,
-        )
-    });
-    match res {
-        Ok(_) => (),
-        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
-        Err(err) => {
-            on_error(format_err!(
-                "failed to restore mtime attribute on {:?}: {}",
-                path_info,
-                err
-            ))?;
-        }
-    }
-
-    if metadata.stat.flags != 0 {
-        apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
-    }
-
-    Ok(())
-}
-
-fn add_fcaps(
-    flags: Flags,
-    c_proc_path: *const libc::c_char,
-    metadata: &Metadata,
-    skip_xattrs: &mut bool,
-) -> Result<(), Error> {
-    if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
-        return Ok(());
-    }
-    let fcaps = match metadata.fcaps.as_ref() {
-        Some(fcaps) => fcaps,
-        None => return Ok(()),
-    };
-
-    c_result!(unsafe {
-        libc::setxattr(
-            c_proc_path,
-            xattr::xattr_name_fcaps().as_ptr(),
-            fcaps.data.as_ptr() as *const libc::c_void,
-            fcaps.data.len(),
-            0,
-        )
-    })
-    .map(drop)
-    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))
-    .map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;
-
-    Ok(())
-}
-
-fn apply_xattrs(
-    flags: Flags,
-    c_proc_path: *const libc::c_char,
-    metadata: &Metadata,
-    skip_xattrs: &mut bool,
-) -> Result<(), Error> {
-    if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
-        return Ok(());
-    }
-
-    for xattr in &metadata.xattrs {
-        if *skip_xattrs {
-            return Ok(());
-        }
-
-        if !xattr::is_valid_xattr_name(xattr.name()) {
-            eprintln!("skipping invalid xattr named {:?}", xattr.name());
-            continue;
-        }
-
-        c_result!(unsafe {
-            libc::setxattr(
-                c_proc_path,
-                xattr.name().as_ptr() as *const libc::c_char,
-                xattr.value().as_ptr() as *const libc::c_void,
-                xattr.value().len(),
-                0,
-            )
-        })
-        .map(drop)
-        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
-        .map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
-    }
-
-    Ok(())
-}
-
-fn apply_acls(
-    flags: Flags,
-    c_proc_path: &CStr,
-    metadata: &Metadata,
-    path_info: &Path,
-) -> Result<(), Error> {
-    if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
-        return Ok(());
-    }
-
-    let mut acl = acl::ACL::init(5)?;
-
-    // acl type access:
-    acl.add_entry_full(
-        acl::ACL_USER_OBJ,
-        None,
-        acl::mode_user_to_acl_permissions(metadata.stat.mode),
-    )?;
-
-    acl.add_entry_full(
-        acl::ACL_OTHER,
-        None,
-        acl::mode_other_to_acl_permissions(metadata.stat.mode),
-    )?;
-
-    match metadata.acl.group_obj.as_ref() {
-        Some(group_obj) => {
-            acl.add_entry_full(
-                acl::ACL_MASK,
-                None,
-                acl::mode_group_to_acl_permissions(metadata.stat.mode),
-            )?;
-            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
-        }
-        None => {
-            let mode = acl::mode_group_to_acl_permissions(metadata.stat.mode);
-
-            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;
-
-            if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
-                eprintln!(
-                    "Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
-                    path_info,
-                );
-                acl.add_entry_full(acl::ACL_MASK, None, mode)?;
-            }
-        }
-    }
-
-    for user in &metadata.acl.users {
-        acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
-    }
-
-    for group in &metadata.acl.groups {
-        acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
-    }
-
-    if !acl.is_valid() {
-        bail!("Error while restoring ACL - ACL invalid");
-    }
-
-    acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
-    drop(acl);
-
-    // acl type default:
-    if let Some(default) = metadata.acl.default.as_ref() {
-        let mut acl = acl::ACL::init(5)?;
-
-        acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
-
-        acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
-
-        acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
-
-        if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
-            acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
-        }
-
-        for user in &metadata.acl.default_users {
-            acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
-        }
-
-        for group in &metadata.acl.default_groups {
-            acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
-        }
-
-        if !acl.is_valid() {
-            bail!("Error while restoring ACL - ACL invalid");
-        }
-
-        acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
-    }
-
-    Ok(())
-}
-
-fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
-    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
-        return Ok(());
-    }
-
-    let projid = match metadata.quota_project_id {
-        Some(projid) => projid,
-        None => return Ok(()),
-    };
-
-    let mut fsxattr = fs::FSXAttr::default();
-    unsafe {
-        fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
-            format_err!(
-                "error while getting fsxattr to restore quota project id - {}",
-                err
-            )
-        })?;
-
-        fsxattr.fsx_projid = projid.projid as u32;
-
-        fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
-            format_err!(
-                "error while setting fsxattr to restore quota project id - {}",
-                err
-            )
-        })?;
-    }
-
-    Ok(())
-}
-
-pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
-    matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
-}
-
-fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
-    if chattr == 0 {
-        return Ok(());
-    }
-
-    let mut fattr: libc::c_long = 0;
-    match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
-        Ok(_) => (),
-        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
-            return Ok(());
-        }
-        Err(err) => bail!("failed to read file attributes: {}", err),
-    }
-
-    let attr = (chattr & mask) | (fattr & !mask);
-
-    if attr == fattr {
-        return Ok(());
-    }
-
-    match unsafe { fs::write_attr_fd(fd, &attr) } {
-        Ok(_) => Ok(()),
-        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
-        Err(err) => bail!("failed to set file attributes: {}", err),
-    }
-}
-
-fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
-    let entry_flags = Flags::from_bits_truncate(entry_flags);
-
-    apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;
-
-    let fatattr = (flags & entry_flags).to_fat_attr();
-    if fatattr != 0 {
-        match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
-            Ok(_) => (),
-            Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
-            Err(err) => bail!("failed to set file FAT attributes: {}", err),
-        }
-    }
-
-    Ok(())
-}
diff --git a/src/pxar/mod.rs b/src/pxar/mod.rs
deleted file mode 100644 (file)
index 6ad913d..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-//! *pxar* Implementation (proxmox file archive format)
-//!
-//! This code implements a slightly modified version of the *catar*
-//! format used in the [casync](https://github.com/systemd/casync)
-//! toolkit (we are not 100% binary compatible). It is a file archive
-//! format designed by Lennart Poettering specifically for
-//! efficient deduplication.
-
-//! Every archive contains items in the following order:
-//!  * `ENTRY`              -- containing general stat() data and related bits
-//!   * `USER`              -- user name as text, if enabled
-//!   * `GROUP`             -- group name as text, if enabled
-//!   * `XATTR`             -- one extended attribute
-//!   * ...                 -- more of these when there are multiple defined
-//!   * `ACL_USER`          -- one `USER ACL` entry
-//!   * ...                 -- more of these when there are multiple defined
-//!   * `ACL_GROUP`         -- one `GROUP ACL` entry
-//!   * ...                 -- more of these when there are multiple defined
-//!   * `ACL_GROUP_OBJ`     -- The `ACL_GROUP_OBJ`
-//!   * `ACL_DEFAULT`       -- The various default ACL fields if there's one defined
-//!   * `ACL_DEFAULT_USER`  -- one USER ACL entry
-//!   * ...                 -- more of these when multiple are defined
-//!   * `ACL_DEFAULT_GROUP` -- one GROUP ACL entry
-//!   * ...                 -- more of these when multiple are defined
-//!   * `FCAPS`             -- file capability in Linux disk format
-//!   * `QUOTA_PROJECT_ID`  -- the ext4/xfs quota project ID
-//!   * `PAYLOAD`           -- file contents, if it is one
-//!   * `SYMLINK`           -- symlink target, if it is one
-//!   * `DEVICE`            -- device major/minor, if it is a block/char device
-//!
-//!   If we are serializing a directory, then this is followed by:
-//!
-//!   * `FILENAME`          -- name of the first directory entry (strictly ordered!)
-//!   * `<archive>`         -- serialization of the first directory entry's metadata and contents,
-//!  following the exact same archive format
-//!   * `FILENAME`          -- name of the second directory entry (strictly ordered!)
-//!   * `<archive>`         -- serialization of the second directory entry
-//!   * ...
-//!   * `GOODBYE`           -- lookup table at the end of a list of directory entries
-
-//!
-//! The original format has no way to deal with hardlinks, so we
-//! extended the format by a special `HARDLINK` tag, which can replace
-//! an `ENTRY` tag. The `HARDLINK` tag contains a 64-bit offset which
-//! points to the linked `ENTRY` inside the archive, followed by the
-//! full path name of that `ENTRY`. `HARDLINK`s may not have further data
-//! (user, group, acl, ...) because this is already defined by the
-//! linked `ENTRY`.
-
-pub(crate) mod create;
-pub(crate) mod dir_stack;
-pub(crate) mod extract;
-pub(crate) mod metadata;
-pub mod fuse;
-pub(crate) mod tools;
-
-mod flags;
-pub use flags::Flags;
-
-pub use create::{create_archive, PxarCreateOptions};
-pub use extract::{
-    create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
-    PxarExtractOptions,
-};
-
-/// The format requires to build sorted directory lookup tables in
-/// memory, so we restrict the number of allowed entries to limit
-/// maximum memory usage.
-pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
-
-pub use tools::{format_multi_line_entry, format_single_line_entry};
diff --git a/src/pxar/tools.rs b/src/pxar/tools.rs
deleted file mode 100644 (file)
index 3fd0fc0..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-//! Some common methods used within the pxar code.
-
-use std::convert::TryFrom;
-use std::ffi::OsStr;
-use std::os::unix::ffi::OsStrExt;
-use std::path::Path;
-
-use anyhow::{bail, format_err, Error};
-use nix::sys::stat::Mode;
-
-use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
-
-/// Get the file permissions as `nix::Mode`
-pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
-    let mode = meta.stat.get_permission_bits();
-    u32::try_from(mode)
-        .map_err(drop)
-        .and_then(|mode| Mode::from_bits(mode).ok_or(()))
-        .map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
-}
-
-/// Make sure path is relative and not '.' or '..'.
-pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
-    assert_relative_path_do(Path::new(path))
-}
-
-/// Make sure path is a single component and not '.' or '..'.
-pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
-    assert_single_path_component_do(Path::new(path))
-}
-
-fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
-    if !path.is_relative() {
-        bail!("bad absolute file name in archive: {:?}", path);
-    }
-
-    Ok(())
-}
-
-fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
-    assert_relative_path_do(path)?;
-
-    let mut components = path.components();
-    match components.next() {
-        Some(std::path::Component::Normal(_)) => (),
-        _ => bail!("invalid path component in archive: {:?}", path),
-    }
-
-    if components.next().is_some() {
-        bail!(
-            "invalid path with multiple components in archive: {:?}",
-            path
-        );
-    }
-
-    Ok(())
-}
-
-#[rustfmt::skip]
-fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
-    [
-        if 0 != c & 4 { b'r' } else { b'-' },
-        if 0 != c & 2 { b'w' } else { b'-' },
-        match (c & 1, special) {
-            (0, false) => b'-',
-            (0, true) => special_no_x,
-            (_, false) => b'x',
-            (_, true) => special_x,
-        }
-    ]
-}
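-
-// e.g. symbolic_mode(0o7, false, b's', b'S') == *b"rwx"
-//      symbolic_mode(0o5, true,  b't', b'T') == *b"r-t"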
-
-fn mode_string(entry: &Entry) -> String {
-    // https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
-    // additionally we use:
-    //     file type capital 'L' hard links
-    //     a second '+' after the mode to show non-acl xattr presence
-    //
-    // Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
-
-    let meta = entry.metadata();
-    let mode = meta.stat.mode;
-    let type_char = if entry.is_hardlink() {
-        'L'
-    } else {
-        match mode & mode::IFMT {
-            mode::IFREG => '-',
-            mode::IFBLK => 'b',
-            mode::IFCHR => 'c',
-            mode::IFDIR => 'd',
-            mode::IFLNK => 'l',
-            mode::IFIFO => 'p',
-            mode::IFSOCK => 's',
-            _ => '?',
-        }
-    };
-
-    let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
-    let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
-    let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');
-
-    let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
-
-    let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
-
-    format!(
-        "{}{}{}{}{}{}",
-        type_char,
-        unsafe { std::str::from_utf8_unchecked(&fmt_u) },
-        unsafe { std::str::from_utf8_unchecked(&fmt_g) },
-        unsafe { std::str::from_utf8_unchecked(&fmt_o) },
-        has_acls,
-        has_xattrs,
-    )
-}
-
-fn format_mtime(mtime: &StatxTimestamp) -> String {
-    if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
-        return s;
-    }
-    format!("{}.{}", mtime.secs, mtime.nanos)
-}
-
-pub fn format_single_line_entry(entry: &Entry) -> String {
-    let mode_string = mode_string(entry);
-
-    let meta = entry.metadata();
-
-    let (size, link) = match entry.kind() {
-        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
-        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
-        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
-        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
-        _ => ("0".to_string(), String::new()),
-    };
-
-    format!(
-        "{} {:<13} {} {:>8} {:?}{}",
-        mode_string,
-        format!("{}/{}", meta.stat.uid, meta.stat.gid),
-        format_mtime(&meta.stat.mtime),
-        size,
-        entry.path(),
-        link,
-    )
-}
-
-pub fn format_multi_line_entry(entry: &Entry) -> String {
-    let mode_string = mode_string(entry);
-
-    let meta = entry.metadata();
-
-    let (size, link, type_name) = match entry.kind() {
-        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
-        EntryKind::Symlink(link) => (
-            "0".to_string(),
-            format!(" -> {:?}", link.as_os_str()),
-            "symlink",
-        ),
-        EntryKind::Hardlink(link) => (
-            "0".to_string(),
-            format!(" -> {:?}", link.as_os_str()),
-            "symlink",
-        ),
-        EntryKind::Device(dev) => (
-            format!("{},{}", dev.major, dev.minor),
-            String::new(),
-            if meta.stat.is_chardev() {
-                "characters pecial file"
-            } else if meta.stat.is_blockdev() {
-                "block special file"
-            } else {
-                "device"
-            },
-        ),
-        EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
-        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
-        EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
-        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
-    };
-
-    let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
-        Ok(name) => std::borrow::Cow::Borrowed(name),
-        Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
-    };
-
-    format!(
-        "  File: {}{}\n  \
-           Size: {:<13} Type: {}\n\
-         Access: ({:o}/{})  Uid: {:<5} Gid: {:<5}\n\
-         Modify: {}\n",
-        file_name,
-        link,
-        size,
-        type_name,
-        meta.file_mode(),
-        mode_string,
-        meta.stat.uid,
-        meta.stat.gid,
-        format_mtime(&meta.stat.mtime),
-    )
-}
index 9b454341778ab76bf1b1123199d2d0ad81726737..5214a218f1524cd980bd521fbf88cdd0118007be 100644 (file)
@@ -22,10 +22,10 @@ use pbs_datastore::manifest::{
     CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, FileInfo, archive_type
 };
 use pbs_tools::sha::sha256;
+use pbs_client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader};
 
 use crate::{
     backup::DataStore,
-    client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader},
     server::WorkerTask,
     tools::ParallelHandler,
 };
index 3a359ad0608216741f16b6353caa79215a900b2d..166804849891ba4100b9d308a99bb05d6b1163fa 100644 (file)
@@ -30,6 +30,8 @@ use proxmox::api::{
 };
 use proxmox::http_err;
 
+use pbs_tools::compression::{DeflateEncoder, Level};
+
 use super::auth::AuthError;
 use super::environment::RestEnvironment;
 use super::formatter::*;
@@ -39,7 +41,7 @@ use crate::api2::types::{Authid, Userid};
 use crate::auth_helpers::*;
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::tools;
-use crate::tools::compression::{CompressionMethod, DeflateEncoder, Level};
+use crate::tools::compression::CompressionMethod;
 use crate::tools::AsyncReaderStream;
 use crate::tools::FileLogger;
 
diff --git a/src/tools/acl.rs b/src/tools/acl.rs
deleted file mode 100644 (file)
index 80e2781..0000000
+++ /dev/null
@@ -1,334 +0,0 @@
-//! Implementation of the calls to handle POSIX access control lists
-
-// see C header file <sys/acl.h> for reference
-extern crate libc;
-
-use std::ffi::CString;
-use std::marker::PhantomData;
-use std::os::unix::ffi::OsStrExt;
-use std::os::unix::io::RawFd;
-use std::path::Path;
-use std::ptr;
-
-use libc::{c_char, c_int, c_uint, c_void};
-use nix::errno::Errno;
-use nix::NixPath;
-
-// from: acl/include/acl.h
-pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
-// acl_perm_t values
-pub type ACLPerm = c_uint;
-pub const ACL_READ: ACLPerm     = 0x04;
-pub const ACL_WRITE: ACLPerm    = 0x02;
-pub const ACL_EXECUTE: ACLPerm  = 0x01;
-
-// acl_tag_t values
-pub type ACLTag = c_int;
-pub const ACL_UNDEFINED_TAG: ACLTag = 0x00;
-pub const ACL_USER_OBJ: ACLTag      = 0x01;
-pub const ACL_USER: ACLTag          = 0x02;
-pub const ACL_GROUP_OBJ: ACLTag     = 0x04;
-pub const ACL_GROUP: ACLTag         = 0x08;
-pub const ACL_MASK: ACLTag          = 0x10;
-pub const ACL_OTHER: ACLTag         = 0x20;
-
-// acl_type_t values
-pub type ACLType = c_uint;
-pub const ACL_TYPE_ACCESS: ACLType  = 0x8000;
-pub const ACL_TYPE_DEFAULT: ACLType = 0x4000;
-
-// acl entry constants
-pub const ACL_FIRST_ENTRY: c_int = 0;
-pub const ACL_NEXT_ENTRY: c_int  = 1;
-
-// acl to extended attribute names constants
-// from: acl/include/acl_ea.h
-pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
-pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
-pub const ACL_EA_VERSION: u32 = 0x0002;
-
-#[link(name = "acl")]
-extern "C" {
-    fn acl_get_file(path: *const c_char, acl_type: ACLType) -> *mut c_void;
-    fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
-    fn acl_get_fd(fd: RawFd) -> *mut c_void;
-    fn acl_get_entry(acl: *const c_void, entry_id: c_int, entry: *mut *mut c_void) -> c_int;
-    fn acl_create_entry(acl: *mut *mut c_void, entry: *mut *mut c_void) -> c_int;
-    fn acl_get_tag_type(entry: *mut c_void, tag_type: *mut ACLTag) -> c_int;
-    fn acl_set_tag_type(entry: *mut c_void, tag_type: ACLTag) -> c_int;
-    fn acl_get_permset(entry: *mut c_void, permset: *mut *mut c_void) -> c_int;
-    fn acl_clear_perms(permset: *mut c_void) -> c_int;
-    fn acl_get_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
-    fn acl_add_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
-    fn acl_get_qualifier(entry: *mut c_void) -> *mut c_void;
-    fn acl_set_qualifier(entry: *mut c_void, qualifier: *const c_void) -> c_int;
-    fn acl_init(count: c_int) -> *mut c_void;
-    fn acl_valid(ptr: *const c_void) -> c_int;
-    fn acl_free(ptr: *mut c_void) -> c_int;
-}
-
-#[derive(Debug)]
-pub struct ACL {
-    ptr: *mut c_void,
-}
-
-impl Drop for ACL {
-    fn drop(&mut self) {
-        let ret = unsafe { acl_free(self.ptr) };
-        if ret != 0 {
-            panic!("invalid pointer encountered while dropping ACL - {}", Errno::last());
-        }
-    }
-}
-
-impl ACL {
-    pub fn init(count: usize) -> Result<ACL, nix::errno::Errno> {
-        let ptr = unsafe { acl_init(count as c_int) };
-        if ptr.is_null() {
-            return Err(Errno::last());
-        }
-
-        Ok(ACL { ptr })
-    }
-
-    pub fn get_file<P: AsRef<Path>>(path: P, acl_type: ACLType) -> Result<ACL, nix::errno::Errno> {
-        let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
-        let ptr = unsafe { acl_get_file(path_cstr.as_ptr(), acl_type) };
-        if ptr.is_null() {
-            return Err(Errno::last());
-        }
-        Ok(ACL { ptr })
-    }
-
-    pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
-        path.with_nix_path(|path| {
-            Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
-        })?
-        .map(drop)
-    }
-
-    pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
-        let ptr = unsafe { acl_get_fd(fd) };
-        if ptr.is_null() {
-            return Err(Errno::last());
-        }
-
-        Ok(ACL { ptr })
-    }
-
-    pub fn create_entry(&mut self) -> Result<ACLEntry, nix::errno::Errno> {
-        let mut ptr = ptr::null_mut() as *mut c_void;
-        let res = unsafe { acl_create_entry(&mut self.ptr, &mut ptr) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        Ok(ACLEntry {
-            ptr,
-            _phantom: PhantomData,
-        })
-    }
-
-    pub fn is_valid(&self) -> bool {
-        unsafe { acl_valid(self.ptr) == 0 }
-    }
-
-    pub fn entries(self) -> ACLEntriesIterator {
-        ACLEntriesIterator {
-            acl: self,
-            current: ACL_FIRST_ENTRY,
-        }
-    }
-
-    pub fn add_entry_full(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64)
-        -> Result<(), nix::errno::Errno>
-    {
-        let mut entry = self.create_entry()?;
-        entry.set_tag_type(tag)?;
-        if let Some(qualifier) = qualifier {
-            entry.set_qualifier(qualifier)?;
-        }
-        entry.set_permissions(permissions)?;
-
-        Ok(())
-    }
-}
-
-#[derive(Debug)]
-pub struct ACLEntry<'a> {
-    ptr: *mut c_void,
-    _phantom: PhantomData<&'a mut ()>,
-}
-
-impl<'a> ACLEntry<'a> {
-    pub fn get_tag_type(&self) -> Result<ACLTag, nix::errno::Errno> {
-        let mut tag = ACL_UNDEFINED_TAG;
-        let res = unsafe { acl_get_tag_type(self.ptr, &mut tag as *mut ACLTag) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        Ok(tag)
-    }
-
-    pub fn set_tag_type(&mut self, tag: ACLTag) -> Result<(), nix::errno::Errno> {
-        let res = unsafe { acl_set_tag_type(self.ptr, tag) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        Ok(())
-    }
-
-    pub fn get_permissions(&self) -> Result<u64, nix::errno::Errno> {
-        let mut permissions = 0;
-        let mut permset = ptr::null_mut() as *mut c_void;
-        let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
-            res = unsafe { acl_get_perm(permset, perm) };
-            if res < 0 {
-                return Err(Errno::last());
-            }
-
-            if res == 1 {
-                permissions |= perm as u64;
-            }
-        }
-
-        Ok(permissions)
-    }
-
-    pub fn set_permissions(&mut self, permissions: u64) -> Result<u64, nix::errno::Errno> {
-        let mut permset = ptr::null_mut() as *mut c_void;
-        let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        res = unsafe { acl_clear_perms(permset) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
-            if permissions & perm as u64 == perm as u64 {
-                res = unsafe { acl_add_perm(permset, perm) };
-                if res < 0 {
-                    return Err(Errno::last());
-                }
-            }
-        }
-
-        Ok(permissions)
-    }
-
-    pub fn get_qualifier(&self) -> Result<u64, nix::errno::Errno> {
-        let qualifier = unsafe { acl_get_qualifier(self.ptr) };
-        if qualifier.is_null() {
-            return Err(Errno::last());
-        }
-        let result = unsafe { *(qualifier as *const u32) as u64 };
-        let ret = unsafe { acl_free(qualifier) };
-        if ret != 0 {
-            panic!("invalid pointer encountered while dropping ACL qualifier - {}", Errno::last());
-        }
-
-        Ok(result)
-    }
-
-    pub fn set_qualifier(&mut self, qualifier: u64) -> Result<(), nix::errno::Errno> {
-        let val = qualifier as u32;
-        let val_ptr: *const u32 = &val;
-        let res = unsafe { acl_set_qualifier(self.ptr, val_ptr as *const c_void) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        Ok(())
-    }
-}
-
-#[derive(Debug)]
-pub struct ACLEntriesIterator {
-    acl: ACL,
-    current: c_int,
-}
-
-impl<'a> Iterator for &'a mut ACLEntriesIterator {
-    type Item = ACLEntry<'a>;
-
-    fn next(&mut self) -> Option<Self::Item> {
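-        // The first call asks for ACL_FIRST_ENTRY; every later call advances
-        // with ACL_NEXT_ENTRY, following the acl_get_entry(3) protocol.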
-        let mut entry_ptr = ptr::null_mut();
-        let res = unsafe { acl_get_entry(self.acl.ptr, self.current, &mut entry_ptr) };
-        self.current = ACL_NEXT_ENTRY;
-        if res == 1 {
-            return Some(ACLEntry { ptr: entry_ptr, _phantom: PhantomData });
-        }
-
-        None
-    }
-}
-
-/// Helper to transform a `PxarEntry`'s user mode to ACL permissions.
-pub fn mode_user_to_acl_permissions(mode: u64) -> u64 {
-    (mode >> 6) & 7
-}
-
-/// Helper to transform a `PxarEntry`'s group mode to ACL permissions.
-pub fn mode_group_to_acl_permissions(mode: u64) -> u64 {
-    (mode >> 3) & 7
-}
-
-/// Helper to transform a `PxarEntry`'s other mode to ACL permissions.
-pub fn mode_other_to_acl_permissions(mode: u64) -> u64 {
-    mode & 7
-}
-
-/// Buffer to compose ACLs as extended attribute.
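-///
-/// A minimal sketch of composing the `system.posix_acl_access` value for a
-/// plain `rw-r--r--` file, using the helpers from this module:
-///
-/// ```ignore
-/// let mut buffer = ACLXAttrBuffer::new(ACL_EA_VERSION);
-/// buffer.add_entry(ACL_USER_OBJ, None, mode_user_to_acl_permissions(0o644));
-/// buffer.add_entry(ACL_GROUP_OBJ, None, mode_group_to_acl_permissions(0o644));
-/// buffer.add_entry(ACL_OTHER, None, mode_other_to_acl_permissions(0o644));
-/// assert!(buffer.len() > 4); // version header plus three entries
-/// ```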
-pub struct ACLXAttrBuffer {
-    buffer: Vec<u8>,
-}
-
-impl ACLXAttrBuffer {
-    /// Create a new buffer to write ACLs as extended attribute.
-    ///
-    /// `version` defines the ACL_EA_VERSION found in acl/include/acl_ea.h
-    pub fn new(version: u32) -> Self {
-        let mut buffer = Vec::new();
-        buffer.extend_from_slice(&version.to_le_bytes());
-        Self { buffer }
-    }
-
-    /// Add ACL entry to buffer.
-    pub fn add_entry(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64) {
-        self.buffer.extend_from_slice(&(tag as u16).to_le_bytes());
-        self.buffer.extend_from_slice(&(permissions as u16).to_le_bytes());
-        match qualifier {
-            Some(qualifier) => self.buffer.extend_from_slice(&(qualifier as u32).to_le_bytes()),
-            None => self.buffer.extend_from_slice(&ACL_UNDEFINED_ID.to_le_bytes()),
-        }
-    }
-
-    /// Length of the buffer in bytes.
-    pub fn len(&self) -> usize {
-        self.buffer.len()
-    }
-
-    /// The buffer always contains at least the version, so it is never empty.
-    pub const fn is_empty(&self) -> bool { false }
-
-    /// Borrow raw buffer as mut slice.
-    pub fn as_mut_slice(&mut self) -> &mut [u8] {
-        self.buffer.as_mut_slice()
-    }
-}
index b27d7e70ef9977ad2d5c6092e24727f15360f869..19626efc80155d8bf947c1181d65773a33784cde 100644 (file)
@@ -1,19 +1,5 @@
-use std::io;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
 use anyhow::{bail, Error};
-use bytes::Bytes;
-use flate2::{Compress, Compression, FlushCompress};
-use futures::ready;
-use futures::stream::Stream;
 use hyper::header;
-use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
-
-use proxmox::io_format_err;
-use proxmox::tools::byte_buffer::ByteBuffer;
-
-const BUFFER_SIZE: usize = 8192;
 
 /// Possible Compression Methods, order determines preference (later is preferred)
 #[derive(Eq, Ord, PartialEq, PartialOrd, Debug)]
@@ -51,182 +37,3 @@ impl std::str::FromStr for CompressionMethod {
         }
     }
 }
-
-pub enum Level {
-    Fastest,
-    Best,
-    Default,
-    Precise(u32),
-}
-
-#[derive(Eq, PartialEq)]
-enum EncoderState {
-    Reading,
-    Writing,
-    Flushing,
-    Finished,
-}
-
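-/// Deflate compressor that can write into an inner writer, adapt a byte
-/// stream, or collect into a plain `Vec<u8>`.
-///
-/// A minimal sketch compressing a file into memory (assumes a tokio runtime;
-/// `input.bin` is a hypothetical path):
-///
-/// ```ignore
-/// let mut enc = DeflateEncoder::new(Vec::new());
-/// let mut file = tokio::fs::File::open("input.bin").await?;
-/// enc.compress_vec(&mut file, 4096).await?;
-/// let compressed = enc.into_inner();
-/// ```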
-pub struct DeflateEncoder<T> {
-    inner: T,
-    compressor: Compress,
-    buffer: ByteBuffer,
-    input_buffer: Bytes,
-    state: EncoderState,
-}
-
-impl<T> DeflateEncoder<T> {
-    pub fn new(inner: T) -> Self {
-        Self::with_quality(inner, Level::Default)
-    }
-
-    pub fn with_quality(inner: T, level: Level) -> Self {
-        let level = match level {
-            Level::Fastest => Compression::fast(),
-            Level::Best => Compression::best(),
-            Level::Default => Compression::new(3),
-            Level::Precise(val) => Compression::new(val),
-        };
-
-        Self {
-            inner,
-            compressor: Compress::new(level, false),
-            buffer: ByteBuffer::with_capacity(BUFFER_SIZE),
-            input_buffer: Bytes::new(),
-            state: EncoderState::Reading,
-        }
-    }
-
-    pub fn total_in(&self) -> u64 {
-        self.compressor.total_in()
-    }
-
-    pub fn total_out(&self) -> u64 {
-        self.compressor.total_out()
-    }
-
-    pub fn into_inner(self) -> T {
-        self.inner
-    }
-
-    fn encode(
-        &mut self,
-        inbuf: &[u8],
-        flush: FlushCompress,
-    ) -> Result<(usize, flate2::Status), io::Error> {
-        let old_in = self.compressor.total_in();
-        let old_out = self.compressor.total_out();
-        let res = self
-            .compressor
-            .compress(&inbuf[..], self.buffer.get_free_mut_slice(), flush)?;
-        let new_in = (self.compressor.total_in() - old_in) as usize;
-        let new_out = (self.compressor.total_out() - old_out) as usize;
-        self.buffer.add_size(new_out);
-
-        Ok((new_in, res))
-    }
-}
-
-impl DeflateEncoder<Vec<u8>> {
-    // assume small files
-    pub async fn compress_vec<R>(&mut self, reader: &mut R, size_hint: usize) -> Result<(), Error>
-    where
-        R: AsyncRead + Unpin,
-    {
-        let mut buffer = Vec::with_capacity(size_hint);
-        reader.read_to_end(&mut buffer).await?;
-        self.inner.reserve(size_hint); // should be enough, since we expect smaller files
-        self.compressor.compress_vec(&buffer[..], &mut self.inner, FlushCompress::Finish)?;
-        Ok(())
-    }
-}
-
-impl<T: AsyncWrite + Unpin> DeflateEncoder<T> {
-    pub async fn compress<R>(&mut self, reader: &mut R) -> Result<(), Error>
-    where
-        R: AsyncRead + Unpin,
-    {
-        let mut buffer = ByteBuffer::with_capacity(BUFFER_SIZE);
-        let mut eof = false;
-        loop {
-            if !eof && !buffer.is_full() {
-                let read = buffer.read_from_async(reader).await?;
-                if read == 0 {
-                    eof = true;
-                }
-            }
-            let (read, _res) = self.encode(&buffer[..], FlushCompress::None)?;
-            buffer.consume(read);
-
-            self.inner.write_all(&self.buffer[..]).await?;
-            self.buffer.clear();
-
-            if buffer.is_empty() && eof {
-                break;
-            }
-        }
-
-        loop {
-            let (_read, res) = self.encode(&[][..], FlushCompress::Finish)?;
-            self.inner.write_all(&self.buffer[..]).await?;
-            self.buffer.clear();
-            if res == flate2::Status::StreamEnd {
-                break;
-            }
-        }
-
-        Ok(())
-    }
-}
-
-impl<T, O> Stream for DeflateEncoder<T>
-where
-    T: Stream<Item = Result<O, io::Error>> + Unpin,
-    O: Into<Bytes>
-{
-    type Item = Result<Bytes, io::Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        let this = self.get_mut();
-
-        loop {
-            match this.state {
-                EncoderState::Reading => {
-                    if let Some(res) = ready!(Pin::new(&mut this.inner).poll_next(cx)) {
-                        let buf = res?;
-                        this.input_buffer = buf.into();
-                        this.state = EncoderState::Writing;
-                    } else {
-                        this.state = EncoderState::Flushing;
-                    }
-                }
-                EncoderState::Writing => {
-                    if this.input_buffer.is_empty() {
-                        return Poll::Ready(Some(Err(io_format_err!("empty input during write"))));
-                    }
-                    let mut buf = this.input_buffer.split_off(0);
-                    let (read, res) = this.encode(&buf[..], FlushCompress::None)?;
-                    this.input_buffer = buf.split_off(read);
-                    if this.input_buffer.is_empty() {
-                        this.state = EncoderState::Reading;
-                    }
-                    if this.buffer.is_full() || res == flate2::Status::BufError {
-                        let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
-                        return Poll::Ready(Some(Ok(bytes.into())));
-                    }
-                }
-                EncoderState::Flushing => {
-                    let (_read, res) = this.encode(&[][..], FlushCompress::Finish)?;
-                    if !this.buffer.is_empty() {
-                        let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
-                        return Poll::Ready(Some(Ok(bytes.into())));
-                    }
-                    if res == flate2::Status::StreamEnd {
-                        this.state = EncoderState::Finished;
-                    }
-                }
-                EncoderState::Finished => return Poll::Ready(None),
-            }
-        }
-    }
-}
index 658c70142411dc99c4e19f6aef8148cdf93558b9..d13c4d45b2a5942380b788b8762d96c45895d395 100644 (file)
@@ -2,8 +2,6 @@
 //!
 //! This is a collection of small and useful tools.
 use std::any::Any;
-use std::collections::HashMap;
-use std::hash::BuildHasher;
 use std::fs::File;
 use std::io::{self, BufRead};
 use std::os::unix::io::RawFd;
@@ -29,7 +27,6 @@ pub use pbs_tools::process_locker::{
     ProcessLocker, ProcessLockExclusiveGuard, ProcessLockSharedGuard
 };
 
-pub mod acl;
 pub mod apt;
 pub mod async_io;
 pub mod compression;
@@ -51,8 +48,6 @@ pub mod statistics;
 pub mod subscription;
 pub mod systemd;
 pub mod ticket;
-pub mod xattr;
-pub mod zip;
 pub mod sgutils2;
 pub mod paperkey;
 
@@ -69,6 +64,7 @@ mod file_logger;
 pub use file_logger::{FileLogger, FileLogOptions};
 
 pub use pbs_tools::broadcast_future::{BroadcastData, BroadcastFuture};
+pub use pbs_tools::ops::ControlFlow;
 
 /// The `BufferedRead` trait provides a single function
 /// `buffered_read`. It returns a reference to an internal buffer. The
@@ -122,65 +118,6 @@ pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [
     }
 }
 
-pub fn complete_file_name<S>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String>
-where
-    S: BuildHasher,
-{
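-    // Shell-completion helper: collect paths that complete `arg`, appending a
-    // trailing slash to directories so completion can descend into them.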
-    let mut result = vec![];
-
-    use nix::fcntl::AtFlags;
-    use nix::fcntl::OFlag;
-    use nix::sys::stat::Mode;
-
-    let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });
-
-    let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
-        Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
-        Err(_) => false,
-    };
-
-    if !is_dir {
-        if let Some(parent) = dirname.parent() {
-            dirname = parent.to_owned();
-        }
-    }
-
-    let mut dir =
-        match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
-            Ok(d) => d,
-            Err(_) => return result,
-        };
-
-    for item in dir.iter() {
-        if let Ok(entry) = item {
-            if let Ok(name) = entry.file_name().to_str() {
-                if name == "." || name == ".." {
-                    continue;
-                }
-                let mut newpath = dirname.clone();
-                newpath.push(name);
-
-                if let Ok(stat) =
-                    nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
-                {
-                    if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
-                        newpath.push("");
-                        if let Some(newpath) = newpath.to_str() {
-                            result.push(newpath.to_owned());
-                        }
-                        continue;
-                    }
-                }
-                if let Some(newpath) = newpath.to_str() {
-                    result.push(newpath.to_owned());
-                }
-            }
-        }
-    }
-
-    result
-}
-
 /// Shortcut for md5 sums.
 pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
     hash(MessageDigest::md5(), data).map_err(Error::from)
@@ -373,17 +310,6 @@ pub fn setup_safe_path_env() {
     }
 }
 
-pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
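-    // Trim leading and trailing ASCII whitespace, e.g. b"  foo \n" -> b"foo".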
-    let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
-        Some(n) => &line[n..],
-        None => return &[],
-    };
-    match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
-        Some(n) => &line[..(line.len() - n)],
-        None => &[],
-    }
-}
-
 /// Create the base run-directory.
 ///
 /// This exists to fixate the permissions for the run *base* directory while allowing intermediate
@@ -396,14 +322,3 @@ pub fn create_run_dir() -> Result<(), Error> {
     let _: bool = create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), None, Some(opts))?;
     Ok(())
 }
-
-/// Modeled after the nightly `std::ops::ControlFlow`.
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ControlFlow<B, C = ()> {
-    Continue(C),
-    Break(B),
-}
-
-impl<B> ControlFlow<B> {
-    pub const CONTINUE: ControlFlow<B, ()> = ControlFlow::Continue(());
-}
diff --git a/src/tools/xattr.rs b/src/tools/xattr.rs
deleted file mode 100644 (file)
index 500af32..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-//! Wrapper functions for the libc xattr calls
-
-use std::ffi::CStr;
-use std::os::unix::io::RawFd;
-
-use nix::errno::Errno;
-
-use proxmox::c_str;
-use proxmox::tools::vec;
-
-/// `"security.capability"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_name_fcaps() -> &'static CStr {
-    c_str!("security.capability")
-}
-
-/// `"system.posix_acl_access"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_acl_access() -> &'static CStr {
-    c_str!("system.posix_acl_access")
-}
-
-/// `"system.posix_acl_default"` as a CStr to avoid typos.
-///
-/// This cannot be `const` until `const_cstr_unchecked` is stable.
-#[inline]
-pub fn xattr_acl_default() -> &'static CStr {
-    c_str!("system.posix_acl_default")
-}
-
-/// Result of `flistxattr`, allows iterating over the attributes as a list of `&CStr`s.
-///
-/// Listing xattrs produces a list separated by zeroes, inherently making them available as `&CStr`
-/// already, so we make use of this fact and reflect this in the interface.
-pub struct ListXAttr {
-    data: Vec<u8>,
-}
-
-impl ListXAttr {
-    fn new(data: Vec<u8>) -> Self {
-        Self { data }
-    }
-}
-
-impl<'a> IntoIterator for &'a ListXAttr {
-    type Item = &'a CStr;
-    type IntoIter = ListXAttrIter<'a>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        ListXAttrIter {
-            data: &self.data,
-            at: 0,
-        }
-    }
-}
-
-/// Iterator over the extended attribute entries in a `ListXAttr`.
-pub struct ListXAttrIter<'a> {
-    data: &'a [u8],
-    at: usize,
-}
-
-impl<'a> Iterator for ListXAttrIter<'a> {
-    type Item = &'a CStr;
-
-    fn next(&mut self) -> Option<&'a CStr> {
-        let data = &self.data[self.at..];
-        let next = data.iter().position(|b| *b == 0)? + 1;
-        self.at += next;
-        Some(unsafe { CStr::from_bytes_with_nul_unchecked(&data[..next]) })
-    }
-}
-
-/// Return a list of extended attributes accessible as an iterator over items of type `&CStr`.
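-///
-/// A minimal usage sketch (assumes `fd` is a valid, open file descriptor):
-///
-/// ```ignore
-/// let list = flistxattr(fd)?;
-/// for name in &list {
-///     println!("xattr: {:?}", name);
-/// }
-/// ```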
-pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
-    // Initial buffer size for the attribute list; if the content does not
-    // fit, the buffer is doubled until it is big enough.
-    let mut size = 256;
-    let mut buffer = vec::undefined(size);
-    let mut bytes = unsafe {
-        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
-    };
-    while bytes < 0 {
-        let err = Errno::last();
-        match err {
-            Errno::ERANGE => {
-                // Buffer was not big enough to fit the list, retry with double the size
-                size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
-            },
-            _ => return Err(err),
-        }
-        // Retry to read the list with new buffer
-        buffer.resize(size, 0);
-        bytes = unsafe {
-            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
-        };
-    }
-    buffer.truncate(bytes as usize);
-
-    Ok(ListXAttr::new(buffer))
-}
-
-/// Get an extended attribute by name.
-///
-/// Extended attributes may not contain zeroes, which we enforce in the API by using a `&CStr`
-/// type.
-pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
-    let mut size = 256;
-    let mut buffer = vec::undefined(size);
-    let mut bytes = unsafe {
-        libc::fgetxattr(fd, name.as_ptr(), buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
-    };
-    while bytes < 0 {
-        let err = Errno::last();
-        match err {
-            Errno::ERANGE => {
-                // Buffer was not big enough to fit the value, retry with double the size
-                size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
-            },
-            _ => return Err(err),
-        }
-        buffer.resize(size, 0);
-        bytes = unsafe {
-            libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
-        };
-    }
-    buffer.resize(bytes as usize, 0);
-
-    Ok(buffer)
-}
-
-/// Set an extended attribute on a file descriptor.
-pub fn fsetxattr(fd: RawFd, name: &CStr, data: &[u8]) -> Result<(), nix::errno::Errno> {
-    let flags = 0 as libc::c_int;
-    let result = unsafe {
-        libc::fsetxattr(fd, name.as_ptr(), data.as_ptr() as *const libc::c_void, data.len(), flags)
-    };
-    if result < 0 {
-        return Err(Errno::last());
-    }
-
-    Ok(())
-}
-
-pub fn fsetxattr_fcaps(fd: RawFd, fcaps: &[u8]) -> Result<(), nix::errno::Errno> {
-    // TODO casync checks and removes capabilities if they are set
-    fsetxattr(fd, xattr_name_fcaps(), fcaps)
-}
-
-pub fn is_security_capability(name: &CStr) -> bool {
-    name.to_bytes() == xattr_name_fcaps().to_bytes()
-}
-
-pub fn is_acl(name: &CStr) -> bool {
-    name.to_bytes() == xattr_acl_access().to_bytes()
-    || name.to_bytes() == xattr_acl_default().to_bytes()
-}
-
-/// Check if the passed name buffer starts with a valid xattr namespace prefix
-/// and is within the length limit of 255 bytes
-pub fn is_valid_xattr_name(c_name: &CStr) -> bool {
-    let name = c_name.to_bytes();
-    if name.is_empty() || name.len() > 255 {
-        return false;
-    }
-    if name.starts_with(b"user.") || name.starts_with(b"trusted.") {
-        return true;
-    }
-    // samba saves windows ACLs there
-    if name == b"security.NTACL" {
-        return true;
-    }
-    is_security_capability(c_name)
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    use std::ffi::CString;
-    use std::fs::OpenOptions;
-    use std::os::unix::io::AsRawFd;
-
-    use nix::errno::Errno;
-
-    use proxmox::c_str;
-
-    #[test]
-    fn test_fsetxattr_fgetxattr() {
-        let path = "./tests/xattrs.txt";
-        let file = OpenOptions::new()
-            .write(true)
-            .create(true)
-            .open(&path)
-            .unwrap();
-
-        let fd = file.as_raw_fd();
-
-        let mut name = b"user.".to_vec();
-        for _ in 0..260 {
-            name.push(b'a');
-        }
-
-        let invalid_name = CString::new(name).unwrap();
-
-        assert!(fsetxattr(fd, c_str!("user.attribute0"), b"value0").is_ok());
-        assert!(fsetxattr(fd, c_str!("user.empty"), b"").is_ok());
-
-        if nix::unistd::Uid::current() != nix::unistd::ROOT {
-            assert_eq!(fsetxattr(fd, c_str!("trusted.attribute0"), b"value0"), Err(Errno::EPERM));
-        }
-
-        assert_eq!(fsetxattr(fd, c_str!("garbage.attribute0"), b"value"), Err(Errno::EOPNOTSUPP));
-        assert_eq!(fsetxattr(fd, &invalid_name, b"err"), Err(Errno::ERANGE));
-
-        let v0 = fgetxattr(fd, c_str!("user.attribute0")).unwrap();
-        let v1 = fgetxattr(fd, c_str!("user.empty")).unwrap();
-
-        assert_eq!(v0, b"value0".as_ref());
-        assert_eq!(v1, b"".as_ref());
-        assert_eq!(fgetxattr(fd, c_str!("user.attribute1")), Err(Errno::ENODATA));
-
-        std::fs::remove_file(&path).unwrap();
-    }
-
-    #[test]
-    fn test_is_valid_xattr_name() {
-        let too_long = CString::new(vec![b'a'; 265]).unwrap();
-
-        assert!(!is_valid_xattr_name(&too_long));
-        assert!(!is_valid_xattr_name(c_str!("system.attr")));
-        assert!(is_valid_xattr_name(c_str!("user.attr")));
-        assert!(is_valid_xattr_name(c_str!("trusted.attr")));
-        assert!(is_valid_xattr_name(super::xattr_name_fcaps()));
-    }
-}
diff --git a/src/tools/zip.rs b/src/tools/zip.rs
deleted file mode 100644 (file)
index a9ef9b6..0000000
+++ /dev/null
@@ -1,671 +0,0 @@
-//! ZIP Helper
-//!
-//! Provides an interface to create a ZIP file from `ZipEntry` items.
-//! For a more detailed description of the ZIP format, see:
-//! https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
-
-use std::convert::TryInto;
-use std::ffi::OsString;
-use std::io;
-use std::mem::size_of;
-use std::os::unix::ffi::OsStrExt;
-use std::path::{Component, Path, PathBuf};
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::time::SystemTime;
-
-use anyhow::{format_err, Error, Result};
-use endian_trait::Endian;
-use futures::ready;
-use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
-
-use crc32fast::Hasher;
-use proxmox::tools::time::gmtime;
-
-use crate::tools::compression::{DeflateEncoder, Level};
-
-const LOCAL_FH_SIG: u32 = 0x04034B50;
-const LOCAL_FF_SIG: u32 = 0x08074B50;
-const CENTRAL_DIRECTORY_FH_SIG: u32 = 0x02014B50;
-const END_OF_CENTRAL_DIR: u32 = 0x06054B50;
-const VERSION_NEEDED: u16 = 0x002d;
-const VERSION_MADE_BY: u16 = 0x032d;
-
-const ZIP64_EOCD_RECORD: u32 = 0x06064B50;
-const ZIP64_EOCD_LOCATOR: u32 = 0x07064B50;
-
-// bits for date:
-// 0-4: day of the month (1-31)
-// 5-8: month (1 = jan, etc.)
-// 9-15: year offset from 1980
-//
-// bits for time:
-// 0-4: second / 2
-// 5-10: minute (0-59)
-// 11-15: hour (0-23)
-//
-// see https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
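-//
-// e.g. 2000-01-01 00:00:00 UTC maps to date 0x2821 and time 0x0000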
-fn epoch_to_dos(epoch: i64) -> (u16, u16) {
-    let gmtime = match gmtime(epoch) {
-        Ok(gmtime) => gmtime,
-        Err(_) => return (0, 0),
-    };
-
-    let seconds = (gmtime.tm_sec / 2) & 0b11111;
-    let minutes = gmtime.tm_min & 0b111111;
-    let hours = gmtime.tm_hour & 0b11111;
-    let time: u16 = ((hours << 11) | (minutes << 5) | (seconds)) as u16;
-
-    let date: u16 = if gmtime.tm_year > (2108 - 1900) || gmtime.tm_year < (1980 - 1900) {
-        0
-    } else {
-        let day = gmtime.tm_mday & 0b11111;
-        let month = (gmtime.tm_mon + 1) & 0b1111;
-        let year = (gmtime.tm_year + 1900 - 1980) & 0b1111111;
-        ((year << 9) | (month << 5) | (day)) as u16
-    };
-
-    (date, time)
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct Zip64Field {
-    field_type: u16,
-    field_size: u16,
-    uncompressed_size: u64,
-    compressed_size: u64,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct Zip64FieldWithOffset {
-    field_type: u16,
-    field_size: u16,
-    uncompressed_size: u64,
-    compressed_size: u64,
-    offset: u64,
-    start_disk: u32,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct LocalFileHeader {
-    signature: u32,
-    version_needed: u16,
-    flags: u16,
-    compression: u16,
-    time: u16,
-    date: u16,
-    crc32: u32,
-    compressed_size: u32,
-    uncompressed_size: u32,
-    filename_len: u16,
-    extra_field_len: u16,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct LocalFileFooter {
-    signature: u32,
-    crc32: u32,
-    compressed_size: u64,
-    uncompressed_size: u64,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct CentralDirectoryFileHeader {
-    signature: u32,
-    version_made_by: u16,
-    version_needed: u16,
-    flags: u16,
-    compression: u16,
-    time: u16,
-    date: u16,
-    crc32: u32,
-    compressed_size: u32,
-    uncompressed_size: u32,
-    filename_len: u16,
-    extra_field_len: u16,
-    comment_len: u16,
-    start_disk: u16,
-    internal_flags: u16,
-    external_flags: u32,
-    offset: u32,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct EndOfCentralDir {
-    signature: u32,
-    disk_number: u16,
-    start_disk: u16,
-    disk_record_count: u16,
-    total_record_count: u16,
-    directory_size: u32,
-    directory_offset: u32,
-    comment_len: u16,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct Zip64EOCDRecord {
-    signature: u32,
-    field_size: u64,
-    version_made_by: u16,
-    version_needed: u16,
-    disk_number: u32,
-    disk_number_central_dir: u32,
-    disk_record_count: u64,
-    total_record_count: u64,
-    directory_size: u64,
-    directory_offset: u64,
-}
-
-#[derive(Endian)]
-#[repr(C, packed)]
-struct Zip64EOCDLocator {
-    signature: u32,
-    disk_number: u32,
-    offset: u64,
-    disk_count: u32,
-}
-
-async fn write_struct<E, T>(output: &mut T, data: E) -> io::Result<()>
-where
-    T: AsyncWrite + ?Sized + Unpin,
-    E: Endian,
-{
-    let data = data.to_le();
-
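-    // Reinterpret the struct as its raw bytes for writing; sound here because
-    // callers pass #[repr(C, packed)] structs with a fixed layout.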
-    let data = unsafe {
-        std::slice::from_raw_parts(
-            &data as *const E as *const u8,
-            core::mem::size_of_val(&data),
-        )
-    };
-    output.write_all(data).await
-}
-
-/// Represents an entry in a ZIP file
-///
-/// Used to add files or directories to a `ZipEncoder`.
-pub struct ZipEntry {
-    filename: OsString,
-    mtime: i64,
-    mode: u16,
-    crc32: u32,
-    uncompressed_size: u64,
-    compressed_size: u64,
-    offset: u64,
-    is_file: bool,
-}
-
-impl ZipEntry {
-    /// Creates a new ZipEntry
-    ///
-    /// If `is_file` is false, the path will contain a trailing separator,
-    /// so that the zip file understands that it is a directory.
-    pub fn new<P: AsRef<Path>>(path: P, mtime: i64, mode: u16, is_file: bool) -> Self {
-        let mut relpath = PathBuf::new();
-
-        for comp in path.as_ref().components() {
-            if let Component::Normal(_) = comp {
-                relpath.push(comp);
-            }
-        }
-
-        if !is_file {
-            relpath.push(""); // adds trailing slash
-        }
-
-        Self {
-            filename: relpath.into(),
-            crc32: 0,
-            mtime,
-            mode,
-            uncompressed_size: 0,
-            compressed_size: 0,
-            offset: 0,
-            is_file,
-        }
-    }
-
-    async fn write_local_header<W>(&self, mut buf: &mut W) -> io::Result<usize>
-    where
-        W: AsyncWrite + Unpin + ?Sized,
-    {
-        let filename = self.filename.as_bytes();
-        let filename_len = filename.len();
-        let header_size = size_of::<LocalFileHeader>();
-        let zip_field_size = size_of::<Zip64Field>();
-        let size: usize = header_size + filename_len + zip_field_size;
-
-        let (date, time) = epoch_to_dos(self.mtime);
-
-        write_struct(
-            &mut buf,
-            LocalFileHeader {
-                signature: LOCAL_FH_SIG,
-                version_needed: 0x2d,
-                flags: 1 << 3,
-                compression: 0x8,
-                time,
-                date,
-                crc32: 0,
-                compressed_size: 0xFFFFFFFF,
-                uncompressed_size: 0xFFFFFFFF,
-                filename_len: filename_len as u16,
-                extra_field_len: zip_field_size as u16,
-            },
-        )
-        .await?;
-
-        buf.write_all(filename).await?;
-
-        write_struct(
-            &mut buf,
-            Zip64Field {
-                field_type: 0x0001,
-                field_size: 2 * 8,
-                uncompressed_size: 0,
-                compressed_size: 0,
-            },
-        )
-        .await?;
-
-        Ok(size)
-    }
-
-    async fn write_data_descriptor<W: AsyncWrite + Unpin + ?Sized>(
-        &self,
-        mut buf: &mut W,
-    ) -> io::Result<usize> {
-        let size = size_of::<LocalFileFooter>();
-
-        write_struct(
-            &mut buf,
-            LocalFileFooter {
-                signature: LOCAL_FF_SIG,
-                crc32: self.crc32,
-                compressed_size: self.compressed_size,
-                uncompressed_size: self.uncompressed_size,
-            },
-        )
-        .await?;
-
-        Ok(size)
-    }
-
-    async fn write_central_directory_header<W: AsyncWrite + Unpin + ?Sized>(
-        &self,
-        mut buf: &mut W,
-    ) -> io::Result<usize> {
-        let filename = self.filename.as_bytes();
-        let filename_len = filename.len();
-        let header_size = size_of::<CentralDirectoryFileHeader>();
-        let zip_field_size = size_of::<Zip64FieldWithOffset>();
-        let mut size: usize = header_size + filename_len;
-
-        let (date, time) = epoch_to_dos(self.mtime);
-
-        let (compressed_size, uncompressed_size, offset, need_zip64) = if self.compressed_size
-            >= (u32::MAX as u64)
-            || self.uncompressed_size >= (u32::MAX as u64)
-            || self.offset >= (u32::MAX as u64)
-        {
-            size += zip_field_size;
-            (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
-        } else {
-            (
-                self.compressed_size as u32,
-                self.uncompressed_size as u32,
-                self.offset as u32,
-                false,
-            )
-        };
-
-        write_struct(
-            &mut buf,
-            CentralDirectoryFileHeader {
-                signature: CENTRAL_DIRECTORY_FH_SIG,
-                version_made_by: VERSION_MADE_BY,
-                version_needed: VERSION_NEEDED,
-                flags: 1 << 3,
-                compression: 0x8,
-                time,
-                date,
-                crc32: self.crc32,
-                compressed_size,
-                uncompressed_size,
-                filename_len: filename_len as u16,
-                extra_field_len: if need_zip64 { zip_field_size as u16 } else { 0 },
-                comment_len: 0,
-                start_disk: 0,
-                internal_flags: 0,
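-                // Unix mode in the high 16 bits; bit 4 is the MS-DOS
-                // directory attribute, set for non-file entries.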
-                external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
-                offset,
-            },
-        )
-        .await?;
-
-        buf.write_all(filename).await?;
-
-        if need_zip64 {
-            write_struct(
-                &mut buf,
-                Zip64FieldWithOffset {
-                    field_type: 1,
-                    field_size: 3 * 8 + 4,
-                    uncompressed_size: self.uncompressed_size,
-                    compressed_size: self.compressed_size,
-                    offset: self.offset,
-                    start_disk: 0,
-                },
-            )
-            .await?;
-        }
-
-        Ok(size)
-    }
-}
-
-// Wraps an AsyncRead and computes the CRC32 of all data read through it
-struct HashWrapper<R> {
-    inner: R,
-    hasher: Hasher,
-}
-
-impl<R> HashWrapper<R> {
-    fn new(inner: R) -> Self {
-        Self {
-            inner,
-            hasher: Hasher::new(),
-        }
-    }
-
-    // consumes self and returns the hash and the reader
-    fn finish(self) -> (u32, R) {
-        let crc32 = self.hasher.finalize();
-        (crc32, self.inner)
-    }
-}
-
-impl<R> AsyncRead for HashWrapper<R>
-where
-    R: AsyncRead + Unpin,
-{
-    fn poll_read(
-        self: Pin<&mut Self>,
-        cx: &mut Context<'_>,
-        buf: &mut ReadBuf<'_>,
-    ) -> Poll<Result<(), io::Error>> {
-        let this = self.get_mut();
-        let old_len = buf.filled().len();
-        ready!(Pin::new(&mut this.inner).poll_read(cx, buf))?;
-        let new_len = buf.filled().len();
-        if new_len > old_len {
-            this.hasher.update(&buf.filled()[old_len..new_len]);
-        }
-        Poll::Ready(Ok(()))
-    }
-}
-
-/// Wraps a writer that implements AsyncWrite for creating a ZIP archive
-///
-/// This will create a ZIP archive on the fly, with entries added via
-/// 'add_entry'. To finish the archive, call 'finish'.
-/// Example:
-/// ```no_run
-/// use proxmox_backup::tools::zip::*;
-/// use tokio::fs::File;
-/// use anyhow::{Error, Result};
-///
-/// #[tokio::main]
-/// async fn main() -> Result<(), Error> {
-///     let target = File::create("foo.zip").await?;
-///     let source = File::open("foo.txt").await?;
-///
-///     let mut zip = ZipEncoder::new(target);
-///     zip.add_entry(ZipEntry::new(
-///         "foo.txt",
-///         0,
-///         0o100755,
-///         true,
-///     ), Some(source)).await?;
-///
-///     zip.finish().await?;
-///
-///     Ok(())
-/// }
-/// ```
-pub struct ZipEncoder<W>
-where
-    W: AsyncWrite + Unpin,
-{
-    byte_count: usize,
-    files: Vec<ZipEntry>,
-    target: Option<W>,
-}
-
-impl<W: AsyncWrite + Unpin> ZipEncoder<W> {
-    pub fn new(target: W) -> Self {
-        Self {
-            byte_count: 0,
-            files: Vec::new(),
-            target: Some(target),
-        }
-    }
-
-    pub async fn add_entry<R: AsyncRead + Unpin>(
-        &mut self,
-        mut entry: ZipEntry,
-        content: Option<R>,
-    ) -> Result<(), Error> {
-        let mut target = self
-            .target
-            .take()
-            .ok_or_else(|| format_err!("had no target during add entry"))?;
-        entry.offset = self.byte_count.try_into()?;
-        self.byte_count += entry.write_local_header(&mut target).await?;
-        if let Some(content) = content {
-            let mut reader = HashWrapper::new(content);
-            let mut enc = DeflateEncoder::with_quality(target, Level::Fastest);
-
-            enc.compress(&mut reader).await?;
-            let total_in = enc.total_in();
-            let total_out = enc.total_out();
-            target = enc.into_inner();
-
-            let (crc32, _reader) = reader.finish();
-
-            self.byte_count += total_out as usize;
-            entry.compressed_size = total_out;
-            entry.uncompressed_size = total_in;
-
-            entry.crc32 = crc32;
-        }
-        self.byte_count += entry.write_data_descriptor(&mut target).await?;
-        self.target = Some(target);
-
-        self.files.push(entry);
-
-        Ok(())
-    }
-
-    async fn write_eocd(
-        &mut self,
-        central_dir_size: usize,
-        central_dir_offset: usize,
-    ) -> Result<(), Error> {
-        let entrycount = self.files.len();
-        let mut target = self
-            .target
-            .take()
-            .ok_or_else(|| format_err!("had no target during write_eocd"))?;
-
-        let mut count = entrycount as u16;
-        let mut directory_size = central_dir_size as u32;
-        let mut directory_offset = central_dir_offset as u32;
-
-        if central_dir_size > u32::MAX as usize
-            || central_dir_offset > u32::MAX as usize
-            || entrycount > u16::MAX as usize
-        {
-            count = 0xFFFF;
-            directory_size = 0xFFFFFFFF;
-            directory_offset = 0xFFFFFFFF;
-
-            write_struct(
-                &mut target,
-                Zip64EOCDRecord {
-                    signature: ZIP64_EOCD_RECORD,
-                    field_size: 44,
-                    version_made_by: VERSION_MADE_BY,
-                    version_needed: VERSION_NEEDED,
-                    disk_number: 0,
-                    disk_number_central_dir: 0,
-                    disk_record_count: entrycount.try_into()?,
-                    total_record_count: entrycount.try_into()?,
-                    directory_size: central_dir_size.try_into()?,
-                    directory_offset: central_dir_offset.try_into()?,
-                },
-            )
-            .await?;
-
-            let locator_offset = central_dir_offset + central_dir_size;
-
-            write_struct(
-                &mut target,
-                Zip64EOCDLocator {
-                    signature: ZIP64_EOCD_LOCATOR,
-                    disk_number: 0,
-                    offset: locator_offset.try_into()?,
-                    disk_count: 1,
-                },
-            )
-            .await?;
-        }
-
-        write_struct(
-            &mut target,
-            EndOfCentralDir {
-                signature: END_OF_CENTRAL_DIR,
-                disk_number: 0,
-                start_disk: 0,
-                disk_record_count: count,
-                total_record_count: count,
-                directory_size,
-                directory_offset,
-                comment_len: 0,
-            },
-        )
-        .await?;
-
-        self.target = Some(target);
-
-        Ok(())
-    }
-
-    pub async fn finish(&mut self) -> Result<(), Error> {
-        let mut target = self
-            .target
-            .take()
-            .ok_or_else(|| format_err!("had no target during finish"))?;
-        let central_dir_offset = self.byte_count;
-        let mut central_dir_size = 0;
-
-        for file in &self.files {
-            central_dir_size += file.write_central_directory_header(&mut target).await?;
-        }
-
-        self.target = Some(target);
-        self.write_eocd(central_dir_size, central_dir_offset)
-            .await?;
-
-        self.target
-            .take()
-            .ok_or_else(|| format_err!("had no target for flush"))?
-            .flush()
-            .await?;
-
-        Ok(())
-    }
-}
-
-/// Zip a local directory and write encoded data to target. "source" has to point to a valid
-/// directory; its name will be the root of the zip file - e.g.:
-/// source:
-///         /foo/bar
-/// zip file:
-///         /bar/file1
-///         /bar/dir1
-///         /bar/dir1/file2
-///         ...
-/// ...except if "source" is the root directory
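-///
-/// A minimal usage sketch (assumes a tokio runtime; paths are hypothetical):
-///
-/// ```ignore
-/// let target = tokio::fs::File::create("/tmp/archive.zip").await?;
-/// zip_directory(target, std::path::Path::new("/foo/bar")).await?;
-/// ```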
-pub async fn zip_directory<W>(target: W, source: &Path) -> Result<(), Error>
-where
-    W: AsyncWrite + Unpin + Send,
-{
-    use walkdir::WalkDir;
-    use std::os::unix::fs::MetadataExt;
-
-    let base_path = source.parent().unwrap_or_else(|| Path::new("/"));
-    let mut encoder = ZipEncoder::new(target);
-
-    for entry in WalkDir::new(&source).into_iter() {
-        match entry {
-            Ok(entry) => {
-                let entry_path = entry.path().to_owned();
-                let encoder = &mut encoder;
-
-                if let Err(err) = async move {
-                    let entry_path_no_base = entry.path().strip_prefix(base_path)?;
-                    let metadata = entry.metadata()?;
-                    let mtime = match metadata.modified().unwrap_or_else(|_| SystemTime::now()).duration_since(SystemTime::UNIX_EPOCH) {
-                        Ok(dur) => dur.as_secs() as i64,
-                        Err(time_error) => -(time_error.duration().as_secs() as i64)
-                    };
-                    let mode = metadata.mode() as u16;
-
-                    if entry.file_type().is_file() {
-                        let file = tokio::fs::File::open(entry.path()).await?;
-                        let ze = ZipEntry::new(
-                            &entry_path_no_base,
-                            mtime,
-                            mode,
-                            true,
-                        );
-                        encoder.add_entry(ze, Some(file)).await?;
-                    } else if entry.file_type().is_dir() {
-                        let ze = ZipEntry::new(
-                            &entry_path_no_base,
-                            mtime,
-                            mode,
-                            false,
-                        );
-                        let content: Option<tokio::fs::File> = None;
-                        encoder.add_entry(ze, content).await?;
-                    }
-                    // ignore other file types
-                    let ok: Result<(), Error> = Ok(());
-                    ok
-                }
-                .await
-                {
-                    eprintln!(
-                        "zip: error encoding file or directory '{}': {}",
-                        entry_path.display(),
-                        err
-                    );
-                }
-            }
-            Err(err) => {
-                eprintln!("zip: error reading directory entry: {}", err);
-            }
-        }
-    }
-
-    encoder.finish().await
-}
index 550600c6a502b87ff3d43f9b75c7381f5e1b1333..0eea394562b822b802d916881950ecfc03966c35 100644 (file)
@@ -1,7 +1,8 @@
+use std::process::Command;
+
 use anyhow::{Error};
 
-use std::process::Command;
-use proxmox_backup::pxar::*;
+use pbs_client::pxar::*;
 
 fn run_test(dir_name: &str) -> Result<(), Error> {