git.proxmox.com Git - proxmox-backup.git/commitdiff
move backup id related types to pbs-api-types
author Wolfgang Bumiller <w.bumiller@proxmox.com>
Wed, 7 Jul 2021 09:28:53 +0000 (11:28 +0200)
committer Wolfgang Bumiller <w.bumiller@proxmox.com>
Wed, 7 Jul 2021 09:34:56 +0000 (11:34 +0200)
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
pbs-api-types/src/lib.rs
pbs-api-types/src/userid.rs
pbs-datastore/src/backup_info.rs [new file with mode: 0644]
pbs-datastore/src/manifest.rs [new file with mode: 0644]
src/api2/types/mod.rs
src/backup/backup_info.rs [deleted file]
src/backup/manifest.rs [deleted file]

diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs
index 58f5615a0972b74b5c63528737ab8673c12814f3..7775324d3642ef32a54dcde73066fccaa7b22b56 100644 (file)
@@ -3,6 +3,30 @@
 use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
 use proxmox::const_regex;
 
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; }
+
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
+
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
+
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
+
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! SNAPSHOT_PATH_REGEX_STR {
+    () => (
+        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+    );
+}
+
 #[macro_use]
 mod userid;
 pub use userid::Authid;
@@ -12,14 +36,19 @@ pub use userid::{Tokenname, TokennameRef};
 pub use userid::{Username, UsernameRef};
 pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
 
-#[macro_export]
-macro_rules! PROXMOX_SAFE_ID_REGEX_STR {
-    () => {
-        r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)"
-    };
-}
-
 const_regex! {
+    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+
+    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+
+    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+
+    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+
+    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
+
+    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+
     pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
 
     /// Regex for safe identifiers.
@@ -51,3 +80,5 @@ pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
 pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
+
+pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
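
Aside (not part of the commit): a minimal sketch of how the regexes now exported from pbs-api-types can be used, mirroring the captures-based parsing in pbs-datastore/src/backup_info.rs further down; the import path is an assumption based on the hunk above.

    use pbs_api_types::SNAPSHOT_PATH_REGEX; // assumed import path after this move

    /// Split "<type>/<id>/<time>" into its three captured components.
    fn split_snapshot_path(path: &str) -> Option<(&str, &str, &str)> {
        let cap = SNAPSHOT_PATH_REGEX.captures(path)?;
        Some((
            cap.get(1)?.as_str(), // backup type: host|vm|ct
            cap.get(2)?.as_str(), // backup id
            cap.get(3)?.as_str(), // backup time (RFC 3339, UTC)
        ))
    }

    // split_snapshot_path("vm/100/2020-06-15T05:18:33Z") => Some(("vm", "100", "2020-06-15T05:18:33Z"))
    // split_snapshot_path("vm/100")                      => None
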
diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs
index 34c7a09fc474fcd22c2d5a0649201555da930c1d..08335b93a9e1371902920aa513037cdea109f38b 100644 (file)
@@ -33,8 +33,6 @@ use proxmox::api::api;
 use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
 use proxmox::const_regex;
 
-use super::PROXMOX_SAFE_ID_REGEX_STR;
-
 // we only allow a limited set of characters
 // colon is not allowed, because we store usernames in
 // colon separated lists)!
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
new file mode 100644 (file)
index 0000000..34bf26a
--- /dev/null
@@ -0,0 +1,394 @@
+use std::os::unix::io::RawFd;
+use std::path::{Path, PathBuf};
+
+use anyhow::{bail, format_err, Error};
+
+use crate::api2::types::{
+    BACKUP_ID_REGEX,
+    BACKUP_TYPE_REGEX,
+    BACKUP_DATE_REGEX,
+    GROUP_PATH_REGEX,
+    SNAPSHOT_PATH_REGEX,
+    BACKUP_FILE_REGEX,
+};
+
+use super::manifest::MANIFEST_BLOB_NAME;
+
+/// BackupGroup is a directory containing a list of BackupDir
+#[derive(Debug, Eq, PartialEq, Hash, Clone)]
+pub struct BackupGroup {
+    /// Type of backup
+    backup_type: String,
+    /// Unique (for this type) ID
+    backup_id: String,
+}
+
+impl std::cmp::Ord for BackupGroup {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let type_order = self.backup_type.cmp(&other.backup_type);
+        if type_order != std::cmp::Ordering::Equal {
+            return type_order;
+        }
+        // try to compare IDs numerically
+        let id_self = self.backup_id.parse::<u64>();
+        let id_other = other.backup_id.parse::<u64>();
+        match (id_self, id_other) {
+            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+            _ => self.backup_id.cmp(&other.backup_id),
+        }
+    }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl BackupGroup {
+    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
+        Self {
+            backup_type: backup_type.into(),
+            backup_id: backup_id.into(),
+        }
+    }
+
+    pub fn backup_type(&self) -> &str {
+        &self.backup_type
+    }
+
+    pub fn backup_id(&self) -> &str {
+        &self.backup_id
+    }
+
+    pub fn group_path(&self) -> PathBuf {
+        let mut relative_path = PathBuf::new();
+
+        relative_path.push(&self.backup_type);
+
+        relative_path.push(&self.backup_id);
+
+        relative_path
+    }
+
+    pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
+        let mut list = vec![];
+
+        let mut path = base_path.to_owned();
+        path.push(self.group_path());
+
+        pbs_tools::fs::scandir(
+            libc::AT_FDCWD,
+            &path,
+            &BACKUP_DATE_REGEX,
+            |l2_fd, backup_time, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+
+                let backup_dir =
+                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
+                let files = list_backup_files(l2_fd, backup_time)?;
+
+                list.push(BackupInfo { backup_dir, files });
+
+                Ok(())
+            },
+        )?;
+        Ok(list)
+    }
+
+    pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
+        let mut last = None;
+
+        let mut path = base_path.to_owned();
+        path.push(self.group_path());
+
+        pbs_tools::fs::scandir(
+            libc::AT_FDCWD,
+            &path,
+            &BACKUP_DATE_REGEX,
+            |l2_fd, backup_time, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+
+                let mut manifest_path = PathBuf::from(backup_time);
+                manifest_path.push(MANIFEST_BLOB_NAME);
+
+                use nix::fcntl::{openat, OFlag};
+                match openat(
+                    l2_fd,
+                    &manifest_path,
+                    OFlag::O_RDONLY,
+                    nix::sys::stat::Mode::empty(),
+                ) {
+                    Ok(rawfd) => {
+                        /* manifest exists --> assume backup was successful */
+                        /* close else this leaks! */
+                        nix::unistd::close(rawfd)?;
+                    }
+                    Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+                        return Ok(());
+                    }
+                    Err(err) => {
+                        bail!("last_successful_backup: unexpected error - {}", err);
+                    }
+                }
+
+                let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
+                if let Some(last_timestamp) = last {
+                    if timestamp > last_timestamp {
+                        last = Some(timestamp);
+                    }
+                } else {
+                    last = Some(timestamp);
+                }
+
+                Ok(())
+            },
+        )?;
+
+        Ok(last)
+    }
+}
+
+impl std::fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.backup_type();
+        let id = self.backup_id();
+        write!(f, "{}/{}", backup_type, id)
+    }
+}
+
+impl std::str::FromStr for BackupGroup {
+    type Err = Error;
+
+    /// Parse a backup group path
+    ///
+    /// This parses strings like `vm/100`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = GROUP_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(Self {
+            backup_type: cap.get(1).unwrap().as_str().to_owned(),
+            backup_id: cap.get(2).unwrap().as_str().to_owned(),
+        })
+    }
+}
+
+/// Uniquely identify a Backup (relative to data store)
+///
+/// We also call this a backup snapshot.
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub struct BackupDir {
+    /// Backup group
+    group: BackupGroup,
+    /// Backup timestamp
+    backup_time: i64,
+    // backup_time as rfc3339
+    backup_time_string: String,
+}
+
+impl BackupDir {
+    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
+    where
+        T: Into<String>,
+        U: Into<String>,
+    {
+        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        BackupDir::with_group(group, backup_time)
+    }
+
+    pub fn with_rfc3339<T, U, V>(
+        backup_type: T,
+        backup_id: U,
+        backup_time_string: V,
+    ) -> Result<Self, Error>
+    where
+        T: Into<String>,
+        U: Into<String>,
+        V: Into<String>,
+    {
+        let backup_time_string = backup_time_string.into();
+        let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
+        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
+    }
+
+    pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
+        let backup_time_string = Self::backup_time_to_string(backup_time)?;
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
+    }
+
+    pub fn group(&self) -> &BackupGroup {
+        &self.group
+    }
+
+    pub fn backup_time(&self) -> i64 {
+        self.backup_time
+    }
+
+    pub fn backup_time_string(&self) -> &str {
+        &self.backup_time_string
+    }
+
+    pub fn relative_path(&self) -> PathBuf {
+        let mut relative_path = self.group.group_path();
+
+        relative_path.push(self.backup_time_string.clone());
+
+        relative_path
+    }
+
+    pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
+        // fixme: can this fail? (avoid unwrap)
+        proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
+    }
+}
+
+impl std::str::FromStr for BackupDir {
+    type Err = Error;
+
+    /// Parse a snapshot path
+    ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        BackupDir::with_rfc3339(
+            cap.get(1).unwrap().as_str(),
+            cap.get(2).unwrap().as_str(),
+            cap.get(3).unwrap().as_str(),
+        )
+    }
+}
+
+impl std::fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.group.backup_type();
+        let id = self.group.backup_id();
+        write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
+    }
+}
+
+/// Detailed Backup Information, lists files inside a BackupDir
+#[derive(Debug, Clone)]
+pub struct BackupInfo {
+    /// the backup directory
+    pub backup_dir: BackupDir,
+    /// List of data files
+    pub files: Vec<String>,
+}
+
+impl BackupInfo {
+    pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
+        let mut path = base_path.to_owned();
+        path.push(backup_dir.relative_path());
+
+        let files = list_backup_files(libc::AT_FDCWD, &path)?;
+
+        Ok(BackupInfo { backup_dir, files })
+    }
+
+    /// Finds the latest backup inside a backup group
+    pub fn last_backup(
+        base_path: &Path,
+        group: &BackupGroup,
+        only_finished: bool,
+    ) -> Result<Option<BackupInfo>, Error> {
+        let backups = group.list_backups(base_path)?;
+        Ok(backups
+            .into_iter()
+            .filter(|item| !only_finished || item.is_finished())
+            .max_by_key(|item| item.backup_dir.backup_time()))
+    }
+
+    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
+        if ascendending {
+            // oldest first
+            list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
+        } else {
+            // newest first
+            list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
+        }
+    }
+
+    pub fn list_files(base_path: &Path, backup_dir: &BackupDir) -> Result<Vec<String>, Error> {
+        let mut path = base_path.to_owned();
+        path.push(backup_dir.relative_path());
+
+        let files = list_backup_files(libc::AT_FDCWD, &path)?;
+
+        Ok(files)
+    }
+
+    pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
+        let mut list = Vec::new();
+
+        pbs_tools::fs::scandir(
+            libc::AT_FDCWD,
+            base_path,
+            &BACKUP_TYPE_REGEX,
+            |l0_fd, backup_type, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+                pbs_tools::fs::scandir(
+                    l0_fd,
+                    backup_type,
+                    &BACKUP_ID_REGEX,
+                    |_, backup_id, file_type| {
+                        if file_type != nix::dir::Type::Directory {
+                            return Ok(());
+                        }
+
+                        list.push(BackupGroup::new(backup_type, backup_id));
+
+                        Ok(())
+                    },
+                )
+            },
+        )?;
+
+        Ok(list)
+    }
+
+    pub fn is_finished(&self) -> bool {
+        // backup is considered unfinished if there is no manifest
+        self.files
+            .iter()
+            .any(|name| name == super::MANIFEST_BLOB_NAME)
+    }
+}
+
+fn list_backup_files<P: ?Sized + nix::NixPath>(
+    dirfd: RawFd,
+    path: &P,
+) -> Result<Vec<String>, Error> {
+    let mut files = vec![];
+
+    pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
+        if file_type != nix::dir::Type::File {
+            return Ok(());
+        }
+        files.push(filename.to_owned());
+        Ok(())
+    })?;
+
+    Ok(files)
+}
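
Aside (not part of the commit): a minimal sketch, using only the API shown in this file, of the FromStr/Display round trip and the ordering rule implemented above (within a type, numeric IDs compare numerically and sort before non-numeric ones).

    fn backup_info_example() -> Result<(), anyhow::Error> {
        // group paths parse via GROUP_PATH_REGEX ("<type>/<id>")
        let group: BackupGroup = "vm/100".parse()?;
        assert_eq!(group.backup_type(), "vm");
        assert_eq!(group.backup_id(), "100");
        assert_eq!(group.to_string(), "vm/100");

        // snapshot paths parse via SNAPSHOT_PATH_REGEX ("<type>/<id>/<rfc3339 time>")
        let snapshot: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse()?;
        assert_eq!(snapshot.group().backup_id(), "elsa");
        assert_eq!(snapshot.backup_time_string(), "2020-06-15T05:18:33Z");

        // Ord for BackupGroup: numeric IDs compare numerically, not lexically,
        // and numeric IDs sort before purely textual ones
        assert!(BackupGroup::new("vm", "9") < BackupGroup::new("vm", "10"));
        assert!(BackupGroup::new("vm", "100") < BackupGroup::new("vm", "alpha"));

        Ok(())
    }
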
diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
new file mode 100644 (file)
index 0000000..47f9cad
--- /dev/null
@@ -0,0 +1,317 @@
+use anyhow::{bail, format_err, Error};
+use std::convert::TryFrom;
+use std::path::Path;
+
+use serde_json::{json, Value};
+use ::serde::{Deserialize, Serialize};
+
+use crate::backup::{BackupDir, CryptMode, CryptConfig, Fingerprint};
+
+pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
+pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
+pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
+pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob";
+
+mod hex_csum {
+    use serde::{self, Deserialize, Serializer, Deserializer};
+
+    pub fn serialize<S>(
+        csum: &[u8; 32],
+        serializer: S,
+    ) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let s = proxmox::tools::digest_to_hex(csum);
+        serializer.serialize_str(&s)
+    }
+
+    pub fn deserialize<'de, D>(
+        deserializer: D,
+    ) -> Result<[u8; 32], D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+    }
+}
+
+fn crypt_mode_none() -> CryptMode { CryptMode::None }
+fn empty_value() -> Value { json!({}) }
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+pub struct FileInfo {
+    pub filename: String,
+    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
+    pub crypt_mode: CryptMode,
+    pub size: u64,
+    #[serde(with = "hex_csum")]
+    pub csum: [u8; 32],
+}
+
+impl FileInfo {
+
+    /// Return expected CryptMode of referenced chunks
+    ///
+    /// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
+    /// should only reference plain chunks.
+    pub fn chunk_crypt_mode (&self) -> CryptMode {
+        match self.crypt_mode {
+            CryptMode::Encrypt => CryptMode::Encrypt,
+            CryptMode::SignOnly | CryptMode::None => CryptMode::None,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+pub struct BackupManifest {
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    files: Vec<FileInfo>,
+    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
+    pub unprotected: Value,
+    pub signature: Option<String>,
+}
+
+#[derive(PartialEq)]
+pub enum ArchiveType {
+    FixedIndex,
+    DynamicIndex,
+    Blob,
+}
+
+pub fn archive_type<P: AsRef<Path>>(
+    archive_name: P,
+) -> Result<ArchiveType, Error> {
+
+    let archive_name = archive_name.as_ref();
+    let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) {
+        Some("didx") => ArchiveType::DynamicIndex,
+        Some("fidx") => ArchiveType::FixedIndex,
+        Some("blob") => ArchiveType::Blob,
+        _ => bail!("unknown archive type: {:?}", archive_name),
+    };
+    Ok(archive_type)
+}
+
+
+impl BackupManifest {
+
+    pub fn new(snapshot: BackupDir) -> Self {
+        Self {
+            backup_type: snapshot.group().backup_type().into(),
+            backup_id: snapshot.group().backup_id().into(),
+            backup_time: snapshot.backup_time(),
+            files: Vec::new(),
+            unprotected: json!({}),
+            signature: None,
+        }
+    }
+
+    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
+        let _archive_type = archive_type(&filename)?; // check type
+        self.files.push(FileInfo { filename, size, csum, crypt_mode });
+        Ok(())
+    }
+
+    pub fn files(&self) -> &[FileInfo] {
+        &self.files[..]
+    }
+
+    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
+
+        let info = self.files.iter().find(|item| item.filename == name);
+
+        match info {
+            None => bail!("manifest does not contain file '{}'", name),
+            Some(info) => Ok(info),
+        }
+    }
+
+    pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
+
+        let info = self.lookup_file_info(name)?;
+
+        if size != info.size {
+            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
+        }
+
+        if csum != &info.csum {
+            bail!("wrong checksum for file '{}'", name);
+        }
+
+        Ok(())
+    }
+
+    // Generate canonical json
+    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
+        crate::tools::json::to_canonical_json(value)
+    }
+
+    /// Compute manifest signature
+    ///
+    /// This generates an HMAC-SHA256 over the canonical JSON
+    /// representation; the 'unprotected' property is excluded.
+    pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
+        Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
+    }
+
+    fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
+
+        let mut signed_data = data.clone();
+
+        signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
+        signed_data.as_object_mut().unwrap().remove("signature"); // exclude
+
+        let canonical = Self::to_canonical_json(&signed_data)?;
+
+        let sig = crypt_config.compute_auth_tag(&canonical);
+
+        Ok(sig)
+    }
+
+    /// Converts the manifest into a JSON string, adding a signature if a crypt_config is given.
+    pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
+
+        let mut manifest = serde_json::to_value(&self)?;
+
+        if let Some(crypt_config) = crypt_config {
+            let sig = self.signature(crypt_config)?;
+            manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
+            let fingerprint = &crypt_config.fingerprint();
+            manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
+        }
+
+        let manifest = serde_json::to_string_pretty(&manifest).unwrap();
+        Ok(manifest)
+    }
+
+    pub fn fingerprint(&self) -> Result<Option<Fingerprint>, Error> {
+        match &self.unprotected["key-fingerprint"] {
+            Value::Null => Ok(None),
+            value => Ok(Some(serde_json::from_value(value.clone())?))
+        }
+    }
+
+    /// Checks if a BackupManifest and a CryptConfig share a valid fingerprint combination.
+    ///
+    /// An unsigned manifest is valid with any or no CryptConfig.
+    /// A signed manifest is only valid with a matching CryptConfig.
+    pub fn check_fingerprint(&self, crypt_config: Option<&CryptConfig>) -> Result<(), Error> {
+        if let Some(fingerprint) = self.fingerprint()? {
+            match crypt_config {
+                None => bail!(
+                    "missing key - manifest was created with key {}",
+                    fingerprint,
+                ),
+                Some(crypt_config) => {
+                    let config_fp = crypt_config.fingerprint();
+                    if config_fp != fingerprint {
+                        bail!(
+                            "wrong key - manifest's key {} does not match provided key {}",
+                            fingerprint,
+                            config_fp
+                        );
+                    }
+                }
+            }
+        };
+
+        Ok(())
+    }
+
+    /// Try to read the manifest. This verifies the signature if there is a crypt_config.
+    pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
+        let json: Value = serde_json::from_slice(data)?;
+        let signature = json["signature"].as_str().map(String::from);
+
+        if let Some(ref crypt_config) = crypt_config {
+            if let Some(signature) = signature {
+                let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
+
+                let fingerprint = &json["unprotected"]["key-fingerprint"];
+                if fingerprint != &Value::Null {
+                    let fingerprint = serde_json::from_value(fingerprint.clone())?;
+                    let config_fp = crypt_config.fingerprint();
+                    if config_fp != fingerprint {
+                        bail!(
+                            "wrong key - unable to verify signature since manifest's key {} does not match provided key {}",
+                            fingerprint,
+                            config_fp
+                        );
+                    }
+                }
+                if signature != expected_signature {
+                    bail!("wrong signature in manifest");
+                }
+            } else {
+                // not signed: warn/fail?
+            }
+        }
+
+        let manifest: BackupManifest = serde_json::from_value(json)?;
+        Ok(manifest)
+    }
+}
+
+
+impl TryFrom<super::DataBlob> for BackupManifest {
+    type Error = Error;
+
+    fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
+        // no expected digest available
+        let data = blob.decode(None, None)
+            .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
+        let json: Value = serde_json::from_slice(&data[..])
+            .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
+        let manifest: BackupManifest = serde_json::from_value(json)?;
+        Ok(manifest)
+    }
+}
+
+
+#[test]
+fn test_manifest_signature() -> Result<(), Error> {
+
+    use crate::backup::{KeyDerivationConfig};
+
+    let pw = b"test";
+
+    let kdf = KeyDerivationConfig::Scrypt {
+        n: 65536,
+        r: 8,
+        p: 1,
+        salt: Vec::new(),
+    };
+
+    let testkey = kdf.derive_key(pw)?;
+
+    let crypt_config = CryptConfig::new(testkey)?;
+
+    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
+
+    let mut manifest = BackupManifest::new(snapshot);
+
+    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
+    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
+
+    manifest.unprotected["note"] = "This is not protected by the signature.".into();
+
+    let text = manifest.to_string(Some(&crypt_config))?;
+
+    let manifest: Value = serde_json::from_str(&text)?;
+    let signature = manifest["signature"].as_str().unwrap().to_string();
+
+    assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
+
+    let manifest: BackupManifest = serde_json::from_value(manifest)?;
+    let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
+
+    assert_eq!(signature, expected_signature);
+
+    Ok(())
+}
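
Aside (not part of the commit): a minimal sketch, restricted to the API shown in this file, of building a manifest in memory and verifying a file entry against its recorded size and checksum; the snapshot path and file names are example values.

    fn manifest_example() -> Result<(), anyhow::Error> {
        let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
        let mut manifest = BackupManifest::new(snapshot);

        // add_file() rejects unknown extensions via archive_type()
        manifest.add_file("root.img.fidx".into(), 1024, [0u8; 32], CryptMode::None)?;
        assert!(manifest.add_file("notes.txt".into(), 8, [0u8; 32], CryptMode::None).is_err());

        // verify_file() compares size and checksum against the recorded entry
        manifest.verify_file("root.img.fidx", &[0u8; 32], 1024)?;
        assert!(manifest.verify_file("root.img.fidx", &[1u8; 32], 1024).is_err());

        Ok(())
    }
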
diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs
index 820d96181ddabda7328c26efd0dfbb4b0c869087..652d7bf430fb4a26edeb52fdeea34da801e043c8 100644 (file)
@@ -41,17 +41,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });
 
-macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
-macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
-macro_rules! BACKUP_TIME_RE {
-    () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z")
-}
-macro_rules! SNAPSHOT_PATH_REGEX_STR {
-    () => (
-        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
-    );
-}
-
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
 
@@ -101,18 +90,6 @@ const_regex!{
 
     pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
 
-    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
-
-    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
-
-    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
-
-    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
-
-    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
-
-    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
-
     pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
 
     pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
@@ -133,9 +110,6 @@ pub const IP_FORMAT: ApiStringFormat =
 pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
 
-pub const BACKUP_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
-
 pub const UUID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&UUID_REGEX);
 
diff --git a/src/backup/backup_info.rs b/src/backup/backup_info.rs
deleted file mode 100644 (file)
index 34bf26a..0000000
+++ /dev/null
@@ -1,394 +0,0 @@
-use std::os::unix::io::RawFd;
-use std::path::{Path, PathBuf};
-
-use anyhow::{bail, format_err, Error};
-
-use crate::api2::types::{
-    BACKUP_ID_REGEX,
-    BACKUP_TYPE_REGEX,
-    BACKUP_DATE_REGEX,
-    GROUP_PATH_REGEX,
-    SNAPSHOT_PATH_REGEX,
-    BACKUP_FILE_REGEX,
-};
-
-use super::manifest::MANIFEST_BLOB_NAME;
-
-/// BackupGroup is a directory containing a list of BackupDir
-#[derive(Debug, Eq, PartialEq, Hash, Clone)]
-pub struct BackupGroup {
-    /// Type of backup
-    backup_type: String,
-    /// Unique (for this type) ID
-    backup_id: String,
-}
-
-impl std::cmp::Ord for BackupGroup {
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        let type_order = self.backup_type.cmp(&other.backup_type);
-        if type_order != std::cmp::Ordering::Equal {
-            return type_order;
-        }
-        // try to compare IDs numerically
-        let id_self = self.backup_id.parse::<u64>();
-        let id_other = other.backup_id.parse::<u64>();
-        match (id_self, id_other) {
-            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
-            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
-            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
-            _ => self.backup_id.cmp(&other.backup_id),
-        }
-    }
-}
-
-impl std::cmp::PartialOrd for BackupGroup {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl BackupGroup {
-    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
-        Self {
-            backup_type: backup_type.into(),
-            backup_id: backup_id.into(),
-        }
-    }
-
-    pub fn backup_type(&self) -> &str {
-        &self.backup_type
-    }
-
-    pub fn backup_id(&self) -> &str {
-        &self.backup_id
-    }
-
-    pub fn group_path(&self) -> PathBuf {
-        let mut relative_path = PathBuf::new();
-
-        relative_path.push(&self.backup_type);
-
-        relative_path.push(&self.backup_id);
-
-        relative_path
-    }
-
-    pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
-        let mut list = vec![];
-
-        let mut path = base_path.to_owned();
-        path.push(self.group_path());
-
-        pbs_tools::fs::scandir(
-            libc::AT_FDCWD,
-            &path,
-            &BACKUP_DATE_REGEX,
-            |l2_fd, backup_time, file_type| {
-                if file_type != nix::dir::Type::Directory {
-                    return Ok(());
-                }
-
-                let backup_dir =
-                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
-                let files = list_backup_files(l2_fd, backup_time)?;
-
-                list.push(BackupInfo { backup_dir, files });
-
-                Ok(())
-            },
-        )?;
-        Ok(list)
-    }
-
-    pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
-        let mut last = None;
-
-        let mut path = base_path.to_owned();
-        path.push(self.group_path());
-
-        pbs_tools::fs::scandir(
-            libc::AT_FDCWD,
-            &path,
-            &BACKUP_DATE_REGEX,
-            |l2_fd, backup_time, file_type| {
-                if file_type != nix::dir::Type::Directory {
-                    return Ok(());
-                }
-
-                let mut manifest_path = PathBuf::from(backup_time);
-                manifest_path.push(MANIFEST_BLOB_NAME);
-
-                use nix::fcntl::{openat, OFlag};
-                match openat(
-                    l2_fd,
-                    &manifest_path,
-                    OFlag::O_RDONLY,
-                    nix::sys::stat::Mode::empty(),
-                ) {
-                    Ok(rawfd) => {
-                        /* manifest exists --> assume backup was successful */
-                        /* close else this leaks! */
-                        nix::unistd::close(rawfd)?;
-                    }
-                    Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
-                        return Ok(());
-                    }
-                    Err(err) => {
-                        bail!("last_successful_backup: unexpected error - {}", err);
-                    }
-                }
-
-                let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
-                if let Some(last_timestamp) = last {
-                    if timestamp > last_timestamp {
-                        last = Some(timestamp);
-                    }
-                } else {
-                    last = Some(timestamp);
-                }
-
-                Ok(())
-            },
-        )?;
-
-        Ok(last)
-    }
-}
-
-impl std::fmt::Display for BackupGroup {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let backup_type = self.backup_type();
-        let id = self.backup_id();
-        write!(f, "{}/{}", backup_type, id)
-    }
-}
-
-impl std::str::FromStr for BackupGroup {
-    type Err = Error;
-
-    /// Parse a backup group path
-    ///
-    /// This parses strings like `vm/100`.
-    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = GROUP_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
-
-        Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().to_owned(),
-            backup_id: cap.get(2).unwrap().as_str().to_owned(),
-        })
-    }
-}
-
-/// Uniquely identify a Backup (relative to data store)
-///
-/// We also call this a backup snapshot.
-#[derive(Debug, Eq, PartialEq, Clone)]
-pub struct BackupDir {
-    /// Backup group
-    group: BackupGroup,
-    /// Backup timestamp
-    backup_time: i64,
-    // backup_time as rfc3339
-    backup_time_string: String,
-}
-
-impl BackupDir {
-    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
-    where
-        T: Into<String>,
-        U: Into<String>,
-    {
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
-        BackupDir::with_group(group, backup_time)
-    }
-
-    pub fn with_rfc3339<T, U, V>(
-        backup_type: T,
-        backup_id: U,
-        backup_time_string: V,
-    ) -> Result<Self, Error>
-    where
-        T: Into<String>,
-        U: Into<String>,
-        V: Into<String>,
-    {
-        let backup_time_string = backup_time_string.into();
-        let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
-        Ok(Self {
-            group,
-            backup_time,
-            backup_time_string,
-        })
-    }
-
-    pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
-        let backup_time_string = Self::backup_time_to_string(backup_time)?;
-        Ok(Self {
-            group,
-            backup_time,
-            backup_time_string,
-        })
-    }
-
-    pub fn group(&self) -> &BackupGroup {
-        &self.group
-    }
-
-    pub fn backup_time(&self) -> i64 {
-        self.backup_time
-    }
-
-    pub fn backup_time_string(&self) -> &str {
-        &self.backup_time_string
-    }
-
-    pub fn relative_path(&self) -> PathBuf {
-        let mut relative_path = self.group.group_path();
-
-        relative_path.push(self.backup_time_string.clone());
-
-        relative_path
-    }
-
-    pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
-        // fixme: can this fail? (avoid unwrap)
-        proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
-    }
-}
-
-impl std::str::FromStr for BackupDir {
-    type Err = Error;
-
-    /// Parse a snapshot path
-    ///
-    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
-    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = SNAPSHOT_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
-
-        BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str(),
-            cap.get(2).unwrap().as_str(),
-            cap.get(3).unwrap().as_str(),
-        )
-    }
-}
-
-impl std::fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let backup_type = self.group.backup_type();
-        let id = self.group.backup_id();
-        write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
-    }
-}
-
-/// Detailed Backup Information, lists files inside a BackupDir
-#[derive(Debug, Clone)]
-pub struct BackupInfo {
-    /// the backup directory
-    pub backup_dir: BackupDir,
-    /// List of data files
-    pub files: Vec<String>,
-}
-
-impl BackupInfo {
-    pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
-        let mut path = base_path.to_owned();
-        path.push(backup_dir.relative_path());
-
-        let files = list_backup_files(libc::AT_FDCWD, &path)?;
-
-        Ok(BackupInfo { backup_dir, files })
-    }
-
-    /// Finds the latest backup inside a backup group
-    pub fn last_backup(
-        base_path: &Path,
-        group: &BackupGroup,
-        only_finished: bool,
-    ) -> Result<Option<BackupInfo>, Error> {
-        let backups = group.list_backups(base_path)?;
-        Ok(backups
-            .into_iter()
-            .filter(|item| !only_finished || item.is_finished())
-            .max_by_key(|item| item.backup_dir.backup_time()))
-    }
-
-    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
-        if ascendending {
-            // oldest first
-            list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
-        } else {
-            // newest first
-            list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
-        }
-    }
-
-    pub fn list_files(base_path: &Path, backup_dir: &BackupDir) -> Result<Vec<String>, Error> {
-        let mut path = base_path.to_owned();
-        path.push(backup_dir.relative_path());
-
-        let files = list_backup_files(libc::AT_FDCWD, &path)?;
-
-        Ok(files)
-    }
-
-    pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
-        let mut list = Vec::new();
-
-        pbs_tools::fs::scandir(
-            libc::AT_FDCWD,
-            base_path,
-            &BACKUP_TYPE_REGEX,
-            |l0_fd, backup_type, file_type| {
-                if file_type != nix::dir::Type::Directory {
-                    return Ok(());
-                }
-                pbs_tools::fs::scandir(
-                    l0_fd,
-                    backup_type,
-                    &BACKUP_ID_REGEX,
-                    |_, backup_id, file_type| {
-                        if file_type != nix::dir::Type::Directory {
-                            return Ok(());
-                        }
-
-                        list.push(BackupGroup::new(backup_type, backup_id));
-
-                        Ok(())
-                    },
-                )
-            },
-        )?;
-
-        Ok(list)
-    }
-
-    pub fn is_finished(&self) -> bool {
-        // backup is considered unfinished if there is no manifest
-        self.files
-            .iter()
-            .any(|name| name == super::MANIFEST_BLOB_NAME)
-    }
-}
-
-fn list_backup_files<P: ?Sized + nix::NixPath>(
-    dirfd: RawFd,
-    path: &P,
-) -> Result<Vec<String>, Error> {
-    let mut files = vec![];
-
-    pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
-        if file_type != nix::dir::Type::File {
-            return Ok(());
-        }
-        files.push(filename.to_owned());
-        Ok(())
-    })?;
-
-    Ok(files)
-}
diff --git a/src/backup/manifest.rs b/src/backup/manifest.rs
deleted file mode 100644 (file)
index 47f9cad..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-use anyhow::{bail, format_err, Error};
-use std::convert::TryFrom;
-use std::path::Path;
-
-use serde_json::{json, Value};
-use ::serde::{Deserialize, Serialize};
-
-use crate::backup::{BackupDir, CryptMode, CryptConfig, Fingerprint};
-
-pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
-pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
-pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
-pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob";
-
-mod hex_csum {
-    use serde::{self, Deserialize, Serializer, Deserializer};
-
-    pub fn serialize<S>(
-        csum: &[u8; 32],
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let s = proxmox::tools::digest_to_hex(csum);
-        serializer.serialize_str(&s)
-    }
-
-    pub fn deserialize<'de, D>(
-        deserializer: D,
-    ) -> Result<[u8; 32], D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let s = String::deserialize(deserializer)?;
-        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
-    }
-}
-
-fn crypt_mode_none() -> CryptMode { CryptMode::None }
-fn empty_value() -> Value { json!({}) }
-
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-pub struct FileInfo {
-    pub filename: String,
-    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
-    pub crypt_mode: CryptMode,
-    pub size: u64,
-    #[serde(with = "hex_csum")]
-    pub csum: [u8; 32],
-}
-
-impl FileInfo {
-
-    /// Return expected CryptMode of referenced chunks
-    ///
-    /// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
-    /// should only reference plain chunks.
-    pub fn chunk_crypt_mode (&self) -> CryptMode {
-        match self.crypt_mode {
-            CryptMode::Encrypt => CryptMode::Encrypt,
-            CryptMode::SignOnly | CryptMode::None => CryptMode::None,
-        }
-    }
-}
-
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-pub struct BackupManifest {
-    backup_type: String,
-    backup_id: String,
-    backup_time: i64,
-    files: Vec<FileInfo>,
-    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
-    pub unprotected: Value,
-    pub signature: Option<String>,
-}
-
-#[derive(PartialEq)]
-pub enum ArchiveType {
-    FixedIndex,
-    DynamicIndex,
-    Blob,
-}
-
-pub fn archive_type<P: AsRef<Path>>(
-    archive_name: P,
-) -> Result<ArchiveType, Error> {
-
-    let archive_name = archive_name.as_ref();
-    let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) {
-        Some("didx") => ArchiveType::DynamicIndex,
-        Some("fidx") => ArchiveType::FixedIndex,
-        Some("blob") => ArchiveType::Blob,
-        _ => bail!("unknown archive type: {:?}", archive_name),
-    };
-    Ok(archive_type)
-}
-
-
-impl BackupManifest {
-
-    pub fn new(snapshot: BackupDir) -> Self {
-        Self {
-            backup_type: snapshot.group().backup_type().into(),
-            backup_id: snapshot.group().backup_id().into(),
-            backup_time: snapshot.backup_time(),
-            files: Vec::new(),
-            unprotected: json!({}),
-            signature: None,
-        }
-    }
-
-    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
-        let _archive_type = archive_type(&filename)?; // check type
-        self.files.push(FileInfo { filename, size, csum, crypt_mode });
-        Ok(())
-    }
-
-    pub fn files(&self) -> &[FileInfo] {
-        &self.files[..]
-    }
-
-    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
-
-        let info = self.files.iter().find(|item| item.filename == name);
-
-        match info {
-            None => bail!("manifest does not contain file '{}'", name),
-            Some(info) => Ok(info),
-        }
-    }
-
-    pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
-
-        let info = self.lookup_file_info(name)?;
-
-        if size != info.size {
-            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
-        }
-
-        if csum != &info.csum {
-            bail!("wrong checksum for file '{}'", name);
-        }
-
-        Ok(())
-    }
-
-    // Generate canonical json
-    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
-        crate::tools::json::to_canonical_json(value)
-    }
-
-    /// Compute manifest signature
-    ///
-    /// This generates an HMAC-SHA256 over the canonical JSON
-    /// representation; the 'unprotected' property is excluded.
-    pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
-        Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
-    }
-
-    fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
-
-        let mut signed_data = data.clone();
-
-        signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
-        signed_data.as_object_mut().unwrap().remove("signature"); // exclude
-
-        let canonical = Self::to_canonical_json(&signed_data)?;
-
-        let sig = crypt_config.compute_auth_tag(&canonical);
-
-        Ok(sig)
-    }
-
-    /// Converts the manifest into a JSON string, adding a signature if a crypt_config is given.
-    pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
-
-        let mut manifest = serde_json::to_value(&self)?;
-
-        if let Some(crypt_config) = crypt_config {
-            let sig = self.signature(crypt_config)?;
-            manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
-            let fingerprint = &crypt_config.fingerprint();
-            manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
-        }
-
-        let manifest = serde_json::to_string_pretty(&manifest).unwrap();
-        Ok(manifest)
-    }
-
-    pub fn fingerprint(&self) -> Result<Option<Fingerprint>, Error> {
-        match &self.unprotected["key-fingerprint"] {
-            Value::Null => Ok(None),
-            value => Ok(Some(serde_json::from_value(value.clone())?))
-        }
-    }
-
-    /// Checks if a BackupManifest and a CryptConfig share a valid fingerprint combination.
-    ///
-    /// An unsigned manifest is valid with any or no CryptConfig.
-    /// A signed manifest is only valid with a matching CryptConfig.
-    pub fn check_fingerprint(&self, crypt_config: Option<&CryptConfig>) -> Result<(), Error> {
-        if let Some(fingerprint) = self.fingerprint()? {
-            match crypt_config {
-                None => bail!(
-                    "missing key - manifest was created with key {}",
-                    fingerprint,
-                ),
-                Some(crypt_config) => {
-                    let config_fp = crypt_config.fingerprint();
-                    if config_fp != fingerprint {
-                        bail!(
-                            "wrong key - manifest's key {} does not match provided key {}",
-                            fingerprint,
-                            config_fp
-                        );
-                    }
-                }
-            }
-        };
-
-        Ok(())
-    }
-
-    /// Try to read the manifest. This verifies the signature if there is a crypt_config.
-    pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
-        let json: Value = serde_json::from_slice(data)?;
-        let signature = json["signature"].as_str().map(String::from);
-
-        if let Some(ref crypt_config) = crypt_config {
-            if let Some(signature) = signature {
-                let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
-
-                let fingerprint = &json["unprotected"]["key-fingerprint"];
-                if fingerprint != &Value::Null {
-                    let fingerprint = serde_json::from_value(fingerprint.clone())?;
-                    let config_fp = crypt_config.fingerprint();
-                    if config_fp != fingerprint {
-                        bail!(
-                            "wrong key - unable to verify signature since manifest's key {} does not match provided key {}",
-                            fingerprint,
-                            config_fp
-                        );
-                    }
-                }
-                if signature != expected_signature {
-                    bail!("wrong signature in manifest");
-                }
-            } else {
-                // not signed: warn/fail?
-            }
-        }
-
-        let manifest: BackupManifest = serde_json::from_value(json)?;
-        Ok(manifest)
-    }
-}
-
-
-impl TryFrom<super::DataBlob> for BackupManifest {
-    type Error = Error;
-
-    fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
-        // no expected digest available
-        let data = blob.decode(None, None)
-            .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
-        let json: Value = serde_json::from_slice(&data[..])
-            .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
-        let manifest: BackupManifest = serde_json::from_value(json)?;
-        Ok(manifest)
-    }
-}
-
-
-#[test]
-fn test_manifest_signature() -> Result<(), Error> {
-
-    use crate::backup::{KeyDerivationConfig};
-
-    let pw = b"test";
-
-    let kdf = KeyDerivationConfig::Scrypt {
-        n: 65536,
-        r: 8,
-        p: 1,
-        salt: Vec::new(),
-    };
-
-    let testkey = kdf.derive_key(pw)?;
-
-    let crypt_config = CryptConfig::new(testkey)?;
-
-    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
-
-    let mut manifest = BackupManifest::new(snapshot);
-
-    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
-    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
-
-    manifest.unprotected["note"] = "This is not protected by the signature.".into();
-
-    let text = manifest.to_string(Some(&crypt_config))?;
-
-    let manifest: Value = serde_json::from_str(&text)?;
-    let signature = manifest["signature"].as_str().unwrap().to_string();
-
-    assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
-
-    let manifest: BackupManifest = serde_json::from_value(manifest)?;
-    let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
-
-    assert_eq!(signature, expected_signature);
-
-    Ok(())
-}