git.proxmox.com Git - proxmox-backup.git/commitdiff
api-types: introduce BackupType enum and Group/Dir api types
author    Wolfgang Bumiller <w.bumiller@proxmox.com>
          Thu, 14 Apr 2022 13:05:58 +0000 (15:05 +0200)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
          Fri, 15 Apr 2022 11:12:46 +0000 (13:12 +0200)
The backup type is now a real enum.

BackupType, BackupGroup and BackupDir are all API types and
implement Display and FromStr. The ordering is the same as in
pbs-datastore.

Also, they are now flattened into the list-item structs via
#[serde(flatten)] instead of having their fields copied manually.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
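
A minimal, standalone usage sketch of the new types (values assumed; based on
the impls added to pbs-api-types/src/datastore.rs below):

    use pbs_api_types::{BackupDir, BackupGroup, BackupType};

    fn example() -> Result<(), anyhow::Error> {
        // FromStr/Display round-trip for groups ("<type>/<id>"):
        let group: BackupGroup = "vm/100".parse()?;
        assert_eq!(group.to_string(), "vm/100");

        // ... and for snapshots ("<type>/<id>/<RFC 3339 time>"):
        let snapshot: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse()?;
        assert_eq!(snapshot.ty(), BackupType::Host);
        assert_eq!(snapshot.id(), "elsa");

        // The enum keeps the old alphabetical string ordering: ct < host < vm.
        assert!(BackupType::Ct < BackupType::Host);
        assert!(BackupType::Host < BackupType::Vm);
        Ok(())
    }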
23 files changed:
examples/download-speed.rs
examples/upload-speed.rs
pbs-api-types/src/datastore.rs
pbs-api-types/src/tape/mod.rs
pbs-client/src/backup_reader.rs
pbs-client/src/backup_writer.rs
pbs-client/src/tools/mod.rs
pbs-datastore/src/backup_info.rs
pbs-datastore/src/datastore.rs
pbs-datastore/src/manifest.rs
proxmox-backup-client/src/benchmark.rs
proxmox-backup-client/src/catalog.rs
proxmox-backup-client/src/main.rs
proxmox-backup-client/src/mount.rs
proxmox-backup-client/src/snapshot.rs
src/api2/admin/datastore.rs
src/api2/backup/mod.rs
src/api2/reader/mod.rs
src/api2/tape/media.rs
src/backup/verify.rs
src/bin/proxmox-backup-manager.rs
src/bin/proxmox-tape.rs
src/server/pull.rs

diff --git a/examples/download-speed.rs b/examples/download-speed.rs
index a685df9db46b86de2199c5909e4972c5fabca286..dbd778af29db62e3b54e6e93f7b3cf92f70ad4dd 100644
--- a/examples/download-speed.rs
+++ b/examples/download-speed.rs
@@ -2,7 +2,7 @@ use std::io::Write;
 
 use anyhow::Error;
 
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
 use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
 
 pub struct DummyWriter {
@@ -33,8 +33,16 @@ async fn run() -> Result<(), Error> {
 
     let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
 
-    let client =
-        BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true).await?;
+    let client = BackupReader::start(
+        client,
+        None,
+        "store2",
+        BackupType::Host,
+        "elsa",
+        backup_time,
+        true,
+    )
+    .await?;
 
     let start = std::time::SystemTime::now();
 
diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs
index 7d1fb16be749742f40dbd881723ef28a1762c17a..bfd01799487191dd72e9129a1db287ad036ef56b 100644
--- a/examples/upload-speed.rs
+++ b/examples/upload-speed.rs
@@ -1,6 +1,6 @@
 use anyhow::Error;
 
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
 use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
 
 async fn upload_speed() -> Result<f64, Error> {
@@ -21,7 +21,7 @@ async fn upload_speed() -> Result<f64, Error> {
         client,
         None,
         datastore,
-        "host",
+        BackupType::Host,
         "speedtest",
         backup_time,
         false,
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 01e2319a6545d939a001f6644af196ef609de812..92579f61ea49124fb3a9ed8eab767ea8d5d6dfdf 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1,3 +1,6 @@
+use std::fmt;
+
+use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 
 use proxmox_schema::{
@@ -394,17 +397,244 @@ pub struct SnapshotVerifyState {
     pub state: VerifyState,
 }
 
+#[api]
+/// Backup types.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+pub enum BackupType {
+    /// Virtual machines.
+    Vm,
+
+    /// Containers.
+    Ct,
+
+    /// "Host" backups.
+    Host,
+}
+
+impl BackupType {
+    pub const fn as_str(&self) -> &'static str {
+        match self {
+            BackupType::Vm => "vm",
+            BackupType::Ct => "ct",
+            BackupType::Host => "host",
+        }
+    }
+
+    /// We used to have alphabetical ordering here when this was a string.
+    const fn order(self) -> u8 {
+        match self {
+            BackupType::Ct => 0,
+            BackupType::Host => 1,
+            BackupType::Vm => 2,
+        }
+    }
+}
+
+impl fmt::Display for BackupType {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_str(), f)
+    }
+}
+
+impl std::str::FromStr for BackupType {
+    type Err = Error;
+
+    /// Parse a backup type.
+    fn from_str(ty: &str) -> Result<Self, Error> {
+        Ok(match ty {
+            "ct" => BackupType::Ct,
+            "host" => BackupType::Host,
+            "vm" => BackupType::Vm,
+            _ => bail!("invalid backup type {ty:?}"),
+        })
+    }
+}
+
+impl std::cmp::Ord for BackupType {
+    #[inline]
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.order().cmp(&other.order())
+    }
+}
+
+impl std::cmp::PartialOrd for BackupType {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup-type": { type: BackupType },
+        "backup-id": { schema: BACKUP_ID_SCHEMA },
+    },
+)]
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// A backup group (without a data store).
+pub struct BackupGroup {
+    /// Backup type.
+    #[serde(rename = "backup-type")]
+    pub ty: BackupType,
+
+    /// Backup id.
+    #[serde(rename = "backup-id")]
+    pub id: String,
+}
+
+impl BackupGroup {
+    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
+        Self { ty, id: id.into() }
+    }
+}
+
+impl From<(BackupType, String)> for BackupGroup {
+    fn from(data: (BackupType, String)) -> Self {
+        Self {
+            ty: data.0,
+            id: data.1,
+        }
+    }
+}
+
+impl std::cmp::Ord for BackupGroup {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let type_order = self.ty.cmp(&other.ty);
+        if type_order != std::cmp::Ordering::Equal {
+            return type_order;
+        }
+        // try to compare IDs numerically
+        let id_self = self.id.parse::<u64>();
+        let id_other = other.id.parse::<u64>();
+        match (id_self, id_other) {
+            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+            _ => self.id.cmp(&other.id),
+        }
+    }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}/{}", self.ty, self.id)
+    }
+}
+
+impl std::str::FromStr for BackupGroup {
+    type Err = Error;
+
+    /// Parse a backup group.
+    ///
+    /// This parses strings like `vm/100`.
+    fn from_str(path: &str) -> Result<Self, Error> {
+        let cap = GROUP_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(Self {
+            ty: cap.get(1).unwrap().as_str().parse()?,
+            id: cap.get(2).unwrap().as_str().to_owned(),
+        })
+    }
+}
+
+#[api(
+    properties: {
+        "group": { type: BackupGroup },
+        "backup-time": { schema: BACKUP_TIME_SCHEMA },
+    },
+)]
+/// Uniquely identify a Backup (relative to data store)
+///
+/// We also call this a backup snapshot.
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct BackupDir {
+    /// Backup group.
+    #[serde(flatten)]
+    pub group: BackupGroup,
+
+    /// Backup timestamp unix epoch.
+    #[serde(rename = "backup-time")]
+    pub time: i64,
+}
+
+impl From<(BackupGroup, i64)> for BackupDir {
+    fn from(data: (BackupGroup, i64)) -> Self {
+        Self {
+            group: data.0,
+            time: data.1,
+        }
+    }
+}
+
+impl From<(BackupType, String, i64)> for BackupDir {
+    fn from(data: (BackupType, String, i64)) -> Self {
+        Self {
+            group: (data.0, data.1).into(),
+            time: data.2,
+        }
+    }
+}
+
+impl BackupDir {
+    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+    where
+        T: Into<String>,
+    {
+        let time = proxmox_time::parse_rfc3339(backup_time_string)?;
+        let group = BackupGroup::new(ty, id.into());
+        Ok(Self { group, time })
+    }
+
+    pub fn ty(&self) -> BackupType {
+        self.group.ty
+    }
+
+    pub fn id(&self) -> &str {
+        &self.group.id
+    }
+}
+
+impl std::str::FromStr for BackupDir {
+    type Err = Error;
+
+    /// Parse a snapshot path.
+    ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        BackupDir::with_rfc3339(
+            cap.get(1).unwrap().as_str().parse()?,
+            cap.get(2).unwrap().as_str(),
+            cap.get(3).unwrap().as_str(),
+        )
+    }
+}
+
+impl std::fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // FIXME: log error?
+        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
+        write!(f, "{}/{}", self.group, time)
+    }
+}
+
+#[api(
+    properties: {
+        "backup": { type: BackupDir },
         comment: {
             schema: SINGLE_LINE_COMMENT_SCHEMA,
             optional: true,
@@ -432,9 +662,8 @@ pub struct SnapshotVerifyState {
 #[serde(rename_all = "kebab-case")]
 /// Basic information about backup snapshot.
 pub struct SnapshotListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
+    #[serde(flatten)]
+    pub backup: BackupDir,
     /// The first line from manifest "notes"
     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
@@ -459,15 +688,8 @@ pub struct SnapshotListItem {
 
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "last-backup": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup": { type: BackupGroup },
+        "last-backup": { schema: BACKUP_TIME_SCHEMA },
         "backup-count": {
             type: Integer,
         },
@@ -486,8 +708,9 @@ pub struct SnapshotListItem {
 #[serde(rename_all = "kebab-case")]
 /// Basic information about a backup group.
 pub struct GroupListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
+    #[serde(flatten)]
+    pub backup: BackupGroup,
+
     pub last_backup: i64,
     /// Number of contained snapshots
     pub backup_count: u64,
@@ -503,24 +726,16 @@ pub struct GroupListItem {
 
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup": { type: BackupDir },
     },
 )]
 #[derive(Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Prune result.
 pub struct PruneListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
+    #[serde(flatten)]
+    pub backup: BackupDir,
+
     /// Keep snapshot
     pub keep: bool,
 }
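
To illustrate the flattening: PruneListItem (and likewise SnapshotListItem
and GroupListItem) now embeds the new types via #[serde(flatten)], so the
wire format keeps the flat backup-type/backup-id/backup-time keys. A small
round-trip sketch (values assumed):

    use pbs_api_types::{BackupType, PruneListItem};

    fn flatten_roundtrip() -> Result<(), anyhow::Error> {
        let item: PruneListItem = serde_json::from_value(serde_json::json!({
            "backup-type": "vm",
            "backup-id": "100",
            "backup-time": 1655270313,
            "keep": true,
        }))?;
        assert_eq!(item.backup.ty(), BackupType::Vm);
        // Serializing again yields the same flat keys, not a nested object.
        assert_eq!(serde_json::to_value(&item)?["backup-id"], "100");
        Ok(())
    }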
diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs
index c90ebd0eff95798ea6644f5ad9fff19ceecbc28b..0b60eefa11262ff99c5ff70918d3194607f0ac0e 100644
--- a/pbs-api-types/src/tape/mod.rs
+++ b/pbs-api-types/src/tape/mod.rs
@@ -27,7 +27,7 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
 use proxmox_uuid::Uuid;
 
-use crate::{BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, FINGERPRINT_SHA256_FORMAT};
+use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT};
 
 const_regex! {
     pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
@@ -66,7 +66,7 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
             optional: true,
         },
         "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
+            type: BackupType,
             optional: true,
         },
         "backup-id": {
@@ -83,6 +83,6 @@ pub struct MediaContentListFilter {
     pub label_text: Option<String>,
     pub media: Option<Uuid>,
     pub media_set: Option<Uuid>,
-    pub backup_type: Option<String>,
+    pub backup_type: Option<BackupType>,
     pub backup_id: Option<String>,
 }
diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs
index 6a53a25592720ed9c59c4d1cd9db1be1b3b64fed..99195492f27ca44c611f1c0089e9f0593dfab498 100644
--- a/pbs-client/src/backup_reader.rs
+++ b/pbs-client/src/backup_reader.rs
@@ -7,6 +7,7 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
+use pbs_api_types::BackupType;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
@@ -46,7 +47,7 @@ impl BackupReader {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: &str,
+        backup_type: BackupType,
         backup_id: &str,
         backup_time: i64,
         debug: bool,
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index dc2b876717938bd1e4a1056cb3dc370964985184..17f7bdad2cf0bc342f978b0a27abf861fede080d 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
 
-use pbs_api_types::HumanByte;
+use pbs_api_types::{BackupType, HumanByte};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
@@ -86,7 +86,7 @@ impl BackupWriter {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: &str,
+        backup_type: BackupType,
         backup_id: &str,
         backup_time: i64,
         debug: bool,
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
index 70ed1addd229b40c6232e72792f5b9c45bd3b5a1..495b1c9f087632273a353b3a0fcfaa450ea6fc3c 100644
--- a/pbs-client/src/tools/mod.rs
+++ b/pbs-client/src/tools/mod.rs
@@ -265,6 +265,13 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
                 item["backup-type"].as_str(),
                 item["backup-time"].as_i64(),
             ) {
+                let backup_type = match backup_type.parse() {
+                    Ok(ty) => ty,
+                    Err(_) => {
+                        // FIXME: print error in completion?
+                        continue;
+                    }
+                };
                 if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
                     result.push(snapshot.relative_path().to_str().unwrap().to_owned());
                 }
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index af6424879aba47177fd0c042617f2325d8b4916e..c5f4b72dba5e630cf9b9e32fb6cd7930ea576069 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -5,7 +5,8 @@ use std::str::FromStr;
 use anyhow::{bail, format_err, Error};
 
 use pbs_api_types::{
-    GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX, SNAPSHOT_PATH_REGEX,
+    BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX,
+    SNAPSHOT_PATH_REGEX,
 };
 
 use super::manifest::MANIFEST_BLOB_NAME;
@@ -14,7 +15,7 @@ use super::manifest::MANIFEST_BLOB_NAME;
 #[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupGroup {
     /// Type of backup
-    backup_type: String,
+    backup_type: BackupType,
     /// Unique (for this type) ID
     backup_id: String,
 }
@@ -44,15 +45,15 @@ impl std::cmp::PartialOrd for BackupGroup {
 }
 
 impl BackupGroup {
-    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
+    pub fn new<T: Into<String>>(backup_type: BackupType, backup_id: T) -> Self {
         Self {
-            backup_type: backup_type.into(),
+            backup_type,
             backup_id: backup_id.into(),
         }
     }
 
-    pub fn backup_type(&self) -> &str {
-        &self.backup_type
+    pub fn backup_type(&self) -> BackupType {
+        self.backup_type
     }
 
     pub fn backup_id(&self) -> &str {
@@ -62,7 +63,7 @@ impl BackupGroup {
     pub fn group_path(&self) -> PathBuf {
         let mut relative_path = PathBuf::new();
 
-        relative_path.push(&self.backup_type);
+        relative_path.push(self.backup_type.as_str());
 
         relative_path.push(&self.backup_id);
 
@@ -85,7 +86,7 @@ impl BackupGroup {
                 }
 
                 let backup_dir =
-                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
+                    BackupDir::with_rfc3339(self.backup_type, &self.backup_id, backup_time)?;
                 let files = list_backup_files(l2_fd, backup_time)?;
 
                 let protected = backup_dir.is_protected(base_path.to_owned());
@@ -162,12 +163,24 @@ impl BackupGroup {
                 Ok(group) => &group == self,
                 Err(_) => false, // shouldn't happen if value is schema-checked
             },
-            GroupFilter::BackupType(backup_type) => self.backup_type() == backup_type,
+            GroupFilter::BackupType(backup_type) => self.backup_type().as_str() == backup_type,
             GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
         }
     }
 }
 
+impl From<&BackupGroup> for pbs_api_types::BackupGroup {
+    fn from(group: &BackupGroup) -> pbs_api_types::BackupGroup {
+        (group.backup_type, group.backup_id.clone()).into()
+    }
+}
+
+impl From<BackupGroup> for pbs_api_types::BackupGroup {
+    fn from(group: BackupGroup) -> pbs_api_types::BackupGroup {
+        (group.backup_type, group.backup_id).into()
+    }
+}
+
 impl std::fmt::Display for BackupGroup {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let backup_type = self.backup_type();
@@ -188,7 +201,7 @@ impl std::str::FromStr for BackupGroup {
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
 
         Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().to_owned(),
+            backup_type: cap.get(1).unwrap().as_str().parse()?,
             backup_id: cap.get(2).unwrap().as_str().to_owned(),
         })
     }
@@ -208,28 +221,26 @@ pub struct BackupDir {
 }
 
 impl BackupDir {
-    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
+    pub fn new<T>(backup_type: BackupType, backup_id: T, backup_time: i64) -> Result<Self, Error>
     where
         T: Into<String>,
-        U: Into<String>,
     {
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        let group = BackupGroup::new(backup_type, backup_id.into());
         BackupDir::with_group(group, backup_time)
     }
 
-    pub fn with_rfc3339<T, U, V>(
-        backup_type: T,
-        backup_id: U,
-        backup_time_string: V,
+    pub fn with_rfc3339<T, U>(
+        backup_type: BackupType,
+        backup_id: T,
+        backup_time_string: U,
     ) -> Result<Self, Error>
     where
         T: Into<String>,
         U: Into<String>,
-        V: Into<String>,
     {
         let backup_time_string = backup_time_string.into();
         let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        let group = BackupGroup::new(backup_type, backup_id.into());
         Ok(Self {
             group,
             backup_time,
@@ -283,6 +294,22 @@ impl BackupDir {
     }
 }
 
+impl From<&BackupDir> for pbs_api_types::BackupDir {
+    fn from(dir: &BackupDir) -> pbs_api_types::BackupDir {
+        (
+            pbs_api_types::BackupGroup::from(dir.group.clone()),
+            dir.backup_time,
+        )
+            .into()
+    }
+}
+
+impl From<BackupDir> for pbs_api_types::BackupDir {
+    fn from(dir: BackupDir) -> pbs_api_types::BackupDir {
+        (pbs_api_types::BackupGroup::from(dir.group), dir.backup_time).into()
+    }
+}
+
 impl std::str::FromStr for BackupDir {
     type Err = Error;
 
@@ -295,7 +322,7 @@ impl std::str::FromStr for BackupDir {
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
 
         BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str(),
+            cap.get(1).unwrap().as_str().parse()?,
             cap.get(2).unwrap().as_str(),
             cap.get(3).unwrap().as_str(),
         )
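
The new From impls above bridge the datastore-side BackupGroup/BackupDir to
the API types; a short sketch (values assumed):

    use pbs_api_types::BackupType;
    use pbs_datastore::backup_info::BackupGroup;

    fn convert() {
        let group = BackupGroup::new(BackupType::Vm, "100");
        // Borrowing conversion via From<&BackupGroup>:
        let api_group: pbs_api_types::BackupGroup = (&group).into();
        assert_eq!(api_group.to_string(), "vm/100");
    }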
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 7f935ad8003f23a9e3a76808b34898b822f0afd8..64f873696f207669efc4cce1d2975a0227ddaaef 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -18,8 +18,8 @@ use proxmox_sys::WorkerTaskContext;
 use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{
-    Authid, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus, HumanByte,
-    Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, BACKUP_TYPE_REGEX, UPID,
+    Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
+    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
 
@@ -494,7 +494,7 @@ impl DataStore {
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
-        full_path.push(backup_group.backup_type());
+        full_path.push(backup_group.backup_type().as_str());
         std::fs::create_dir_all(&full_path)?;
 
         full_path.push(backup_group.backup_id());
@@ -1113,7 +1113,7 @@ impl Iterator for ListSnapshots {
 /// An iterator for a (single) level of Backup Groups
 pub struct ListGroups {
     type_fd: proxmox_sys::fs::ReadDir,
-    id_state: Option<(String, proxmox_sys::fs::ReadDir)>,
+    id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
 }
 
 impl ListGroups {
@@ -1130,7 +1130,7 @@ impl Iterator for ListGroups {
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
-            if let Some((ref group_type, ref mut id_fd)) = self.id_state {
+            if let Some((group_type, ref mut id_fd)) = self.id_state {
                 let item = match id_fd.next() {
                     Some(item) => item,
                     None => {
@@ -1162,7 +1162,7 @@ impl Iterator for ListGroups {
                                 Some(nix::dir::Type::Directory) => {} // OK
                                 _ => continue,
                             }
-                            if BACKUP_TYPE_REGEX.is_match(name) {
+                            if let Ok(group_type) = BackupType::from_str(name) {
                                 // found a backup group type, descend into it to scan all IDs in it
                                 // by switching to the id-state branch
                                 let base_fd = entry.parent_fd();
@@ -1170,7 +1170,7 @@ impl Iterator for ListGroups {
                                     Ok(dirfd) => dirfd,
                                     Err(err) => return Some(Err(err.into())),
                                 };
-                                self.id_state = Some((name.to_owned(), id_dirfd));
+                                self.id_state = Some((group_type, id_dirfd));
                             }
                         }
                         continue; // file did not match regex or isn't valid utf-8
diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
index e05925e4bab618a1eb81b9b437e38fe3a9998adb..c2d06485fca034cc48beabe8dd4864a3beb33e9d 100644
--- a/pbs-datastore/src/manifest.rs
+++ b/pbs-datastore/src/manifest.rs
@@ -6,7 +6,7 @@ use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 
-use pbs_api_types::{CryptMode, Fingerprint};
+use pbs_api_types::{BackupType, CryptMode, Fingerprint};
 use pbs_tools::crypt_config::CryptConfig;
 
 use crate::BackupDir;
@@ -50,7 +50,7 @@ impl FileInfo {
 #[derive(Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 pub struct BackupManifest {
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     files: Vec<FileInfo>,
@@ -87,7 +87,7 @@ pub fn archive_type<P: AsRef<Path>>(archive_name: P) -> Result<ArchiveType, Erro
 impl BackupManifest {
     pub fn new(snapshot: BackupDir) -> Self {
         Self {
-            backup_type: snapshot.group().backup_type().into(),
+            backup_type: snapshot.group().backup_type(),
             backup_id: snapshot.group().backup_id().into(),
             backup_time: snapshot.backup_time(),
             files: Vec::new(),
diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
index cd59893f131a719381cd6029aef06a07dccd763c..bc853b5462fc1c9f38338edcd8620c6df6bd191a 100644
--- a/proxmox-backup-client/src/benchmark.rs
+++ b/proxmox-backup-client/src/benchmark.rs
@@ -14,6 +14,7 @@ use proxmox_router::{
 };
 use proxmox_schema::{api, ApiType, ReturnType};
 
+use pbs_api_types::BackupType;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupRepository, BackupWriter};
 use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
@@ -241,7 +242,7 @@ async fn test_upload_speed(
         client,
         crypt_config.clone(),
         repo.store(),
-        "host",
+        BackupType::Host,
         "benchmark",
         backup_time,
         false,
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index c4c43f5e877be763606ff2c2ea53638ccf33633e..ba16877e1bcdd8f8e092e28d905f1b94434bd9ee 100644
--- a/proxmox-backup-client/src/catalog.rs
+++ b/proxmox-backup-client/src/catalog.rs
@@ -190,7 +190,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 7c022fadf2c6f180d0419687f00e59e6678d9de4..e4a0969aa404182bf1f8eac9fe9bb23885632082 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -22,9 +22,10 @@ use proxmox_time::{epoch_i64, strftime_local};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    Authid, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions,
-    RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
+    Authid, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem,
+    PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
+    TRAFFIC_CONTROL_RATE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::tools::{
@@ -135,7 +136,7 @@ async fn api_datastore_list_snapshots(
 
     let mut args = json!({});
     if let Some(group) = group {
-        args["backup-type"] = group.backup_type().into();
+        args["backup-type"] = group.backup_type().to_string().into();
         args["backup-id"] = group.backup_id().into();
     }
 
@@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
-) -> Result<(String, String, i64), Error> {
+) -> Result<(BackupType, String, i64), Error> {
     let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
 
@@ -159,9 +160,9 @@ pub async fn api_datastore_latest_snapshot(
         );
     }
 
-    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
+    list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
 
-    let backup_time = list[0].backup_time;
+    let backup_time = list[0].backup.time;
 
     Ok((
         group.backup_type().to_owned(),
@@ -261,13 +262,13 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
 
     let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let group = BackupGroup::new(item.backup_type, item.backup_id);
+        let group = BackupGroup::new(item.backup.ty, item.backup.id);
         Ok(group.group_path().to_str().unwrap().to_owned())
     };
 
     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
+        let snapshot = BackupDir::new(item.backup.ty, item.backup.id, item.last_backup)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
@@ -329,7 +330,7 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
 
     let group: BackupGroup = group.parse()?;
 
-    param["backup-type"] = group.backup_type().into();
+    param["backup-type"] = group.backup_type().to_string().into();
     param["backup-id"] = group.backup_id().into();
 
     let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
@@ -659,7 +660,7 @@ async fn create_backup(
         .as_str()
         .unwrap_or(proxmox_sys::nodename());
 
-    let backup_type = param["backup-type"].as_str().unwrap_or("host");
+    let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
 
     let include_dev = param["include-dev"].as_array();
 
@@ -1221,7 +1222,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
@@ -1414,7 +1415,7 @@ async fn prune(
     if let Some(dry_run) = dry_run {
         api_param["dry-run"] = dry_run.into();
     }
-    api_param["backup-type"] = group.backup_type().into();
+    api_param["backup-type"] = group.backup_type().to_string().into();
     api_param["backup-id"] = group.backup_id().into();
 
     let mut result = client.post(&path, Some(api_param)).await?;
@@ -1423,7 +1424,7 @@ async fn prune(
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: PruneListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index c54dcbe8ef602b2b760d63c815a6f8f70d885bc6..237441794453052c3e7cdf3a517372742192b40e 100644
--- a/proxmox-backup-client/src/mount.rs
+++ b/proxmox-backup-client/src/mount.rs
@@ -240,7 +240,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
diff --git a/proxmox-backup-client/src/snapshot.rs b/proxmox-backup-client/src/snapshot.rs
index 868e54e2c1f96b20abc35beb71ce40de91dfd790..f327ba3ab357bd1ffbf8f65c142041ed97849632 100644
--- a/proxmox-backup-client/src/snapshot.rs
+++ b/proxmox-backup-client/src/snapshot.rs
@@ -59,7 +59,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 5fe75324ac2c656ebd6dad9450a7fd6535cf8779..0a1583be2d2d3bf812bd9100beed5c17860083e9 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -30,7 +30,7 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
 use pbs_api_types::{
-    Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
+    Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
     GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
     SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
     BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
@@ -207,8 +207,7 @@ pub fn list_groups(
             let comment = file_read_firstline(&note_path).ok();
 
             group_info.push(GroupListItem {
-                backup_type: group.backup_type().to_string(),
-                backup_id: group.backup_id().to_string(),
+                backup: group.into(),
                 last_backup: last_backup.backup_dir.backup_time(),
                 owner: Some(owner),
                 backup_count,
@@ -223,15 +222,9 @@ pub fn list_groups(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
         },
     },
     access: {
@@ -244,7 +237,7 @@ pub fn list_groups(
 /// Delete backup group including all snapshots.
 pub fn delete_group(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -266,18 +259,10 @@ pub fn delete_group(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
@@ -291,7 +276,7 @@ pub fn delete_group(
 /// List snapshot files.
 pub fn list_snapshot_files(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     _info: &ApiMethod,
@@ -319,18 +304,10 @@ pub fn list_snapshot_files(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -343,7 +320,7 @@ pub fn list_snapshot_files(
 /// Delete backup snapshot.
 pub fn delete_snapshot(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     _info: &ApiMethod,
@@ -370,12 +347,10 @@ pub fn delete_snapshot(
     streaming: true,
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
             "backup-type": {
                 optional: true,
-                schema: BACKUP_TYPE_SCHEMA,
+                type: BackupType,
             },
             "backup-id": {
                 optional: true,
@@ -394,7 +369,7 @@ pub fn delete_snapshot(
 /// List backup snapshots.
 pub fn list_snapshots(
     store: String,
-    backup_type: Option<String>,
+    backup_type: Option<BackupType>,
     backup_id: Option<String>,
     _param: Value,
     _info: &ApiMethod,
@@ -424,9 +399,10 @@ pub fn list_snapshots(
     };
 
     let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
-        let backup_type = group.backup_type().to_string();
-        let backup_id = group.backup_id().to_string();
-        let backup_time = info.backup_dir.backup_time();
+        let backup = pbs_api_types::BackupDir {
+            group: group.into(),
+            time: info.backup_dir.backup_time(),
+        };
         let protected = info.backup_dir.is_protected(datastore.base_path());
 
         match get_all_snapshot_files(&datastore, &info) {
@@ -458,9 +434,7 @@ pub fn list_snapshots(
                 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
 
                 SnapshotListItem {
-                    backup_type,
-                    backup_id,
-                    backup_time,
+                    backup,
                     comment,
                     verification,
                     fingerprint,
@@ -483,9 +457,7 @@ pub fn list_snapshots(
                     .collect();
 
                 SnapshotListItem {
-                    backup_type,
-                    backup_id,
-                    backup_time,
+                    backup,
                     comment: None,
                     verification: None,
                     fingerprint: None,
@@ -550,10 +522,9 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
             // only include groups with snapshots, counting/displaying empty groups can confuse
             if snapshot_count > 0 {
                 let type_count = match group.backup_type() {
-                    "ct" => counts.ct.get_or_insert(Default::default()),
-                    "vm" => counts.vm.get_or_insert(Default::default()),
-                    "host" => counts.host.get_or_insert(Default::default()),
-                    _ => counts.other.get_or_insert(Default::default()),
+                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
+                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
+                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                 };
 
                 type_count.groups += 1;
@@ -630,7 +601,7 @@ pub fn status(
                 schema: DATASTORE_SCHEMA,
             },
             "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
+                type: BackupType,
                 optional: true,
             },
             "backup-id": {
@@ -664,7 +635,7 @@ pub fn status(
 /// or all backups in the datastore.
 pub fn verify(
     store: String,
-    backup_type: Option<String>,
+    backup_type: Option<BackupType>,
     backup_id: Option<String>,
     backup_time: Option<i64>,
     ignore_verified: Option<bool>,
@@ -771,12 +742,8 @@ pub fn verify(
 #[api(
     input: {
         properties: {
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-type": { type: BackupType },
             "dry-run": {
                 optional: true,
                 type: bool,
@@ -800,7 +767,7 @@ pub fn verify(
 /// Prune a group on the datastore
 pub fn prune(
     backup_id: String,
-    backup_type: String,
+    backup_type: BackupType,
     dry_run: bool,
     prune_options: PruneOptions,
     store: String,
@@ -809,13 +776,13 @@ pub fn prune(
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let group = BackupGroup::new(&backup_type, &backup_id);
+    let group = BackupGroup::new(backup_type, &backup_id);
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
 
-    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);
+    let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
 
     let mut prune_result = Vec::new();
 
@@ -1111,7 +1078,7 @@ pub fn download_file(
 
         let file_name = required_string_param(&param, "file-name")?.to_owned();
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
@@ -1194,7 +1161,7 @@ pub fn download_file_decoded(
 
         let file_name = required_string_param(&param, "file-name")?.to_owned();
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
@@ -1320,7 +1287,7 @@ pub fn upload_backup_log(
 
         let file_name = CLIENT_LOG_BLOB_NAME;
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
@@ -1369,18 +1336,10 @@ pub fn upload_backup_log(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             "filepath": {
                 description: "Base64 encoded path.",
                 type: String,
@@ -1394,7 +1353,7 @@ pub fn upload_backup_log(
 /// Get the entries of the given path of the catalog
 pub fn catalog(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     filepath: String,
@@ -1481,7 +1440,7 @@ pub fn pxar_file_download(
 
         let filepath = required_string_param(&param, "filepath")?.to_owned();
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
@@ -1659,15 +1618,9 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
         },
     },
     access: {
@@ -1677,7 +1630,7 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 /// Get "notes" for a backup group
 pub fn get_group_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
@@ -1695,15 +1648,9 @@ pub fn get_group_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
             notes: {
                 description: "A multiline text.",
             },
@@ -1718,7 +1665,7 @@ pub fn get_group_notes(
 /// Set "notes" for a backup group
 pub fn set_group_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1739,18 +1686,10 @@ pub fn set_group_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1760,7 +1699,7 @@ pub fn set_group_notes(
 /// Get "notes" for a specific backup
 pub fn get_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1787,18 +1726,10 @@ pub fn get_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             notes: {
                 description: "A multiline text.",
             },
@@ -1813,7 +1744,7 @@ pub fn get_notes(
 /// Set "notes" for a specific backup
 pub fn set_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     notes: String,
@@ -1843,18 +1774,10 @@ pub fn set_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1864,7 +1787,7 @@ pub fn set_notes(
 /// Query protection for a specific backup
 pub fn get_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1887,18 +1810,10 @@ pub fn get_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             protected: {
                 description: "Enable/disable protection.",
             },
@@ -1913,7 +1828,7 @@ pub fn get_protection(
 /// En- or disable protection for a specific backup
 pub fn set_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     protected: bool,
@@ -1937,15 +1852,9 @@ pub fn set_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
             "new-owner": {
                 type: Authid,
             },
@@ -1959,7 +1868,7 @@ pub fn set_protection(
 /// Change owner of a backup group
 pub fn set_backup_owner(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index 718d0386766c197640d02db05f0d33b60cd95bd1..febe79d1d32f7e46dc5e67937ea1a7d2a18ffd7a 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -16,7 +16,7 @@ use proxmox_schema::*;
 use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+    Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
     BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
     DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
 };
@@ -82,7 +82,7 @@ fn upgrade_to_backup_protocol(
 
         let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
@@ -109,7 +109,7 @@ fn upgrade_to_backup_protocol(
 
         let backup_group = BackupGroup::new(backup_type, backup_id);
 
-        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+        let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
             if !benchmark {
                 bail!("unable to run benchmark without --benchmark flags");
             }
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index 20d629b5f81ab545e2be87578b783276d002e247..22e0ae7e6940998ac856faa21e49bb3c8ff1ab17 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -16,9 +16,9 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ,
+    Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupDir;
@@ -90,7 +90,7 @@ fn upgrade_to_backup_reader_protocol(
 
         let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
-        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
         let backup_id = required_string_param(&param, "backup-id")?;
         let backup_time = required_integer_param(&param, "backup-time")?;
 
diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs
index ea27dd61cc40a6a5121583129e747200f90464e2..4fe3a365b83dc779df16d7dfbceaad9d4efdec76 100644
--- a/src/api2/tape/media.rs
+++ b/src/api2/tape/media.rs
@@ -441,7 +441,7 @@ pub fn list_content(
         for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
             let backup_dir: BackupDir = snapshot.parse()?;
 
-            if let Some(ref backup_type) = filter.backup_type {
+            if let Some(backup_type) = filter.backup_type {
                 if backup_dir.group().backup_type() != backup_type {
                     continue;
                 }
index e9067536510f6dc74b7f5d4f19da27a5bcf72bbf..fc4c0cf404fcd03a62c3428a54b8ef1893e9dd5c 100644 (file)
@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error};
 
 use proxmox_sys::{task_log, WorkerTaskContext};
 
-use pbs_api_types::{Authid, CryptMode, SnapshotVerifyState, VerifyState, UPID};
+use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
@@ -539,7 +539,9 @@ pub fn verify_all_backups(
 
     let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
         Ok(list) => list
-            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .filter(|group| {
+                !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+            })
             .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index ad700feeb6ef5bf41606b27010fe679e073a8aef..abb7954a56802454ebfec73e544fc71b2cb77b13 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -523,7 +523,7 @@ pub fn complete_remote_datastore_group(_arg: &str, param: &HashMap<String, Strin
                 .await
         }) {
             for item in data {
-                list.push(format!("{}/{}", item.backup_type, item.backup_id));
+                list.push(format!("{}/{}", item.backup.ty, item.backup.id));
             }
         }
     }
diff --git a/src/bin/proxmox-tape.rs b/src/bin/proxmox-tape.rs
index bf9791f1e26dd074ade47b2cbf8edd098e64d167..6fa7c0f2de3a3337ae94823d441045f8298b4dde 100644
--- a/src/bin/proxmox-tape.rs
+++ b/src/bin/proxmox-tape.rs
@@ -66,7 +66,7 @@ pub fn complete_datastore_group_filter(_arg: &str, param: &HashMap<String, Strin
             list.extend(
                 groups
                     .iter()
-                    .map(|group| format!("group:{}/{}", group.backup_type, group.backup_id)),
+                    .map(|group| format!("group:{}/{}", group.backup.ty, group.backup.id)),
             );
         }
     }
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 34755016a48e4531dc760b46e7efca3d577e24aa..5503bfebf48135c6d574a8da259f1a6ca3c40332 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -577,7 +577,7 @@ pub async fn pull_group(
     let mut result = client.get(&path, Some(args)).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
 
-    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
+    list.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time));
 
     client.login().await?; // make sure auth is complete
 
@@ -599,7 +599,7 @@ pub async fn pull_group(
     };
 
     for (pos, item) in list.into_iter().enumerate() {
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
 
         // in-progress backups can't be synced
         if item.size.is_none() {
@@ -712,9 +712,9 @@ pub async fn pull_store(
 
     let total_count = list.len();
     list.sort_unstable_by(|a, b| {
-        let type_order = a.backup_type.cmp(&b.backup_type);
+        let type_order = a.backup.ty.cmp(&b.backup.ty);
         if type_order == std::cmp::Ordering::Equal {
-            a.backup_id.cmp(&b.backup_id)
+            a.backup.id.cmp(&b.backup.id)
         } else {
             type_order
         }
@@ -726,7 +726,7 @@ pub async fn pull_store(
 
     let list: Vec<BackupGroup> = list
         .into_iter()
-        .map(|item| BackupGroup::new(item.backup_type, item.backup_id))
+        .map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
         .collect();
 
     let list = if let Some(ref group_filter) = &params.group_filter {