api-types: add namespace to BackupGroup
author      Wolfgang Bumiller <w.bumiller@proxmox.com>
            Thu, 21 Apr 2022 13:04:59 +0000 (15:04 +0200)
committer   Thomas Lamprecht <t.lamprecht@proxmox.com>
            Thu, 12 May 2022 07:33:50 +0000 (09:33 +0200)
Make this easier by adding a helper that accepts either a group or a
directory.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
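
As a quick illustration of the helper this message refers to, a minimal
sketch of parsing a path that may name either form (path value made up;
`BackupPart` is added in pbs-api-types/src/datastore.rs below):

    use pbs_api_types::BackupPart;

    fn describe(path: &str) -> Result<String, anyhow::Error> {
        // Accepts "vm/100" as well as "vm/100/2022-04-21T13:04:59Z",
        // optionally prefixed with "ns/<name>/" components.
        Ok(match path.parse::<BackupPart>()? {
            BackupPart::Group(group) => format!("group {group}"),
            BackupPart::Dir(dir) => format!("snapshot {dir}"),
        })
    }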
22 files changed:
examples/download-speed.rs
examples/upload-speed.rs
pbs-api-types/src/datastore.rs
pbs-api-types/src/lib.rs
pbs-client/src/backup_reader.rs
pbs-client/src/backup_writer.rs
pbs-client/src/tools/mod.rs
pbs-datastore/examples/ls-snapshots.rs
pbs-datastore/src/backup_info.rs
pbs-datastore/src/datastore.rs
proxmox-backup-client/src/benchmark.rs
proxmox-backup-client/src/catalog.rs
proxmox-backup-client/src/main.rs
proxmox-backup-client/src/mount.rs
proxmox-file-restore/src/main.rs
src/api2/admin/datastore.rs
src/api2/backup/mod.rs
src/api2/reader/mod.rs
src/api2/tape/backup.rs
src/backup/verify.rs
src/server/prune_job.rs
src/server/pull.rs

diff --git a/examples/download-speed.rs b/examples/download-speed.rs
index dbd778af29db62e3b54e6e93f7b3cf92f70ad4dd..d17d5d45f684f7b97e040c9c9c042cc2e849c2cd 100644
@@ -2,7 +2,7 @@ use std::io::Write;
 
 use anyhow::Error;
 
-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
 use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
 
 pub struct DummyWriter {
@@ -37,9 +37,13 @@ async fn run() -> Result<(), Error> {
         client,
         None,
         "store2",
-        BackupType::Host,
-        "elsa",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "elsa".to_string(),
+            backup_time,
+        )
+            .into(),
         true,
     )
     .await?;
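
The `&(...).into()` argument above builds the new API type via the 4-tuple
From impl added in this commit; as a standalone sketch (timestamp made up):

    use pbs_api_types::{BackupDir, BackupNamespace, BackupType};

    fn main() {
        let dir: BackupDir = (
            BackupNamespace::root(),
            BackupType::Host,
            "elsa".to_string(),
            1_650_546_299, // hypothetical epoch timestamp
        )
            .into();
        assert!(matches!(dir.ty(), BackupType::Host));
        assert_eq!(dir.id(), "elsa");
    }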
diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs
index bfd01799487191dd72e9129a1db287ad036ef56b..26385816806be40e45a70409abc4a8a7e2bd9ded 100644
@@ -1,6 +1,6 @@
 use anyhow::Error;
 
-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
 use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
 
 async fn upload_speed() -> Result<f64, Error> {
@@ -21,9 +21,13 @@ async fn upload_speed() -> Result<f64, Error> {
         client,
         None,
         datastore,
-        BackupType::Host,
-        "speedtest",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "speedtest".to_string(),
+            backup_time,
+        )
+            .into(),
         false,
         true,
     )
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 8c7ebad026ca566804d7662f12def648e0a6dd73..b2ef001b865b560b053e4ec39a35df69de0b91a4 100644
@@ -1,5 +1,5 @@
 use std::fmt;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 
 use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
@@ -16,19 +16,24 @@ use crate::{
 };
 
 const_regex! {
+    pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+
     pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
 
     pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
 
     pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
 
-    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+    pub GROUP_PATH_REGEX = concat!(
+        r"^(", BACKUP_NS_PATH_RE!(), r")?",
+        r"(", BACKUP_TYPE_RE!(), ")/",
+        r"(", BACKUP_ID_RE!(), r")$",
+    );
 
     pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
 
     pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
-
-    pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+    pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");
 
     pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
 }
@@ -640,7 +645,7 @@ impl BackupNamespace {
 
     /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every
     /// component.
-    fn display_as_path(&self) -> BackupNamespacePath {
+    pub fn display_as_path(&self) -> BackupNamespacePath {
         BackupNamespacePath(self)
     }
 
@@ -775,6 +780,7 @@ impl std::cmp::PartialOrd for BackupType {
 
 #[api(
     properties: {
+        "backup-ns": { type: BackupNamespace },
         "backup-type": { type: BackupType },
         "backup-id": { schema: BACKUP_ID_SCHEMA },
     },
@@ -783,6 +789,14 @@ impl std::cmp::PartialOrd for BackupType {
 #[serde(rename_all = "kebab-case")]
 /// A backup group (without a data store).
 pub struct BackupGroup {
+    /// An optional namespace this backup belongs to.
+    #[serde(
+        rename = "backup-ns",
+        skip_serializing_if = "BackupNamespace::is_root",
+        default
+    )]
+    pub ns: BackupNamespace,
+
     /// Backup type.
     #[serde(rename = "backup-type")]
     pub ty: BackupType,
@@ -793,8 +807,12 @@ pub struct BackupGroup {
 }
 
 impl BackupGroup {
-    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
-        Self { ty, id: id.into() }
+    pub fn new<T: Into<String>>(ns: BackupNamespace, ty: BackupType, id: T) -> Self {
+        Self {
+            ns,
+            ty,
+            id: id.into(),
+        }
     }
 
     pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
@@ -820,21 +838,29 @@ impl AsRef<BackupGroup> for BackupGroup {
     }
 }
 
-impl From<(BackupType, String)> for BackupGroup {
-    fn from(data: (BackupType, String)) -> Self {
+impl From<(BackupNamespace, BackupType, String)> for BackupGroup {
+    #[inline]
+    fn from(data: (BackupNamespace, BackupType, String)) -> Self {
         Self {
-            ty: data.0,
-            id: data.1,
+            ns: data.0,
+            ty: data.1,
+            id: data.2,
         }
     }
 }
 
 impl std::cmp::Ord for BackupGroup {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let ns_order = self.ns.cmp(&other.ns);
+        if ns_order != std::cmp::Ordering::Equal {
+            return ns_order;
+        }
+
         let type_order = self.ty.cmp(&other.ty);
         if type_order != std::cmp::Ordering::Equal {
             return type_order;
         }
+
         // try to compare IDs numerically
         let id_self = self.id.parse::<u64>();
         let id_other = other.id.parse::<u64>();
@@ -855,7 +881,11 @@ impl std::cmp::PartialOrd for BackupGroup {
 
 impl fmt::Display for BackupGroup {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}/{}", self.ty, self.id)
+        if self.ns.is_root() {
+            write!(f, "{}/{}", self.ty, self.id)
+        } else {
+            write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id)
+        }
     }
 }
 
@@ -871,8 +901,9 @@ impl std::str::FromStr for BackupGroup {
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
 
         Ok(Self {
-            ty: cap.get(1).unwrap().as_str().parse()?,
-            id: cap.get(2).unwrap().as_str().to_owned(),
+            ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?,
+            ty: cap.get(2).unwrap().as_str().parse()?,
+            id: cap.get(3).unwrap().as_str().to_owned(),
         })
     }
 }
@@ -921,32 +952,44 @@ impl From<(BackupGroup, i64)> for BackupDir {
     }
 }
 
-impl From<(BackupType, String, i64)> for BackupDir {
-    fn from(data: (BackupType, String, i64)) -> Self {
+impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir {
+    fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self {
         Self {
-            group: (data.0, data.1).into(),
-            time: data.2,
+            group: (data.0, data.1, data.2).into(),
+            time: data.3,
         }
     }
 }
 
 impl BackupDir {
-    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+    pub fn with_rfc3339<T>(
+        ns: BackupNamespace,
+        ty: BackupType,
+        id: T,
+        backup_time_string: &str,
+    ) -> Result<Self, Error>
     where
         T: Into<String>,
     {
         let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(ty, id.into());
+        let group = BackupGroup::new(ns, ty, id.into());
         Ok(Self { group, time })
     }
 
+    #[inline]
     pub fn ty(&self) -> BackupType {
         self.group.ty
     }
 
+    #[inline]
     pub fn id(&self) -> &str {
         &self.group.id
     }
+
+    #[inline]
+    pub fn ns(&self) -> &BackupNamespace {
+        &self.group.ns
+    }
 }
 
 impl std::str::FromStr for BackupDir {
@@ -960,22 +1003,56 @@ impl std::str::FromStr for BackupDir {
             .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
 
+        let ns = match cap.get(1) {
+            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+            None => BackupNamespace::root(),
+        };
         BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str().parse()?,
-            cap.get(2).unwrap().as_str(),
+            ns,
+            cap.get(2).unwrap().as_str().parse()?,
             cap.get(3).unwrap().as_str(),
+            cap.get(4).unwrap().as_str(),
         )
     }
 }
 
-impl std::fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // FIXME: log error?
         let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
         write!(f, "{}/{}", self.group, time)
     }
 }
 
+/// Used when either a backup group or a backup directory is a valid input.
+pub enum BackupPart {
+    Group(BackupGroup),
+    Dir(BackupDir),
+}
+
+impl std::str::FromStr for BackupPart {
+    type Err = Error;
+
+    /// Parse a path which can be either a backup group or a snapshot dir.
+    fn from_str(path: &str) -> Result<Self, Error> {
+        let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group or snapshot path '{}'", path))?;
+
+        let ns = match cap.get(1) {
+            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+            None => BackupNamespace::root(),
+        };
+        let ty = cap.get(2).unwrap().as_str().parse()?;
+        let id = cap.get(3).unwrap().as_str().to_string();
+
+        Ok(match cap.get(4) {
+            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?),
+            None => BackupPart::Group((ns, ty, id).into()),
+        })
+    }
+}
+
 #[api(
     properties: {
         "backup": { type: BackupDir },
diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs
index 459a01f5a2d6f3a937b57e4341eb2e079e722544..4f40a27f28cddd53783b2c150f0123cd46cd35fa 100644
@@ -34,14 +34,32 @@ macro_rules! BACKUP_NS_RE {
     );
 }
 
+#[rustfmt::skip]
+#[macro_export]
+macro_rules! BACKUP_NS_PATH_RE {
+    () => (
+        concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
+    );
+}
+
 #[rustfmt::skip]
 #[macro_export]
 macro_rules! SNAPSHOT_PATH_REGEX_STR {
     () => (
-        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+        concat!(
+            r"(", BACKUP_NS_PATH_RE!(), ")?",
+            r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")(?:/(", BACKUP_TIME_RE!(), r"))",
+        )
     );
 }
 
+#[macro_export]
+macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
+    () => {
+        concat!(SNAPSHOT_PATH_REGEX_STR!(), "?")
+    };
+}
+
 mod acl;
 pub use acl::*;
 
diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs
index 99195492f27ca44c611f1c0089e9f0593dfab498..fb3df2a9c6db6a7e1322b0fa43bd8faf79d4d962 100644
@@ -7,7 +7,7 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
-use pbs_api_types::BackupType;
+use pbs_api_types::BackupDir;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
@@ -47,15 +47,14 @@ impl BackupReader {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: BackupType,
-        backup_id: &str,
-        backup_time: i64,
+        backup: &BackupDir,
         debug: bool,
     ) -> Result<Arc<BackupReader>, Error> {
         let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
+            "backup-ns": backup.ns(),
+            "backup-type": backup.ty(),
+            "backup-id": backup.id(),
+            "backup-time": backup.time,
             "store": datastore,
             "debug": debug,
         });
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 17f7bdad2cf0bc342f978b0a27abf861fede080d..60b21a80eb33bb0251361859a21a3ea549a6eb44 100644
@@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
 
-use pbs_api_types::{BackupType, HumanByte};
+use pbs_api_types::{BackupDir, HumanByte};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
@@ -86,16 +86,15 @@ impl BackupWriter {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: BackupType,
-        backup_id: &str,
-        backup_time: i64,
+        backup: &BackupDir,
         debug: bool,
         benchmark: bool,
     ) -> Result<Arc<BackupWriter>, Error> {
         let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
+            "backup-ns": backup.ns(),
+            "backup-type": backup.ty(),
+            "backup-id": backup.id(),
+            "backup-time": backup.time,
             "store": datastore,
             "debug": debug,
             "benchmark": benchmark
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
index 60239ee411946917f5248f82fa2e5e3ba120298a..afe74849826ac2258aded15e69f3793c596a14d3 100644
@@ -293,6 +293,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
     };
 
     let query = json_object_to_query(json!({
+        "backup-ns": snapshot.group.ns,
         "backup-type": snapshot.group.ty,
         "backup-id": snapshot.group.id,
         "backup-time": snapshot.time,
diff --git a/pbs-datastore/examples/ls-snapshots.rs b/pbs-datastore/examples/ls-snapshots.rs
index d87d4484dc2f1485868e669b4e0d630a706c098d..7b4445b294db2e55e935c71fec14f96f278e9fa1 100644
@@ -12,7 +12,7 @@ fn run() -> Result<(), Error> {
 
     let store = unsafe { DataStore::open_path("", &base, None)? };
 
-    for group in store.iter_backup_groups()? {
+    for group in store.iter_backup_groups(Default::default())? {
         let group = group?;
         println!("found group {}", group);
 
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 30275b222fbc54b1d54c85d903da6f7d695f36b2..94ff1717755e50ada1a6aee952e8c71d2f418964 100644
@@ -217,11 +217,10 @@ impl From<BackupGroup> for pbs_api_types::BackupGroup {
     }
 }
 
-impl std::fmt::Display for BackupGroup {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let backup_type = self.backup_type();
-        let id = self.backup_id();
-        write!(f, "{}/{}", backup_type, id)
+impl fmt::Display for BackupGroup {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.group, f)
     }
 }
 
@@ -446,8 +445,8 @@ impl From<BackupDir> for pbs_api_types::BackupDir {
     }
 }
 
-impl std::fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{}/{}", self.dir.group, self.backup_time_string)
     }
 }
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 6df23c52952fefbecefa79d5d1661afe754b75e1..8a9f16b82433b6a5de03ed385c964e9cd3b8b095 100644
@@ -17,8 +17,8 @@ use proxmox_sys::WorkerTaskContext;
 use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{
-    Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
-    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
+    Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
+    GarbageCollectionStatus, HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
 };
 use pbs_config::ConfigVersionCache;
 
@@ -348,6 +348,16 @@ impl DataStore {
         self.inner.chunk_store.base_path()
     }
 
+    pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
+        let mut path = self.base_path();
+        path.reserve(ns.path_len());
+        for part in ns.components() {
+            path.push("ns");
+            path.push(part);
+        }
+        path
+    }
+
     /// Cleanup a backup directory
     ///
     /// Removes all files not mentioned in the manifest.
@@ -517,6 +527,10 @@ impl DataStore {
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
+        for ns in backup_group.ns.components() {
+            full_path.push("ns");
+            full_path.push(ns);
+        }
         full_path.push(backup_group.ty.as_str());
         std::fs::create_dir_all(&full_path)?;
 
@@ -579,8 +593,11 @@ impl DataStore {
     ///
 /// The iterated item is still a Result that can contain errors from rather unexpected FS or
     /// parsing errors.
-    pub fn iter_backup_groups(self: &Arc<DataStore>) -> Result<ListGroups, Error> {
-        ListGroups::new(Arc::clone(self))
+    pub fn iter_backup_groups(
+        self: &Arc<DataStore>,
+        ns: BackupNamespace,
+    ) -> Result<ListGroups, Error> {
+        ListGroups::new(Arc::clone(self), ns)
     }
 
 /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
@@ -589,10 +606,11 @@ impl DataStore {
     /// logged. Can be useful in iterator chain commands
     pub fn iter_backup_groups_ok(
         self: &Arc<DataStore>,
+        ns: BackupNamespace,
     ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
         let this = Arc::clone(self);
         Ok(
-            ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group {
+            ListGroups::new(Arc::clone(&self), ns)?.filter_map(move |group| match group {
                 Ok(group) => Some(group),
                 Err(err) => {
                     log::error!("list groups error on datastore {} - {}", this.name(), err);
@@ -605,8 +623,11 @@ impl DataStore {
 /// Get an in-memory vector of all top-level backup groups of a datastore
     ///
     /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
-    pub fn list_backup_groups(self: &Arc<DataStore>) -> Result<Vec<BackupGroup>, Error> {
-        ListGroups::new(Arc::clone(self))?.collect()
+    pub fn list_backup_groups(
+        self: &Arc<DataStore>,
+        ns: BackupNamespace,
+    ) -> Result<Vec<BackupGroup>, Error> {
+        ListGroups::new(Arc::clone(self), ns)?.collect()
     }
 
     pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
@@ -1047,11 +1068,16 @@ impl DataStore {
     }
 
     /// Open a backup group from this datastore.
-    pub fn backup_group_from_parts<T>(self: &Arc<Self>, ty: BackupType, id: T) -> BackupGroup
+    pub fn backup_group_from_parts<T>(
+        self: &Arc<Self>,
+        ns: BackupNamespace,
+        ty: BackupType,
+        id: T,
+    ) -> BackupGroup
     where
         T: Into<String>,
     {
-        self.backup_group((ty, id.into()).into())
+        self.backup_group((ns, ty, id.into()).into())
     }
 
     /// Open a backup group from this datastore by backup group path such as `vm/100`.
@@ -1069,6 +1095,7 @@ impl DataStore {
     /// Open a snapshot (backup directory) from this datastore.
     pub fn backup_dir_from_parts<T>(
         self: &Arc<Self>,
+        ns: BackupNamespace,
         ty: BackupType,
         id: T,
         time: i64,
@@ -1076,7 +1103,7 @@ impl DataStore {
     where
         T: Into<String>,
     {
-        self.backup_dir((ty, id.into(), time).into())
+        self.backup_dir((ns, ty, id.into(), time).into())
     }
 
     /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
@@ -1143,15 +1170,19 @@ impl Iterator for ListSnapshots {
 /// An iterator for a (single) level of Backup Groups
 pub struct ListGroups {
     store: Arc<DataStore>,
+    ns: BackupNamespace,
     type_fd: proxmox_sys::fs::ReadDir,
     id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
 }
 
 impl ListGroups {
-    pub fn new(store: Arc<DataStore>) -> Result<Self, Error> {
+    pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+        let mut base_path = store.base_path().to_owned();
+        base_path.push(ns.path());
         Ok(ListGroups {
-            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?,
+            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
             store,
+            ns,
             id_state: None,
         })
     }
@@ -1183,7 +1214,7 @@ impl Iterator for ListGroups {
                     if BACKUP_ID_REGEX.is_match(name) {
                         return Some(Ok(BackupGroup::new(
                             Arc::clone(&self.store),
-                            (group_type, name.to_owned()).into(),
+                            (self.ns.clone(), group_type, name.to_owned()).into(),
                         )));
                     }
                 }
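
The namespace-aware paths above all follow the same on-disk nesting: each
namespace component sits below an extra literal "ns" directory, mirroring
what namespace_path() and the group-creation code do. A self-contained
sketch (paths hypothetical):

    use std::path::PathBuf;

    // Rebuild the nesting that DataStore::namespace_path() produces.
    fn namespace_path(base: PathBuf, components: &[&str]) -> PathBuf {
        let mut path = base;
        for part in components {
            path.push("ns");
            path.push(part);
        }
        path
    }

    fn main() {
        let p = namespace_path(PathBuf::from("/datastore/store2"), &["dev", "qa"]);
        assert_eq!(p, PathBuf::from("/datastore/store2/ns/dev/ns/qa"));
    }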
diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
index bc853b5462fc1c9f38338edcd8620c6df6bd191a..f03d0d87fe6be490c6d6a4d343e8806c7803eaf9 100644
@@ -14,7 +14,7 @@ use proxmox_router::{
 };
 use proxmox_schema::{api, ApiType, ReturnType};
 
-use pbs_api_types::BackupType;
+use pbs_api_types::{BackupNamespace, BackupType};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupRepository, BackupWriter};
 use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
@@ -242,9 +242,13 @@ async fn test_upload_speed(
         client,
         crypt_config.clone(),
         repo.store(),
-        BackupType::Host,
-        "benchmark",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "benchmark".to_string(),
+            backup_time,
+        )
+            .into(),
         false,
         true,
     )
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index 46bc7223b04f1303aa9c6ca770e16f6d4127c63d..fc4e731b2890968fa5190ee6b065ccd1a741af4d 100644
@@ -14,9 +14,9 @@ use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
 use crate::{
-    api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
-    complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
-    extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
+    complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
+    complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
+    extract_repository_from_value, format_key_source, record_repository, BackupDir,
     BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
     Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };
@@ -68,16 +68,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 
     let client = connect(&repo)?;
 
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
 
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -153,13 +145,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let path = required_string_param(&param, "snapshot")?;
     let archive_name = required_string_param(&param, "archive-name")?;
 
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
 
     let crypto = crypto_parameters(&param)?;
 
@@ -186,9 +172,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index d7066b36555b83bf314372fd148a494627cba90b..5b5a791500c9ccff894bc61be036921de2c3fc44 100644
@@ -7,6 +7,7 @@ use std::task::Context;
 
 use anyhow::{bail, format_err, Error};
 use futures::stream::{StreamExt, TryStreamExt};
+use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
@@ -22,10 +23,10 @@ use proxmox_time::{epoch_i64, strftime_local};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte,
-    PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus,
-    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
-    TRAFFIC_CONTROL_RATE_SCHEMA,
+    Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
+    Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig,
+    SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::tools::{
@@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
-) -> Result<(BackupType, String, i64), Error> {
+) -> Result<BackupDir, Error> {
     let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
 
@@ -158,7 +159,20 @@ pub async fn api_datastore_latest_snapshot(
 
     list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
 
-    Ok((group.ty, group.id, list[0].backup.time))
+    Ok((group, list[0].backup.time).into())
+}
+
+pub async fn dir_or_last_from_group(
+    client: &HttpClient,
+    repo: &BackupRepository,
+    path: &str,
+) -> Result<BackupDir, Error> {
+    match path.parse::<BackupPart>()? {
+        BackupPart::Dir(dir) => Ok(dir),
+        BackupPart::Group(group) => {
+            api_datastore_latest_snapshot(&client, repo.store(), group).await
+        }
+    }
 }
 
 async fn backup_directory<P: AsRef<Path>>(
@@ -251,13 +265,12 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     record_repository(&repo);
 
     let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let group = BackupGroup::new(item.backup.ty, item.backup.id);
-        Ok(group.to_string())
+        let item = GroupListItem::deserialize(record)?;
+        Ok(item.backup.to_string())
     };
 
     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+        let item = GroupListItem::deserialize(record)?;
         let snapshot = BackupDir {
             group: item.backup,
             time: item.last_backup,
@@ -266,7 +279,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     };
 
     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+        let item = GroupListItem::deserialize(record)?;
         Ok(pbs_tools::format::render_backup_file_list(&item.files))
     };
 
@@ -560,6 +573,10 @@ fn spawn_catalog_upload(
                optional: true,
                default: false,
            },
+           "backup-ns": {
+               schema: BACKUP_NAMESPACE_SCHEMA,
+               optional: true,
+           },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
@@ -653,6 +670,14 @@ async fn create_backup(
         .as_str()
         .unwrap_or(proxmox_sys::nodename());
 
+    let backup_namespace: BackupNamespace = match param.get("backup-ns") {
+        Some(ns) => ns
+            .as_str()
+            .ok_or_else(|| format_err!("bad namespace {:?}", ns))?
+            .parse()?,
+        None => BackupNamespace::root(),
+    };
+
     let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
 
     let include_dev = param["include-dev"].as_array();
@@ -775,12 +800,13 @@ async fn create_backup(
     let client = connect_rate_limited(&repo, rate_limit)?;
     record_repository(&repo);
 
-    println!(
-        "Starting backup: {}/{}/{}",
+    let snapshot = BackupDir::from((
+        backup_namespace,
         backup_type,
-        backup_id,
-        pbs_datastore::BackupDir::backup_time_to_string(backup_time)?
-    );
+        backup_id.to_owned(),
+        backup_time,
+    ));
+    println!("Starting backup: {snapshot}");
 
     println!("Client name: {}", proxmox_sys::nodename());
 
@@ -827,9 +853,7 @@ async fn create_backup(
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        backup_id,
-        backup_time,
+        &snapshot,
         verbose,
         false,
     )
@@ -873,7 +897,6 @@ async fn create_backup(
         None
     };
 
-    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
     let mut manifest = BackupManifest::new(snapshot);
 
     let mut catalog = None;
@@ -1182,13 +1205,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
 
     let path = json::required_string_param(&param, "snapshot")?;
 
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
 
     let target = json::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
@@ -1211,9 +1228,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;
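
With this change the catalog, mount and restore code paths share a single
resolution step. Roughly, from inside the client crate (repository and path
values hypothetical):

    use anyhow::Error;
    use pbs_api_types::BackupDir;
    use pbs_client::{BackupRepository, HttpClient};

    // Resolve a group path like "vm/100" to its latest snapshot, or pass a
    // full snapshot path through unchanged.
    async fn pick_snapshot(client: &HttpClient, repo: &BackupRepository) -> Result<BackupDir, Error> {
        let dir = crate::dir_or_last_from_group(client, repo, "vm/100").await?;
        println!("selected snapshot {dir}");
        Ok(dir)
    }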
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index 508a4f76a85dbf6a266209341859130e977896a1..cfee5e1815ee5b4c3d78e4488506e222a09f4a72 100644
@@ -18,7 +18,6 @@ use proxmox_schema::*;
 use proxmox_sys::fd::Fd;
 use proxmox_sys::sortable;
 
-use pbs_api_types::{BackupDir, BackupGroup};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_config::key_config::load_and_decrypt_key;
@@ -29,8 +28,8 @@ use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
 use crate::{
-    api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
-    complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
+    complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
+    complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
     record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
 };
 
@@ -199,13 +198,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     record_repository(&repo);
 
     let path = required_string_param(&param, "snapshot")?;
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
     let crypt_config = match keyfile {
@@ -236,9 +229,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;
diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs
index 6b5e65b9892f8d9582323be83994a010cb317ed3..1733f36b43a79f52fb2881a93eb81bf18c6f65f2 100644
@@ -102,16 +102,8 @@ async fn list_files(
     driver: Option<BlockDriverType>,
 ) -> Result<Vec<ArchiveEntry>, Error> {
     let client = connect(&repo)?;
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
 
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -409,16 +401,8 @@ async fn extract(
     };
 
     let client = connect(&repo)?;
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
     let (manifest, _) = client.download_manifest().await?;
 
     match path {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index dcc4e1c137f82a0a526c9c0e536f48802649b3db..c9eabd3c1c1e99b1b62f7c56020a67d28fd376e3 100644
@@ -10,6 +10,7 @@ use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
+use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
 
@@ -31,12 +32,13 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
 use pbs_api_types::{
-    Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
-    GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
-    SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
+    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
+    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
+    IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
+    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -54,7 +56,7 @@ use pbs_datastore::{
     check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
     StoreProgress, CATALOG_NAME,
 };
-use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_tools::json::required_string_param;
 use proxmox_rest_server::{formatter, WorkerTask};
 
 use crate::api2::node::rrd::create_value_from_rrd;
@@ -168,7 +170,7 @@ pub fn list_groups(
     let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
 
     datastore
-        .iter_backup_groups()?
+        .iter_backup_groups(Default::default())? // FIXME: Namespaces and recursion parameters!
         .try_fold(Vec::new(), |mut group_info, group| {
             let group = group?;
             let owner = match datastore.get_owner(group.as_ref()) {
@@ -224,8 +226,10 @@ pub fn list_groups(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -238,14 +242,12 @@ pub fn list_groups(
 /// Delete backup group including all snapshots.
 pub fn delete_group(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    group: pbs_api_types::BackupGroup,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@@ -261,9 +263,10 @@ pub fn delete_group(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
@@ -277,16 +280,14 @@ pub fn delete_group(
 /// List snapshot files.
 pub fn list_snapshot_files(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
-    let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let snapshot = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -306,9 +307,10 @@ pub fn list_snapshot_files(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -321,16 +323,14 @@ pub fn list_snapshot_files(
 /// Delete backup snapshot.
 pub fn delete_snapshot(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
-    let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let snapshot = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -349,6 +349,10 @@ pub fn delete_snapshot(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
+            "backup-ns": {
+                type: BackupNamespace,
+                optional: true,
+            },
             "backup-type": {
                 optional: true,
                 type: BackupType,
@@ -370,6 +374,7 @@ pub fn delete_snapshot(
 /// List backup snapshots.
 pub fn list_snapshots(
     store: String,
+    backup_ns: Option<BackupNamespace>,
     backup_type: Option<BackupType>,
     backup_id: Option<String>,
     _param: Value,
@@ -384,21 +389,26 @@ pub fn list_snapshots(
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
+    let backup_ns = backup_ns.unwrap_or_default();
+
     // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
     // backup group and provide an error free (Err -> None) accessor
     let groups = match (backup_type, backup_id) {
         (Some(backup_type), Some(backup_id)) => {
-            vec![datastore.backup_group_from_parts(backup_type, backup_id)]
+            vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
         }
+        // FIXME: Recursion
         (Some(backup_type), None) => datastore
-            .iter_backup_groups_ok()?
+            .iter_backup_groups_ok(backup_ns)?
             .filter(|group| group.backup_type() == backup_type)
             .collect(),
+        // FIXME: Recursion
         (None, Some(backup_id)) => datastore
-            .iter_backup_groups_ok()?
+            .iter_backup_groups_ok(backup_ns)?
             .filter(|group| group.backup_id() == backup_id)
             .collect(),
-        _ => datastore.list_backup_groups()?,
+        // FIXME: Recursion
+        (None, None) => datastore.list_backup_groups(backup_ns)?,
     };
 
     let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
@@ -506,7 +516,7 @@ fn get_snapshots_count(
     filter_owner: Option<&Authid>,
 ) -> Result<Counts, Error> {
     store
-        .iter_backup_groups_ok()?
+        .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
         .filter(|group| {
             let owner = match store.get_owner(group.as_ref()) {
                 Ok(owner) => owner,
@@ -606,6 +616,10 @@ pub fn status(
             store: {
                 schema: DATASTORE_SCHEMA,
             },
+            "backup-ns": {
+                type: BackupNamespace,
+                optional: true,
+            },
             "backup-type": {
                 type: BackupType,
                 optional: true,
@@ -641,6 +655,7 @@ pub fn status(
 /// or all backups in the datastore.
 pub fn verify(
     store: String,
+    backup_ns: Option<BackupNamespace>,
     backup_type: Option<BackupType>,
     backup_id: Option<String>,
     backup_time: Option<i64>,
@@ -658,13 +673,22 @@ pub fn verify(
     let mut backup_group = None;
     let mut worker_type = "verify";
 
+    // FIXME: Recursion
+    // FIXME: Namespaces and worker ID, could this be an issue?
+    let backup_ns = backup_ns.unwrap_or_default();
+
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!(
-                "{}:{}/{}/{:08X}",
-                store, backup_type, backup_id, backup_time
+                "{}:{}/{}/{}/{:08X}",
+                store,
+                backup_ns.display_as_path(),
+                backup_type,
+                backup_id,
+                backup_time
             );
-            let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+            let dir =
+                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
 
             check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
 
@@ -672,8 +696,14 @@ pub fn verify(
             worker_type = "verify_snapshot";
         }
         (Some(backup_type), Some(backup_id), None) => {
-            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
-            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
+            worker_id = format!(
+                "{}:{}/{}/{}",
+                store,
+                backup_ns.display_as_path(),
+                backup_type,
+                backup_id
+            );
+            let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id));
 
             check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
 
@@ -748,8 +778,10 @@ pub fn verify(
 #[api(
     input: {
         properties: {
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-type": { type: BackupType },
+            group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
             "dry-run": {
                 optional: true,
                 type: bool,
@@ -772,8 +804,7 @@ pub fn verify(
 )]
 /// Prune a group on the datastore
 pub fn prune(
-    backup_id: String,
-    backup_type: BackupType,
+    group: pbs_api_types::BackupGroup,
     dry_run: bool,
     prune_options: PruneOptions,
     store: String,
@@ -784,11 +815,11 @@ pub fn prune(
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    let group = datastore.backup_group_from_parts(backup_type, &backup_id);
+    let group = datastore.backup_group(group);
 
     check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
 
-    let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
+    let worker_id = format!("{}:{}", store, group);
 
     let mut prune_result = Vec::new();
 
@@ -828,10 +859,9 @@ pub fn prune(
         );
         task_log!(
             worker,
-            "Starting prune on store \"{}\" group \"{}/{}\"",
+            "Starting prune on store \"{}\" group \"{}\"",
             store,
-            backup_type,
-            backup_id
+            group,
         );
     }
 
@@ -1076,11 +1106,7 @@ pub fn download_file(
 
         let file_name = required_string_param(&param, "file-name")?.to_owned();
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?.to_owned();
-        let backup_time = required_integer_param(&param, "backup-time")?;
-
-        let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
 
         check_priv_or_backup_owner(
             &datastore,
@@ -1159,11 +1185,7 @@ pub fn download_file_decoded(
 
         let file_name = required_string_param(&param, "file-name")?.to_owned();
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?.to_owned();
-        let backup_time = required_integer_param(&param, "backup-time")?;
-
-        let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
 
         check_priv_or_backup_owner(
             &datastore,
@@ -1285,11 +1307,7 @@ pub fn upload_backup_log(
 
         let file_name = CLIENT_LOG_BLOB_NAME;
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?;
-        let backup_time = required_integer_param(&param, "backup-time")?;
-
-        let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
         let owner = datastore.get_owner(backup_dir.as_ref())?;
@@ -1303,14 +1321,7 @@ pub fn upload_backup_log(
             bail!("backup already contains a log.");
         }
 
-        println!(
-            "Upload backup log to {}/{}/{}/{}/{}",
-            store,
-            backup_type,
-            backup_id,
-            backup_dir.backup_time_string(),
-            file_name
-        );
+        println!("Upload backup log to {store}/{backup_dir}/{file_name}");
 
         let data = req_body
             .map_err(Error::from)
@@ -1335,9 +1346,10 @@ pub fn upload_backup_log(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
             "filepath": {
                 description: "Base64 encoded path.",
                 type: String,
@@ -1351,9 +1363,7 @@ pub fn upload_backup_log(
 /// Get the entries of the given path of the catalog
 pub fn catalog(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     filepath: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<ArchiveEntry>, Error> {
@@ -1361,7 +1371,7 @@ pub fn catalog(
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1438,13 +1448,9 @@ pub fn pxar_file_download(
 
         let filepath = required_string_param(&param, "filepath")?.to_owned();
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?;
-        let backup_time = required_integer_param(&param, "backup-time")?;
-
         let tar = param["tar"].as_bool().unwrap_or(false);
 
-        let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
 
         check_priv_or_backup_owner(
             &datastore,
@@ -1617,8 +1623,10 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            backup_group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -1628,14 +1636,12 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 /// Get "notes" for a backup group
 pub fn get_group_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
 
     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
 
@@ -1647,8 +1653,10 @@ pub fn get_group_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            backup_group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
             notes: {
                 description: "A multiline text.",
             },
@@ -1663,15 +1671,13 @@ pub fn get_group_notes(
 /// Set "notes" for a backup group
 pub fn set_group_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
 
     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
 
@@ -1685,9 +1691,10 @@ pub fn set_group_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -1697,15 +1704,13 @@ pub fn set_group_notes(
 /// Get "notes" for a specific backup
 pub fn get_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1725,9 +1730,10 @@ pub fn get_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
             notes: {
                 description: "A multiline text.",
             },
@@ -1742,16 +1748,14 @@ pub fn get_notes(
 /// Set "notes" for a specific backup
 pub fn set_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1773,9 +1777,10 @@ pub fn set_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -1785,15 +1790,13 @@ pub fn set_notes(
 /// Query protection for a specific backup
 pub fn get_protection(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<bool, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1809,9 +1812,10 @@ pub fn get_protection(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
             protected: {
                 description: "Enable/disable protection.",
             },
@@ -1826,16 +1830,14 @@ pub fn get_protection(
 /// Enable or disable protection for a specific backup
 pub fn set_protection(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     protected: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1851,8 +1853,10 @@ pub fn set_protection(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            backup_group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
             "new-owner": {
                 type: Authid,
             },
@@ -1866,14 +1870,13 @@ pub fn set_protection(
 /// Change owner of a backup group
 pub fn set_backup_owner(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
     new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+    let backup_group = datastore.backup_group(backup_group);
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
index 9effc494f0c2fda45c88410520ca93f4d000206c..5f29edc3d732bdd3e38ff8a224102c4c251e66a9 100644 (file)
@@ -6,6 +6,7 @@ use hex::FromHex;
 use hyper::header::{HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
 use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
 use serde_json::{json, Value};
 
 use proxmox_router::list_subdirs_api_method;
@@ -81,9 +82,7 @@ fn upgrade_to_backup_protocol(
 
         let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?;
-        let backup_time = required_integer_param(&param, "backup-time")?;
+        let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
 
         let protocols = parts
             .headers
@@ -102,13 +101,15 @@ fn upgrade_to_backup_protocol(
             );
         }
 
-        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+        let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id());
 
         let env_type = rpcenv.env_type();
 
-        let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+        let backup_group = datastore.backup_group(backup_dir_arg.group.clone());
 
-        let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
+        let worker_type = if backup_group.backup_type() == BackupType::Host
+            && backup_group.backup_id() == "benchmark"
+        {
             if !benchmark {
                 bail!("unable to run benchmark without --benchmark flags");
             }
@@ -152,7 +153,7 @@ fn upgrade_to_backup_protocol(
             }
         };
 
-        let backup_dir = backup_group.backup_dir(backup_time)?;
+        let backup_dir = backup_group.backup_dir(backup_dir_arg.time)?;
 
         let _last_guard = if let Some(last) = &last_backup {
             if backup_dir.backup_time() <= last.backup_dir.backup_time() {
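
Deserializing the whole `BackupDir` out of `param` in one call replaces the three `required_*_param` lookups, and the benchmark special case now reads type and id off the group. A sketch of that gate — the enum, the messages, and the rejection of `--benchmark` on ordinary backups are assumptions modeled on the surrounding context:

    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    enum BackupType { Vm, Ct, Host }

    struct BackupGroup { ty: BackupType, id: String }

    // Benchmark runs masquerade as host backups with the fixed id
    // "benchmark"; they get their own worker type and must be
    // explicitly requested via the flag.
    fn worker_type(group: &BackupGroup, benchmark: bool) -> Result<&'static str, String> {
        if group.ty == BackupType::Host && group.id == "benchmark" {
            if !benchmark {
                return Err("unable to run benchmark without --benchmark flag".into());
            }
            Ok("benchmark")
        } else {
            if benchmark {
                return Err("benchmark is only valid for 'host/benchmark'".into());
            }
            Ok("backup")
        }
    }

    fn main() {
        let group = BackupGroup { ty: BackupType::Host, id: "benchmark".into() };
        assert_eq!(worker_type(&group, true), Ok("benchmark"));
        assert!(worker_type(&group, false).is_err());
    }
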
index 6bde4ccb7f20b98a7c9651ffb28c2f3b73fc0c39..25c02bfe75785c7765778c308a4c2bf50a3bf8ca 100644 (file)
@@ -6,6 +6,7 @@ use hex::FromHex;
 use hyper::header::{self, HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
 use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
 use serde_json::Value;
 
 use proxmox_router::{
@@ -16,15 +17,15 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
 use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
-use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_tools::json::required_string_param;
 use proxmox_rest_server::{H2Service, WorkerTask};
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
@@ -89,9 +90,7 @@ fn upgrade_to_backup_reader_protocol(
 
         let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
-        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-        let backup_id = required_string_param(&param, "backup-id")?;
-        let backup_time = required_integer_param(&param, "backup-time")?;
+        let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
 
         let protocols = parts
             .headers
@@ -112,7 +111,7 @@ fn upgrade_to_backup_reader_protocol(
 
         let env_type = rpcenv.env_type();
 
-        let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+        let backup_dir = datastore.backup_dir(backup_dir)?;
         if !priv_read {
             let owner = datastore.get_owner(backup_dir.as_ref())?;
             let correct_owner = owner == auth_id
@@ -135,9 +134,9 @@ fn upgrade_to_backup_reader_protocol(
         let worker_id = format!(
             "{}:{}/{}/{:08X}",
             store,
-            backup_type,
-            backup_id,
-            backup_dir.backup_time()
+            backup_dir.backup_type(),
+            backup_dir.backup_id(),
+            backup_dir.backup_time(),
         );
 
         WorkerTask::spawn(
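
The reader's worker id encodes the snapshot time as fixed-width uppercase hex via `{:08X}`, which keeps ids compact and unambiguous. A quick illustration of the formatting:

    fn main() {
        let store = "store2";
        let ty = "host";
        let id = "elsa";
        let backup_time: i64 = 1_650_000_000;
        // {:08X} => zero-padded, at least 8 digits, uppercase hexadecimal.
        let worker_id = format!("{}:{}/{}/{:08X}", store, ty, id, backup_time);
        assert_eq!(worker_id, "store2:host/elsa/62590080");
    }
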
index e83ac1bcaf9159a1fd9e25e189170fc7385e52b7..59e0994ba1891ee07c4600fea83126fa05f92f29 100644 (file)
@@ -408,7 +408,8 @@ fn backup_worker(
 
     let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;
 
-    let mut group_list = datastore.list_backup_groups()?;
+    // FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here...
+    let mut group_list = datastore.list_backup_groups(Default::default())?;
 
     group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
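
`list_backup_groups()` now takes the namespace to list, and `Default::default()` selects the root, preserving the old behavior until the FIXME above is resolved. A sketch of why that works — the namespace type here is a simplified stand-in, and `Default` being the root namespace is an assumption consistent with `BackupNamespace::root()`:

    // Hypothetical stand-in for pbs_api_types::BackupNamespace: a path of
    // namespace components, where the empty path is the root.
    #[derive(Clone, Debug, Default, PartialEq, Eq)]
    struct BackupNamespace {
        path: Vec<String>,
    }

    impl BackupNamespace {
        fn root() -> Self {
            Self::default()
        }
        fn is_root(&self) -> bool {
            self.path.is_empty()
        }
    }

    fn list_backup_groups(ns: BackupNamespace) -> Vec<String> {
        // Real code would walk <store>/<ns path>/<type>/<id>; this only
        // shows that Default::default() selects the root namespace.
        if ns.is_root() {
            vec!["host/elsa".into(), "vm/100".into()]
        } else {
            Vec::new()
        }
    }

    fn main() {
        assert_eq!(BackupNamespace::root(), Default::default());
        assert_eq!(list_backup_groups(Default::default()).len(), 2);
    }
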
 
index 7d5d353904ceaab9c01e1d3e3f56afa13056fb7c..c00aefbc99b5034df1ad7588be41ac8f23468492 100644 (file)
@@ -533,7 +533,11 @@ pub fn verify_all_backups(
         }
     };
 
-    let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
+    // FIXME: This should probably simply enable recursion (or the call should take a recursion parameter)
+    let mut list = match verify_worker
+        .datastore
+        .iter_backup_groups_ok(Default::default())
+    {
         Ok(list) => list
             .filter(|group| {
                 !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
index 2f8461ebe1677c484657f1fafdbe9d4327a85cfd..0208fbc276d6a6af0985be4a314db900fbb654d4 100644 (file)
@@ -42,7 +42,8 @@ pub fn prune_datastore(
     let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
     let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
 
-    for group in datastore.iter_backup_groups()? {
+    // FIXME: Namespaces and recursion!
+    for group in datastore.iter_backup_groups(Default::default())? {
         let group = group?;
         let list = group.list_backups()?;
 
index 48eb5fdee204e391a7546d2b2f91c6a4fa37cccf..8128873d0df6e716d9cffc71b53afd68c97ba6d6 100644 (file)
@@ -651,13 +651,11 @@ async fn pull_group(
             continue;
         }
 
-        let backup_time = snapshot.time;
-
-        remote_snapshots.insert(backup_time);
+        remote_snapshots.insert(snapshot.time);
 
         if let Some(last_sync_time) = last_sync {
-            if last_sync_time > backup_time {
-                skip_info.update(backup_time);
+            if last_sync_time > snapshot.time {
+                skip_info.update(snapshot.time);
                 continue;
             }
         }
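
The refactor above only drops the temporary `backup_time` binding; the skip logic is unchanged: snapshots strictly older than the last successful sync are skipped. As a compact sketch:

    fn should_skip(last_sync: Option<i64>, snapshot_time: i64) -> bool {
        // Skip snapshots strictly older than the last successful sync;
        // everything newer (or equal) is pulled.
        matches!(last_sync, Some(last) if last > snapshot_time)
    }

    fn main() {
        assert!(should_skip(Some(100), 99));
        assert!(!should_skip(Some(100), 100));
        assert!(!should_skip(None, 42));
    }
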
@@ -676,16 +674,8 @@ async fn pull_group(
             options,
         )?;
 
-        let reader = BackupReader::start(
-            new_client,
-            None,
-            params.source.store(),
-            snapshot.group.ty,
-            &snapshot.group.id,
-            backup_time,
-            true,
-        )
-        .await?;
+        let reader =
+            BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?;
 
         let result = pull_snapshot_from(
             worker,
@@ -757,6 +747,8 @@ pub async fn pull_store(
     // explicit create shared lock to prevent GC on newly created chunks
     let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;
 
+    // FIXME: Namespaces! AND: if we make this API call recurse down namespaces, we need to do
+    // the same in the `remove_vanished` case!
     let path = format!("api2/json/admin/datastore/{}/groups", params.source.store());
 
     let mut result = client
@@ -850,7 +842,8 @@ pub async fn pull_store(
 
     if params.remove_vanished {
         let result: Result<(), Error> = proxmox_lang::try_block!({
-            for local_group in params.store.iter_backup_groups()? {
+            // FIXME: See above comment about namespaces & recursion
+            for local_group in params.store.iter_backup_groups(Default::default())? {
                 let local_group = local_group?;
                 if new_groups.contains(local_group.as_ref()) {
                     continue;
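
`remove_vanished` reconciles local groups against the set just seen on the remote and removes the leftovers (pending the namespace FIXME above). A minimal sketch of that reconciliation, with groups reduced to "type/id" strings for brevity (the real code compares pbs_api_types::BackupGroup values via as_ref()):

    use std::collections::HashSet;

    fn main() {
        // Groups reported by the remote during this sync.
        let new_groups: HashSet<&str> = ["host/elsa", "vm/100"].into_iter().collect();
        let local_groups = vec!["host/elsa", "vm/100", "ct/101"];

        // Anything present locally but absent remotely has vanished.
        let vanished: Vec<&str> = local_groups
            .into_iter()
            .filter(|g| !new_groups.contains(g))
            .collect();

        // ct/101 no longer exists on the remote and would be removed.
        assert_eq!(vanished, vec!["ct/101"]);
    }
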