use std::collections::{HashMap, HashSet};
-use std::convert::TryFrom;
use std::io::{self, Write};
+use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, Mutex};
-use std::time::Duration;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
+use nix::unistd::{unlinkat, UnlinkatFlags};
use proxmox_schema::ApiType;
use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{
- Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
- HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
+ Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
+ GarbageCollectionStatus, HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
};
-use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
+use pbs_config::ConfigVersionCache;
use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::index::IndexFile;
use crate::manifest::{
archive_type, ArchiveType, BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
- MANIFEST_LOCK_NAME,
};
use crate::task_tracking::update_active_operations;
use crate::DataBlob;
self.inner.chunk_store.base_path()
}
+ /// Returns the absolute path for a backup namespace on this datastore
+ pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
+ let mut path = self.base_path();
+ path.reserve(ns.path_len());
+ for part in ns.components() {
+ path.push("ns");
+ path.push(part);
+ }
+ path
+ }
+
/// Cleanup a backup directory
///
/// Removes all files not mentioned in the manifest.
}
/// Returns the absolute path for a backup_group
- pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf {
- let mut full_path = self.base_path();
+ pub fn group_path(
+ &self,
+ ns: &BackupNamespace,
+ backup_group: &pbs_api_types::BackupGroup,
+ ) -> PathBuf {
+ let mut full_path = self.namespace_path(ns);
full_path.push(backup_group.to_string());
full_path
}
/// Returns the absolute path for backup_dir
- pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf {
- let mut full_path = self.base_path();
+ pub fn snapshot_path(
+ &self,
+ ns: &BackupNamespace,
+ backup_dir: &pbs_api_types::BackupDir,
+ ) -> PathBuf {
+ let mut full_path = self.namespace_path(ns);
full_path.push(backup_dir.to_string());
full_path
}
- /// Remove a complete backup group including all snapshots, returns true
- /// if all snapshots were removed, and false if some were protected
- pub fn remove_backup_group(
+ /// Create a backup namespace.
+ pub fn create_namespace(
self: &Arc<Self>,
- backup_group: &pbs_api_types::BackupGroup,
- ) -> Result<bool, Error> {
- let backup_group = self.backup_group(backup_group.clone());
+ parent: &BackupNamespace,
+ name: String,
+ ) -> Result<BackupNamespace, Error> {
+ let mut parent_path = self.base_path().to_owned();
+ parent_path.push(parent.path());
+
+ if !parent_path.exists() {
+ bail!("cannot create new namespace, parent {parent} doesn't already exists");
+ }
- let full_path = self.group_path(backup_group.as_ref());
+ // construct ns before mkdir to enforce max-depth and name validity
+ let ns = BackupNamespace::from_parent_ns(parent, name)?;
- let _guard = proxmox_sys::fs::lock_dir_noblock(
- &full_path,
- "backup group",
- "possible running backup",
- )?;
+ let mut ns_full_path = self.base_path().to_owned();
+ ns_full_path.push(ns.path());
- log::info!("removing backup group {:?}", full_path);
+ std::fs::create_dir_all(ns_full_path)?;
- let mut removed_all = true;
+ Ok(ns)
+ }
- // remove all individual backup dirs first to ensure nothing is using them
- for snap in backup_group.list_backups()? {
- if snap.backup_dir.is_protected() {
- removed_all = false;
- continue;
- }
- self.remove_backup_dir(snap.backup_dir.as_ref(), false)?;
+ /// Remove all backup groups of a single namespace level but not the namespace itself.
+ ///
+ /// Does *not* descend into child namespaces and doesn't remove the namespace itself either.
+ ///
+ /// Returns true if all the groups were removed, and false if some were protected.
+ pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> {
+ // FIXME: locking? The single groups/snapshots are already protected, so may not be
+ // necessary (depends on all that we allow to do with namespaces)
+ log::info!("removing all groups in namespace {}:/{ns}", self.name());
+
+ let mut removed_all_groups = true;
+
+ for group in self.iter_backup_groups(ns.to_owned())? {
+ let removed_group = group?.destroy()?;
+ removed_all_groups = removed_all_groups && removed_group;
}
- if removed_all {
- // no snapshots left, we can now safely remove the empty folder
- std::fs::remove_dir_all(&full_path).map_err(|err| {
- format_err!(
- "removing backup group directory {:?} failed - {}",
- full_path,
- err,
- )
- })?;
+ let base_file = std::fs::File::open(self.base_path())?;
+ let base_fd = base_file.as_raw_fd();
+ for ty in BackupType::iter() {
+ let mut ty_dir = ns.path();
+ ty_dir.push(ty.to_string());
+ // best effort only, but we probably should log the error
+ if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
+ if err.as_errno() != Some(nix::errno::Errno::ENOENT) {
+ log::error!("failed to remove backup type {ty} in {ns} - {err}");
+ }
+ }
}
- Ok(removed_all)
+ Ok(removed_all_groups)
}
- /// Remove a backup directory including all content
- pub fn remove_backup_dir(
+ /// Remove a complete backup namespace including all its groups and those of its child namespaces.
+ ///
+ /// Returns true if all groups were removed, and false if some were protected
+ pub fn remove_namespace_recursive(
self: &Arc<Self>,
- backup_dir: &pbs_api_types::BackupDir,
- force: bool,
- ) -> Result<(), Error> {
- let backup_dir = self.backup_dir(backup_dir.clone())?;
+ ns: &BackupNamespace,
+ ) -> Result<bool, Error> {
+ // FIXME: locking? The single groups/snapshots are already protected, so may not be
+ // necessary (depends on all that we allow to do with namespaces)
+ log::info!("removing whole namespace recursively {}:/{ns}", self.name());
- let full_path = backup_dir.full_path();
+ let mut removed_all_groups = true;
+ for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
+ let removed_ns_groups = self.remove_namespace_groups(&ns?)?;
- let (_guard, _manifest_guard);
- if !force {
- _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
- _manifest_guard = self.lock_manifest(&backup_dir)?;
+ removed_all_groups = removed_all_groups && removed_ns_groups;
}
- if backup_dir.is_protected() {
- bail!("cannot remove protected snapshot");
- }
+ // now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
+ // will choke if a new backup/group appeared in the meantime (but not on a new empty NS)
+ let mut children = self
+ .recursive_iter_backup_ns(ns.to_owned())?
+ .collect::<Result<Vec<BackupNamespace>, Error>>()?;
- log::info!("removing backup snapshot {:?}", full_path);
- std::fs::remove_dir_all(&full_path).map_err(|err| {
- format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
- })?;
+ children.sort_by(|a, b| b.depth().cmp(&a.depth()));
- // the manifest does not exists anymore, we do not need to keep the lock
- if let Ok(path) = self.manifest_lock_path(&backup_dir) {
- // ignore errors
- let _ = std::fs::remove_file(path);
+ let base_file = std::fs::File::open(self.base_path())?;
+ let base_fd = base_file.as_raw_fd();
+
+ for ns in children.iter() {
+ let mut ns_dir = ns.path();
+ ns_dir.push("ns");
+ let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);
+
+ if !ns.is_root() {
+ match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
+ Ok(()) => log::info!("removed namespace {ns}"),
+ Err(err) => log::error!("failed to remove namespace {ns} - {err}"),
+ }
+ }
}
- Ok(())
+ Ok(removed_all_groups)
+ }
+
+ /// Remove a complete backup group including all snapshots.
+ ///
+ /// Returns true if all snapshots were removed, and false if some were protected
+ pub fn remove_backup_group(
+ self: &Arc<Self>,
+ ns: &BackupNamespace,
+ backup_group: &pbs_api_types::BackupGroup,
+ ) -> Result<bool, Error> {
+ let backup_group = self.backup_group(ns.clone(), backup_group.clone());
+
+ backup_group.destroy()
+ }
+
+ /// Remove a backup directory including all content
+ pub fn remove_backup_dir(
+ self: &Arc<Self>,
+ ns: &BackupNamespace,
+ backup_dir: &pbs_api_types::BackupDir,
+ force: bool,
+ ) -> Result<(), Error> {
+ let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
+
+ backup_dir.destroy(force)
}
/// Returns the time of the last successful backup
/// Or None if there is no backup in the group (or the group dir does not exist).
pub fn last_successful_backup(
self: &Arc<Self>,
+ ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
) -> Result<Option<i64>, Error> {
- let backup_group = self.backup_group(backup_group.clone());
+ let backup_group = self.backup_group(ns.clone(), backup_group.clone());
let group_path = backup_group.full_group_path();
}
}
+ /// Return the path of the 'owner' file.
+ fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
+ self.group_path(ns, group).join("owner")
+ }
+
/// Returns the backup owner.
///
/// The backup owner is the entity who first created the backup group.
- pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> {
- let mut full_path = self.base_path();
- full_path.push(backup_group.to_string());
- full_path.push("owner");
+ pub fn get_owner(
+ &self,
+ ns: &BackupNamespace,
+ backup_group: &pbs_api_types::BackupGroup,
+ ) -> Result<Authid, Error> {
+ let full_path = self.owner_path(ns, backup_group);
let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
owner.trim_end().parse() // remove trailing newline
}
pub fn owns_backup(
&self,
+ ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
) -> Result<bool, Error> {
- let owner = self.get_owner(backup_group)?;
+ let owner = self.get_owner(ns, backup_group)?;
Ok(check_backup_owner(&owner, auth_id).is_ok())
}
/// Set the backup owner.
pub fn set_owner(
&self,
+ ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
force: bool,
) -> Result<(), Error> {
- let mut path = self.base_path();
- path.push(backup_group.to_string());
- path.push("owner");
+ let path = self.owner_path(ns, backup_group);
let mut open_options = std::fs::OpenOptions::new();
open_options.write(true);
/// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group(
&self,
+ ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let mut full_path = self.base_path();
+ for ns in ns.components() {
+ full_path.push("ns");
+ full_path.push(ns);
+ }
full_path.push(backup_group.ty.as_str());
std::fs::create_dir_all(&full_path)?;
"backup group",
"another backup is already running",
)?;
- self.set_owner(backup_group, auth_id, false)?;
- let owner = self.get_owner(backup_group)?; // just to be sure
+ self.set_owner(ns, backup_group, auth_id, false)?;
+ let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard))
}
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
"backup group",
"another backup is already running",
)?;
- let owner = self.get_owner(backup_group)?; // just to be sure
+ let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard))
}
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
/// The BackupGroup directory needs to exist.
pub fn create_locked_backup_dir(
&self,
+ ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir,
) -> Result<(PathBuf, bool, DirLockGuard), Error> {
- let relative_path = PathBuf::from(backup_dir.to_string());
- let mut full_path = self.base_path();
- full_path.push(&relative_path);
+ let full_path = self.snapshot_path(ns, backup_dir);
+ let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
+ format_err!(
+ "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
+ )
+ })?;
let lock = || {
lock_dir_noblock(
};
match std::fs::create_dir(&full_path) {
- Ok(_) => Ok((relative_path, true, lock()?)),
+ Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
- Ok((relative_path, false, lock()?))
+ Ok((relative_path.to_owned(), false, lock()?))
}
Err(e) => Err(e.into()),
}
}
+ /// Get a streaming iter over single-level backup namespaces of a datastore
+ ///
+ /// The iterated item is still a Result that can contain errors from rather unexpected FS or
+ /// parsing errors.
+ pub fn iter_backup_ns(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<ListNamespaces, Error> {
+ ListNamespaces::new(Arc::clone(self), ns)
+ }
+
+ /// Get a streaming iter over single-level backup namespaces of a datastore, filtered by Ok
+ ///
+ /// The iterated item's result is already unwrapped, if it contained an error it will be
+ /// logged. Can be useful in iterator chain commands
+ pub fn iter_backup_ns_ok(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
+ let this = Arc::clone(self);
+ Ok(
+ ListNamespaces::new(Arc::clone(&self), ns)?.filter_map(move |ns| match ns {
+ Ok(ns) => Some(ns),
+ Err(err) => {
+ log::error!("list groups error on datastore {} - {}", this.name(), err);
+ None
+ }
+ }),
+ )
+ }
+
+ /// Get a streaming iter over all backup namespaces below (and including) an anchor namespace
+ ///
+ /// The iterated item is still a Result that can contain errors from rather unexpected FS or
+ /// parsing errors.
+ pub fn recursive_iter_backup_ns(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<ListNamespacesRecursive, Error> {
+ ListNamespacesRecursive::new(Arc::clone(self), ns)
+ }
+
+ /// Get a recursive streaming iter over backup namespaces of a datastore, filtered by Ok
+ ///
+ /// The iterated item's result is already unwrapped, if it contained an error it will be
+ /// logged. Can be useful in iterator chain commands
+ pub fn recursive_iter_backup_ns_ok(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ max_depth: Option<usize>,
+ ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
+ let this = Arc::clone(self);
+ Ok(if let Some(depth) = max_depth {
+ ListNamespacesRecursive::new_max_depth(Arc::clone(&self), ns, depth)?
+ } else {
+ ListNamespacesRecursive::new(Arc::clone(&self), ns)?
+ }
+ .filter_map(move |ns| match ns {
+ Ok(ns) => Some(ns),
+ Err(err) => {
+ log::error!("list groups error on datastore {} - {}", this.name(), err);
+ None
+ }
+ }))
+ }
+
/// Get a streaming iter over top-level backup groups of a datatstore
///
/// The iterated item is still a Result that can contain errors from rather unexptected FS or
/// parsing errors.
- pub fn iter_backup_groups(self: &Arc<DataStore>) -> Result<ListGroups, Error> {
- ListGroups::new(Arc::clone(self))
+ pub fn iter_backup_groups(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<ListGroups, Error> {
+ ListGroups::new(Arc::clone(self), ns)
}
/// Get a streaming iter over top-level backup groups of a datatstore, filtered by Ok results
/// logged. Can be useful in iterator chain commands
pub fn iter_backup_groups_ok(
self: &Arc<DataStore>,
+ ns: BackupNamespace,
) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
let this = Arc::clone(self);
Ok(
- ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group {
+ ListGroups::new(Arc::clone(&self), ns)?.filter_map(move |group| match group {
Ok(group) => Some(group),
Err(err) => {
log::error!("list groups error on datastore {} - {}", this.name(), err);
/// Get a in-memory vector for all top-level backup groups of a datatstore
///
/// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
- pub fn list_backup_groups(self: &Arc<DataStore>) -> Result<Vec<BackupGroup>, Error> {
- ListGroups::new(Arc::clone(self))?.collect()
+ pub fn list_backup_groups(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<Vec<BackupGroup>, Error> {
+ ListGroups::new(Arc::clone(self), ns)?.collect()
}
pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
self.inner.chunk_store.insert_chunk(chunk, digest)
}
- pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
- let mut path = backup_dir.full_path();
- path.push(filename);
-
- proxmox_lang::try_block!({
- let mut file = std::fs::File::open(&path)?;
- DataBlob::load_from_reader(&mut file)
- })
- .map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
- }
-
pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
std::fs::metadata(chunk_path).map_err(Error::from)
})
}
- /// Returns the filename to lock a manifest
- ///
- /// Also creates the basedir. The lockfile is located in
- /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
- fn manifest_lock_path(&self, backup_dir: &BackupDir) -> Result<String, Error> {
- let mut path = format!(
- "/run/proxmox-backup/locks/{}/{}/{}",
- self.name(),
- backup_dir.backup_type(),
- backup_dir.backup_id(),
- );
- std::fs::create_dir_all(&path)?;
- use std::fmt::Write;
- write!(
- path,
- "/{}{}",
- backup_dir.backup_time_string(),
- &MANIFEST_LOCK_NAME
- )?;
-
- Ok(path)
- }
-
- fn lock_manifest(&self, backup_dir: &BackupDir) -> Result<BackupLockGuard, Error> {
- let path = self.manifest_lock_path(backup_dir)?;
-
- // update_manifest should never take a long time, so if someone else has
- // the lock we can simply block a bit and should get it soon
- open_backup_lockfile(&path, Some(Duration::from_secs(5)), true)
- .map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
- }
-
/// Load the manifest without a lock. Must not be written back.
pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
- let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
- let raw_size = blob.raw_size();
- let manifest = BackupManifest::try_from(blob)?;
- Ok((manifest, raw_size))
+ backup_dir.load_manifest()
}
/// Update the manifest of the specified snapshot. Never write a manifest directly,
backup_dir: &BackupDir,
update_fn: impl FnOnce(&mut BackupManifest),
) -> Result<(), Error> {
- let _guard = self.lock_manifest(backup_dir)?;
+ let _guard = backup_dir.lock_manifest()?;
let (mut manifest, _) = self.load_manifest(backup_dir)?;
update_fn(&mut manifest);
}
/// Open a backup group from this datastore.
- pub fn backup_group(self: &Arc<Self>, group: pbs_api_types::BackupGroup) -> BackupGroup {
- BackupGroup::new(Arc::clone(&self), group)
+ pub fn backup_group(
+ self: &Arc<Self>,
+ ns: BackupNamespace,
+ group: pbs_api_types::BackupGroup,
+ ) -> BackupGroup {
+ BackupGroup::new(Arc::clone(&self), ns, group)
}
/// Open a backup group from this datastore.
- pub fn backup_group_from_parts<T>(self: &Arc<Self>, ty: BackupType, id: T) -> BackupGroup
+ pub fn backup_group_from_parts<T>(
+ self: &Arc<Self>,
+ ns: BackupNamespace,
+ ty: BackupType,
+ id: T,
+ ) -> BackupGroup
where
T: Into<String>,
{
- self.backup_group((ty, id.into()).into())
+ self.backup_group(ns, (ty, id.into()).into())
}
+ /*
/// Open a backup group from this datastore by backup group path such as `vm/100`.
///
/// Convenience method for `store.backup_group(path.parse()?)`
pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
- Ok(self.backup_group(path.parse()?))
+ todo!("split out the namespace");
}
+ */
/// Open a snapshot (backup directory) from this datastore.
- pub fn backup_dir(self: &Arc<Self>, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> {
- BackupDir::with_group(self.backup_group(dir.group), dir.time)
+ pub fn backup_dir(
+ self: &Arc<Self>,
+ ns: BackupNamespace,
+ dir: pbs_api_types::BackupDir,
+ ) -> Result<BackupDir, Error> {
+ BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
}
/// Open a snapshot (backup directory) from this datastore.
pub fn backup_dir_from_parts<T>(
self: &Arc<Self>,
+ ns: BackupNamespace,
ty: BackupType,
id: T,
time: i64,
where
T: Into<String>,
{
- self.backup_dir((ty, id.into(), time).into())
+ self.backup_dir(ns, (ty, id.into(), time).into())
}
/// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
BackupDir::with_rfc3339(group, time_string.into())
}
+ /*
/// Open a snapshot (backup directory) from this datastore by a snapshot path.
pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
- self.backup_dir(path.parse()?)
+ todo!("split out the namespace");
}
+ */
}
/// A iterator for all BackupDir's (Snapshots) in a BackupGroup
/// A iterator for a (single) level of Backup Groups
pub struct ListGroups {
store: Arc<DataStore>,
+ ns: BackupNamespace,
type_fd: proxmox_sys::fs::ReadDir,
id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
}
impl ListGroups {
- pub fn new(store: Arc<DataStore>) -> Result<Self, Error> {
+ pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+ let mut base_path = store.base_path().to_owned();
+ base_path.push(ns.path());
Ok(ListGroups {
- type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?,
+ type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
store,
+ ns,
id_state: None,
})
}
if BACKUP_ID_REGEX.is_match(name) {
return Some(Ok(BackupGroup::new(
Arc::clone(&self.store),
+ self.ns.clone(),
(group_type, name.to_owned()).into(),
)));
}
}
}
}
+
+/// A iterator for a (single) level of Namespaces
+pub struct ListNamespaces {
+ ns: BackupNamespace,
+ base_path: PathBuf,
+ ns_state: Option<proxmox_sys::fs::ReadDir>,
+}
+
+impl ListNamespaces {
+ /// construct a new single-level namespace iterator on a datastore for a given anchor ns
+ pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+ Ok(ListNamespaces {
+ ns,
+ base_path: store.base_path(),
+ ns_state: None,
+ })
+ }
+
+ /// to allow constructing the iter directly on a path, e.g., provided by section config
+ ///
+ /// NOTE: it's recommended to use the datastore-based constructor or go over the recursive iter
+ pub fn new_from_path(path: PathBuf, ns: Option<BackupNamespace>) -> Result<Self, Error> {
+ Ok(ListNamespaces {
+ ns: ns.unwrap_or_default(),
+ base_path: path,
+ ns_state: None,
+ })
+ }
+}
+
+impl Iterator for ListNamespaces {
+ type Item = Result<BackupNamespace, Error>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ if let Some(ref mut id_fd) = self.ns_state {
+ let item = id_fd.next()?; // if this returns none we are done
+ let entry = match item {
+ Ok(ref entry) => {
+ match entry.file_type() {
+ Some(nix::dir::Type::Directory) => entry, // OK
+ _ => continue,
+ }
+ }
+ Err(err) => return Some(Err(err)),
+ };
+ if let Ok(name) = entry.file_name().to_str() {
+ if name != "." && name != ".." {
+ return Some(BackupNamespace::from_parent_ns(&self.ns, name.to_string()));
+ }
+ }
+ continue; // entry was "."/".." or its name isn't valid utf-8
+ } else {
+ let mut base_path = self.base_path.to_owned();
+ if !self.ns.is_root() {
+ base_path.push(self.ns.path());
+ }
+ base_path.push("ns");
+
+ let ns_dirfd = match proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path) {
+ Ok(dirfd) => dirfd,
+ Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => return None,
+ Err(err) => return Some(Err(err.into())),
+ };
+ // found a ns directory, descend into it to scan all its namespaces
+ self.ns_state = Some(ns_dirfd);
+ }
+ }
+ }
+}
+
+/// A iterator for all Namespaces below an anchor namespace, most often that will be the
+/// `BackupNamespace::root()` one.
+///
+/// Descends depth-first (pre-order) into the namespace hierarchy yielding namespaces immediately as
+/// it finds them.
+///
+/// Note: The anchor namespace passed on creating the iterator will be yielded as the first element,
+/// which can be useful for searching all backup groups from a certain anchor, as that level can
+/// contain sub-namespaces but also groups of its own, which would otherwise need special casing.
+pub struct ListNamespacesRecursive {
+ store: Arc<DataStore>,
+ /// the starting namespace we search downward from
+ ns: BackupNamespace,
+ /// the maximal recursion depth from the anchor start ns (depth == 0) downwards
+ max_depth: u8,
+ state: Option<Vec<ListNamespaces>>, // vector to avoid code recursion
+}
+
+impl ListNamespacesRecursive {
+ /// Creates a recursive namespace iterator.
+ pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+ Self::new_max_depth(store, ns, pbs_api_types::MAX_NAMESPACE_DEPTH)
+ }
+
+ /// Creates a recursive namespace iterator with max_depth
+ pub fn new_max_depth(
+ store: Arc<DataStore>,
+ ns: BackupNamespace,
+ max_depth: usize,
+ ) -> Result<Self, Error> {
+ if max_depth > pbs_api_types::MAX_NAMESPACE_DEPTH {
+ bail!("max_depth must be smaller 8");
+ }
+ Ok(ListNamespacesRecursive {
+ store: store,
+ ns,
+ max_depth: max_depth as u8,
+ state: None,
+ })
+ }
+}
+
+impl Iterator for ListNamespacesRecursive {
+ type Item = Result<BackupNamespace, Error>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ if let Some(ref mut state) = self.state {
+ if state.is_empty() {
+ return None; // there's a state but it's empty -> we're all done
+ }
+ let iter = match state.last_mut() {
+ Some(iter) => iter,
+ None => return None, // unexpected, should we just unwrap?
+ };
+ match iter.next() {
+ Some(Ok(ns)) => {
+ if state.len() < self.max_depth as usize {
+ match ListNamespaces::new(Arc::clone(&self.store), ns.to_owned()) {
+ Ok(iter) => state.push(iter),
+ Err(err) => log::error!("failed to create child ns iter {err}"),
+ }
+ }
+ return Some(Ok(ns));
+ }
+ Some(ns_err) => return Some(ns_err),
+ None => {
+ let _ = state.pop(); // done at this level (and below), continue in parent
+ }
+ }
+ } else {
+ // first next call ever: initialize state vector and start iterating at our level
+ let mut state = Vec::with_capacity(pbs_api_types::MAX_NAMESPACE_DEPTH);
+ if self.max_depth as usize > 0 {
+ match ListNamespaces::new(Arc::clone(&self.store), self.ns.to_owned()) {
+ Ok(list_ns) => state.push(list_ns),
+ Err(err) => {
+ // yield the error but set the state to Some to avoid re-try, a future
+ // next() will then see the state, and the empty check yields None
+ self.state = Some(state);
+ return Some(Err(err));
+ }
+ }
+ }
+ self.state = Some(state);
+ return Some(Ok(self.ns.to_owned())); // return our anchor ns for convenience
+ }
+ }
+ }
+}