}
Ok(())
}
+
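+ /// The ACL object path for this namespace on the given datastore.
+ ///
+ /// Starts with `"datastore"` and the store name, followed by one element per namespace
+ /// component; e.g. (illustrative values) namespace `a/b` on store `tank` yields
+ /// `["datastore", "tank", "a", "b"]`, while the root namespace yields `["datastore", "tank"]`.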
+ pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
+ let mut path: Vec<&str> = vec!["datastore", store];
+
+ if self.is_root() {
+ path
+ } else {
+ path.extend(self.inner.iter().map(|comp| comp.as_str()));
+ path
+ }
+ }
}
impl fmt::Display for BackupNamespace {
}
}
-/// Helper struct for places where sensible formatting of store+NS combo is required
-pub struct DatastoreWithNamespace {
- pub store: String,
- pub ns: BackupNamespace,
-}
-
-impl fmt::Display for DatastoreWithNamespace {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.ns.is_root() {
- write!(f, "datastore {}, root namespace", self.store)
- } else {
- write!(f, "datastore '{}', namespace '{}'", self.store, self.ns)
- }
- }
-}
-
-impl DatastoreWithNamespace {
- pub fn acl_path(&self) -> Vec<&str> {
- let mut path: Vec<&str> = vec!["datastore", &self.store];
-
- if self.ns.is_root() {
- path
- } else {
- path.extend(self.ns.inner.iter().map(|comp| comp.as_str()));
- path
- }
- }
-}
-
/// Used when both a backup group or a directory can be valid.
pub enum BackupPart {
Group(BackupGroup),
format!("{}/{}", ns.display_as_path(), dir)
}
}
+
+/// Formats a datastore name and [`BackupNamespace`] for use in log and error messages.
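+///
+/// Produces, e.g., `datastore 'tank', namespace 'a/b'`, or `datastore 'tank', root namespace`
+/// if `ns` is the root namespace (store and namespace names are illustrative).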
+pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
+ if ns.is_root() {
+ format!("datastore '{}', root namespace", store)
+ } else {
+ format!("datastore '{}', namespace '{}'", store, ns)
+ }
+}
use proxmox_schema::*;
use crate::{
- Authid, BackupNamespace, BackupType, DatastoreWithNamespace, RateLimitConfig, Userid,
- BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA,
- MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
+ Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
+ BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
+ NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
}
impl VerificationJobConfig {
- pub fn store_with_ns(&self) -> DatastoreWithNamespace {
- DatastoreWithNamespace {
- store: self.store.clone(),
- ns: self.ns.clone().unwrap_or_default(),
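+ /// The ACL object path of this job's datastore and, if set, namespace.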
+ pub fn acl_path(&self) -> Vec<&str> {
+ match self.ns.as_ref() {
+ Some(ns) => ns.acl_path(&self.store),
+ None => vec!["datastore", &self.store],
}
}
}
}
impl SyncJobConfig {
- pub fn store_with_ns(&self) -> DatastoreWithNamespace {
- DatastoreWithNamespace {
- store: self.store.clone(),
- ns: self.ns.clone().unwrap_or_default(),
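+ /// The ACL object path of this job's datastore and, if set, namespace.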
+ pub fn acl_path(&self) -> Vec<&str> {
+ match self.ns.as_ref() {
+ Some(ns) => ns.acl_path(&self.store),
+ None => vec!["datastore", &self.store],
}
}
}
use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_api_types::{BackupNamespace, DatastoreWithNamespace, Operation};
+use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation};
use crate::backup_info::BackupDir;
use crate::dynamic_index::DynamicIndexReader;
pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
let datastore = snapshot.datastore();
- let store_with_ns = DatastoreWithNamespace {
- store: datastore.name().to_owned(),
- ns: snapshot.backup_ns().clone(),
- };
let snapshot_path = snapshot.full_path();
let locked_dir =
Err(err) => {
bail!(
"manifest load error on {}, snapshot '{}' - {}",
- store_with_ns,
+ print_store_and_ns(datastore.name(), snapshot.backup_ns()),
snapshot.dir(),
err
);
use pxar::EntryKind;
use pbs_api_types::{
- print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
- DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
- GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
- SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
- BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
- MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
- UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+ print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
+ Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
+ Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+ BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+ NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
+ VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
- store_with_ns: &DatastoreWithNamespace,
+ store: &str,
+ ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
operation: Option<Operation>,
backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
- let limited = check_ns_privs_full(
- store_with_ns,
- auth_id,
- full_access_privs,
- partial_access_privs,
- )?;
+ let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
- let datastore = DataStore::lookup_datastore(&store_with_ns.store, operation)?;
+ let datastore = DataStore::lookup_datastore(store, operation)?;
if limited {
- let owner = datastore.get_owner(&store_with_ns.ns, backup_group)?;
+ let owner = datastore.get_owner(ns, backup_group)?;
check_backup_owner(&owner, &auth_id)?;
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-
- let store_with_ns = DatastoreWithNamespace {
- store: store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let list_all = !check_ns_privs_full(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
)?;
- let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
+ let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
datastore
- .iter_backup_groups(store_with_ns.ns.clone())? // FIXME: Namespaces and recursion parameters!
+ .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
.try_fold(Vec::new(), |mut group_info, group| {
let group = group?;
- let owner = match datastore.get_owner(&store_with_ns.ns, group.as_ref()) {
+ let owner = match datastore.get_owner(&ns, group.as_ref()) {
Ok(auth_id) => auth_id,
Err(err) => {
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
group.group(),
- store_with_ns,
+ print_store_and_ns(&store, &ns),
err
);
return Ok(group_info);
})
.to_owned();
- let note_path = get_group_note_path(&datastore, &store_with_ns.ns, group.as_ref());
+ let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
let comment = file_read_firstline(&note_path).ok();
group_info.push(GroupListItem {
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
&group,
)?;
- if !datastore.remove_backup_group(&store_with_ns.ns, &group)? {
+ if !datastore.remove_backup_group(&ns, &group)? {
bail!("group only partially deleted due to protected snapshots");
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-
- let store_with_ns = DatastoreWithNamespace {
- store: store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;
+ let snapshot = datastore.backup_dir(ns, backup_dir)?;
let info = BackupInfo::new(snapshot)?;
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-
- let store_with_ns = DatastoreWithNamespace {
- store: store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
&backup_dir.group,
)?;
- let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;
+ let snapshot = datastore.backup_dir(ns, backup_dir)?;
snapshot.destroy(false)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.clone(),
- };
let list_all = !check_ns_privs_full(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
)?;
- let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
+ let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => {
- vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
+ vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
}
// FIXME: Recursion
(Some(backup_type), None) => datastore
- .iter_backup_groups_ok(ns)?
+ .iter_backup_groups_ok(ns.clone())?
.filter(|group| group.backup_type() == backup_type)
.collect(),
// FIXME: Recursion
(None, Some(backup_id)) => datastore
- .iter_backup_groups_ok(ns)?
+ .iter_backup_groups_ok(ns.clone())?
.filter(|group| group.backup_id() == backup_id)
.collect(),
// FIXME: Recursion
- (None, None) => datastore.list_backup_groups(ns)?,
+ (None, None) => datastore.list_backup_groups(ns.clone())?,
};
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
group.group(),
- &store_with_ns,
+ print_store_and_ns(&store, &ns),
err
);
return Ok(snapshots);
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.clone(),
- };
let owner_check_required = check_ns_privs_full(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_VERIFY,
PRIV_DATASTORE_BACKUP,
)?;
- let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
+ let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let ignore_verified = ignore_verified.unwrap_or(true);
let worker_id;
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!(
"{}:{}/{}/{}/{:08X}",
- store_with_ns.store,
+ store,
ns.display_as_path(),
backup_type,
backup_id,
(Some(backup_type), Some(backup_id), None) => {
worker_id = format!(
"{}:{}/{}/{}",
- store_with_ns.store,
+ store,
ns.display_as_path(),
backup_type,
backup_id
}
(None, None, None) => {
worker_id = if ns.is_root() {
- format!("{}", store_with_ns.store)
+ store
} else {
- format!("{}:{}", store_with_ns.store, ns.display_as_path())
+ format!("{}:{}", store, ns.display_as_path())
};
}
_ => bail!("parameters do not specify a backup group or snapshot"),
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
&group,
)?;
- let worker_id = format!("{}:{}:{}", store_with_ns.store, store_with_ns.ns, group);
- let group = datastore.backup_group(store_with_ns.ns.clone(), group);
+ let worker_id = format!("{}:{}:{}", store, ns, group);
+ let group = datastore.backup_group(ns.clone(), group);
let mut prune_result = Vec::new();
task_log!(
worker,
"Starting prune on {} group \"{}\"",
- store_with_ns,
+ print_store_and_ns(&store, &ns),
group.group(),
);
}
};
let wanted =
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
- let name = store.name();
iter.any(|ns| -> bool {
- let store_with_ns = DatastoreWithNamespace {
- store: name.to_string(),
- ns: ns,
- };
- let user_privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
+ let user_privs = user_info.lookup_privs(&auth_id, &ns.acl_path(store.name()));
user_privs & wanted != 0
})
}
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.to_owned(),
- ns: backup_ns.clone(),
- };
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
println!(
"Download {} from {} ({}/{})",
- file_name, store_with_ns, backup_dir, file_name
+ file_name,
+ print_store_and_ns(&store, &backup_ns),
+ backup_dir,
+ file_name
);
let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.to_owned(),
- ns: backup_ns.clone(),
- };
+
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
)?;
let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
+ let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let (manifest, files) = read_backup_index(&backup_dir)?;
for file in files {
println!(
"Download {} from {} ({}/{})",
- file_name, store_with_ns, backup_dir_api, file_name
+ file_name,
+ print_store_and_ns(&store, &backup_ns),
+ backup_dir_api,
+ file_name
);
let mut path = datastore.base_path();
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.to_owned(),
- ns: backup_ns.clone(),
- };
+
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &backup_ns,
&auth_id,
0,
PRIV_DATASTORE_BACKUP,
Some(Operation::Write),
&backup_dir_api.group,
)?;
- let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
+ let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let file_name = CLIENT_LOG_BLOB_NAME;
bail!("backup already contains a log.");
}
- println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
+ println!(
+ "Upload backup log to {} {backup_dir_api}/{file_name}",
+ print_store_and_ns(&store, &backup_ns),
+ );
let data = req_body
.map_err(Error::from)
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
+ let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
let file_name = CATALOG_NAME;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.to_owned(),
- ns: ns.clone(),
- };
+
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
&backup_group,
)?;
- let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
+ let note_path = get_group_note_path(&datastore, &ns, &backup_group);
Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
+
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
&backup_group,
)?;
- let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
+ let note_path = get_group_note_path(&datastore, &ns, &backup_group);
replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
Ok(())
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.clone(),
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
+ let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
let (manifest, _) = backup_dir.load_manifest()?;
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
+ let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
backup_dir
.update_manifest(|manifest| {
rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
+ let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
Ok(backup_dir.is_protected())
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
&backup_dir.group,
)?;
- let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
+ let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
datastore.update_protection(&backup_dir, protected)
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store,
- ns: ns.unwrap_or_default(),
- };
+ let ns = ns.unwrap_or_default();
let owner_check_required = check_ns_privs_full(
- &store_with_ns,
+ &store,
+ &ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
)?;
- let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Write))?;
+ let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let backup_group = datastore.backup_group(store_with_ns.ns, backup_group);
+ let backup_group = datastore.backup_group(ns, backup_group);
if owner_check_required {
let owner = backup_group.get_owner()?;
use proxmox_schema::*;
use pbs_api_types::{
- Authid, BackupNamespace, DatastoreWithNamespace, NamespaceListItem, Operation,
- DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
+ Authid, BackupNamespace, NamespaceListItem, Operation, DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA,
+ PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
};
use pbs_datastore::DataStore;
let mut ns = parent.clone();
ns.push(name.clone())?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.clone(),
- ns,
- };
-
- check_ns_modification_privs(&store_with_ns, &auth_id)?;
+ check_ns_modification_privs(&store, &ns, &auth_id)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
const PRIVS_OK: u64 = PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
// first do a base check to avoid leaking if a NS exists or not
- let store_with_parent = DatastoreWithNamespace {
- store: store.clone(),
- ns: parent.clone(),
- };
- check_ns_privs(&store_with_parent, &auth_id, PRIVS_OK)?;
+ check_ns_privs(&store, &parent, &auth_id, PRIVS_OK)?;
let user_info = CachedUserInfo::new()?;
if ns.is_root() {
return true; // already covered by access permission above
}
- let privs = user_info.lookup_privs(&auth_id, &["datastore", &store, &ns.to_string()]);
+ let privs = user_info.lookup_privs(&auth_id, &ns.acl_path(&store));
privs & PRIVS_OK != 0
})
.map(ns_to_item)
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.clone(),
- ns: ns.clone(),
- };
- check_ns_modification_privs(&store_with_ns, &auth_id)?;
+
+ check_ns_modification_privs(&store, &ns, &auth_id)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobConfig| {
- let privs = user_info.lookup_privs(&auth_id, &job.store_with_ns().acl_path());
+ let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
if privs & required_privs == 0 {
return false;
}
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
- let store_with_ns = verification_job.store_with_ns();
-
user_info.check_privs(
&auth_id,
- &store_with_ns.acl_path(),
+ &verification_job.acl_path(),
PRIV_DATASTORE_VERIFY,
true,
)?;
use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, BackupNamespace, BackupType, DatastoreWithNamespace, Operation, SnapshotVerifyState,
- VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
- BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
- PRIV_DATASTORE_BACKUP,
+ Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
+ BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.clone(),
- ns: backup_ns.clone(),
- };
let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
let user_info = CachedUserInfo::new()?;
user_info
.check_privs(
&auth_id,
- &store_with_ns.acl_path(),
+ &backup_ns.acl_path(&store),
PRIV_DATASTORE_BACKUP,
false,
)
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
- let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.store_with_ns().acl_path());
+ let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
- let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.store_with_ns().acl_path());
+ let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 {
return false;
}
let list = list
.into_iter()
.filter(|job: &VerificationJobConfig| {
- let privs = user_info.lookup_privs(&auth_id, &job.store_with_ns().acl_path());
+ let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
privs & required_privs != 00
})
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(
- &auth_id,
- &config.store_with_ns().acl_path(),
- PRIV_DATASTORE_VERIFY,
- false,
- )?;
+ user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_VERIFY, false)?;
let _lock = verify::lock_config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
- user_info.check_privs(
- &auth_id,
- &verification_job.store_with_ns().acl_path(),
- required_privs,
- true,
- )?;
+ user_info.check_privs(&auth_id, &verification_job.acl_path(), required_privs, true)?;
rpcenv["digest"] = hex::encode(&digest).into();
let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
// check existing store and NS
- user_info.check_privs(
- &auth_id,
- &data.store_with_ns().acl_path(),
- PRIV_DATASTORE_VERIFY,
- true,
- )?;
+ user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
if let Some(delete) = delete {
for delete_prop in delete {
}
// check new store and NS
- user_info.check_privs(
- &auth_id,
- &data.store_with_ns().acl_path(),
- PRIV_DATASTORE_VERIFY,
- true,
- )?;
+ user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
config.set_data(&id, "verification", &data)?;
let (mut config, expected_digest) = verify::config()?;
let job: VerificationJobConfig = config.lookup("verification", &id)?;
- user_info.check_privs(
- &auth_id,
- &job.store_with_ns().acl_path(),
- PRIV_DATASTORE_VERIFY,
- true,
- )?;
+ user_info.check_privs(&auth_id, &job.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, DatastoreWithNamespace, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
- BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
- DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+ Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+ BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
- let store_with_ns = DatastoreWithNamespace {
- store: store.clone(),
- ns: backup_ns.clone(),
- };
-
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
+ let acl_path = backup_ns.acl_path(&store);
+ let privs = user_info.lookup_privs(&auth_id, &acl_path);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
// priv_backup needs owner check further down below!
if !priv_read && !priv_backup {
- bail!("no permissions on /{}", store_with_ns.acl_path().join("/"));
+ bail!("no permissions on /{}", acl_path.join("/"));
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
- print_ns_and_snapshot, Authid, DatastoreWithNamespace, GroupFilter, MediaPoolConfig, Operation,
+ print_ns_and_snapshot, print_store_and_ns, Authid, GroupFilter, MediaPoolConfig, Operation,
TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA,
PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
};
let mut need_catalog = false; // avoid writing catalog for empty jobs
for (group_number, group) in group_list.into_iter().enumerate() {
- let store_with_ns = DatastoreWithNamespace {
- store: datastore_name.to_owned(),
- ns: group.backup_ns().clone(),
- };
-
progress.done_groups = group_number as u64;
progress.done_snapshots = 0;
progress.group_snapshots = 0;
task_log!(
worker,
"{}, group {} was empty",
- store_with_ns,
+ print_store_and_ns(datastore_name, group.backup_ns()),
group.group()
);
continue;
use pbs_api_types::{
parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
- DatastoreWithNamespace, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
- DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
- TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
+ Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
+ DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+ PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
fn check_datastore_privs(
user_info: &CachedUserInfo,
- store_with_ns: &DatastoreWithNamespace,
+ store: &str,
+ ns: &BackupNamespace,
auth_id: &Authid,
owner: Option<&Authid>,
) -> Result<(), Error> {
- let privs = user_info.lookup_privs(auth_id, &store_with_ns.acl_path());
+ let acl_path = ns.acl_path(store);
+ let privs = user_info.lookup_privs(auth_id, &acl_path);
if (privs & PRIV_DATASTORE_BACKUP) == 0 {
- bail!("no permissions on /{}", store_with_ns.acl_path().join("/"));
+ bail!("no permissions on /{}", acl_path.join("/"));
}
if let Some(ref owner) = owner {
owner: Option<&Authid>,
) -> Result<(), Error> {
// check normal restore privs first
- let mut store_with_ns = DatastoreWithNamespace {
- store: store.name().to_string(),
- ns: ns.clone(),
- };
- check_datastore_privs(user_info, &store_with_ns, auth_id, owner)?;
+ check_datastore_privs(user_info, store.name(), ns, auth_id, owner)?;
// try create recursively if it does not exist
if !store.namespace_exists(ns) {
- store_with_ns.ns = Default::default();
+ let mut tmp_ns = BackupNamespace::root();
for comp in ns.components() {
- store_with_ns.ns.push(comp.to_string())?;
- if !store.namespace_exists(&store_with_ns.ns) {
- check_ns_modification_privs(&store_with_ns, auth_id).map_err(|_err| {
- format_err!("no permission to create namespace '{}'", store_with_ns.ns)
+ tmp_ns.push(comp.to_string())?;
+ if !store.namespace_exists(&tmp_ns) {
+ check_ns_modification_privs(store.name(), &tmp_ns, auth_id).map_err(|_err| {
+ format_err!("no permission to create namespace '{}'", tmp_ns)
})?;
- store.create_namespace(&store_with_ns.ns.parent(), comp.to_string())?;
+ store.create_namespace(&tmp_ns.parent(), comp.to_string())?;
}
}
}
for (target, namespaces) in used_datastores.values() {
check_datastore_privs(
&user_info,
- &DatastoreWithNamespace {
- store: target.name().to_string(),
- ns: Default::default(),
- },
+ target.name(),
+ &BackupNamespace::root(),
&auth_id,
owner.as_ref(),
)?;
let mut can_restore_some = false;
for ns in namespaces {
// only simple check, ns creation comes later
- let store_with_ns = DatastoreWithNamespace {
- store: datastore.name().to_string(),
- ns: ns.clone(),
- };
- if let Err(err) =
- check_datastore_privs(user_info, &store_with_ns, auth_id, Some(restore_owner))
- {
+ if let Err(err) = check_datastore_privs(
+ user_info,
+ datastore.name(),
+ &ns,
+ auth_id,
+ Some(restore_owner),
+ ) {
task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'");
continue;
}
use anyhow::{bail, Error};
use pbs_api_types::{
- privs_to_priv_names, Authid, BackupNamespace, DatastoreWithNamespace, PRIV_DATASTORE_AUDIT,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
+ privs_to_priv_names, Authid, BackupNamespace, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+ PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::{backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive};
/// Asserts that `privs` are fulfilled on datastore + (optional) namespace.
pub fn check_ns_privs(
- store_with_ns: &DatastoreWithNamespace,
+ store: &str,
+ ns: &BackupNamespace,
auth_id: &Authid,
privs: u64,
) -> Result<(), Error> {
- check_ns_privs_full(store_with_ns, auth_id, privs, 0).map(|_| ())
+ check_ns_privs_full(store, ns, auth_id, privs, 0).map(|_| ())
}
/// Asserts that `privs` for creating/destroying namespace in datastore are fulfilled.
pub fn check_ns_modification_privs(
- store_with_ns: &DatastoreWithNamespace,
+ store: &str,
+ ns: &BackupNamespace,
auth_id: &Authid,
) -> Result<(), Error> {
// we could allow it as easy purge-whole datastore, but lets be more restrictive for now
- if store_with_ns.ns.is_root() {
+ if ns.is_root() {
// TODO
bail!("Cannot create/delete root namespace!");
}
- let parent = DatastoreWithNamespace {
- store: store_with_ns.store.clone(),
- ns: store_with_ns.ns.parent(),
- };
+ let parent = ns.parent();
- check_ns_privs(&parent, auth_id, PRIV_DATASTORE_MODIFY)
+ check_ns_privs(store, &parent, auth_id, PRIV_DATASTORE_MODIFY)
}
/// Asserts that either `full_access_privs` or `partial_access_privs` are fulfilled on the datastore + (optional) namespace.
/// Return value indicates whether further checks like group ownerships are required because
/// `full_access_privs` are missing.
pub fn check_ns_privs_full(
- store_with_ns: &DatastoreWithNamespace,
+ store: &str,
+ ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(auth_id, &store_with_ns.acl_path());
+ let acl_path = ns.acl_path(store);
+ let privs = user_info.lookup_privs(auth_id, &acl_path);
if full_access_privs != 0 && (privs & full_access_privs) != 0 {
return Ok(false);
}
let priv_names = privs_to_priv_names(full_access_privs | partial_access_privs).join("|");
- let path = format!("/{}", store_with_ns.acl_path().join("/"));
+ let path = format!("/{}", acl_path.join("/"));
proxmox_router::http_bail!(
FORBIDDEN,
let mut override_owner = false;
if let Some(auth_id) = &self.auth_id {
let info = &self.user_info;
- let store_with_ns = DatastoreWithNamespace {
- store: self.store.name().to_string(),
- ns: ns.clone(),
- };
- let privs = info.lookup_privs(&auth_id, &store_with_ns.acl_path());
+
+ let privs =
+ info.lookup_privs(&auth_id, &ns.acl_path(self.store.name()));
if privs & NS_PRIVS_OK == 0 {
continue;
use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{
- print_ns_and_snapshot, Authid, BackupNamespace, BackupType, CryptMode, DatastoreWithNamespace,
+ print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode,
SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, UPID,
};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
let mut list = match group.list_backups() {
Ok(list) => list,
Err(err) => {
- let store_with_ns = DatastoreWithNamespace {
- store: verify_worker.datastore.name().to_owned(),
- ns: group.backup_ns().clone(),
- };
task_log!(
verify_worker.worker,
"verify {}, group {} - unable to list backups: {}",
- store_with_ns,
+ print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
group.group(),
err,
);
use proxmox_sys::task_log;
use pbs_api_types::{
- Authid, BackupNamespace, DatastoreWithNamespace, GroupFilter, GroupListItem, NamespaceListItem,
+ print_store_and_ns, Authid, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem,
Operation, RateLimitConfig, Remote, SnapshotListItem, MAX_NAMESPACE_DEPTH,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
};
pub async fn client(&self) -> Result<HttpClient, Error> {
crate::api2::config::remote::remote_client(&self.remote, Some(self.limit.clone())).await
}
-
- /// Returns DatastoreWithNamespace with namespace (or local namespace anchor).
- pub fn store_with_ns(&self, ns: BackupNamespace) -> DatastoreWithNamespace {
- DatastoreWithNamespace {
- store: self.store.name().to_string(),
- ns,
- }
- }
}
async fn pull_index_chunks<I: IndexFile>(
Ok(list.iter().map(|item| item.ns.clone()).collect())
}
-fn check_and_create_ns(
- params: &PullParameters,
- store_with_ns: &DatastoreWithNamespace,
-) -> Result<bool, Error> {
- let ns = &store_with_ns.ns;
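+/// Checks that `params.owner` may sync into `ns` on the local store, creating the namespace
+/// first if it does not exist yet; returns whether it was newly created.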
+fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<bool, Error> {
let mut created = false;
+ let store_ns_str = print_store_and_ns(params.store.name(), ns);
if !ns.is_root() && !params.store.namespace_path(&ns).exists() {
- check_ns_modification_privs(&store_with_ns, &params.owner)
+ check_ns_modification_privs(params.store.name(), ns, &params.owner)
.map_err(|err| format_err!("Creating {ns} not allowed - {err}"))?;
let name = match ns.components().last() {
};
if let Err(err) = params.store.create_namespace(&ns.parent(), name) {
- bail!(
- "sync into {} failed - namespace creation failed: {}",
- &store_with_ns,
- err
- );
+ bail!("sync into {store_ns_str} failed - namespace creation failed: {err}");
}
created = true;
}
- check_ns_privs(&store_with_ns, &params.owner, PRIV_DATASTORE_BACKUP)
- .map_err(|err| format_err!("sync into {store_with_ns} not allowed - {err}"))?;
+ check_ns_privs(
+ params.store.name(),
+ ns,
+ &params.owner,
+ PRIV_DATASTORE_BACKUP,
+ )
+ .map_err(|err| format_err!("sync into {store_ns_str} not allowed - {err}"))?;
Ok(created)
}
fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> Result<bool, Error> {
- let store_with_ns = params.store_with_ns(local_ns.clone());
- check_ns_modification_privs(&store_with_ns, &params.owner)
+ check_ns_modification_privs(&params.store.name(), local_ns, &params.owner)
.map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?;
params.store.remove_namespace_recursive(local_ns, true)
.store
.recursive_iter_backup_ns_ok(params.ns.clone(), Some(max_depth))?
.filter(|ns| {
- let store_with_ns = params.store_with_ns(ns.clone());
- let user_privs = user_info.lookup_privs(&params.owner, &store_with_ns.acl_path());
+ let user_privs =
+ user_info.lookup_privs(&params.owner, &ns.acl_path(params.store.name()));
user_privs & (PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT) != 0
})
.collect();
let mut synced_ns = HashSet::with_capacity(namespaces.len());
for namespace in namespaces {
- let source_store_ns = DatastoreWithNamespace {
- store: params.source.store().to_owned(),
- ns: namespace.clone(),
- };
+ let source_store_ns_str = print_store_and_ns(params.source.store(), &namespace);
+
let target_ns = namespace.map_prefix(¶ms.remote_ns, ¶ms.ns)?;
- let target_store_ns = params.store_with_ns(target_ns.clone());
+ let target_store_ns_str = print_store_and_ns(params.store.name(), &target_ns);
task_log!(worker, "----");
task_log!(
worker,
"Syncing {} into {}",
- source_store_ns,
- target_store_ns
+ source_store_ns_str,
+ target_store_ns_str
);
synced_ns.insert(target_ns.clone());
- match check_and_create_ns(&params, &target_store_ns) {
+ match check_and_create_ns(&params, &target_ns) {
Ok(true) => task_log!(worker, "Created namespace {}", target_ns),
Ok(false) => {}
Err(err) => {
task_log!(
worker,
"Cannot sync {} into {} - {}",
- source_store_ns,
- target_store_ns,
+ source_store_ns_str,
+ target_store_ns_str,
err,
);
errors = true;