From b9310489cf8bfaed2a3956f8da208ec8320671a2 Mon Sep 17 00:00:00 2001
From: Fabian Grünbichler
Date: Thu, 12 May 2022 16:44:52 +0200
Subject: [PATCH] pull/sync: treat unset max-depth as full recursion
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

An unset max-depth now means full recursion instead of no recursion,
which is consistent with how tape backup and verification jobs already
behave. An explicit max-depth of 0 keeps its old meaning of "no
recursion".

Signed-off-by: Fabian Grünbichler
---
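
Note for reviewers (not part of the commit; git am ignores commentary
between the "---" marker and the diffstat): a minimal standalone sketch
of the new depth semantics. The helper names `effective_depth` and
`clamp_depth` are hypothetical, chosen for illustration only; what they
mirror is the `Option<usize>` convention this patch introduces --
`Some(0)` keeps the old "no recursion" meaning, `Some(n)` limits
recursion to n levels, and `None` (unset) now means full recursion.

    // Sketch only, assuming MAX_NAMESPACE_DEPTH == 7 as in pbs-api-types.
    const MAX_NAMESPACE_DEPTH: usize = 7;

    /// Mirrors `update.max_depth.or(data.max_depth)` in update_sync_job():
    /// an explicitly updated depth wins, otherwise the stored config value
    /// is validated; if both are unset there is nothing to check.
    fn effective_depth(update: Option<usize>, config: Option<usize>) -> Option<usize> {
        update.or(config)
    }

    /// Mirrors the clamping in PullParameters::new(): an explicit depth is
    /// clamped to the namespace levels still available below `ns_depth`,
    /// while an unset depth passes through as "recurse fully".
    fn clamp_depth(max_depth: Option<usize>, ns_depth: usize) -> Option<usize> {
        max_depth.map(|depth| depth.min(MAX_NAMESPACE_DEPTH - ns_depth))
    }

    fn main() {
        assert_eq!(effective_depth(Some(5), Some(2)), Some(5)); // update wins
        assert_eq!(effective_depth(None, Some(2)), Some(2)); // config value kept
        assert_eq!(clamp_depth(Some(99), 3), Some(4)); // clamped to remaining levels
        assert_eq!(clamp_depth(None, 3), None); // unset stays full recursion
    }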
 pbs-api-types/src/jobs.rs |  4 ++--
 src/api2/config/sync.rs   | 17 +++++++++++++----
 src/api2/pull.rs          |  1 -
 src/server/pull.rs        | 23 ++++++++++++-----------
 4 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 368e60e3..56d87e2b 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -473,8 +473,8 @@ pub struct SyncJobConfig {
     pub remote_ns: Option<BackupNamespace>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub remove_vanished: Option<bool>,
-    #[serde(default)]
-    pub max_depth: usize,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_depth: Option<usize>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index 966ce2a4..70a42d53 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -221,6 +221,8 @@ pub enum DeletableProperty {
     ns,
     /// Delete the remote_ns property,
     remote_ns,
+    /// Delete the max_depth property,
+    max_depth,
 }
 
 #[api(
@@ -312,6 +314,9 @@ pub fn update_sync_job(
                 DeletableProperty::remote_ns => {
                     data.remote_ns = None;
                 }
+                DeletableProperty::max_depth => {
+                    data.max_depth = None;
+                }
             }
         }
     }
@@ -341,7 +346,9 @@ pub fn update_sync_job(
         data.store = store;
     }
     if let Some(ns) = update.ns {
-        check_max_depth(&ns, update.max_depth.unwrap_or(data.max_depth))?;
+        if let Some(explicit_depth) = update.max_depth.or(data.max_depth) {
+            check_max_depth(&ns, explicit_depth)?;
+        }
         data.ns = Some(ns);
     }
     if let Some(remote) = update.remote {
@@ -351,7 +358,9 @@ pub fn update_sync_job(
         data.remote_store = remote_store;
     }
     if let Some(remote_ns) = update.remote_ns {
-        check_max_depth(&remote_ns, update.max_depth.unwrap_or(data.max_depth))?;
+        if let Some(explicit_depth) = update.max_depth.or(data.max_depth) {
+            check_max_depth(&remote_ns, explicit_depth)?;
+        }
         data.remote_ns = Some(remote_ns);
     }
     if let Some(owner) = update.owner {
@@ -391,7 +400,7 @@ pub fn update_sync_job(
         if let Some(ref ns) = data.remote_ns {
             check_max_depth(ns, max_depth)?;
         }
-        data.max_depth = max_depth;
+        data.max_depth = Some(max_depth);
     }
 
     if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
@@ -517,7 +526,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
         owner: Some(write_auth_id.clone()),
         comment: None,
         remove_vanished: None,
-        max_depth: 0,
+        max_depth: None,
         group_filter: None,
         schedule: None,
         limit: pbs_api_types::RateLimitConfig::default(), // no limit
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index 807629d3..707fad3c 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -233,7 +233,6 @@ async fn pull(
     let delete = remove_vanished.unwrap_or(false);
     let ns = ns.unwrap_or_default();
-    let max_depth = max_depth.unwrap_or(0);
 
     let ns_str = if ns.is_root() {
         None
     } else {
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 8847c91d..4c3f5040 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -54,8 +54,8 @@ pub struct PullParameters {
     owner: Authid,
     /// Whether to remove groups which exist locally, but not on the remote end
     remove_vanished: bool,
-    /// How many levels of sub-namespaces to pull (0 == no recursion)
-    max_depth: usize,
+    /// How many levels of sub-namespaces to pull (0 == no recursion, None == maximum recursion)
+    max_depth: Option<usize>,
     /// Filters for reducing the pull scope
     group_filter: Option<Vec<GroupFilter>>,
     /// Rate limits for all transfers from `remote`
@@ -75,13 +75,14 @@ impl PullParameters {
         remote_ns: BackupNamespace,
         owner: Authid,
         remove_vanished: Option<bool>,
-        max_depth: usize,
+        max_depth: Option<usize>,
         group_filter: Option<Vec<GroupFilter>>,
         limit: RateLimitConfig,
     ) -> Result<Self, Error> {
         let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
 
-        let max_depth = min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth());
+        let max_depth =
+            max_depth.map(|max_depth| min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth()));
 
         let (remote_config, _digest) = pbs_config::remote::config()?;
         let remote: Remote = remote_config.lookup("remote", remote)?;
@@ -749,11 +750,11 @@ async fn query_namespaces(
         "api2/json/admin/datastore/{}/namespace",
         params.source.store()
     );
-    let data = json!({
-        "max-depth": params.max_depth,
-    });
+    let data = params
+        .max_depth
+        .map(|max_depth| json!({ "max-depth": max_depth }));
     let mut result = client
-        .get(&path, Some(data))
+        .get(&path, data)
         .await
         .map_err(|err| format_err!("Failed to retrieve namespaces from remote - {}", err))?;
     let mut list: Vec<BackupNamespace> = serde_json::from_value(result["data"].take())?;
@@ -846,7 +847,7 @@ fn check_and_remove_vanished_ns(
 
     let mut local_ns_list: Vec<BackupNamespace> = params
         .store
-        .recursive_iter_backup_ns_ok(params.ns.clone(), Some(params.max_depth))?
+        .recursive_iter_backup_ns_ok(params.ns.clone(), params.max_depth)?
         .filter(|ns| {
             let store_with_ns = params.store_with_ns(ns.clone());
             let user_privs = user_info.lookup_privs(&params.owner, &store_with_ns.acl_path());
@@ -911,7 +912,7 @@ pub async fn pull_store(
     // explicit create shared lock to prevent GC on newly created chunks
     let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;
 
-    let namespaces = if params.remote_ns.is_root() && params.max_depth == 0 {
+    let namespaces = if params.remote_ns.is_root() && params.max_depth == Some(0) {
         vec![params.remote_ns.clone()] // backwards compat - don't query remote namespaces!
     } else {
         query_namespaces(client, &params).await?
@@ -959,7 +960,7 @@ pub async fn pull_store(
             Ok((ns_progress, ns_errors)) => {
                 errors |= ns_errors;
 
-                if params.max_depth > 0 {
+                if params.max_depth != Some(0) {
                     groups += ns_progress.done_groups;
                     snapshots += ns_progress.done_snapshots;
                     task_log!(
-- 
2.39.2