git.proxmox.com Git - proxmox-backup.git / commitdiff
verify: introduce & use new Datastore.Verify privilege
author    Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fri, 30 Oct 2020 11:36:39 +0000 (12:36 +0100)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Fri, 30 Oct 2020 15:36:52 +0000 (16:36 +0100)
The new privilege allows verifying a whole datastore, while Datastore.Backup
now only allows verifying backups owned by the triggering user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
src/api2/admin/datastore.rs
src/backup/verify.rs
src/config/acl.rs
src/server/verify_job.rs
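
In short, the access model for the verify endpoint after this change looks as follows. This is a minimal illustrative sketch, not the actual implementation; the bit values and the helper name may_verify_group are placeholders:

    // Minimal sketch of the new access model; bit values and the helper
    // name `may_verify_group` are placeholders, not the real code.
    const PRIV_DATASTORE_VERIFY: u64 = 1 << 5; // placeholder bit value
    const PRIV_DATASTORE_BACKUP: u64 = 1 << 6; // placeholder bit value

    /// Datastore.Verify permits verifying any backup group in the datastore;
    /// Datastore.Backup only permits verifying groups owned by the caller.
    fn may_verify_group(user_privs: u64, caller_owns_group: bool) -> bool {
        user_privs & PRIV_DATASTORE_VERIFY != 0
            || (user_privs & PRIV_DATASTORE_BACKUP != 0 && caller_owns_group)
    }

    fn main() {
        assert!(may_verify_group(PRIV_DATASTORE_VERIFY, false));
        assert!(may_verify_group(PRIV_DATASTORE_BACKUP, true));
        assert!(!may_verify_group(PRIV_DATASTORE_BACKUP, false));
    }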

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index b9412ba67e5884dbbb49cd36c5e76215a3315cc9..220f06aede999368af2bdf0b574722ae533a43f1 100644
@@ -42,6 +42,7 @@ use crate::config::acl::{
     PRIV_DATASTORE_READ,
     PRIV_DATASTORE_PRUNE,
     PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_VERIFY,
 };
 
 fn check_priv_or_backup_owner(
@@ -537,7 +538,7 @@ pub fn status(
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
     },
 )]
 /// Verify backups.
@@ -553,6 +554,7 @@ pub fn verify(
 ) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
 
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let worker_id;
 
     let mut backup_dir = None;
@@ -563,12 +565,18 @@ pub fn verify(
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
             let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
+
             backup_dir = Some(dir);
             worker_type = "verify_snapshot";
         }
         (Some(backup_type), Some(backup_id), None) => {
             worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
             let group = BackupGroup::new(backup_type, backup_id);
+
+            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
+
             backup_group = Some(group);
             worker_type = "verify_group";
         }
@@ -578,13 +586,12 @@ pub fn verify(
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }
 
-    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
     let upid_str = WorkerTask::new_thread(
         worker_type,
         Some(worker_id.clone()),
-        auth_id,
+        auth_id.clone(),
         to_stdout,
         move |worker| {
             let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@@ -617,7 +624,16 @@ pub fn verify(
                 )?;
                 failed_dirs
             } else {
-                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
+                let privs = CachedUserInfo::new()?
+                    .lookup_privs(&auth_id, &["datastore", &store]);
+
+                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
+                    Some(auth_id)
+                } else {
+                    None
+                };
+
+                verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index bb39f7d8323f12063eac7e5712c9a819d2b1109a..e0e28ee9bbd057b43d4ee054969131961c9851ad 100644
@@ -482,7 +482,7 @@ pub fn verify_backup_group(
     Ok((count, errors))
 }
 
-/// Verify all backups inside a datastore
+/// Verify all (owned) backups inside a datastore
 ///
 /// Errors are logged to the worker log.
 ///
@@ -493,14 +493,41 @@ pub fn verify_all_backups(
     datastore: Arc<DataStore>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
+    owner: Option<Authid>,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();
 
+    if let Some(owner) = &owner {
+        task_log!(
+            worker,
+            "verify datastore {} - limiting to backups owned by {}",
+            datastore.name(),
+            owner
+        );
+    }
+
+    let filter_by_owner = |group: &BackupGroup| {
+        if let Some(owner) = &owner {
+            match datastore.get_owner(group) {
+                Ok(ref group_owner) => {
+                    group_owner == owner
+                        || (group_owner.is_token()
+                            && !owner.is_token()
+                            && group_owner.user() == owner.user())
+                },
+                Err(_) => false,
+            }
+        } else {
+            true
+        }
+    };
+
     let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
         Ok(list) => list
             .into_iter()
             .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             task_log!(
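
The filter_by_owner closure above keeps a group when its owner matches the requesting auth id exactly, or when the group is owned by an API token belonging to the requesting (non-token) user. A standalone sketch of that matching rule, with a simplified stand-in for the Authid type:

    // Simplified stand-in for the real Authid: a user name plus an
    // optional token name.
    #[derive(PartialEq)]
    struct AuthId {
        user: String,
        token: Option<String>,
    }

    impl AuthId {
        fn is_token(&self) -> bool {
            self.token.is_some()
        }
    }

    /// Mirrors the ownership check in `filter_by_owner`: exact match, or the
    /// group is owned by a token that belongs to the requesting user.
    fn owner_matches(group_owner: &AuthId, requester: &AuthId) -> bool {
        group_owner == requester
            || (group_owner.is_token()
                && !requester.is_token()
                && group_owner.user == requester.user)
    }

    fn main() {
        let user = AuthId { user: "user@pbs".into(), token: None };
        let token = AuthId { user: "user@pbs".into(), token: Some("t1".into()) };
        assert!(owner_matches(&user, &user));   // own backups are kept
        assert!(owner_matches(&token, &user));  // backups of the user's token are kept
        assert!(!owner_matches(&user, &token)); // a token does not cover the user's backups
    }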
diff --git a/src/config/acl.rs b/src/config/acl.rs
index f82d5903c5878e326fe220423b73801fca62f5b4..7345adea5939dcdbe98b03dcb5577f3d926559e8 100644
@@ -30,6 +30,7 @@ constnamedbitmap! {
         PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
         PRIV_DATASTORE_MODIFY("Datastore.Modify");
         PRIV_DATASTORE_READ("Datastore.Read");
+        PRIV_DATASTORE_VERIFY("Datastore.Verify");
 
         /// Datastore.Backup also requires backup ownership
         PRIV_DATASTORE_BACKUP("Datastore.Backup");
@@ -64,12 +65,14 @@ pub const ROLE_DATASTORE_ADMIN: u64 =
 PRIV_DATASTORE_AUDIT |
 PRIV_DATASTORE_MODIFY |
 PRIV_DATASTORE_READ |
+PRIV_DATASTORE_VERIFY |
 PRIV_DATASTORE_BACKUP |
 PRIV_DATASTORE_PRUNE;
 
-/// Datastore.Reader can read datastore content an do restore
+/// Datastore.Reader can read/verify datastore content and do restore
 pub const ROLE_DATASTORE_READER: u64 =
 PRIV_DATASTORE_AUDIT |
+PRIV_DATASTORE_VERIFY |
 PRIV_DATASTORE_READ;
 
 /// Datastore.Backup can do backup and restore, but no prune.
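
Since the roles in src/config/acl.rs are plain bitmasks, adding PRIV_DATASTORE_VERIFY to DatastoreAdmin and DatastoreReader is all that is needed for those roles to pass the endpoint's permission check. A rough sketch of such a check with placeholder bit values (the real values come from the constnamedbitmap! macro):

    // Placeholder bit values, not the constnamedbitmap! output.
    const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;
    const PRIV_DATASTORE_READ: u64 = 1 << 1;
    const PRIV_DATASTORE_VERIFY: u64 = 1 << 2;
    const PRIV_DATASTORE_BACKUP: u64 = 1 << 3;

    // Reader role as extended by this commit: audit + verify + read.
    const ROLE_DATASTORE_READER: u64 =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_READ;

    /// The verify endpoint accepts either Datastore.Verify or Datastore.Backup
    /// (the latter combined with the ownership checks shown earlier).
    fn passes_endpoint_check(privs: u64) -> bool {
        privs & (PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP) != 0
    }

    fn main() {
        assert!(passes_endpoint_check(ROLE_DATASTORE_READER));
        assert!(!passes_endpoint_check(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ));
    }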
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index a8f532ac9bcc52525d413634b6c5ddc5c0ce207a..c98cd5b22b14d2735e9a1b46a9e3de5219afba73 100644
@@ -65,7 +65,7 @@ pub fn do_verification_job(
                 task_log!(worker,"task triggered by schedule '{}'", event_str);
             }
 
-            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), Some(&filter));
+            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), None, Some(&filter));
             let job_result = match result {
                 Ok(ref errors) if errors.is_empty() => Ok(()),
                 Ok(_) => Err(format_err!("verification failed - please check the log for details")),