datastore status: do not count empty groups
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 97b5b7fa368c23f7ba8889241f63ac69553c0c01..4073c96b5f8dc7595531046c997407b09343a141 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -12,16 +12,18 @@ use hyper::{header, Body, Response, StatusCode};
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
 
-use proxmox::api::{
-    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
-    RpcEnvironment, RpcEnvironmentType, Permission
-};
-use proxmox::api::router::SubdirMap;
-use proxmox::api::schema::*;
-use proxmox::tools::fs::{
+use proxmox_sys::sortable;
+use proxmox_sys::fs::{
     file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
 };
-use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
+use proxmox_router::{
+    list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
+    RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
+};
+use proxmox_schema::*;
+use proxmox_sys::{task_log, task_warn};
+use proxmox_async::blocking::WrappedReaderStream;
+use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
 
 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
@@ -29,7 +31,7 @@ use pxar::EntryKind;
 use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
     DataStoreListItem, GarbageCollectionStatus, GroupListItem,
     SnapshotListItem, SnapshotVerifyState, PruneOptions,
-    DataStoreStatus, RRDMode, RRDTimeFrameResolution,
+    DataStoreStatus, RRDMode, RRDTimeFrame,
     BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
     BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
     IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
@@ -53,10 +55,7 @@ use pbs_datastore::fixed_index::{FixedIndexReader};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use pbs_datastore::prune::compute_prune_info;
-use pbs_tools::blocking::WrappedReaderStream;
-use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
 use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_tools::{task_log, task_warn};
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::{WorkerTask, formatter};
 
@@ -84,7 +83,7 @@ fn check_priv_or_backup_owner(
     required_privs: u64,
 ) -> Result<(), Error> {
     let user_info = CachedUserInfo::new()?;
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
 
     if privs & required_privs == 0 {
         let owner = store.get_owner(group)?;
@@ -126,7 +125,7 @@ fn get_all_snapshot_files(
     info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
 
-    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
 
     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -273,7 +272,9 @@ pub fn delete_group(
 
     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
 
-    datastore.remove_backup_group(&group)?;
+    if !datastore.remove_backup_group(&group)? {
+        bail!("did not delete whole group because of protected snapthots");
+    }
 
     Ok(Value::Null)
 }
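
Note the semantic change here: remove_backup_group() used to return () and now reports whether the whole group was actually removed, so the handler can fail loudly when protected snapshots were skipped. A minimal std-only sketch of the assumed contract (the real helper lives in pbs_datastore; is_protected stands in for the datastore's own check):

    use std::io;
    use std::path::Path;

    /// Remove every snapshot directory of a group, skipping protected ones.
    /// Returns Ok(true) only if the whole group, including its directory,
    /// could be removed.
    fn remove_backup_group(
        group_path: &Path,
        is_protected: impl Fn(&Path) -> bool,
    ) -> io::Result<bool> {
        let mut removed_all = true;
        for entry in std::fs::read_dir(group_path)? {
            let snapshot = entry?.path();
            if is_protected(&snapshot) {
                removed_all = false; // keep protected snapshot, keep group dir
                continue;
            }
            std::fs::remove_dir_all(&snapshot)?;
        }
        if removed_all {
            // only an empty group directory may be dropped
            std::fs::remove_dir_all(group_path)?;
        }
        Ok(removed_all)
    }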
@@ -442,6 +443,7 @@ pub fn list_snapshots (
         let backup_type = group.backup_type().to_string();
         let backup_id = group.backup_id().to_string();
         let backup_time = info.backup_dir.backup_time();
+        let protected = info.backup_dir.is_protected(base_path.clone());
 
         match get_all_snapshot_files(&datastore, &info) {
             Ok((manifest, files)) => {
@@ -480,6 +482,7 @@ pub fn list_snapshots (
                     files,
                     size,
                     owner,
+                    protected,
                 }
             },
             Err(err) => {
@@ -504,6 +507,7 @@ pub fn list_snapshots (
                     files,
                     size: None,
                     owner,
+                    protected,
                 }
             },
         }
@@ -532,7 +536,7 @@ pub fn list_snapshots (
             snapshots.extend(
                 group_backups
                     .into_iter()
-                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
             );
 
             Ok(snapshots)
@@ -545,7 +549,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
 
     groups.iter()
         .filter(|group| {
-            let owner = match store.get_owner(&group) {
+            let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
                 Err(err) => {
                     eprintln!("Failed to get owner of group '{}/{}' - {}",
@@ -564,15 +568,19 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
         .try_fold(Counts::default(), |mut counts, group| {
             let snapshot_count = group.list_backups(&base_path)?.len() as u64;
 
-            let type_count = match group.backup_type() {
-                "ct" => counts.ct.get_or_insert(Default::default()),
-                "vm" => counts.vm.get_or_insert(Default::default()),
-                "host" => counts.host.get_or_insert(Default::default()),
-                _ => counts.other.get_or_insert(Default::default()),
-            };
+            // only include groups with snapshots (avoid confusing users
+            // by counting/displaying empty groups)
+            if snapshot_count > 0 {
+                let type_count = match group.backup_type() {
+                    "ct" => counts.ct.get_or_insert(Default::default()),
+                    "vm" => counts.vm.get_or_insert(Default::default()),
+                    "host" => counts.host.get_or_insert(Default::default()),
+                    _ => counts.other.get_or_insert(Default::default()),
+                };
 
-            type_count.groups += 1;
-            type_count.snapshots += snapshot_count;
+                type_count.groups += 1;
+                type_count.snapshots += snapshot_count;
+            }
 
             Ok(counts)
         })
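
This hunk is the change the commit subject refers to: a group that exists on disk but holds zero snapshots no longer bumps the per-type group counter in the datastore status. A self-contained sketch of the resulting aggregation behaviour (simplified counters, made-up input):

    #[derive(Default, Debug)]
    struct TypeCounts { groups: u64, snapshots: u64 }

    /// Fold (backup_type, snapshot_count) pairs into per-type counters,
    /// skipping empty groups exactly like the patched try_fold above.
    fn count(groups: &[(&str, u64)]) -> (Option<TypeCounts>, Option<TypeCounts>) {
        let (mut vm, mut ct) = (None, None);
        for &(ty, snapshots) in groups {
            if snapshots == 0 { continue; } // empty group: not counted, not shown
            let slot = match ty {
                "vm" => vm.get_or_insert(TypeCounts::default()),
                _ => ct.get_or_insert(TypeCounts::default()),
            };
            slot.groups += 1;
            slot.snapshots += snapshots;
        }
        (vm, ct)
    }

    fn main() {
        // one vm group with 3 snapshots, one empty vm group, one empty ct group
        let (vm, ct) = count(&[("vm", 3), ("vm", 0), ("ct", 0)]);
        assert_eq!(vm.unwrap().groups, 1); // the empty vm group is ignored
        assert!(ct.is_none()); // a type with only empty groups stays None
    }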
@@ -847,8 +855,8 @@ pub fn prune(
     let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
 
     if dry_run {
-        for (info, mut keep) in prune_info {
-            if keep_all { keep = true; }
+        for (info, mark) in prune_info {
+            let keep = keep_all || mark.keep();
 
             let backup_time = info.backup_dir.backup_time();
             let group = info.backup_dir.group();
@@ -858,6 +866,7 @@ pub fn prune(
                 "backup-id": group.backup_id(),
                 "backup-time": backup_time,
                 "keep": keep,
+                "protected": mark.protected(),
             }));
         }
         return Ok(json!(prune_result));
@@ -875,8 +884,8 @@ pub fn prune(
                   store, backup_type, backup_id);
     }
 
-    for (info, mut keep) in prune_info {
-        if keep_all { keep = true; }
+    for (info, mark) in prune_info {
+        let keep = keep_all || mark.keep();
 
         let backup_time = info.backup_dir.backup_time();
         let timestamp = info.backup_dir.backup_time_string();
@@ -888,7 +897,7 @@ pub fn prune(
             group.backup_type(),
             group.backup_id(),
             timestamp,
-            if keep { "keep" } else { "remove" },
+            mark,
         );
 
         task_log!(worker, "{}", msg);
@@ -898,6 +907,7 @@ pub fn prune(
             "backup-id": group.backup_id(),
             "backup-time": backup_time,
             "keep": keep,
+            "protected": mark.protected(),
         }));
 
         if !(dry_run || keep) {
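
compute_prune_info() no longer yields a bare bool per snapshot but a mark that knows keep(), protected(), and how to print itself (it is passed straight into the "{}" of the log message above). A sketch of such a type, with assumed names; the real one lives in pbs_datastore::prune:

    use std::fmt;

    #[derive(Clone, Copy, Debug)]
    enum PruneMark { Protected, Keep, Remove }

    impl PruneMark {
        fn keep(self) -> bool {
            !matches!(self, PruneMark::Remove)
        }
        fn protected(self) -> bool {
            matches!(self, PruneMark::Protected)
        }
    }

    impl fmt::Display for PruneMark {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            // this string ends up in the task log line built above
            f.write_str(match self {
                PruneMark::Protected => "protected",
                PruneMark::Keep => "keep",
                PruneMark::Remove => "remove",
            })
        }
    }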
@@ -963,7 +973,7 @@ pub fn prune_datastore(
         auth_id.to_string(),
         to_stdout,
         move |worker| crate::server::prune_datastore(
-            worker.clone(),
+            worker,
             auth_id,
             prune_options,
             &store,
@@ -1065,7 +1075,7 @@ pub fn get_datastore_list(
     let mut list = Vec::new();
 
     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             list.push(
@@ -1329,10 +1339,10 @@ pub fn upload_backup_log(
         // always verify blob/CRC at server side
         let blob = DataBlob::load_from_reader(&mut &data[..])?;
 
-        replace_file(&path, blob.raw_data(), CreateOptions::new())?;
+        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
 
         // fixme: use correct formatter
-        Ok(formatter::json_response(Ok(Value::Null)))
+        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
     }.boxed()
 }
 
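Both replace_file() call sites in this patch gain a fourth boolean argument; passing false keeps the previous behaviour. A signature sketch of the assumed new helper in proxmox_sys::fs (the parameter name fsync is a guess from the call sites, not confirmed by this diff):

    pub fn replace_file<P: AsRef<Path>>(
        path: P,
        data: &[u8],
        options: CreateOptions,
        fsync: bool, // new: whether to fsync before replacing the target
    ) -> Result<(), Error>
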
@@ -1395,7 +1405,7 @@ pub fn catalog(
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
     let (csum, size) = index.compute_csum();
-    manifest.verify_file(&file_name, &csum, size)?;
+    manifest.verify_file(file_name, &csum, size)?;
 
     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1440,7 +1450,7 @@ pub fn pxar_file_download(
 
     async move {
         let store = required_string_param(&param, "store")?;
-        let datastore = DataStore::lookup_datastore(&store)?;
+        let datastore = DataStore::lookup_datastore(store)?;
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -1477,7 +1487,7 @@ pub fn pxar_file_download(
             .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
         let (csum, size) = index.compute_csum();
-        manifest.verify_file(&pxar_name, &csum, size)?;
+        manifest.verify_file(pxar_name, &csum, size)?;
 
         let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1538,7 +1548,7 @@ pub fn pxar_file_download(
                 schema: DATASTORE_SCHEMA,
             },
             timeframe: {
-                type: RRDTimeFrameResolution,
+                type: RRDTimeFrame,
             },
             cf: {
                 type: RRDMode,
@@ -1552,7 +1562,7 @@ pub fn pxar_file_download(
 /// Read datastore stats
 pub fn get_rrd_stats(
     store: String,
-    timeframe: RRDTimeFrameResolution,
+    timeframe: RRDTimeFrame,
     cf: RRDMode,
     _param: Value,
 ) -> Result<Value, Error> {
@@ -1645,7 +1655,7 @@ pub fn set_group_notes(
     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
 
     let note_path = get_group_note_path(&datastore, &backup_group);
-    replace_file(note_path, notes.as_bytes(), CreateOptions::new())?;
+    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
 
     Ok(())
 }
@@ -1744,6 +1754,90 @@ pub fn set_notes(
     Ok(())
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Query protection for a specific backup
+pub fn get_protection(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<bool, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+
+    Ok(backup_dir.is_protected(datastore.base_path()))
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            protected: {
+                description: "Enable/disable protection.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"],
+                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
+                                           true),
+    },
+)]
+/// Enable or disable protection for a specific backup
+pub fn set_protection(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    protected: bool,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+
+    datastore.update_protection(&backup_dir, protected)
+}
+
 #[api(
     input: {
         properties: {
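
The two new handlers above delegate to BackupDir::is_protected() and DataStore::update_protection(). A minimal std-only sketch of what such primitives could look like, assuming protection is tracked as a marker file inside the snapshot directory (the file name ".protected" is an illustration, not confirmed by this diff):

    use std::path::{Path, PathBuf};

    fn protected_file(snapshot_path: &Path) -> PathBuf {
        snapshot_path.join(".protected")
    }

    /// Backend for the GET handler: protection is the marker's existence.
    fn is_protected(snapshot_path: &Path) -> bool {
        protected_file(snapshot_path).exists()
    }

    /// Backend for the PUT handler: create or remove the marker file.
    fn update_protection(snapshot_path: &Path, protected: bool) -> std::io::Result<()> {
        let path = protected_file(snapshot_path);
        if protected {
            std::fs::File::create(path)?; // touch the marker
        } else if path.exists() {
            std::fs::remove_file(path)?;
        }
        Ok(())
    }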
@@ -1892,6 +1986,12 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_GET_NOTES)
             .put(&API_METHOD_SET_NOTES)
     ),
+    (
+        "protected",
+        &Router::new()
+            .get(&API_METHOD_GET_PROTECTION)
+            .put(&API_METHOD_SET_PROTECTION)
+    ),
     (
         "prune",
         &Router::new()