git.proxmox.com Git - proxmox-backup.git/commitdiff
GC: flatten existing status into job status
author Fabian Grünbichler <f.gruenbichler@proxmox.com>
Mon, 22 Apr 2024 09:02:57 +0000 (11:02 +0200)
committer Fabian Grünbichler <f.gruenbichler@proxmox.com>
Mon, 22 Apr 2024 11:58:08 +0000 (13:58 +0200)
to avoid drifting definitions and reduce duplication. with the next major
release, the 'upid' field could then be renamed and aliased to be in line with
the other jobs, which all use 'last-run-upid'. doing it now would break
existing callers of the GC status endpoint (or consumers of the on-disk status
file).
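
as an illustration of the rename-and-alias idea above (hypothetical, not part of this commit; struct and field names are made up), serde could emit the new key while still accepting the old one when deserializing:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct FutureGcStatus {
    /// would serialize as "last-run-upid"; the old "upid" key is still accepted on input
    #[serde(rename = "last-run-upid", alias = "upid")]
    #[serde(skip_serializing_if = "Option::is_none")]
    last_run_upid: Option<String>,
}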

the main difference is that the GC status fields are now not optional (except
for the UPID) in the job status, since flattening an optional value is not
possible. this only affects datastores that were never GCed at all, and only
direct API consumers, since the UI handles those fields correctly.
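
a minimal sketch of the flattening described above (simplified field set, made-up struct names, assuming the serde and serde_json crates): with #[serde(flatten)] the inner status fields show up at the top level of the job status JSON, and because the flattened value cannot be optional, the numeric counters fall back to their defaults (0) for datastores that were never GCed:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
struct GcStatus {
    /// only the UPID stays optional
    #[serde(skip_serializing_if = "Option::is_none")]
    upid: Option<String>,
    removed_bytes: u64,
    pending_chunks: usize,
}

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
struct GcJobStatus {
    store: String,
    /// merge the GcStatus fields into this struct's JSON object
    #[serde(flatten)]
    status: GcStatus,
}

fn main() {
    let job = GcJobStatus { store: "store1".into(), ..Default::default() };
    // never-GCed datastore: counters serialize as 0, the upid is omitted entirely
    // -> {"store":"store1","removed-bytes":0,"pending-chunks":0}
    println!("{}", serde_json::to_string(&job).unwrap());
}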

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
pbs-api-types/src/datastore.rs
src/api2/admin/datastore.rs
www/Utils.js
www/config/GCView.js

pbs-api-types/src/datastore.rs
index c6641655b19e8a74ea4a222e7b907d7231cb5c1c..45dd41aee19a014d0e7337a86d723fc47645f8a8 100644 (file)
@@ -1280,7 +1280,7 @@ pub struct TypeCounts {
         },
     },
 )]
-#[derive(Clone, Default, Serialize, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "kebab-case")]
 /// Garbage collection status.
 pub struct GarbageCollectionStatus {
@@ -1309,11 +1309,10 @@ pub struct GarbageCollectionStatus {
 
 #[api(
     properties: {
-        "last-run-upid": {
-            optional: true,
-            type: UPID,
+        "status": {
+            type: GarbageCollectionStatus,
         },
-    },
+    }
 )]
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "kebab-case")]
@@ -1321,21 +1320,8 @@ pub struct GarbageCollectionStatus {
 pub struct GarbageCollectionJobStatus {
     /// Datastore
     pub store: String,
-    /// upid of the last run gc job
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub last_run_upid: Option<String>,
-    /// Sum of removed bytes.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub removed_bytes: Option<u64>,
-    /// Number of removed chunks
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub removed_chunks: Option<usize>,
-    /// Sum of pending bytes
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub pending_bytes: Option<u64>,
-    /// Number of pending chunks
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub pending_chunks: Option<usize>,
+    #[serde(flatten)]
+    pub status: GarbageCollectionStatus,
     /// Schedule of the gc job
     #[serde(skip_serializing_if = "Option::is_none")]
     pub schedule: Option<String>,
src/api2/admin/datastore.rs
index 77f9fe3d8f3f1b77853548d84e4244ac81001fc7..da2b545aa4d178301aa7b9c4a261bba2bae0a00b 100644 (file)
@@ -35,13 +35,13 @@ use pxar::EntryKind;
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
     Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
-    GarbageCollectionJobStatus, GarbageCollectionStatus, GroupListItem, JobScheduleStatus,
-    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
-    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
-    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
+    PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -1273,35 +1273,15 @@ pub fn garbage_collection_job_status(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let status_in_memory = datastore.last_gc_status();
     let state_file = JobState::load("garbage_collection", &store)
-        .map_err(|err| {
-            log::error!(
-                "could not open statefile for {:?}: {}",
-                info.last_run_upid,
-                err
-            )
-        })
+        .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
         .ok();
 
-    let mut selected_upid = None;
-    if status_in_memory.upid.is_some() {
-        selected_upid = status_in_memory.upid;
-    } else if let Some(JobState::Finished { upid, .. }) = &state_file {
-        selected_upid = Some(upid.to_owned());
-    }
-
-    info.last_run_upid = selected_upid.clone();
-
-    match selected_upid {
-        Some(upid) => {
-            info.removed_bytes = Some(status_in_memory.removed_bytes);
-            info.removed_chunks = Some(status_in_memory.removed_chunks);
-            info.pending_bytes = Some(status_in_memory.pending_bytes);
-            info.pending_chunks = Some(status_in_memory.pending_chunks);
-
+    match status_in_memory.upid {
+        Some(ref upid) => {
             let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
             let mut duration = None;
             if let Some(state) = state_file {
-                if let Ok(cs) = compute_schedule_status(&state, info.last_run_upid.as_deref()) {
+                if let Ok(cs) = compute_schedule_status(&state, Some(&upid)) {
                     computed_schedule = cs;
                 }
             }
@@ -1327,6 +1307,7 @@ pub fn garbage_collection_job_status(
                 }
             }
 
+            info.status = status_in_memory;
             info.next_run = computed_schedule.next_run;
             info.last_run_endtime = computed_schedule.last_run_endtime;
             info.last_run_state = computed_schedule.last_run_state;
www/Utils.js
index acd6e0d8d5813f9776bebf6a607a713308a58c16..0c1f78b681e237b58823b14c403156b08dce87be 100644 (file)
@@ -200,7 +200,13 @@ Ext.define('PBS.Utils', {
     },
 
     render_task_status: function(value, metadata, record, rowIndex, colIndex, store) {
-       if (!record.data['last-run-upid'] && !store.getById('last-run-upid')?.data.value) {
+       // GC tasks use 'upid' for backwards-compat, rest use 'last-run-upid'
+       if (
+           !record.data['last-run-upid'] &&
+           !store.getById('last-run-upid')?.data.value &&
+           !record.data.upid &&
+           !store.getById('upid')?.data.value
+       ) {
            return '-';
        }
 
www/config/GCView.js
index 8cdf63f9b08913c6e4923b29eb1c9abede993db7..982ab9394fdd71f4142052caeba700245274c3da 100644 (file)
@@ -1,14 +1,14 @@
 Ext.define('pbs-gc-jobs-status', {
     extend: 'Ext.data.Model',
     fields: [
-       'store', 'last-run-upid', 'removed-bytes', 'pending-bytes', 'schedule',
+       'store', 'upid', 'removed-bytes', 'pending-bytes', 'schedule',
        'next-run', 'last-run-endtime', 'last-run-state',
        {
            name: 'duration',
            calculate: function(data) {
                let endtime = data['last-run-endtime'];
                if (!endtime) return undefined;
-               let task = Proxmox.Utils.parse_task_upid(data['last-run-upid']);
+               let task = Proxmox.Utils.parse_task_upid(data['upid']);
                return endtime - task.starttime;
            },
        },
@@ -97,7 +97,7 @@ Ext.define('PBS.config.GCJobView', {
        showTaskLog: function() {
            let me = this;
 
-           let upid = this.getData()['last-run-upid'];
+           let upid = this.getData().upid;
            if (!upid) return;
 
            Ext.create('Proxmox.window.TaskViewer', { upid }).show();
@@ -147,7 +147,7 @@ Ext.define('PBS.config.GCJobView', {
            xtype: 'proxmoxButton',
            text: gettext('Show Log'),
            handler: 'showTaskLog',
-           enableFn: (rec) => !!rec.data["last-run-upid"],
+           enableFn: (rec) => !!rec.data.upid,
            disabled: true,
        },
        {
@@ -214,7 +214,7 @@ Ext.define('PBS.config.GCJobView', {
        {
            header: gettext('Removed Data'),
            dataIndex: 'removed-bytes',
-           renderer: (value) => value !== undefined
+           renderer: (value, meta, record) => record.data.upid !== null
                ? Proxmox.Utils.format_size(value, true) : "-",
            sortable: false,
            minWidth: 85,
@@ -223,7 +223,7 @@ Ext.define('PBS.config.GCJobView', {
        {
            header: gettext('Pending Data'),
            dataIndex: 'pending-bytes',
-           renderer: (value) => value !== undefined
+           renderer: (value, meta, record) => record.data.upid !== null
                ? Proxmox.Utils.format_size(value, true) : "-",
            sortable: false,
            minWidth: 80,