tree-wide: fix needless borrows
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index ec35038a100f234e1b976bb2a9e9b0e19d157f93..99a717d43f2a56fd7ade4acaa2ea1483f1370cd7 100644 (file)
@@ -4,34 +4,23 @@ use std::sync::{Mutex, Arc};
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
-use proxmox::{
-    try_block,
-    api::{
-        api,
-        RpcEnvironment,
-        RpcEnvironmentType,
-        Router,
-        Permission,
-    },
+use proxmox_lang::try_block;
+use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
+use proxmox_schema::api;
+use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
+
+use pbs_api_types::{
+    Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig,
+    UPID_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE,
+    GroupFilter,
 };
 
+use pbs_datastore::{DataStore, StoreProgress, SnapshotReader};
+use pbs_datastore::backup_info::{BackupDir, BackupInfo, BackupGroup};
+use pbs_config::CachedUserInfo;
+use proxmox_rest_server::WorkerTask;
+
 use crate::{
-    task_log,
-    task_warn,
-    config::{
-        self,
-        cached_user_info::CachedUserInfo,
-        acl::{
-            PRIV_DATASTORE_READ,
-            PRIV_TAPE_AUDIT,
-            PRIV_TAPE_WRITE,
-        },
-        tape_job::{
-            TapeBackupJobConfig,
-            TapeBackupJobSetup,
-            TapeBackupJobStatus,
-        },
-    },
     server::{
         lookup_user_email,
         TapeBackupJobSummary,
@@ -41,30 +30,15 @@ use crate::{
             compute_schedule_status,
         },
     },
-    backup::{
-        DataStore,
-        BackupDir,
-        BackupInfo,
-        StoreProgress,
-    },
-    api2::types::{
-        Authid,
-        UPID_SCHEMA,
-        JOB_ID_SCHEMA,
-        MediaPoolConfig,
-        Userid,
-    },
-    server::WorkerTask,
-    task::TaskState,
     tape::{
         TAPE_STATUS_DIR,
         Inventory,
         PoolWriter,
         MediaPool,
-        SnapshotReader,
         drive::{
             media_changer,
             lock_tape_device,
+            TapeLockError,
             set_tape_device_state,
         },
         changer::update_changer_online_status,
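
The import block tracks the workspace split: the monolithic proxmox crate gives
way to proxmox-lang (try_block), proxmox-router, proxmox-schema and proxmox-sys
(task_log/task_warn, WorkerTaskContext), while the types and config handling
formerly under crate::config and crate::backup now come from the dedicated
pbs-api-types, pbs-config and pbs-datastore crates. TapeLockError is newly
imported so the drive-lock retry loop further down can tell a timeout apart
from a hard failure.
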
@@ -125,9 +99,11 @@ pub fn list_tape_backup_jobs(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
 
-    let (config, digest) = config::tape_job::config()?;
+    let (job_config, digest) = pbs_config::tape_job::config()?;
+    let (pool_config, _pool_digest) = pbs_config::media_pool::config()?;
+    let (drive_config, _digest) = pbs_config::drive::config()?;
 
-    let job_list_iter = config
+    let job_list_iter = job_config
         .convert_to_typed_array("backup")?
         .into_iter()
         .filter(|_job: &TapeBackupJobConfig| {
@@ -136,6 +112,8 @@ pub fn list_tape_backup_jobs(
         });
 
     let mut list = Vec::new();
+    let status_path = Path::new(TAPE_STATUS_DIR);
+    let current_time = proxmox_time::epoch_i64();
 
     for job in job_list_iter {
         let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
@@ -148,10 +126,28 @@ pub fn list_tape_backup_jobs(
 
         let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
 
-        list.push(TapeBackupJobStatus { config: job, status });
+        let next_run = status.next_run.unwrap_or(current_time);
+
+        let mut next_media_label = None;
+
+        if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) {
+            let mut changer_name = None;
+            if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) {
+                changer_name = Some(name);
+            }
+            if let Ok(mut pool) = MediaPool::with_config(status_path, &pool, changer_name, true) {
+                if pool.start_write_session(next_run, false).is_ok() {
+                    if let Ok(media_id) = pool.guess_next_writable_media(next_run) {
+                        next_media_label = Some(media_id.label.label_text);
+                    }
+                }
+            }
+        }
+
+        list.push(TapeBackupJobStatus { config: job, status, next_media_label });
     }
 
-    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+    rpcenv["digest"] = hex::encode(&digest).into();
 
     Ok(list)
 }
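
Besides the pbs_config renames, the listing becomes more informative: for each
job it now predicts the next tape to be used. It looks up the pool config, asks
the changer for its name, instantiates the pool at the job's next scheduled run
(falling back to the current time), starts a trial write session and records
the label of the guessed next writable medium. Every step is deliberately
guarded with if let Ok(...), so a misconfigured pool or unreachable changer
degrades to next_media_label = None instead of failing the whole listing. The
digest is now rendered with the plain hex crate rather than
proxmox::tools::digest_to_hex.
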
@@ -161,6 +157,7 @@ pub fn do_tape_backup_job(
     setup: TapeBackupJobSetup,
     auth_id: &Authid,
     schedule: Option<String>,
+    to_stdout: bool,
 ) -> Result<String, Error> {
 
     let job_id = format!("{}:{}:{}:{}",
@@ -173,10 +170,10 @@ pub fn do_tape_backup_job(
 
     let datastore = DataStore::lookup_datastore(&setup.store)?;
 
-    let (config, _digest) = config::media_pool::config()?;
+    let (config, _digest) = pbs_config::media_pool::config()?;
     let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
 
-    let (drive_config, _digest) = config::drive::config()?;
+    let (drive_config, _digest) = pbs_config::drive::config()?;
 
     // for scheduled jobs we acquire the lock later in the worker
     let drive_lock = if schedule.is_some() {
@@ -185,29 +182,33 @@ pub fn do_tape_backup_job(
         Some(lock_tape_device(&drive_config, &setup.drive)?)
     };
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
         &worker_type,
         Some(job_id.clone()),
-        auth_id.clone(),
-        false,
+        auth_id.to_string(),
+        to_stdout,
         move |worker| {
             job.start(&worker.upid().to_string())?;
             let mut drive_lock = drive_lock;
 
-            let (job_result, summary) = match try_block!({
+            let mut summary = Default::default();
+            let job_result = try_block!({
                 if schedule.is_some() {
                     // for scheduled tape backup jobs, we wait indefinitely for the lock
                     task_log!(worker, "waiting for drive lock...");
                     loop {
-                        if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
-                            drive_lock = Some(lock);
-                            break;
-                        } // ignore errors
-
                         worker.check_abort()?;
+                        match lock_tape_device(&drive_config, &setup.drive) {
+                            Ok(lock) => {
+                                drive_lock = Some(lock);
+                                break;
+                            }
+                            Err(TapeLockError::TimeOut) => continue,
+                            Err(TapeLockError::Other(err)) => return Err(err),
+                        }
                     }
                 }
                 set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
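
The lock-wait loop is both more responsive and stricter about errors:
worker.check_abort()? now runs before every attempt, so a cancelled task stops
waiting promptly, and instead of ignoring all lock errors the loop retries only
on a timeout while propagating anything else. The match implies an error type
of roughly this shape (a sketch inferred from the match arms; the real
definition lives with the tape drive code):

    use anyhow::Error;

    // Assumed shape of TapeLockError: a dedicated timeout variant lets
    // callers retry only when the lock is merely held by someone else.
    pub enum TapeLockError {
        /// The device lock is currently taken; the caller may retry.
        TimeOut,
        /// A real failure; the caller should propagate it.
        Other(Error),
    }
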
@@ -217,17 +218,17 @@ pub fn do_tape_backup_job(
                     task_log!(worker,"task triggered by schedule '{}'", event_str);
                 }
 
+
                 backup_worker(
                     &worker,
                     datastore,
                     &pool_config,
                     &setup,
                     email.clone(),
+                    &mut summary,
+                    false,
                 )
-            }) {
-                Ok(summary) => (Ok(()), summary),
-                Err(err) => (Err(err), Default::default()),
-            };
+            });
 
             let status = worker.create_state(&job_result);
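
backup_worker now fills a caller-owned &mut TapeBackupJobSummary instead of
returning one. The point is the error path: whatever the worker recorded before
a failure survives and still reaches the notification e-mail, which the old
match (discarding the summary on Err) could not offer. In miniature, with
illustrative names only:

    use anyhow::{bail, Error};

    // The caller keeps everything recorded before the failure.
    fn work(log: &mut Vec<String>) -> Result<(), Error> {
        log.push("group vm/100 done".into());
        bail!("tape drive failed"); // caller's `log` still holds the entry
    }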
 
@@ -288,7 +289,7 @@ pub fn run_tape_backup_job(
 ) -> Result<String, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let (config, _digest) = config::tape_job::config()?;
+    let (config, _digest) = pbs_config::tape_job::config()?;
     let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;
 
     check_backup_permission(
@@ -300,7 +301,9 @@ pub fn run_tape_backup_job(
 
     let job = Job::new("tape-backup-job", &id)?;
 
-    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
+    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;
 
     Ok(upid_str)
 }
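
run_tape_backup_job now inspects the RPC environment and forwards to_stdout, so
a CLI invocation streams the worker log to the terminal while API calls keep
the old detached behavior.
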
@@ -312,6 +315,12 @@ pub fn run_tape_backup_job(
                 type: TapeBackupJobSetup,
                 flatten: true,
             },
+            "force-media-set": {
+                description: "Ignore the allocation policy and start a new media-set.",
+                optional: true,
+                type: bool,
+                default: false,
+            },
         },
     },
     returns: {
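
force-media-set is a new optional boolean (default false) on the ad-hoc backup
API: it tells the pool writer to ignore the pool's allocation policy and start
a fresh media-set for this run. Scheduled jobs are unaffected and keep passing
false (see the backup_worker call in do_tape_backup_job above).
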
@@ -327,6 +336,7 @@ pub fn run_tape_backup_job(
 /// Backup datastore to tape media pool
 pub fn backup(
     setup: TapeBackupJobSetup,
+    force_media_set: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
@@ -341,10 +351,10 @@ pub fn backup(
 
     let datastore = DataStore::lookup_datastore(&setup.store)?;
 
-    let (config, _digest) = config::media_pool::config()?;
+    let (config, _digest) = pbs_config::media_pool::config()?;
     let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
 
-    let (drive_config, _digest) = config::drive::config()?;
+    let (drive_config, _digest) = pbs_config::drive::config()?;
 
     // early check/lock before starting worker
     let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
@@ -353,28 +363,28 @@ pub fn backup(
 
     let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
         "tape-backup",
         Some(job_id),
-        auth_id,
+        auth_id.to_string(),
         to_stdout,
         move |worker| {
             let _drive_lock = drive_lock; // keep lock guard
             set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
 
-            let (job_result, summary) = match backup_worker(
+            let mut summary = Default::default();
+            let job_result = backup_worker(
                 &worker,
                 datastore,
                 &pool_config,
                 &setup,
                 email.clone(),
-            ) {
-                Ok(summary) => (Ok(()), summary),
-                Err(err) => (Err(err), Default::default()),
-            };
+                &mut summary,
+                force_media_set,
+            );
 
             if let Some(email) = email {
                 if let Err(err) = crate::server::send_tape_backup_status(
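
The ad-hoc endpoint gets the same summary treatment as do_tape_backup_job: the
summary is created up front, filled in place by backup_worker, and the possibly
partial result is still handed to send_tape_backup_status on failure. auth_id
is now passed to WorkerTask::new_thread as a string.
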
@@ -403,25 +413,45 @@ fn backup_worker(
     pool_config: &MediaPoolConfig,
     setup: &TapeBackupJobSetup,
     email: Option<String>,
-) -> Result<TapeBackupJobSummary, Error> {
+    summary: &mut TapeBackupJobSummary,
+    force_media_set: bool,
+) -> Result<(), Error> {
 
     let status_path = Path::new(TAPE_STATUS_DIR);
     let start = std::time::Instant::now();
-    let mut summary: TapeBackupJobSummary = Default::default();
 
     task_log!(worker, "update media online status");
     let changer_name = update_media_online_status(&setup.drive)?;
 
-    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
+    let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;
 
-    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
+    let mut pool_writer = PoolWriter::new(
+        pool,
+        &setup.drive,
+        worker,
+        email,
+        force_media_set
+    )?;
 
     let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
 
     group_list.sort_unstable();
 
-    let group_count = group_list.len();
-    task_log!(worker, "found {} groups", group_count);
+    let (group_list, group_count) = if let Some(group_filters) = &setup.group_filter {
+        let filter_fn = |group: &BackupGroup, group_filters: &[GroupFilter]| {
+            group_filters.iter().any(|filter| group.matches(filter))
+        };
+
+        let group_count_full = group_list.len();
+        let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
+        let group_count = list.len();
+        task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
+        (list, group_count)
+    } else {
+        let group_count = group_list.len();
+        task_log!(worker, "found {} groups", group_count);
+        (group_list, group_count)
+    };
 
     let mut progress = StoreProgress::new(group_count as u64);
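
Two changes meet here: PoolWriter::new picks up the force_media_set flag, and
the group list is narrowed by the job's optional group_filter before the
progress tracker is sized. Note the OR semantics, shown in miniature below: a
group is backed up when any one of the configured filters matches it; with no
filter set, the old "found N groups" path is preserved.

    // OR semantics of filter_fn above, with generic stand-ins for the
    // real BackupGroup/GroupFilter types (illustrative only).
    fn keep<T, F: Fn(&T) -> bool>(group: &T, filters: &[F]) -> bool {
        filters.iter().any(|f| f(group))
    }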
 
@@ -442,7 +472,18 @@ fn backup_worker(
         progress.done_snapshots = 0;
         progress.group_snapshots = 0;
 
-        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
+        let snapshot_list = group.list_backups(&datastore.base_path())?;
+
+        // filter out unfinished backups
+        let mut snapshot_list: Vec<_> = snapshot_list
+            .into_iter()
+            .filter(|item| item.is_finished())
+            .collect();
+
+        if snapshot_list.is_empty() {
+            task_log!(worker, "group {} was empty", group);
+            continue;
+        }
 
         BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
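
Snapshots that are still in progress (is_finished() returning false,
presumably because the backup manifest is not yet written) are excluded before
anything goes to tape, and a group left empty by that filter is skipped with a
log message instead of being written out.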
 
@@ -525,13 +566,13 @@ fn backup_worker(
 
     summary.duration = start.elapsed();
 
-    Ok(summary)
+    Ok(())
 }
 
 // Try to update the media online status
 fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
 
-    let (config, _digest) = config::drive::config()?;
+    let (config, _digest) = pbs_config::drive::config()?;
 
     if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {