typo fixes all over the place
[proxmox-backup.git] / src / tape / pool_writer.rs
index 24576428072be6aeea9bdb41eace189149ad5ae5..15c4b0544e69ceb8344b5faacb18c3fd76d6dc27 100644 (file)
@@ -7,6 +7,7 @@ use anyhow::{bail, Error};
 use proxmox::tools::Uuid;
 
 use crate::{
+    task_log,
     backup::{
         DataStore,
     },
@@ -15,20 +16,24 @@ use crate::{
         TAPE_STATUS_DIR,
         MAX_CHUNK_ARCHIVE_SIZE,
         COMMIT_BLOCK_SIZE,
-        TapeDriver,
         TapeWrite,
-        ChunkArchiveWriter,
         SnapshotReader,
         SnapshotChunkIterator,
         MediaPool,
         MediaId,
         MediaCatalog,
         MediaSetCatalog,
-        tape_write_snapshot_archive,
-        request_and_load_media,
-        tape_alert_flags_critical,
-        media_changer,
-        file_formats::MediaSetLabel,
+        file_formats::{
+            MediaSetLabel,
+            ChunkArchiveWriter,
+            tape_write_snapshot_archive,
+        },
+        drive::{
+            TapeDriver,
+            request_and_load_media,
+            tape_alert_flags_critical,
+            media_changer,
+        },
     },
     config::tape_encryption_keys::load_key_configs,
 };
@@ -59,15 +64,25 @@ pub struct PoolWriter {
     drive_name: String,
     status: Option<PoolWriterState>,
     media_set_catalog: MediaSetCatalog,
+    notify_email: Option<String>,
 }
 
 impl PoolWriter {
 
-    pub fn new(mut pool: MediaPool, drive_name: &str) -> Result<Self, Error> {
+    pub fn new(mut pool: MediaPool, drive_name: &str, worker: &WorkerTask, notify_email: Option<String>) -> Result<Self, Error> {
 
         let current_time = proxmox::tools::time::epoch_i64();
 
-        pool.start_write_session(current_time)?;
+        let new_media_set_reason = pool.start_write_session(current_time)?;
+        if let Some(reason) = new_media_set_reason {
+            task_log!(
+                worker,
+                "starting new media set - reason: {}",
+                reason,
+            );
+        }
+
+        task_log!(worker, "media set uuid: {}", pool.current_media_set());
 
         let mut media_set_catalog = MediaSetCatalog::new();
 
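With the constructor change above, callers now pass in the running WorkerTask (so the media-set decision gets logged via task_log!) plus an optional notification address. A minimal caller sketch, assuming a MediaPool, a WorkerTask and the drive name are already prepared elsewhere (the drive name and e-mail address below are placeholder values):

    // `pool` is a MediaPool for the target media set, `worker` the current WorkerTask.
    let pool_writer = PoolWriter::new(
        pool,
        "lto-drive1",                           // example drive name from the drive config
        &worker,                                // used for the task_log! output above
        Some("admin@example.com".to_string()),  // notify_email; may also be None
    )?;
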
@@ -87,6 +102,7 @@ impl PoolWriter {
             drive_name: drive_name.to_string(),
             status: None,
             media_set_catalog,
+            notify_email,
          })
     }
 
@@ -159,11 +175,9 @@ impl PoolWriter {
                 }
             }
 
-        } else {
-            if let Some(mut status) = status {
-                worker.log("standalone drive - ejecting media instead of export");
-                status.drive.eject_media()?;
-            }
+        } else if let Some(mut status) = status {
+            worker.log("standalone drive - ejecting media instead of export");
+            status.drive.eject_media()?;
         }
 
         Ok(())
@@ -201,24 +215,28 @@ impl PoolWriter {
             return Ok(media_uuid);
         }
 
+        task_log!(worker, "allocated new writable media '{}'", media.label_text());
+
         // remove read-only catalog (we store a writable version in status)
         self.media_set_catalog.remove_catalog(&media_uuid);
 
         if let Some(PoolWriterState {mut drive, catalog, .. }) = self.status.take() {
             self.media_set_catalog.append_catalog(catalog)?;
+            task_log!(worker, "eject current media");
             drive.eject_media()?;
         }
 
         let (drive_config, _digest) = crate::config::drive::config()?;
 
         let (mut drive, old_media_id) =
-            request_and_load_media(worker, &drive_config, &self.drive_name, media.label())?;
+            request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?;
 
         // test for critical tape alert flags
         if let Ok(alert_flags) = drive.tape_alert_flags() {
             if !alert_flags.is_empty() {
                 worker.log(format!("TapeAlertFlags: {:?}", alert_flags));
                 if tape_alert_flags_critical(alert_flags) {
+                    self.pool.set_media_status_damaged(&media_uuid)?;
                     bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
                 }
             }
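This hunk is part of the pool writer's media-loading path: when the drive reports critical tape alert flags, the tape is now additionally marked as damaged in the pool before the task aborts, so a later run is not handed the same cartridge again. A hedged caller sketch (the method name load_writable_media is taken from this file and is not visible in the hunk header):

    // Hypothetical caller: ensure a writable tape of the current media set is loaded
    // before writing; on critical alert flags the failing tape was already excluded
    // from the pool by the code above.
    let media_uuid = pool_writer.load_writable_media(&worker)?;
    task_log!(worker, "writing to media {}", media_uuid);
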
@@ -253,7 +271,7 @@ impl PoolWriter {
         }
     }
 
-    /// Move to EOM (if not aleady there), then creates a new snapshot
+    /// Move to EOM (if not already there), then creates a new snapshot
     /// archive writing specified files (as .pxar) into it. On
     /// success, this returns 'Ok(true)' and the media catalog gets
     /// updated.
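A hedged usage sketch for the behaviour documented above (method and variable names assumed from this file): the boolean in the returned tuple tells the caller whether the whole snapshot archive fit onto the current tape; if it did not, the usual reaction is to switch to the next writable media and write the snapshot again from the start:

    let (done, _bytes) = pool_writer.append_snapshot_archive(&worker, &snapshot_reader)?;
    if !done {
        // tape became full mid-archive: continue on the next writable media and retry
        pool_writer.load_writable_media(&worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(&worker, &snapshot_reader)?;
        if !done {
            bail!("could not write snapshot archive - media full again");
        }
    }
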
@@ -303,7 +321,7 @@ impl PoolWriter {
 
         status.bytes_written += bytes_written;
 
-        let request_sync = if status.bytes_written >= COMMIT_BLOCK_SIZE { true } else { false };
+        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
 
         if !done || request_sync {
             status.commit()?;
@@ -312,7 +330,7 @@ impl PoolWriter {
         Ok((done, bytes_written))
     }
 
-    /// Move to EOM (if not aleady there), then creates a new chunk
+    /// Move to EOM (if not already there), then creates a new chunk
     /// archive and writes chunks from 'chunk_iter'. This stops when
     it detects LEOM or when we reach max archive size
     /// (4GB). Written chunks are registered in the media catalog.
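A hedged caller sketch for the chunk path (method names assumed from this file and its caller, not shown in this hunk): since an archive closes early on LEOM or at the 4 GB limit, the caller keeps looping until the snapshot's chunk iterator is drained, switching tapes whenever the drive signals end of media:

    // `chunk_iter` is assumed to be a Peekable<SnapshotChunkIterator> built from the
    // snapshot reader (construction elided here).
    while chunk_iter.peek().is_some() {
        let media_uuid = pool_writer.load_writable_media(&worker)?;
        let (leom, _bytes) = pool_writer.append_chunk_archive(&worker, &mut chunk_iter)?;
        if leom {
            // logical end of media: mark the tape full so the next round gets the
            // following tape of the media set (set_media_status_full assumed to exist)
            pool_writer.set_media_status_full(&media_uuid)?;
        }
    }
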
@@ -356,12 +374,12 @@ impl PoolWriter {
 
         let elapsed =  start_time.elapsed()?.as_secs_f64();
         worker.log(format!(
-            "wrote {:.2} MB ({} MB/s)",
+            "wrote {:.2} MB ({:.2} MB/s)",
             bytes_written as f64 / (1024.0*1024.0),
             (bytes_written as f64)/(1024.0*1024.0*elapsed),
         ));
 
-        let request_sync = if status.bytes_written >= COMMIT_BLOCK_SIZE { true } else { false };
+        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
 
         // register chunks in media_catalog
         status.catalog.start_chunk_archive(content_uuid, current_file_number)?;
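The hunk stops right after the catalog entry is opened; a sketch of the registration pattern that plausibly follows in the file (assumed continuation, not part of this diff): each chunk saved into the archive is recorded in the writable media catalog, and the archive entry is then closed before the optional commit:

    // assumed continuation (not shown in this hunk)
    for digest in saved_chunks {
        // remember which chunks ended up in this archive on this tape
        status.catalog.register_chunk(&digest)?;
    }
    status.catalog.end_chunk_archive()?;
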
@@ -426,7 +444,7 @@ fn write_chunk_archive<'a>(
         }
 
         if writer.bytes_written() > max_size {
-            worker.log(format!("Chunk Archive max size reached, closing archive"));
+            worker.log("Chunk Archive max size reached, closing archive".to_string());
             break;
         }
     }
@@ -469,7 +487,7 @@ fn update_media_set_label(
 
     match old_set {
         None => {
-            worker.log(format!("wrinting new media set label"));
+            worker.log("writing new media set label".to_string());
             drive.write_media_set_label(new_set, key_config.as_ref())?;
             media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
         }