use std::collections::HashSet;
use std::path::Path;
+use std::time::SystemTime;
use anyhow::{bail, Error};
use proxmox::tools::Uuid;
use crate::{
+ task_log,
backup::{
DataStore,
},
TAPE_STATUS_DIR,
MAX_CHUNK_ARCHIVE_SIZE,
COMMIT_BLOCK_SIZE,
- TapeDriver,
TapeWrite,
- ChunkArchiveWriter,
SnapshotReader,
SnapshotChunkIterator,
MediaPool,
MediaId,
MediaCatalog,
MediaSetCatalog,
- tape_write_snapshot_archive,
- request_and_load_media,
- tape_alert_flags_critical,
- media_changer,
- file_formats::MediaSetLabel,
+ file_formats::{
+ MediaSetLabel,
+ ChunkArchiveWriter,
+ tape_write_snapshot_archive,
+ },
+ drive::{
+ TapeDriver,
+ request_and_load_media,
+ tape_alert_flags_critical,
+ media_changer,
+ },
},
+ config::tape_encryption_keys::load_key_configs,
};
drive_name: String,
status: Option<PoolWriterState>,
media_set_catalog: MediaSetCatalog,
+ notify_email: Option<String>,
}
impl PoolWriter {
- pub fn new(mut pool: MediaPool, drive_name: &str) -> Result<Self, Error> {
+ pub fn new(mut pool: MediaPool, drive_name: &str, worker: &WorkerTask, notify_email: Option<String>) -> Result<Self, Error> {
let current_time = proxmox::tools::time::epoch_i64();
- pool.start_write_session(current_time)?;
+ let new_media_set_reason = pool.start_write_session(current_time)?;
+ if let Some(reason) = new_media_set_reason {
+ task_log!(
+ worker,
+ "starting new media set - reason: {}",
+ reason,
+ );
+ }
+
+ task_log!(worker, "media set uuid: {}", pool.current_media_set());
let mut media_set_catalog = MediaSetCatalog::new();
drive_name: drive_name.to_string(),
status: None,
media_set_catalog,
+ notify_email,
})
}
}
/// Eject media and drop PoolWriterState (close drive)
- pub fn eject_media(&mut self) -> Result<(), Error> {
+ pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
let mut status = match self.status.take() {
Some(status) => status,
None => return Ok(()), // no media loaded
let (drive_config, _digest) = crate::config::drive::config()?;
if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
+ worker.log("eject media");
+ status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
drop(status); // close drive
- changer.unload_media(None)?;
+ worker.log("unload media");
+ changer.unload_media(None)?; //eject and unload
} else {
+ worker.log("standalone drive - ejecting media");
status.drive.eject_media()?;
}
/// Export current media set and drop PoolWriterState (close drive)
pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
- let status = self.status.take();
+ let mut status = self.status.take();
let (drive_config, _digest) = crate::config::drive::config()?;
if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
+
+ if let Some(ref mut status) = status {
+ worker.log("eject media");
+ status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
+ }
drop(status); // close drive
+ worker.log("unload media");
changer.unload_media(None)?;
for media_uuid in self.pool.current_media_list()? {
let media = self.pool.lookup_media(media_uuid)?;
- let changer_id = media.changer_id();
- if let Some(slot) = changer.export_media(changer_id)? {
- worker.log(format!("exported media '{}' to import/export slot {}", changer_id, slot));
+ let label_text = media.label_text();
+ if let Some(slot) = changer.export_media(label_text)? {
+ worker.log(format!("exported media '{}' to import/export slot {}", label_text, slot));
} else {
- worker.warn(format!("export failed - media '{}' is not online", changer_id));
+ worker.warn(format!("export failed - media '{}' is not online", label_text));
}
}
- } else {
+ } else if let Some(mut status) = status {
worker.log("standalone drive - ejecting media instead of export");
- if let Some(mut status) = status {
- status.drive.eject_media()?;
- }
+ status.drive.eject_media()?;
}
Ok(())
return Ok(media_uuid);
}
+ task_log!(worker, "allocated new writable media '{}'", media.label_text());
+
// remove read-only catalog (we store a writable version in status)
self.media_set_catalog.remove_catalog(&media_uuid);
if let Some(PoolWriterState {mut drive, catalog, .. }) = self.status.take() {
self.media_set_catalog.append_catalog(catalog)?;
+ task_log!(worker, "eject current media");
drive.eject_media()?;
}
let (drive_config, _digest) = crate::config::drive::config()?;
let (mut drive, old_media_id) =
- request_and_load_media(worker, &drive_config, &self.drive_name, media.label())?;
+ request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?;
// test for critical tape alert flags
- let alert_flags = drive.tape_alert_flags()?;
- if !alert_flags.is_empty() {
- worker.log(format!("TapeAlertFlags: {:?}", alert_flags));
- if tape_alert_flags_critical(alert_flags) {
- bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
+ if let Ok(alert_flags) = drive.tape_alert_flags() {
+ if !alert_flags.is_empty() {
+ worker.log(format!("TapeAlertFlags: {:?}", alert_flags));
+ if tape_alert_flags_critical(alert_flags) {
+ self.pool.set_media_status_damaged(&media_uuid)?;
+ bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
+ }
}
}
media.id(),
)?;
+ let media_set = media.media_set_label().clone().unwrap();
+
+ let encrypt_fingerprint = media_set
+ .encryption_key_fingerprint
+ .clone()
+ .map(|fp| (fp, media_set.uuid.clone()));
+
+ drive.set_encryption(encrypt_fingerprint)?;
+
self.status = Some(PoolWriterState { drive, catalog, at_eom: false, bytes_written: 0 });
Ok(media_uuid)
}
}
- /// Move to EOM (if not aleady there), then creates a new snapshot
+ /// Move to EOM (if not already there), then creates a new snapshot
/// archive writing specified files (as .pxar) into it. On
/// success, this return 'Ok(true)' and the media catalog gets
/// updated.
/// media.
pub fn append_snapshot_archive(
&mut self,
+ worker: &WorkerTask,
snapshot_reader: &SnapshotReader,
) -> Result<(bool, usize), Error> {
};
if !status.at_eom {
+ worker.log(String::from("moving to end of media"));
status.drive.move_to_eom()?;
status.at_eom = true;
}
status.bytes_written += bytes_written;
- let request_sync = if status.bytes_written >= COMMIT_BLOCK_SIZE { true } else { false };
+ let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
if !done || request_sync {
status.commit()?;
Ok((done, bytes_written))
}
- /// Move to EOM (if not aleady there), then creates a new chunk
+ /// Move to EOM (if not already there), then creates a new chunk
/// archive and writes chunks from 'chunk_iter'. This stops when
/// it detect LEOM or when we reach max archive size
/// (4GB). Written chunks are registered in the media catalog.
pub fn append_chunk_archive(
&mut self,
+ worker: &WorkerTask,
datastore: &DataStore,
chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
) -> Result<(bool, usize), Error> {
};
if !status.at_eom {
+ worker.log(String::from("moving to end of media"));
status.drive.move_to_eom()?;
status.at_eom = true;
}
}
let writer = status.drive.write_file()?;
+ let start_time = SystemTime::now();
+
let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
+ worker,
writer,
datastore,
chunk_iter,
status.bytes_written += bytes_written;
- let request_sync = if status.bytes_written >= COMMIT_BLOCK_SIZE { true } else { false };
+ let elapsed = start_time.elapsed()?.as_secs_f64();
+ worker.log(format!(
+ "wrote {:.2} MB ({:.2} MB/s)",
+ bytes_written as f64 / (1024.0*1024.0),
+ (bytes_written as f64)/(1024.0*1024.0*elapsed),
+ ));
+
+ let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
// register chunks in media_catalog
status.catalog.start_chunk_archive(content_uuid, current_file_number)?;
/// write up to <max_size> of chunks
fn write_chunk_archive<'a>(
+ worker: &WorkerTask,
writer: Box<dyn 'a + TapeWrite>,
datastore: &DataStore,
chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
}
let blob = datastore.load_chunk(&digest)?;
- println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(&digest), blob.raw_size());
+ //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(&digest), blob.raw_size());
match writer.try_write_chunk(&digest, &blob) {
- Ok(true) => {
+ Ok(true) => {
chunk_index.insert(digest);
chunk_list.push(digest);
}
}
if writer.bytes_written() > max_size {
- println!("Chunk Archive max size reached, closing archive");
+ worker.log("Chunk Archive max size reached, closing archive".to_string());
break;
}
}
Some(ref set) => set,
};
+ let key_config = if let Some(ref fingerprint) = new_set.encryption_key_fingerprint {
+ let (config_map, _digest) = load_key_configs()?;
+ match config_map.get(fingerprint) {
+ Some(key_config) => Some(key_config.clone()),
+ None => {
+ bail!("unable to find tape encryption key config '{}'", fingerprint);
+ }
+ }
+ } else {
+ None
+ };
+
let status_path = Path::new(TAPE_STATUS_DIR);
match old_set {
None => {
- worker.log(format!("wrinting new media set label"));
- drive.write_media_set_label(new_set)?;
- media_catalog = MediaCatalog::overwrite(status_path, media_id, true)?;
+ worker.log("writing new media set label".to_string());
+ drive.write_media_set_label(new_set, key_config.as_ref())?;
+ media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
}
Some(media_set_label) => {
if new_set.uuid == media_set_label.uuid {
bail!("got media with wrong media sequence number ({} != {}",
new_set.seq_nr,media_set_label.seq_nr);
}
+ if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
+ bail!("detected changed encryption fingerprint - internal error");
+ }
media_catalog = MediaCatalog::open(status_path, &media_id.label.uuid, true, false)?;
} else {
worker.log(
media_set_label.uuid.to_string(), media_set_label.seq_nr)
);
- drive.write_media_set_label(new_set)?;
- media_catalog = MediaCatalog::overwrite(status_path, media_id, true)?;
+ drive.write_media_set_label(new_set, key_config.as_ref())?;
+ media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
}
}
}
// todo: verify last content/media_catalog somehow?
- drive.move_to_eom()?;
+ drive.move_to_eom()?; // just to be sure
Ok(media_catalog)
}