]> git.proxmox.com Git - proxmox-backup.git/blame - src/tape/pool_writer/mod.rs
WorkerTaskContext: make it Send + Sync
[proxmox-backup.git] / src / tape / pool_writer / mod.rs
CommitLineData
166a48f9
DM
1mod catalog_set;
2pub use catalog_set::*;
3
4mod new_chunks_iterator;
5pub use new_chunks_iterator::*;
6
d37da6b7 7use std::path::Path;
32b75d36 8use std::fs::File;
31cf625a 9use std::time::SystemTime;
5c4755ad 10use std::sync::{Arc, Mutex};
d37da6b7 11
166a48f9 12use anyhow::{bail, Error};
d37da6b7 13
66e42bec 14use proxmox::tools::Uuid;
d37da6b7 15
1ec0d70d 16use pbs_tools::{task_log, task_warn};
5839c469 17use pbs_config::tape_encryption_keys::load_key_configs;
048b43af
DM
18use pbs_tape::{
19 TapeWrite,
20 sg_tape::tape_alert_flags_critical,
21};
6d5d305d 22use pbs_datastore::DataStore;
b9700a9f 23use proxmox_rest_server::WorkerTask;
c23192d3 24
d37da6b7 25use crate::{
6d5d305d 26 backup::SnapshotReader,
d37da6b7
DM
27 tape::{
28 TAPE_STATUS_DIR,
29 MAX_CHUNK_ARCHIVE_SIZE,
30 COMMIT_BLOCK_SIZE,
d37da6b7
DM
31 MediaPool,
32 MediaId,
33 MediaCatalog,
f47e0357
DM
34 file_formats::{
35 MediaSetLabel,
36 ChunkArchiveWriter,
37 tape_write_snapshot_archive,
32b75d36 38 tape_write_catalog,
f47e0357 39 },
37796ff7
DM
40 drive::{
41 TapeDriver,
42 request_and_load_media,
37796ff7
DM
43 media_changer,
44 },
d37da6b7
DM
45 },
46};
47
48
/// Runtime state kept while a tape drive has a writable medium loaded.
struct PoolWriterState {
    /// Open handle to the tape drive
    drive: Box<dyn TapeDriver>,
    // Media Uuid from loaded media
    media_uuid: Uuid,
    // tell if we already moved to EOM (end of media)
    at_eom: bool,
    // bytes written after the last tape flush/sync
    bytes_written: usize,
}
58
/// Helper to manage a backup job, writing several tapes of a pool
pub struct PoolWriter {
    /// The media pool this writer allocates writable media from
    pool: MediaPool,
    /// Name of the tape drive used for writing
    drive_name: String,
    /// Set while a medium is loaded; `None` means no drive is open
    status: Option<PoolWriterState>,
    /// Catalogs of all media in the current media set (shared with the
    /// chunk reader thread, see `spawn_chunk_reader_thread`)
    catalog_set: Arc<Mutex<CatalogSet>>,
    /// Optional email address, passed along when requesting a media load
    notify_email: Option<String>,
}
67
68impl PoolWriter {
69
    /// Creates a new pool writer.
    ///
    /// Starts a write session on the pool (which may start a new media
    /// set, e.g. when `force_media_set` is true), then loads the
    /// catalogs of all media in the current media set read-only.
    pub fn new(
        mut pool: MediaPool,
        drive_name: &str,
        worker: &WorkerTask,
        notify_email: Option<String>,
        force_media_set: bool,
    ) -> Result<Self, Error> {

        let current_time = proxmox::tools::time::epoch_i64();

        // start_write_session returns a reason when a new media set was started
        let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
        if let Some(reason) = new_media_set_reason {
            task_log!(
                worker,
                "starting new media set - reason: {}",
                reason,
            );
        }

        let media_set_uuid = pool.current_media_set().uuid();
        task_log!(worker, "media set uuid: {}", media_set_uuid);

        let mut catalog_set = CatalogSet::new();

        // load all catalogs read-only at start
        for media_uuid in pool.current_media_list()? {
            let media_info = pool.lookup_media(media_uuid).unwrap();
            let media_catalog = MediaCatalog::open(
                Path::new(TAPE_STATUS_DIR),
                media_info.id(),
                // NOTE(review): flags presumably mean (write, create) = (false, false),
                // matching the "read-only" comment above — confirm against MediaCatalog::open
                false,
                false,
            )?;
            catalog_set.append_read_only_catalog(media_catalog)?;
        }

        Ok(Self {
            pool,
            drive_name: drive_name.to_string(),
            status: None, // no drive opened yet; done lazily in load_writable_media()
            catalog_set: Arc::new(Mutex::new(catalog_set)),
            notify_email,
        })
    }
114
    /// Returns a mutable reference to the underlying media pool.
    pub fn pool(&mut self) -> &mut MediaPool {
        &mut self.pool
    }
118
119 /// Set media status to FULL (persistent - stores pool status)
120 pub fn set_media_status_full(&mut self, uuid: &Uuid) -> Result<(), Error> {
121 self.pool.set_media_status_full(&uuid)?;
122 Ok(())
123 }
124
54722aca 125 pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
1037f2bc 126 self.catalog_set.lock().unwrap().contains_snapshot(store, snapshot)
d37da6b7
DM
127 }
128
    /// Eject media and drop PoolWriterState (close drive)
    pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
        // take() clears self.status; dropping it later closes the drive handle
        let mut status = match self.status.take() {
            Some(status) => status,
            None => return Ok(()), // no media loaded
        };

        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
            task_log!(worker, "eject media");
            status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
            drop(status); // close drive
            task_log!(worker, "unload media");
            changer.unload_media(None)?; //eject and unload
        } else {
            // no changer configured - standalone drive can only eject
            task_log!(worker, "standalone drive - ejecting media");
            status.drive.eject_media()?;
        }

        Ok(())
    }
151
    /// Export current media set and drop PoolWriterState (close drive)
    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
        // take() clears self.status; dropping it closes the drive handle
        let mut status = self.status.take();

        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {

            if let Some(ref mut status) = status {
                task_log!(worker, "eject media");
                status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
            }
            drop(status); // close drive

            task_log!(worker, "unload media");
            changer.unload_media(None)?;

            // move every medium of the current set to an import/export slot;
            // media that are not online are only warned about, not an error
            for media_uuid in self.pool.current_media_list()? {
                let media = self.pool.lookup_media(media_uuid)?;
                let label_text = media.label_text();
                if let Some(slot) = changer.export_media(label_text)? {
                    task_log!(worker, "exported media '{}' to import/export slot {}", label_text, slot);
                } else {
                    task_warn!(worker, "export failed - media '{}' is not online", label_text);
                }
            }

        } else if let Some(mut status) = status {
            // standalone drive: cannot export, ejecting is the best we can do
            task_log!(worker, "standalone drive - ejecting media instead of export");
            status.drive.eject_media()?;
        }

        Ok(())
    }
186
    /// commit changes to tape and catalog
    ///
    /// This is done automatically during a backup session, but needs to
    /// be called explicitly before dropping the PoolWriter
    pub fn commit(&mut self) -> Result<(), Error> {
        // Order matters: flush tape data first, so the catalog never
        // records content that is not durable on tape yet.
        if let Some(PoolWriterState {ref mut drive, .. }) = self.status {
            drive.sync()?; // sync all data to the tape
        }
        self.catalog_set.lock().unwrap().commit()?; // then commit the catalog
        Ok(())
    }
198
    /// Load a writable media into the drive
    ///
    /// Allocates a writable medium from the pool; if it is the one
    /// already loaded, this is a no-op. Otherwise the old medium is
    /// ejected, the new one is requested/loaded, checked for critical
    /// tape alert flags, its media set label is written/verified, and
    /// drive encryption is configured. Returns the uuid of the loaded
    /// medium.
    pub fn load_writable_media(&mut self, worker: &WorkerTask) -> Result<Uuid, Error> {
        let last_media_uuid = match self.status {
            Some(PoolWriterState { ref media_uuid, ..}) => Some(media_uuid.clone()),
            None => None,
        };

        let current_time = proxmox::tools::time::epoch_i64();
        let media_uuid = self.pool.alloc_writable_media(current_time)?;

        let media = self.pool.lookup_media(&media_uuid).unwrap();

        let media_changed = match last_media_uuid {
            Some(ref last_media_uuid) => last_media_uuid != &media_uuid,
            None => true,
        };

        // fast path: the allocated medium is already loaded
        if !media_changed {
            return Ok(media_uuid);
        }

        task_log!(worker, "allocated new writable media '{}'", media.label_text());

        // eject the previously loaded medium (if any) and close the drive
        if let Some(PoolWriterState {mut drive, .. }) = self.status.take() {
            if last_media_uuid.is_some() {
                task_log!(worker, "eject current media");
                drive.eject_media()?;
            }
        }

        let (drive_config, _digest) = pbs_config::drive::config()?;

        // may notify the operator (notify_email) and wait for the medium
        let (mut drive, old_media_id) =
            request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?;

        // test for critical tape alert flags
        if let Ok(alert_flags) = drive.tape_alert_flags() {
            if !alert_flags.is_empty() {
                task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
                if tape_alert_flags_critical(alert_flags) {
                    // mark the medium damaged before aborting, so the pool
                    // does not hand it out again
                    self.pool.set_media_status_damaged(&media_uuid)?;
                    bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
                }
            }
        }

        // write or verify the media set label; is_new_media tells us
        // whether the label was (over)written
        let (catalog, is_new_media) = update_media_set_label(
            worker,
            drive.as_mut(),
            old_media_id.media_set_label,
            media.id(),
        )?;

        self.catalog_set.lock().unwrap().append_catalog(catalog)?;

        let media_set = media.media_set_label().clone().unwrap();

        // encryption is keyed per media set (fingerprint + set uuid)
        let encrypt_fingerprint = media_set
            .encryption_key_fingerprint
            .clone()
            .map(|fp| (fp, media_set.uuid.clone()));

        drive.set_encryption(encrypt_fingerprint)?;

        self.status = Some(PoolWriterState {
            drive,
            media_uuid: media_uuid.clone(),
            at_eom: false,
            bytes_written: 0,
        });

        if is_new_media {
            // add catalogs from previous media
            self.append_media_set_catalogs(worker)?;
        }

        Ok(media_uuid)
    }
277
32b75d36
DM
278 fn open_catalog_file(uuid: &Uuid) -> Result<File, Error> {
279
280 let status_path = Path::new(TAPE_STATUS_DIR);
281 let mut path = status_path.to_owned();
282 path.push(uuid.to_string());
283 path.set_extension("log");
284
285 let file = std::fs::OpenOptions::new()
286 .read(true)
287 .open(&path)?;
288
289 Ok(file)
290 }
291
    // Check if tape is loaded, then move to EOM (if not already there)
    //
    // Returns the tape position at EOM.
    fn prepare_tape_write(
        status: &mut PoolWriterState,
        worker: &WorkerTask,
    ) -> Result<u64, Error> {

        if !status.at_eom {
            task_log!(worker, "moving to end of media");
            status.drive.move_to_eom(true)?;
            // remember the position so subsequent appends skip the seek
            status.at_eom = true;
        }

        let current_file_number = status.drive.current_file_number()?;
        // sanity check: positions below 2 cannot be valid append positions
        // (presumably files 0/1 hold the media/set labels - TODO confirm)
        if current_file_number < 2 {
            bail!("got strange file position number from drive ({})", current_file_number);
        }

        Ok(current_file_number)
    }
313
    /// Move to EOM (if not already there), then write the current
    /// catalog to the tape. On success, this return 'Ok(true)'.
    ///
    /// Please note that this may fail when there is not enough space
    /// on the media (return value 'Ok(false, _)'). In that case, the
    /// archive is marked incomplete. The caller should mark the media
    /// as full and try again using another media.
    pub fn append_catalog_archive(
        &mut self,
        worker: &WorkerTask,
    ) -> Result<bool, Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        Self::prepare_tape_write(status, worker)?;

        let catalog_set = self.catalog_set.lock().unwrap();

        let catalog = match catalog_set.catalog {
            None => bail!("append_catalog_archive failed: no catalog - internal error"),
            Some(ref catalog) => catalog,
        };

        let media_set = self.pool.current_media_set();

        // sanity check: the current catalog must belong to the last
        // medium of the current media set
        let media_list = media_set.media_list();
        let uuid = match media_list.last() {
            None => bail!("got empty media list - internal error"),
            Some(None) => bail!("got incomplete media list - internal error"),
            Some(Some(last_uuid)) => {
                if last_uuid != catalog.uuid() {
                    bail!("got wrong media - internal error");
                }
                last_uuid
            }
        };

        // sequence number of the last medium within the set
        let seq_nr = media_list.len() - 1;

        let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

        let mut file = Self::open_catalog_file(uuid)?;

        // tape_write_catalog returns None on EOM -> catalog incomplete
        let done = tape_write_catalog(
            writer.as_mut(),
            uuid,
            media_set.uuid(),
            seq_nr,
            &mut file,
        )?.is_some();

        Ok(done)
    }
370
    // Append catalogs for all previous media in set (without last)
    //
    // Called after a new medium was labeled, so that each tape also
    // carries the catalogs of its predecessors in the media set.
    fn append_media_set_catalogs(
        &mut self,
        worker: &WorkerTask,
    ) -> Result<(), Error> {

        let media_set = self.pool.current_media_set();

        let mut media_list = &media_set.media_list()[..];
        // nothing to do for the first medium of a set
        if media_list.len() < 2 {
            return Ok(());
        }
        // drop the last entry - that is the currently loaded medium
        media_list = &media_list[..(media_list.len()-1)];

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        Self::prepare_tape_write(status, worker)?;

        for (seq_nr, uuid) in media_list.iter().enumerate() {

            let uuid = match uuid {
                None => bail!("got incomplete media list - internal error"),
                Some(uuid) => uuid,
            };

            let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

            let mut file = Self::open_catalog_file(uuid)?;

            task_log!(worker, "write catalog for previous media: {}", uuid);

            // None means we hit EOM - these catalogs must fit, so fail hard
            if tape_write_catalog(
                writer.as_mut(),
                uuid,
                media_set.uuid(),
                seq_nr,
                &mut file,
            )?.is_none() {
                bail!("got EOM while writing start catalog");
            }
        }

        Ok(())
    }
418
    /// Move to EOM (if not already there), then creates a new snapshot
    /// archive writing specified files (as .pxar) into it. On
    /// success, this return 'Ok(true)' and the media catalog gets
    /// updated.
    ///
    /// Please note that this may fail when there is not enough space
    /// on the media (return value 'Ok((false, _))'). In that case, the
    /// archive is marked incomplete, and we do not use it. The caller
    /// should mark the media as full and try again using another
    /// media.
    pub fn append_snapshot_archive(
        &mut self,
        worker: &WorkerTask,
        snapshot_reader: &SnapshotReader,
    ) -> Result<(bool, usize), Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        let current_file_number = Self::prepare_tape_write(status, worker)?;

        let (done, bytes_written) = {
            let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

            match tape_write_snapshot_archive(writer.as_mut(), snapshot_reader)? {
                Some(content_uuid) => {
                    // archive written completely - register it in the catalog
                    self.catalog_set.lock().unwrap().register_snapshot(
                        content_uuid,
                        current_file_number,
                        &snapshot_reader.datastore_name().to_string(),
                        &snapshot_reader.snapshot().to_string(),
                    )?;
                    (true, writer.bytes_written())
                }
                // None: ran out of space (EOM) - incomplete, not registered
                None => (false, writer.bytes_written()),
            }
        };

        status.bytes_written += bytes_written;

        // periodically sync to tape (and commit the catalog) after
        // COMMIT_BLOCK_SIZE bytes
        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

        if !done || request_sync {
            self.commit()?;
        }

        Ok((done, bytes_written))
    }
469
    /// Move to EOM (if not already there), then creates a new chunk
    /// archive and writes chunks from 'chunk_iter'. This stops when
    /// it detects LEOM or when we reach max archive size
    /// (4GB). Written chunks are registered in the media catalog.
    pub fn append_chunk_archive(
        &mut self,
        worker: &WorkerTask,
        chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
        store: &str,
    ) -> Result<(bool, usize), Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        let current_file_number = Self::prepare_tape_write(status, worker)?;

        let writer = status.drive.write_file()?;

        let start_time = SystemTime::now();

        let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
            worker,
            writer,
            chunk_iter,
            store,
            MAX_CHUNK_ARCHIVE_SIZE,
        )?;

        status.bytes_written += bytes_written;

        let elapsed = start_time.elapsed()?.as_secs_f64();
        task_log!(
            worker,
            "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
            saved_chunks.len(),
            bytes_written as f64 /1_000_000.0,
            (bytes_written as f64)/(1_000_000.0*elapsed),
        );

        // periodically sync to tape (and commit the catalog) after
        // COMMIT_BLOCK_SIZE bytes
        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

        // register chunks in media_catalog
        self.catalog_set.lock().unwrap()
            .register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;

        if leom || request_sync {
            self.commit()?;
        }

        Ok((leom, bytes_written))
    }
5c4755ad
DM
523
524 pub fn spawn_chunk_reader_thread(
525 &self,
526 datastore: Arc<DataStore>,
527 snapshot_reader: Arc<Mutex<SnapshotReader>>,
528 ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
529 NewChunksIterator::spawn(
530 datastore,
531 snapshot_reader,
1037f2bc 532 Arc::clone(&self.catalog_set),
5c4755ad
DM
533 )
534 }
d37da6b7
DM
535}
536
/// write up to <max_size> of chunks
///
/// Returns the list of written chunk digests (in write order), the
/// content uuid of the archive, whether LEOM was reached, and the
/// number of bytes written.
fn write_chunk_archive<'a>(
    _worker: &WorkerTask,
    writer: Box<dyn 'a + TapeWrite>,
    chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
    store: &str,
    max_size: usize,
) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> {

    let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?;

    // we want to get the chunk list in correct order
    let mut chunk_list: Vec<[u8;32]> = Vec::new();

    let mut leom = false;

    loop {
        // peek first - the chunk is only consumed after a successful write,
        // so an unwritten chunk stays available for the next archive
        let (digest, blob) = match chunk_iter.peek() {
            None => break,
            Some(Ok((digest, blob))) => (digest, blob),
            Some(Err(err)) => bail!("{}", err),
        };

        //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(digest), blob.raw_size());

        match writer.try_write_chunk(&digest, &blob) {
            Ok(true) => {
                chunk_list.push(*digest);
                chunk_iter.next(); // consume
            }
            Ok(false) => {
                // Ok(false): the writer hit LEOM (logical end of media)
                // Note; we do not consume the chunk (no chunk_iter.next())
                leom = true;
                break;
            }
            Err(err) => bail!("write chunk failed - {}", err),
        }

        if writer.bytes_written() > max_size {
            //task_log!(worker, "Chunk Archive max size reached, closing archive");
            break;
        }
    }

    writer.finish()?;

    Ok((chunk_list, content_uuid, leom, writer.bytes_written()))
}
585
66e42bec
DM
586// Compare the media set label. If the media is empty, or the existing
587// set label does not match the expected media set, overwrite the
588// media set label.
589fn update_media_set_label(
ff58c519 590 worker: &WorkerTask,
66e42bec
DM
591 drive: &mut dyn TapeDriver,
592 old_set: Option<MediaSetLabel>,
d37da6b7 593 media_id: &MediaId,
32b75d36 594) -> Result<(MediaCatalog, bool), Error> {
d37da6b7
DM
595
596 let media_catalog;
597
598 let new_set = match media_id.media_set_label {
66e42bec 599 None => bail!("got media without media set - internal error"),
d37da6b7
DM
600 Some(ref set) => set,
601 };
602
feb1645f
DM
603 let key_config = if let Some(ref fingerprint) = new_set.encryption_key_fingerprint {
604 let (config_map, _digest) = load_key_configs()?;
605 match config_map.get(fingerprint) {
82a103c8 606 Some(key_config) => Some(key_config.clone()),
feb1645f
DM
607 None => {
608 bail!("unable to find tape encryption key config '{}'", fingerprint);
609 }
610 }
611 } else {
612 None
613 };
614
d37da6b7
DM
615 let status_path = Path::new(TAPE_STATUS_DIR);
616
32b75d36 617 let new_media = match old_set {
d37da6b7 618 None => {
1ec0d70d 619 task_log!(worker, "writing new media set label");
feb1645f 620 drive.write_media_set_label(new_set, key_config.as_ref())?;
31cf625a 621 media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
32b75d36 622 true
d37da6b7
DM
623 }
624 Some(media_set_label) => {
625 if new_set.uuid == media_set_label.uuid {
626 if new_set.seq_nr != media_set_label.seq_nr {
627 bail!("got media with wrong media sequence number ({} != {}",
628 new_set.seq_nr,media_set_label.seq_nr);
629 }
8a0046f5
DM
630 if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
631 bail!("detected changed encryption fingerprint - internal error");
632 }
237314ad 633 media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?;
32b75d36
DM
634
635 // todo: verify last content/media_catalog somehow?
636
637 false
d37da6b7 638 } else {
1ec0d70d
DM
639 task_log!(
640 worker,
641 "writing new media set label (overwrite '{}/{}')",
642 media_set_label.uuid.to_string(),
643 media_set_label.seq_nr,
66e42bec 644 );
d37da6b7 645
feb1645f 646 drive.write_media_set_label(new_set, key_config.as_ref())?;
31cf625a 647 media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
32b75d36 648 true
d37da6b7
DM
649 }
650 }
32b75d36 651 };
d37da6b7 652
32b75d36 653 Ok((media_catalog, new_media))
d37da6b7 654}