// Source: git.proxmox.com - proxmox-backup.git / blob - src/api2/tape/restore.rs
// Commit subject: tape: skip catalog archives in restore
// Path: src/api2/tape/restore.rs
1 use std::path::Path;
2 use std::ffi::OsStr;
3 use std::convert::TryFrom;
4 use std::io::{Seek, SeekFrom};
5
6 use anyhow::{bail, format_err, Error};
7 use serde_json::Value;
8
9 use proxmox::{
10 api::{
11 api,
12 RpcEnvironment,
13 RpcEnvironmentType,
14 Router,
15 Permission,
16 section_config::SectionConfigData,
17 },
18 tools::{
19 Uuid,
20 io::ReadExt,
21 fs::{
22 replace_file,
23 CreateOptions,
24 },
25 },
26 };
27
28 use crate::{
29 task_log,
30 task_warn,
31 task::TaskState,
32 tools::compute_file_csum,
33 api2::types::{
34 DATASTORE_SCHEMA,
35 DRIVE_NAME_SCHEMA,
36 UPID_SCHEMA,
37 Authid,
38 Userid,
39 },
40 config::{
41 self,
42 cached_user_info::CachedUserInfo,
43 acl::{
44 PRIV_DATASTORE_BACKUP,
45 PRIV_DATASTORE_MODIFY,
46 PRIV_TAPE_READ,
47 },
48 },
49 backup::{
50 archive_type,
51 MANIFEST_BLOB_NAME,
52 CryptMode,
53 DataStore,
54 BackupDir,
55 DataBlob,
56 BackupManifest,
57 ArchiveType,
58 IndexFile,
59 DynamicIndexReader,
60 FixedIndexReader,
61 },
62 server::{
63 lookup_user_email,
64 WorkerTask,
65 },
66 tape::{
67 TAPE_STATUS_DIR,
68 TapeRead,
69 MediaId,
70 MediaSet,
71 MediaCatalog,
72 Inventory,
73 lock_media_set,
74 file_formats::{
75 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
76 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
77 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
78 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
79 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
80 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
81 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
82 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
83 MediaContentHeader,
84 ChunkArchiveHeader,
85 ChunkArchiveDecoder,
86 SnapshotArchiveHeader,
87 CatalogArchiveHeader,
88 },
89 drive::{
90 TapeDriver,
91 request_and_load_media,
92 lock_tape_device,
93 set_tape_device_state,
94 },
95 },
96 };
97
/// Router for the tape restore API (POST starts a restore worker task).
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
100
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
///
/// Checks permissions on the datastore, drive and pool, locks the
/// media set and the drive, then spawns a worker task that restores
/// every media of the set in sequence. Returns the worker UPID.
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // caller needs Datastore.Backup on the target datastore
    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    if let Some(ref owner) = owner {
        // restoring as someone else is only allowed for the caller itself,
        // or for a user restoring as one of their own API tokens
        let correct_owner = owner == &auth_id
            || (owner.is_token()
                && !auth_id.is_token()
                && owner.user() == auth_id.user());

        // same permission as changing ownership after syncing
        if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
            bail!("no permission to restore as '{}'", owner);
        }
    }

    // caller needs Tape.Read on the drive
    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    // prevent concurrent modification of this media set while restoring
    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    // ... and Tape.Read on the pool the media set belongs to
    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let datastore = DataStore::lookup_datastore(&store)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            let mut encryption_key_fingerprint = None;

            // collect all media of the set; fail early if the set is incomplete
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            // remember the first encryption key fingerprint seen (for logging)
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore: {}", store);
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

            // restore each media of the set in sequence
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &datastore,
                    &auth_id,
                    &notify_user,
                    &owner,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);

            // best effort - failure to reset the drive state is only logged
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(
                    worker,
                    "could not unset drive state for {}: {}",
                    drive,
                    err
                );
            }

            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
274
275 /// Request and restore complete media without using existing catalog (create catalog instead)
276 pub fn request_and_restore_media(
277 worker: &WorkerTask,
278 media_id: &MediaId,
279 drive_config: &SectionConfigData,
280 drive_name: &str,
281 datastore: &DataStore,
282 authid: &Authid,
283 notify_user: &Option<Userid>,
284 owner: &Option<Authid>,
285 ) -> Result<(), Error> {
286
287 let media_set_uuid = match media_id.media_set_label {
288 None => bail!("restore_media: no media set - internal error"),
289 Some(ref set) => &set.uuid,
290 };
291
292 let email = notify_user
293 .as_ref()
294 .and_then(|userid| lookup_user_email(userid))
295 .or_else(|| lookup_user_email(&authid.clone().into()));
296
297 let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;
298
299 match info.media_set_label {
300 None => {
301 bail!("missing media set label on media {} ({})",
302 media_id.label.label_text, media_id.label.uuid);
303 }
304 Some(ref set) => {
305 if &set.uuid != media_set_uuid {
306 bail!("wrong media set label on media {} ({} != {})",
307 media_id.label.label_text, media_id.label.uuid,
308 media_set_uuid);
309 }
310 let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
311 .map(|fp| (fp, set.uuid.clone()));
312
313 drive.set_encryption(encrypt_fingerprint)?;
314 }
315 }
316
317 let restore_owner = owner.as_ref().unwrap_or(authid);
318
319 restore_media(worker, &mut drive, &info, Some((datastore, restore_owner)), false)
320 }
321
322 /// Restore complete media content and catalog
323 ///
324 /// Only create the catalog if target is None.
325 pub fn restore_media(
326 worker: &WorkerTask,
327 drive: &mut Box<dyn TapeDriver>,
328 media_id: &MediaId,
329 target: Option<(&DataStore, &Authid)>,
330 verbose: bool,
331 ) -> Result<(), Error> {
332
333 let status_path = Path::new(TAPE_STATUS_DIR);
334 let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
335
336 loop {
337 let current_file_number = drive.current_file_number()?;
338 let reader = match drive.read_next_file()? {
339 None => {
340 task_log!(worker, "detected EOT after {} files", current_file_number);
341 break;
342 }
343 Some(reader) => reader,
344 };
345
346 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
347 }
348
349 MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
350
351 Ok(())
352 }
353
/// Restore a single tape file and register its content in the catalog.
///
/// Reads the `MediaContentHeader` from `reader` and dispatches on the
/// content magic: snapshot archives are restored into the datastore
/// (when `target` is set), chunk archives get their chunks inserted,
/// and catalog archives are skipped (catalogs are rebuilt locally
/// while restoring). The media catalog is updated and committed.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels live at fixed positions before the data area
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    task_log!(worker, "restore snapshot {}", backup_dir);

                    match restore_snapshot_archive(worker, reader, &path) {
                        Err(err) => {
                            // remove partial data before propagating the error
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            // incomplete archive on tape - drop the partial snapshot
                            std::fs::remove_dir_all(&path)?;
                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
                // snapshot already exists on disk: fall through and only
                // register it in the catalog (after skipping the data)
            }

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target.as_ref().map(|t| t.0);

            // None means the stream was aborted without end marker - do not register
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number, &source_datastore)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            // catalogs are rebuilt during restore, so the stored catalog
            // archive is not needed here - just skip over it
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);

            reader.skip_to_end()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
469
/// Restore the chunks of a chunk archive from tape.
///
/// Iterates over all chunks in the archive, optionally inserting them
/// into `datastore` (skipped when `datastore` is `None`, e.g. when only
/// rebuilding the catalog). Returns the list of chunk digests seen, or
/// `Ok(None)` for an aborted stream without end marker.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {

            worker.check_abort()?;

            if let Some(datastore) = datastore {
                // only insert chunks that are not already present
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    // unencrypted chunks can be fully verified against the digest
                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
            } else if verbose {
                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
530
531 fn restore_snapshot_archive<'a>(
532 worker: &WorkerTask,
533 reader: Box<dyn 'a + TapeRead>,
534 snapshot_path: &Path,
535 ) -> Result<bool, Error> {
536
537 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
538 match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
539 Ok(()) => Ok(true),
540 Err(err) => {
541 let reader = decoder.input();
542
543 // check if this stream is marked incomplete
544 if let Ok(true) = reader.is_incomplete() {
545 return Ok(false);
546 }
547
548 // check if this is an aborted stream without end marker
549 if let Ok(false) = reader.has_end_marker() {
550 return Ok(false);
551 }
552
553 // else the archive is corrupt
554 Err(err)
555 }
556 }
557 }
558
559 fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
560 worker: &WorkerTask,
561 decoder: &mut pxar::decoder::sync::Decoder<R>,
562 snapshot_path: &Path,
563 ) -> Result<(), Error> {
564
565 let _root = match decoder.next() {
566 None => bail!("missing root entry"),
567 Some(root) => {
568 let root = root?;
569 match root.kind() {
570 pxar::EntryKind::Directory => { /* Ok */ }
571 _ => bail!("wrong root entry type"),
572 }
573 root
574 }
575 };
576
577 let root_path = Path::new("/");
578 let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
579
580 let mut manifest = None;
581
582 loop {
583 worker.check_abort()?;
584
585 let entry = match decoder.next() {
586 None => break,
587 Some(entry) => entry?,
588 };
589 let entry_path = entry.path();
590
591 match entry.kind() {
592 pxar::EntryKind::File { .. } => { /* Ok */ }
593 _ => bail!("wrong entry type for {:?}", entry_path),
594 }
595 match entry_path.parent() {
596 None => bail!("wrong parent for {:?}", entry_path),
597 Some(p) => {
598 if p != root_path {
599 bail!("wrong parent for {:?}", entry_path);
600 }
601 }
602 }
603
604 let filename = entry.file_name();
605 let mut contents = match decoder.contents() {
606 None => bail!("missing file content"),
607 Some(contents) => contents,
608 };
609
610 let mut archive_path = snapshot_path.to_owned();
611 archive_path.push(&filename);
612
613 let mut tmp_path = archive_path.clone();
614 tmp_path.set_extension("tmp");
615
616 if filename == manifest_file_name {
617
618 let blob = DataBlob::load_from_reader(&mut contents)?;
619 let options = CreateOptions::new();
620 replace_file(&tmp_path, blob.raw_data(), options)?;
621
622 manifest = Some(BackupManifest::try_from(blob)?);
623 } else {
624 let mut tmpfile = std::fs::OpenOptions::new()
625 .write(true)
626 .create(true)
627 .read(true)
628 .open(&tmp_path)
629 .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;
630
631 std::io::copy(&mut contents, &mut tmpfile)?;
632
633 if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
634 bail!("Atomic rename file {:?} failed - {}", archive_path, err);
635 }
636 }
637 }
638
639 let manifest = match manifest {
640 None => bail!("missing manifest"),
641 Some(manifest) => manifest,
642 };
643
644 for item in manifest.files() {
645 let mut archive_path = snapshot_path.to_owned();
646 archive_path.push(&item.filename);
647
648 match archive_type(&item.filename)? {
649 ArchiveType::DynamicIndex => {
650 let index = DynamicIndexReader::open(&archive_path)?;
651 let (csum, size) = index.compute_csum();
652 manifest.verify_file(&item.filename, &csum, size)?;
653 }
654 ArchiveType::FixedIndex => {
655 let index = FixedIndexReader::open(&archive_path)?;
656 let (csum, size) = index.compute_csum();
657 manifest.verify_file(&item.filename, &csum, size)?;
658 }
659 ArchiveType::Blob => {
660 let mut tmpfile = std::fs::File::open(&archive_path)?;
661 let (csum, size) = compute_file_csum(&mut tmpfile)?;
662 manifest.verify_file(&item.filename, &csum, size)?;
663 }
664 }
665 }
666
667 // commit manifest
668 let mut manifest_path = snapshot_path.to_owned();
669 manifest_path.push(MANIFEST_BLOB_NAME);
670 let mut tmp_manifest_path = manifest_path.clone();
671 tmp_manifest_path.set_extension("tmp");
672
673 if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
674 bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
675 }
676
677 Ok(())
678 }
679
680 /// Try to restore media catalogs (form catalog_archives)
681 pub fn fast_catalog_restore(
682 worker: &WorkerTask,
683 drive: &mut Box<dyn TapeDriver>,
684 media_set: &MediaSet,
685 uuid: &Uuid, // current media Uuid
686 ) -> Result<bool, Error> {
687
688 let status_path = Path::new(TAPE_STATUS_DIR);
689
690 let current_file_number = drive.current_file_number()?;
691 if current_file_number != 2 {
692 bail!("fast_catalog_restore: wrong media position - internal error");
693 }
694
695 let mut found_catalog = false;
696
697 let mut moved_to_eom = false;
698
699 loop {
700 let current_file_number = drive.current_file_number()?;
701
702 { // limit reader scope
703 let mut reader = match drive.read_next_file()? {
704 None => {
705 task_log!(worker, "detected EOT after {} files", current_file_number);
706 break;
707 }
708 Some(reader) => reader,
709 };
710
711 let header: MediaContentHeader = unsafe { reader.read_le_value()? };
712 if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
713 bail!("missing MediaContentHeader");
714 }
715
716 if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
717 task_log!(worker, "found catalog at pos {}", current_file_number);
718
719 let header_data = reader.read_exact_allocated(header.size as usize)?;
720
721 let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
722 .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
723
724 if &archive_header.media_set_uuid != media_set.uuid() {
725 task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
726 reader.skip_to_end()?; // read all data
727 continue;
728 }
729
730 let catalog_uuid = &archive_header.uuid;
731
732 let wanted = media_set
733 .media_list()
734 .iter()
735 .find(|e| {
736 match e {
737 None => false,
738 Some(uuid) => uuid == catalog_uuid,
739 }
740 })
741 .is_some();
742
743 if !wanted {
744 task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
745 reader.skip_to_end()?; // read all data
746 continue;
747 }
748
749 if catalog_uuid == uuid {
750 // always restore and overwrite catalog
751 } else {
752 // only restore if catalog does not exist
753 if MediaCatalog::exists(status_path, catalog_uuid) {
754 task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
755 reader.skip_to_end()?; // read all data
756 continue;
757 }
758 }
759
760 let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;
761
762 std::io::copy(&mut reader, &mut file)?;
763
764 file.seek(SeekFrom::Start(0))?;
765
766 match MediaCatalog::parse_catalog_header(&mut file)? {
767 (true, Some(media_uuid), Some(media_set_uuid)) => {
768 if &media_uuid != catalog_uuid {
769 task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
770 continue;
771 }
772 if media_set_uuid != archive_header.media_set_uuid {
773 task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
774 continue;
775 }
776
777 MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;
778
779 if catalog_uuid == uuid {
780 task_log!(worker, "successfully restored catalog");
781 found_catalog = true
782 } else {
783 task_log!(worker, "successfully restored related catalog {}", media_uuid);
784 }
785 }
786 _ => {
787 task_warn!(worker, "got incomplete catalog header - skip file");
788 continue;
789 }
790 }
791
792 continue;
793 }
794 }
795
796 if moved_to_eom {
797 break; // already done - stop
798 }
799 moved_to_eom = true;
800
801 task_log!(worker, "searching for catalog at EOT (moving to EOT)");
802 drive.move_to_last_file()?;
803
804 let new_file_number = drive.current_file_number()?;
805
806 if new_file_number < (current_file_number + 1) {
807 break; // no new content - stop
808 }
809 }
810
811 Ok(found_catalog)
812 }