3 use std
::collections
::{HashMap, HashSet}
;
4 use std
::convert
::TryFrom
;
5 use std
::io
::{Seek, SeekFrom}
;
8 use anyhow
::{bail, format_err, Error}
;
18 schema
::parse_property_string
,
19 section_config
::SectionConfigData
,
35 tools
::compute_file_csum
,
37 DATASTORE_MAP_ARRAY_SCHEMA
,
38 DATASTORE_MAP_LIST_SCHEMA
,
46 cached_user_info
::CachedUserInfo
,
48 PRIV_DATASTORE_BACKUP
,
49 PRIV_DATASTORE_MODIFY
,
79 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0
,
80 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0
,
81 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
,
82 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0
,
83 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
,
84 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0
,
85 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1
,
86 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
,
90 SnapshotArchiveHeader
,
95 request_and_load_media
,
97 set_tape_device_state
,
102 pub struct DataStoreMap
{
103 map
: HashMap
<String
, Arc
<DataStore
>>,
104 default: Option
<Arc
<DataStore
>>,
107 impl TryFrom
<String
> for DataStoreMap
{
110 fn try_from(value
: String
) -> Result
<Self, Error
> {
111 let value
= parse_property_string(&value
, &DATASTORE_MAP_ARRAY_SCHEMA
)?
;
112 let mut mapping
: Vec
<String
> = value
116 .map(|v
| v
.as_str().unwrap().to_string())
119 let mut map
= HashMap
::new();
120 let mut default = None
;
121 while let Some(mut store
) = mapping
.pop() {
122 if let Some(index
) = store
.find('
='
) {
123 let mut target
= store
.split_off(index
);
124 target
.remove(0); // remove '='
125 let datastore
= DataStore
::lookup_datastore(&target
)?
;
126 map
.insert(store
, datastore
);
127 } else if default.is_none() {
128 default = Some(DataStore
::lookup_datastore(&store
)?
);
130 bail
!("multiple default stores given");
134 Ok(Self { map, default }
)
139 fn used_datastores
<'a
>(&self) -> HashSet
<&str> {
140 let mut set
= HashSet
::new();
141 for store
in self.map
.values() {
142 set
.insert(store
.name());
145 if let Some(ref store
) = self.default {
146 set
.insert(store
.name());
152 fn get_datastore(&self, source
: &str) -> Option
<&DataStore
> {
153 if let Some(store
) = self.map
.get(source
) {
156 if let Some(ref store
) = self.default {
/// Tape restore API router: POST dispatches to the `restore` handler.
pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
170 schema
: DATASTORE_MAP_LIST_SCHEMA
,
173 schema
: DRIVE_NAME_SCHEMA
,
176 description
: "Media set UUID.",
193 // Note: parameters are no uri parameter, so we need to test inside function body
194 description
: "The user needs Tape.Read privilege on /tape/pool/{pool} \
195 and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
196 permission
: &Permission
::Anybody
,
199 /// Restore data from media-set
204 notify_user
: Option
<Userid
>,
205 owner
: Option
<Authid
>,
206 rpcenv
: &mut dyn RpcEnvironment
,
207 ) -> Result
<Value
, Error
> {
208 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
209 let user_info
= CachedUserInfo
::new()?
;
211 let store_map
= DataStoreMap
::try_from(store
)
212 .map_err(|err
| format_err
!("cannot parse store mapping: {}", err
))?
;
213 let used_datastores
= store_map
.used_datastores();
214 if used_datastores
.len() == 0 {
215 bail
!("no datastores given");
218 for store
in used_datastores
.iter() {
219 let privs
= user_info
.lookup_privs(&auth_id
, &["datastore", &store
]);
220 if (privs
& PRIV_DATASTORE_BACKUP
) == 0 {
221 bail
!("no permissions on /datastore/{}", store
);
224 if let Some(ref owner
) = owner
{
225 let correct_owner
= owner
== &auth_id
226 || (owner
.is_token() && !auth_id
.is_token() && owner
.user() == auth_id
.user());
228 // same permission as changing ownership after syncing
229 if !correct_owner
&& privs
& PRIV_DATASTORE_MODIFY
== 0 {
230 bail
!("no permission to restore as '{}'", owner
);
235 let privs
= user_info
.lookup_privs(&auth_id
, &["tape", "drive", &drive
]);
236 if (privs
& PRIV_TAPE_READ
) == 0 {
237 bail
!("no permissions on /tape/drive/{}", drive
);
240 let media_set_uuid
= media_set
.parse()?
;
242 let status_path
= Path
::new(TAPE_STATUS_DIR
);
244 let _lock
= lock_media_set(status_path
, &media_set_uuid
, None
)?
;
246 let inventory
= Inventory
::load(status_path
)?
;
248 let pool
= inventory
.lookup_media_set_pool(&media_set_uuid
)?
;
250 let privs
= user_info
.lookup_privs(&auth_id
, &["tape", "pool", &pool
]);
251 if (privs
& PRIV_TAPE_READ
) == 0 {
252 bail
!("no permissions on /tape/pool/{}", pool
);
255 let (drive_config
, _digest
) = config
::drive
::config()?
;
257 // early check/lock before starting worker
258 let drive_lock
= lock_tape_device(&drive_config
, &drive
)?
;
260 let to_stdout
= rpcenv
.env_type() == RpcEnvironmentType
::CLI
;
262 let taskid
= used_datastores
264 .map(|s
| s
.to_string())
265 .collect
::<Vec
<String
>>()
267 let upid_str
= WorkerTask
::new_thread(
273 let _drive_lock
= drive_lock
; // keep lock guard
275 set_tape_device_state(&drive
, &worker
.upid().to_string())?
;
277 let members
= inventory
.compute_media_set_members(&media_set_uuid
)?
;
279 let media_list
= members
.media_list();
281 let mut media_id_list
= Vec
::new();
283 let mut encryption_key_fingerprint
= None
;
285 for (seq_nr
, media_uuid
) in media_list
.iter().enumerate() {
288 bail
!("media set {} is incomplete (missing member {}).", media_set_uuid
, seq_nr
);
290 Some(media_uuid
) => {
291 let media_id
= inventory
.lookup_media(media_uuid
).unwrap();
292 if let Some(ref set
) = media_id
.media_set_label
{ // always true here
293 if encryption_key_fingerprint
.is_none() && set
.encryption_key_fingerprint
.is_some() {
294 encryption_key_fingerprint
= set
.encryption_key_fingerprint
.clone();
297 media_id_list
.push(media_id
);
302 task_log
!(worker
, "Restore mediaset '{}'", media_set
);
303 if let Some(fingerprint
) = encryption_key_fingerprint
{
304 task_log
!(worker
, "Encryption key fingerprint: {}", fingerprint
);
306 task_log
!(worker
, "Pool: {}", pool
);
307 task_log
!(worker
, "Datastore(s):");
311 .for_each(|store
| task_log
!(worker
, "\t{}", store
));
312 task_log
!(worker
, "Drive: {}", drive
);
315 "Required media list: {}",
317 .map(|media_id
| media_id
.label
.label_text
.as_str())
318 .collect
::<Vec
<&str>>()
322 for media_id
in media_id_list
.iter() {
323 request_and_restore_media(
335 task_log
!(worker
, "Restore mediaset '{}' done", media_set
);
337 if let Err(err
) = set_tape_device_state(&drive
, "") {
340 "could not unset drive state for {}: {}",
353 /// Request and restore complete media without using existing catalog (create catalog instead)
354 pub fn request_and_restore_media(
357 drive_config
: &SectionConfigData
,
359 store_map
: &DataStoreMap
,
361 notify_user
: &Option
<Userid
>,
362 owner
: &Option
<Authid
>,
363 ) -> Result
<(), Error
> {
364 let media_set_uuid
= match media_id
.media_set_label
{
365 None
=> bail
!("restore_media: no media set - internal error"),
366 Some(ref set
) => &set
.uuid
,
369 let email
= notify_user
371 .and_then(|userid
| lookup_user_email(userid
))
372 .or_else(|| lookup_user_email(&authid
.clone().into()));
374 let (mut drive
, info
) = request_and_load_media(worker
, &drive_config
, &drive_name
, &media_id
.label
, &email
)?
;
376 match info
.media_set_label
{
378 bail
!("missing media set label on media {} ({})",
379 media_id
.label
.label_text
, media_id
.label
.uuid
);
382 if &set
.uuid
!= media_set_uuid
{
383 bail
!("wrong media set label on media {} ({} != {})",
384 media_id
.label
.label_text
, media_id
.label
.uuid
,
387 let encrypt_fingerprint
= set
.encryption_key_fingerprint
.clone()
388 .map(|fp
| (fp
, set
.uuid
.clone()));
390 drive
.set_encryption(encrypt_fingerprint
)?
;
394 let restore_owner
= owner
.as_ref().unwrap_or(authid
);
400 Some((&store_map
, restore_owner
)),
405 /// Restore complete media content and catalog
407 /// Only create the catalog if target is None.
408 pub fn restore_media(
410 drive
: &mut Box
<dyn TapeDriver
>,
412 target
: Option
<(&DataStoreMap
, &Authid
)>,
414 ) -> Result
<(), Error
> {
416 let status_path
= Path
::new(TAPE_STATUS_DIR
);
417 let mut catalog
= MediaCatalog
::create_temporary_database(status_path
, media_id
, false)?
;
420 let current_file_number
= drive
.current_file_number()?
;
421 let reader
= match drive
.read_next_file()?
{
423 task_log
!(worker
, "detected EOT after {} files", current_file_number
);
426 Some(reader
) => reader
,
429 restore_archive(worker
, reader
, current_file_number
, target
, &mut catalog
, verbose
)?
;
432 MediaCatalog
::finish_temporary_database(status_path
, &media_id
.label
.uuid
, true)?
;
437 fn restore_archive
<'a
>(
439 mut reader
: Box
<dyn 'a
+ TapeRead
>,
440 current_file_number
: u64,
441 target
: Option
<(&DataStoreMap
, &Authid
)>,
442 catalog
: &mut MediaCatalog
,
444 ) -> Result
<(), Error
> {
445 let header
: MediaContentHeader
= unsafe { reader.read_le_value()? }
;
446 if header
.magic
!= PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
{
447 bail
!("missing MediaContentHeader");
450 //println!("Found MediaContentHeader: {:?}", header);
452 match header
.content_magic
{
453 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0
| PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0
=> {
454 bail
!("unexpected content magic (label)");
456 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0
=> {
457 bail
!("unexpected snapshot archive version (v1.0)");
459 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
=> {
460 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
462 let archive_header
: SnapshotArchiveHeader
= serde_json
::from_slice(&header_data
)
463 .map_err(|err
| format_err
!("unable to parse snapshot archive header - {}", err
))?
;
465 let datastore_name
= archive_header
.store
;
466 let snapshot
= archive_header
.snapshot
;
468 task_log
!(worker
, "File {}: snapshot archive {}:{}", current_file_number
, datastore_name
, snapshot
);
470 let backup_dir
: BackupDir
= snapshot
.parse()?
;
472 if let Some((store_map
, authid
)) = target
.as_ref() {
473 if let Some(datastore
) = store_map
.get_datastore(&datastore_name
) {
474 let (owner
, _group_lock
) =
475 datastore
.create_locked_backup_group(backup_dir
.group(), authid
)?
;
476 if *authid
!= &owner
{
477 // only the owner is allowed to create additional snapshots
479 "restore '{}' failed - owner check failed ({} != {})",
486 let (rel_path
, is_new
, _snap_lock
) =
487 datastore
.create_locked_backup_dir(&backup_dir
)?
;
488 let mut path
= datastore
.base_path();
492 task_log
!(worker
, "restore snapshot {}", backup_dir
);
494 match restore_snapshot_archive(worker
, reader
, &path
) {
496 std
::fs
::remove_dir_all(&path
)?
;
497 bail
!("restore snapshot {} failed - {}", backup_dir
, err
);
500 std
::fs
::remove_dir_all(&path
)?
;
501 task_log
!(worker
, "skip incomplete snapshot {}", backup_dir
);
504 catalog
.register_snapshot(
505 Uuid
::from(header
.uuid
),
510 catalog
.commit_if_large()?
;
516 task_log
!(worker
, "skipping...");
520 reader
.skip_to_end()?
; // read all data
521 if let Ok(false) = reader
.is_incomplete() {
522 catalog
.register_snapshot(Uuid
::from(header
.uuid
), current_file_number
, &datastore_name
, &snapshot
)?
;
523 catalog
.commit_if_large()?
;
526 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0
=> {
527 bail
!("unexpected chunk archive version (v1.0)");
529 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1
=> {
530 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
532 let archive_header
: ChunkArchiveHeader
= serde_json
::from_slice(&header_data
)
533 .map_err(|err
| format_err
!("unable to parse chunk archive header - {}", err
))?
;
535 let source_datastore
= archive_header
.store
;
537 task_log
!(worker
, "File {}: chunk archive for datastore '{}'", current_file_number
, source_datastore
);
538 let datastore
= target
540 .and_then(|t
| t
.0.get_datastore(&source_datastore
));
542 if datastore
.is_some() || target
.is_none() {
543 if let Some(chunks
) = restore_chunk_archive(worker
, reader
, datastore
, verbose
)?
{
544 catalog
.start_chunk_archive(
545 Uuid
::from(header
.uuid
),
549 for digest
in chunks
.iter() {
550 catalog
.register_chunk(&digest
)?
;
552 task_log
!(worker
, "register {} chunks", chunks
.len());
553 catalog
.end_chunk_archive()?
;
554 catalog
.commit_if_large()?
;
557 } else if target
.is_some() {
558 task_log
!(worker
, "skipping...");
561 reader
.skip_to_end()?
; // read all data
563 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
=> {
564 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
566 let archive_header
: CatalogArchiveHeader
= serde_json
::from_slice(&header_data
)
567 .map_err(|err
| format_err
!("unable to parse catalog archive header - {}", err
))?
;
569 task_log
!(worker
, "File {}: skip catalog '{}'", current_file_number
, archive_header
.uuid
);
571 reader
.skip_to_end()?
; // read all data
573 _
=> bail
!("unknown content magic {:?}", header
.content_magic
),
581 fn restore_chunk_archive
<'a
>(
583 reader
: Box
<dyn 'a
+ TapeRead
>,
584 datastore
: Option
<&DataStore
>,
586 ) -> Result
<Option
<Vec
<[u8;32]>>, Error
> {
588 let mut chunks
= Vec
::new();
590 let mut decoder
= ChunkArchiveDecoder
::new(reader
);
592 let result
: Result
<_
, Error
> = proxmox
::try_block
!({
593 while let Some((digest
, blob
)) = decoder
.next_chunk()?
{
595 worker
.check_abort()?
;
597 if let Some(datastore
) = datastore
{
598 let chunk_exists
= datastore
.cond_touch_chunk(&digest
, false)?
;
602 if blob
.crypt_mode()?
== CryptMode
::None
{
603 blob
.decode(None
, Some(&digest
))?
; // verify digest
606 task_log
!(worker
, "Insert chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
608 datastore
.insert_chunk(&blob
, &digest
)?
;
610 task_log
!(worker
, "Found existing chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
613 task_log
!(worker
, "Found chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
621 Ok(()) => Ok(Some(chunks
)),
623 let reader
= decoder
.reader();
625 // check if this stream is marked incomplete
626 if let Ok(true) = reader
.is_incomplete() {
627 return Ok(Some(chunks
));
630 // check if this is an aborted stream without end marker
631 if let Ok(false) = reader
.has_end_marker() {
632 worker
.log("missing stream end marker".to_string());
636 // else the archive is corrupt
642 fn restore_snapshot_archive
<'a
>(
644 reader
: Box
<dyn 'a
+ TapeRead
>,
645 snapshot_path
: &Path
,
646 ) -> Result
<bool
, Error
> {
648 let mut decoder
= pxar
::decoder
::sync
::Decoder
::from_std(reader
)?
;
649 match try_restore_snapshot_archive(worker
, &mut decoder
, snapshot_path
) {
652 let reader
= decoder
.input();
654 // check if this stream is marked incomplete
655 if let Ok(true) = reader
.is_incomplete() {
659 // check if this is an aborted stream without end marker
660 if let Ok(false) = reader
.has_end_marker() {
664 // else the archive is corrupt
670 fn try_restore_snapshot_archive
<R
: pxar
::decoder
::SeqRead
>(
672 decoder
: &mut pxar
::decoder
::sync
::Decoder
<R
>,
673 snapshot_path
: &Path
,
674 ) -> Result
<(), Error
> {
676 let _root
= match decoder
.next() {
677 None
=> bail
!("missing root entry"),
681 pxar
::EntryKind
::Directory
=> { /* Ok */ }
682 _
=> bail
!("wrong root entry type"),
688 let root_path
= Path
::new("/");
689 let manifest_file_name
= OsStr
::new(MANIFEST_BLOB_NAME
);
691 let mut manifest
= None
;
694 worker
.check_abort()?
;
696 let entry
= match decoder
.next() {
698 Some(entry
) => entry?
,
700 let entry_path
= entry
.path();
703 pxar
::EntryKind
::File { .. }
=> { /* Ok */ }
704 _
=> bail
!("wrong entry type for {:?}", entry_path
),
706 match entry_path
.parent() {
707 None
=> bail
!("wrong parent for {:?}", entry_path
),
710 bail
!("wrong parent for {:?}", entry_path
);
715 let filename
= entry
.file_name();
716 let mut contents
= match decoder
.contents() {
717 None
=> bail
!("missing file content"),
718 Some(contents
) => contents
,
721 let mut archive_path
= snapshot_path
.to_owned();
722 archive_path
.push(&filename
);
724 let mut tmp_path
= archive_path
.clone();
725 tmp_path
.set_extension("tmp");
727 if filename
== manifest_file_name
{
729 let blob
= DataBlob
::load_from_reader(&mut contents
)?
;
730 let options
= CreateOptions
::new();
731 replace_file(&tmp_path
, blob
.raw_data(), options
)?
;
733 manifest
= Some(BackupManifest
::try_from(blob
)?
);
735 let mut tmpfile
= std
::fs
::OpenOptions
::new()
740 .map_err(|err
| format_err
!("restore {:?} failed - {}", tmp_path
, err
))?
;
742 std
::io
::copy(&mut contents
, &mut tmpfile
)?
;
744 if let Err(err
) = std
::fs
::rename(&tmp_path
, &archive_path
) {
745 bail
!("Atomic rename file {:?} failed - {}", archive_path
, err
);
750 let manifest
= match manifest
{
751 None
=> bail
!("missing manifest"),
752 Some(manifest
) => manifest
,
755 for item
in manifest
.files() {
756 let mut archive_path
= snapshot_path
.to_owned();
757 archive_path
.push(&item
.filename
);
759 match archive_type(&item
.filename
)?
{
760 ArchiveType
::DynamicIndex
=> {
761 let index
= DynamicIndexReader
::open(&archive_path
)?
;
762 let (csum
, size
) = index
.compute_csum();
763 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
765 ArchiveType
::FixedIndex
=> {
766 let index
= FixedIndexReader
::open(&archive_path
)?
;
767 let (csum
, size
) = index
.compute_csum();
768 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
770 ArchiveType
::Blob
=> {
771 let mut tmpfile
= std
::fs
::File
::open(&archive_path
)?
;
772 let (csum
, size
) = compute_file_csum(&mut tmpfile
)?
;
773 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
779 let mut manifest_path
= snapshot_path
.to_owned();
780 manifest_path
.push(MANIFEST_BLOB_NAME
);
781 let mut tmp_manifest_path
= manifest_path
.clone();
782 tmp_manifest_path
.set_extension("tmp");
784 if let Err(err
) = std
::fs
::rename(&tmp_manifest_path
, &manifest_path
) {
785 bail
!("Atomic rename manifest {:?} failed - {}", manifest_path
, err
);
791 /// Try to restore media catalogs (form catalog_archives)
792 pub fn fast_catalog_restore(
794 drive
: &mut Box
<dyn TapeDriver
>,
795 media_set
: &MediaSet
,
796 uuid
: &Uuid
, // current media Uuid
797 ) -> Result
<bool
, Error
> {
799 let status_path
= Path
::new(TAPE_STATUS_DIR
);
801 let current_file_number
= drive
.current_file_number()?
;
802 if current_file_number
!= 2 {
803 bail
!("fast_catalog_restore: wrong media position - internal error");
806 let mut found_catalog
= false;
808 let mut moved_to_eom
= false;
811 let current_file_number
= drive
.current_file_number()?
;
813 { // limit reader scope
814 let mut reader
= match drive
.read_next_file()?
{
816 task_log
!(worker
, "detected EOT after {} files", current_file_number
);
819 Some(reader
) => reader
,
822 let header
: MediaContentHeader
= unsafe { reader.read_le_value()? }
;
823 if header
.magic
!= PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
{
824 bail
!("missing MediaContentHeader");
827 if header
.content_magic
== PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
{
828 task_log
!(worker
, "found catalog at pos {}", current_file_number
);
830 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
832 let archive_header
: CatalogArchiveHeader
= serde_json
::from_slice(&header_data
)
833 .map_err(|err
| format_err
!("unable to parse catalog archive header - {}", err
))?
;
835 if &archive_header
.media_set_uuid
!= media_set
.uuid() {
836 task_log
!(worker
, "skipping unrelated catalog at pos {}", current_file_number
);
837 reader
.skip_to_end()?
; // read all data
841 let catalog_uuid
= &archive_header
.uuid
;
843 let wanted
= media_set
849 Some(uuid
) => uuid
== catalog_uuid
,
855 task_log
!(worker
, "skip catalog because media '{}' not inventarized", catalog_uuid
);
856 reader
.skip_to_end()?
; // read all data
860 if catalog_uuid
== uuid
{
861 // always restore and overwrite catalog
863 // only restore if catalog does not exist
864 if MediaCatalog
::exists(status_path
, catalog_uuid
) {
865 task_log
!(worker
, "catalog for media '{}' already exists", catalog_uuid
);
866 reader
.skip_to_end()?
; // read all data
871 let mut file
= MediaCatalog
::create_temporary_database_file(status_path
, catalog_uuid
)?
;
873 std
::io
::copy(&mut reader
, &mut file
)?
;
875 file
.seek(SeekFrom
::Start(0))?
;
877 match MediaCatalog
::parse_catalog_header(&mut file
)?
{
878 (true, Some(media_uuid
), Some(media_set_uuid
)) => {
879 if &media_uuid
!= catalog_uuid
{
880 task_log
!(worker
, "catalog uuid missmatch at pos {}", current_file_number
);
883 if media_set_uuid
!= archive_header
.media_set_uuid
{
884 task_log
!(worker
, "catalog media_set missmatch at pos {}", current_file_number
);
888 MediaCatalog
::finish_temporary_database(status_path
, &media_uuid
, true)?
;
890 if catalog_uuid
== uuid
{
891 task_log
!(worker
, "successfully restored catalog");
894 task_log
!(worker
, "successfully restored related catalog {}", media_uuid
);
898 task_warn
!(worker
, "got incomplete catalog header - skip file");
908 break; // already done - stop
912 task_log
!(worker
, "searching for catalog at EOT (moving to EOT)");
913 drive
.move_to_last_file()?
;
915 let new_file_number
= drive
.current_file_number()?
;
917 if new_file_number
< (current_file_number
+ 1) {
918 break; // no new content - stop