3 use std
::collections
::{HashMap, HashSet}
;
4 use std
::convert
::TryFrom
;
5 use std
::io
::{Seek, SeekFrom}
;
8 use anyhow
::{bail, format_err, Error}
;
18 schema
::parse_property_string
,
19 section_config
::SectionConfigData
,
35 tools
::compute_file_csum
,
37 DATASTORE_MAP_ARRAY_SCHEMA
,
38 DATASTORE_MAP_LIST_SCHEMA
,
46 cached_user_info
::CachedUserInfo
,
48 PRIV_DATASTORE_BACKUP
,
49 PRIV_DATASTORE_MODIFY
,
80 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0
,
81 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0
,
82 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
,
83 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0
,
84 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
,
85 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0
,
86 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1
,
87 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
,
91 SnapshotArchiveHeader
,
96 request_and_load_media
,
98 set_tape_device_state
,
103 pub struct DataStoreMap
{
104 map
: HashMap
<String
, Arc
<DataStore
>>,
105 default: Option
<Arc
<DataStore
>>,
108 impl TryFrom
<String
> for DataStoreMap
{
111 fn try_from(value
: String
) -> Result
<Self, Error
> {
112 let value
= parse_property_string(&value
, &DATASTORE_MAP_ARRAY_SCHEMA
)?
;
113 let mut mapping
: Vec
<String
> = value
117 .map(|v
| v
.as_str().unwrap().to_string())
120 let mut map
= HashMap
::new();
121 let mut default = None
;
122 while let Some(mut store
) = mapping
.pop() {
123 if let Some(index
) = store
.find('
='
) {
124 let mut target
= store
.split_off(index
);
125 target
.remove(0); // remove '='
126 let datastore
= DataStore
::lookup_datastore(&target
)?
;
127 map
.insert(store
, datastore
);
128 } else if default.is_none() {
129 default = Some(DataStore
::lookup_datastore(&store
)?
);
131 bail
!("multiple default stores given");
135 Ok(Self { map, default }
)
140 fn used_datastores
<'a
>(&self) -> HashSet
<&str> {
141 let mut set
= HashSet
::new();
142 for store
in self.map
.values() {
143 set
.insert(store
.name());
146 if let Some(ref store
) = self.default {
147 set
.insert(store
.name());
153 fn get_datastore(&self, source
: &str) -> Option
<&DataStore
> {
154 if let Some(store
) = self.map
.get(source
) {
157 if let Some(ref store
) = self.default {
165 pub const ROUTER
: Router
= Router
::new().post(&API_METHOD_RESTORE
);
// NOTE(review): mangled extraction of the `#[api(...)]` attribute and the
// `restore` API handler; many original lines are missing and tokens are
// split across physical lines, so the code below is kept byte-identical
// and only annotated.
171 schema
: DATASTORE_MAP_LIST_SCHEMA
,
174 schema
: DRIVE_NAME_SCHEMA
,
177 description
: "Media set UUID.",
194 // Note: parameters are no uri parameter, so we need to test inside function body
195 description
: "The user needs Tape.Read privilege on /tape/pool/{pool} \
196 and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
197 permission
: &Permission
::Anybody
,
200 /// Restore data from media-set
205 notify_user
: Option
<Userid
>,
206 owner
: Option
<Authid
>,
207 rpcenv
: &mut dyn RpcEnvironment
,
208 ) -> Result
<Value
, Error
> {
// resolve the calling user and load the privilege (ACL) cache
209 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
210 let user_info
= CachedUserInfo
::new()?
;
// parse the `store` mapping parameter (see `DataStoreMap::try_from`)
212 let store_map
= DataStoreMap
::try_from(store
)
213 .map_err(|err
| format_err
!("cannot parse store mapping: {}", err
))?
;
214 let used_datastores
= store_map
.used_datastores();
215 if used_datastores
.len() == 0 {
216 bail
!("no datastores given");
// caller needs Datastore.Backup on every target datastore
219 for store
in used_datastores
.iter() {
220 let privs
= user_info
.lookup_privs(&auth_id
, &["datastore", &store
]);
221 if (privs
& PRIV_DATASTORE_BACKUP
) == 0 {
222 bail
!("no permissions on /datastore/{}", store
);
225 if let Some(ref owner
) = owner
{
226 let correct_owner
= owner
== &auth_id
227 || (owner
.is_token() && !auth_id
.is_token() && owner
.user() == auth_id
.user());
229 // same permission as changing ownership after syncing
230 if !correct_owner
&& privs
& PRIV_DATASTORE_MODIFY
== 0 {
231 bail
!("no permission to restore as '{}'", owner
);
// caller also needs Tape.Read on the drive
236 let privs
= user_info
.lookup_privs(&auth_id
, &["tape", "drive", &drive
]);
237 if (privs
& PRIV_TAPE_READ
) == 0 {
238 bail
!("no permissions on /tape/drive/{}", drive
);
// lock the media set, load the inventory, resolve and check the pool
241 let media_set_uuid
= media_set
.parse()?
;
243 let status_path
= Path
::new(TAPE_STATUS_DIR
);
245 let _lock
= lock_media_set(status_path
, &media_set_uuid
, None
)?
;
247 let inventory
= Inventory
::load(status_path
)?
;
249 let pool
= inventory
.lookup_media_set_pool(&media_set_uuid
)?
;
251 let privs
= user_info
.lookup_privs(&auth_id
, &["tape", "pool", &pool
]);
252 if (privs
& PRIV_TAPE_READ
) == 0 {
253 bail
!("no permissions on /tape/pool/{}", pool
);
256 let (drive_config
, _digest
) = config
::drive
::config()?
;
258 // early check/lock before starting worker
259 let drive_lock
= lock_tape_device(&drive_config
, &drive
)?
;
261 let to_stdout
= rpcenv
.env_type() == RpcEnvironmentType
::CLI
;
263 let taskid
= used_datastores
265 .map(|s
| s
.to_string())
266 .collect
::<Vec
<String
>>()
// spawn the restore worker thread; the drive lock moves into the closure
268 let upid_str
= WorkerTask
::new_thread(
274 let _drive_lock
= drive_lock
; // keep lock guard
276 set_tape_device_state(&drive
, &worker
.upid().to_string())?
;
// collect all media of the set; bail on missing members and remember the
// first encryption key fingerprint found on a media-set label (if any)
278 let members
= inventory
.compute_media_set_members(&media_set_uuid
)?
;
280 let media_list
= members
.media_list();
282 let mut media_id_list
= Vec
::new();
284 let mut encryption_key_fingerprint
= None
;
286 for (seq_nr
, media_uuid
) in media_list
.iter().enumerate() {
289 bail
!("media set {} is incomplete (missing member {}).", media_set_uuid
, seq_nr
);
291 Some(media_uuid
) => {
292 let media_id
= inventory
.lookup_media(media_uuid
).unwrap();
293 if let Some(ref set
) = media_id
.media_set_label
{ // always true here
294 if encryption_key_fingerprint
.is_none() && set
.encryption_key_fingerprint
.is_some() {
295 encryption_key_fingerprint
= set
.encryption_key_fingerprint
.clone();
298 media_id_list
.push(media_id
);
303 task_log
!(worker
, "Restore mediaset '{}'", media_set
);
304 if let Some(fingerprint
) = encryption_key_fingerprint
{
305 task_log
!(worker
, "Encryption key fingerprint: {}", fingerprint
);
307 task_log
!(worker
, "Pool: {}", pool
);
308 task_log
!(worker
, "Datastore(s):");
312 .for_each(|store
| task_log
!(worker
, "\t{}", store
));
313 task_log
!(worker
, "Drive: {}", drive
);
316 "Required media list: {}",
318 .map(|media_id
| media_id
.label
.label_text
.as_str())
319 .collect
::<Vec
<&str>>()
// restore each media of the set in sequence
323 for media_id
in media_id_list
.iter() {
324 request_and_restore_media(
336 task_log
!(worker
, "Restore mediaset '{}' done", media_set
);
// best-effort cleanup: clear the drive state, only log on failure
338 if let Err(err
) = set_tape_device_state(&drive
, "") {
341 "could not unset drive state for {}: {}",
// NOTE(review): mangled extraction — original lines are missing (e.g. the
// `worker`, `drive_name`, `media_id` and `authid` parameters and the final
// `restore_media(...)` call); code kept byte-identical, comments only.
354 /// Request and restore complete media without using existing catalog (create catalog instead)
355 pub fn request_and_restore_media(
358 drive_config
: &SectionConfigData
,
360 store_map
: &DataStoreMap
,
362 notify_user
: &Option
<Userid
>,
363 owner
: &Option
<Authid
>,
364 ) -> Result
<(), Error
> {
// the media must carry a media-set label - internal error otherwise
365 let media_set_uuid
= match media_id
.media_set_label
{
366 None
=> bail
!("restore_media: no media set - internal error"),
367 Some(ref set
) => &set
.uuid
,
// notification target: explicit notify_user, else the auth user's email
370 let email
= notify_user
372 .and_then(|userid
| lookup_user_email(userid
))
373 .or_else(|| lookup_user_email(&authid
.clone().into()));
// load the requested media into the drive
375 let (mut drive
, info
) = request_and_load_media(worker
, &drive_config
, &drive_name
, &media_id
.label
, &email
)?
;
// verify the loaded media belongs to the expected media set
377 match info
.media_set_label
{
379 bail
!("missing media set label on media {} ({})",
380 media_id
.label
.label_text
, media_id
.label
.uuid
);
383 if &set
.uuid
!= media_set_uuid
{
384 bail
!("wrong media set label on media {} ({} != {})",
385 media_id
.label
.label_text
, media_id
.label
.uuid
,
// enable decryption with the media set's key fingerprint (if any)
388 let encrypt_fingerprint
= set
.encryption_key_fingerprint
.clone()
389 .map(|fp
| (fp
, set
.uuid
.clone()));
391 drive
.set_encryption(encrypt_fingerprint
)?
;
// restored snapshots are owned by `owner`, falling back to the caller
395 let restore_owner
= owner
.as_ref().unwrap_or(authid
);
401 Some((&store_map
, restore_owner
)),
// NOTE(review): mangled extraction — the surrounding per-file loop header
// and the `worker`, `media_id` and `verbose` parameters are on missing
// lines; code kept byte-identical, comments only.
406 /// Restore complete media content and catalog
408 /// Only create the catalog if target is None.
409 pub fn restore_media(
411 drive
: &mut Box
<dyn TapeDriver
>,
413 target
: Option
<(&DataStoreMap
, &Authid
)>,
415 ) -> Result
<(), Error
> {
// temporary catalog database, finalized at the bottom on success
417 let status_path
= Path
::new(TAPE_STATUS_DIR
);
418 let mut catalog
= MediaCatalog
::create_temporary_database(status_path
, media_id
, false)?
;
// presumably inside a per-file loop (loop header on a missing line) -
// read the next tape file, stopping at EOT — TODO confirm
421 let current_file_number
= drive
.current_file_number()?
;
422 let reader
= match drive
.read_next_file() {
423 Err(BlockReadError
::EndOfFile
) => {
424 task_log
!(worker
, "skip unexpected filemark at pos {}", current_file_number
);
427 Err(BlockReadError
::EndOfStream
) => {
428 task_log
!(worker
, "detected EOT after {} files", current_file_number
);
431 Err(BlockReadError
::Error(err
)) => {
432 return Err(err
.into());
434 Ok(reader
) => reader
,
// restore/skip this file's content and record it in the catalog
437 restore_archive(worker
, reader
, current_file_number
, target
, &mut catalog
, verbose
)?
;
// persist the temporary catalog as the final one
440 MediaCatalog
::finish_temporary_database(status_path
, &media_id
.label
.uuid
, true)?
;
// NOTE(review): mangled extraction — the `worker` and `verbose` parameters
// and several branch/closing lines are missing; code kept byte-identical,
// comments only. Dispatches one tape file by its content magic.
445 fn restore_archive
<'a
>(
447 mut reader
: Box
<dyn 'a
+ TapeRead
>,
448 current_file_number
: u64,
449 target
: Option
<(&DataStoreMap
, &Authid
)>,
450 catalog
: &mut MediaCatalog
,
452 ) -> Result
<(), Error
> {
// every tape file starts with a MediaContentHeader
453 let header
: MediaContentHeader
= unsafe { reader.read_le_value()? }
;
454 if header
.magic
!= PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
{
455 bail
!("missing MediaContentHeader");
458 //println!("Found MediaContentHeader: {:?}", header);
460 match header
.content_magic
{
461 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0
| PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0
=> {
462 bail
!("unexpected content magic (label)");
464 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0
=> {
465 bail
!("unexpected snapshot archive version (v1.0)");
// v1.1 snapshot archive: JSON header carries store + snapshot name
467 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
=> {
468 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
470 let archive_header
: SnapshotArchiveHeader
= serde_json
::from_slice(&header_data
)
471 .map_err(|err
| format_err
!("unable to parse snapshot archive header - {}", err
))?
;
473 let datastore_name
= archive_header
.store
;
474 let snapshot
= archive_header
.snapshot
;
476 task_log
!(worker
, "File {}: snapshot archive {}:{}", current_file_number
, datastore_name
, snapshot
);
478 let backup_dir
: BackupDir
= snapshot
.parse()?
;
480 if let Some((store_map
, authid
)) = target
.as_ref() {
481 if let Some(datastore
) = store_map
.get_datastore(&datastore_name
) {
482 let (owner
, _group_lock
) =
483 datastore
.create_locked_backup_group(backup_dir
.group(), authid
)?
;
484 if *authid
!= &owner
{
485 // only the owner is allowed to create additional snapshots
487 "restore '{}' failed - owner check failed ({} != {})",
494 let (rel_path
, is_new
, _snap_lock
) =
495 datastore
.create_locked_backup_dir(&backup_dir
)?
;
496 let mut path
= datastore
.base_path();
500 task_log
!(worker
, "restore snapshot {}", backup_dir
);
// on failed/incomplete restore the partially written directory is removed
502 match restore_snapshot_archive(worker
, reader
, &path
) {
504 std
::fs
::remove_dir_all(&path
)?
;
505 bail
!("restore snapshot {} failed - {}", backup_dir
, err
);
508 std
::fs
::remove_dir_all(&path
)?
;
509 task_log
!(worker
, "skip incomplete snapshot {}", backup_dir
);
512 catalog
.register_snapshot(
513 Uuid
::from(header
.uuid
),
518 catalog
.commit_if_large()?
;
524 task_log
!(worker
, "skipping...");
// skipped snapshot: consume the data, register in catalog when complete
528 reader
.skip_data()?
; // read all data
529 if let Ok(false) = reader
.is_incomplete() {
530 catalog
.register_snapshot(Uuid
::from(header
.uuid
), current_file_number
, &datastore_name
, &snapshot
)?
;
531 catalog
.commit_if_large()?
;
534 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0
=> {
535 bail
!("unexpected chunk archive version (v1.0)");
// v1.1 chunk archive: restore chunks into the mapped datastore
537 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1
=> {
538 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
540 let archive_header
: ChunkArchiveHeader
= serde_json
::from_slice(&header_data
)
541 .map_err(|err
| format_err
!("unable to parse chunk archive header - {}", err
))?
;
543 let source_datastore
= archive_header
.store
;
545 task_log
!(worker
, "File {}: chunk archive for datastore '{}'", current_file_number
, source_datastore
);
546 let datastore
= target
548 .and_then(|t
| t
.0.get_datastore(&source_datastore
));
550 if datastore
.is_some() || target
.is_none() {
551 if let Some(chunks
) = restore_chunk_archive(worker
, reader
, datastore
, verbose
)?
{
552 catalog
.start_chunk_archive(
553 Uuid
::from(header
.uuid
),
557 for digest
in chunks
.iter() {
558 catalog
.register_chunk(&digest
)?
;
560 task_log
!(worker
, "register {} chunks", chunks
.len());
561 catalog
.end_chunk_archive()?
;
562 catalog
.commit_if_large()?
;
565 } else if target
.is_some() {
566 task_log
!(worker
, "skipping...");
569 reader
.skip_data()?
; // read all data
// catalog archives are not restored here (see fast_catalog_restore)
571 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
=> {
572 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
574 let archive_header
: CatalogArchiveHeader
= serde_json
::from_slice(&header_data
)
575 .map_err(|err
| format_err
!("unable to parse catalog archive header - {}", err
))?
;
577 task_log
!(worker
, "File {}: skip catalog '{}'", current_file_number
, archive_header
.uuid
);
579 reader
.skip_data()?
; // read all data
581 _
=> bail
!("unknown content magic {:?}", header
.content_magic
),
// NOTE(review): mangled extraction — the `worker`/`verbose` parameters,
// some branches and the final return are on missing lines; code kept
// byte-identical, comments only. Decodes a chunk archive, inserting new
// chunks into the datastore (a `None` datastore means list-only).
589 fn restore_chunk_archive
<'a
>(
591 reader
: Box
<dyn 'a
+ TapeRead
>,
592 datastore
: Option
<&DataStore
>,
594 ) -> Result
<Option
<Vec
<[u8;32]>>, Error
> {
596 let mut chunks
= Vec
::new();
598 let mut decoder
= ChunkArchiveDecoder
::new(reader
);
// decode all chunks; errors fall through to the stream checks below
600 let result
: Result
<_
, Error
> = proxmox
::try_block
!({
601 while let Some((digest
, blob
)) = decoder
.next_chunk()?
{
603 worker
.check_abort()?
;
605 if let Some(datastore
) = datastore
{
606 let chunk_exists
= datastore
.cond_touch_chunk(&digest
, false)?
;
// verify the digest of unencrypted blobs before inserting
610 if blob
.crypt_mode()?
== CryptMode
::None
{
611 blob
.decode(None
, Some(&digest
))?
; // verify digest
614 task_log
!(worker
, "Insert chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
616 datastore
.insert_chunk(&blob
, &digest
)?
;
618 task_log
!(worker
, "Found existing chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
621 task_log
!(worker
, "Found chunk: {}", proxmox
::tools
::digest_to_hex(&digest
));
629 Ok(()) => Ok(Some(chunks
)),
// on a decode error: decide whether the stream was merely incomplete
631 let reader
= decoder
.reader();
633 // check if this stream is marked incomplete
634 if let Ok(true) = reader
.is_incomplete() {
635 return Ok(Some(chunks
));
638 // check if this is an aborted stream without end marker
639 if let Ok(false) = reader
.has_end_marker() {
640 worker
.log("missing stream end marker".to_string());
644 // else the archive is corrupt
// NOTE(review): mangled extraction — the `worker` parameter, match arms
// and branch return values are on missing lines; code kept byte-identical,
// comments only. Returns Result<bool, Error>; the bool presumably flags a
// complete vs. incomplete restore — TODO confirm against upstream.
650 fn restore_snapshot_archive
<'a
>(
652 reader
: Box
<dyn 'a
+ TapeRead
>,
653 snapshot_path
: &Path
,
654 ) -> Result
<bool
, Error
> {
656 let mut decoder
= pxar
::decoder
::sync
::Decoder
::from_std(reader
)?
;
657 match try_restore_snapshot_archive(worker
, &mut decoder
, snapshot_path
) {
// on error: inspect the underlying tape stream to classify the failure
660 let reader
= decoder
.input();
662 // check if this stream is marked incomplete
663 if let Ok(true) = reader
.is_incomplete() {
667 // check if this is an aborted stream without end marker
668 if let Ok(false) = reader
.has_end_marker() {
672 // else the archive is corrupt
// NOTE(review): mangled extraction — the `worker` parameter, the entry
// loop header and several closing braces are on missing lines; code kept
// byte-identical, comments only. Restores a pxar snapshot archive into
// snapshot_path and verifies the result against the contained manifest.
678 fn try_restore_snapshot_archive
<R
: pxar
::decoder
::SeqRead
>(
680 decoder
: &mut pxar
::decoder
::sync
::Decoder
<R
>,
681 snapshot_path
: &Path
,
682 ) -> Result
<(), Error
> {
// the archive must start with a directory root entry
684 let _root
= match decoder
.next() {
685 None
=> bail
!("missing root entry"),
689 pxar
::EntryKind
::Directory
=> { /* Ok */ }
690 _
=> bail
!("wrong root entry type"),
696 let root_path
= Path
::new("/");
697 let manifest_file_name
= OsStr
::new(MANIFEST_BLOB_NAME
);
699 let mut manifest
= None
;
// per-entry loop (loop header on a missing line): only plain files
// directly under "/" are accepted
702 worker
.check_abort()?
;
704 let entry
= match decoder
.next() {
706 Some(entry
) => entry?
,
708 let entry_path
= entry
.path();
711 pxar
::EntryKind
::File { .. }
=> { /* Ok */ }
712 _
=> bail
!("wrong entry type for {:?}", entry_path
),
714 match entry_path
.parent() {
715 None
=> bail
!("wrong parent for {:?}", entry_path
),
718 bail
!("wrong parent for {:?}", entry_path
);
723 let filename
= entry
.file_name();
724 let mut contents
= match decoder
.contents() {
725 None
=> bail
!("missing file content"),
726 Some(contents
) => contents
,
// each file is written to "<name>.tmp" first, then renamed atomically
729 let mut archive_path
= snapshot_path
.to_owned();
730 archive_path
.push(&filename
);
732 let mut tmp_path
= archive_path
.clone();
733 tmp_path
.set_extension("tmp");
735 if filename
== manifest_file_name
{
737 let blob
= DataBlob
::load_from_reader(&mut contents
)?
;
738 let options
= CreateOptions
::new();
739 replace_file(&tmp_path
, blob
.raw_data(), options
)?
;
741 manifest
= Some(BackupManifest
::try_from(blob
)?
);
743 let mut tmpfile
= std
::fs
::OpenOptions
::new()
748 .map_err(|err
| format_err
!("restore {:?} failed - {}", tmp_path
, err
))?
;
750 std
::io
::copy(&mut contents
, &mut tmpfile
)?
;
752 if let Err(err
) = std
::fs
::rename(&tmp_path
, &archive_path
) {
753 bail
!("Atomic rename file {:?} failed - {}", archive_path
, err
);
// a manifest is mandatory; verify every restored archive against it
758 let manifest
= match manifest
{
759 None
=> bail
!("missing manifest"),
760 Some(manifest
) => manifest
,
763 for item
in manifest
.files() {
764 let mut archive_path
= snapshot_path
.to_owned();
765 archive_path
.push(&item
.filename
);
767 match archive_type(&item
.filename
)?
{
768 ArchiveType
::DynamicIndex
=> {
769 let index
= DynamicIndexReader
::open(&archive_path
)?
;
770 let (csum
, size
) = index
.compute_csum();
771 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
773 ArchiveType
::FixedIndex
=> {
774 let index
= FixedIndexReader
::open(&archive_path
)?
;
775 let (csum
, size
) = index
.compute_csum();
776 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
778 ArchiveType
::Blob
=> {
779 let mut tmpfile
= std
::fs
::File
::open(&archive_path
)?
;
780 let (csum
, size
) = compute_file_csum(&mut tmpfile
)?
;
781 manifest
.verify_file(&item
.filename
, &csum
, size
)?
;
// finally move the manifest itself into place (rename from .tmp)
787 let mut manifest_path
= snapshot_path
.to_owned();
788 manifest_path
.push(MANIFEST_BLOB_NAME
);
789 let mut tmp_manifest_path
= manifest_path
.clone();
790 tmp_manifest_path
.set_extension("tmp");
792 if let Err(err
) = std
::fs
::rename(&tmp_manifest_path
, &manifest_path
) {
793 bail
!("Atomic rename manifest {:?} failed - {}", manifest_path
, err
);
// NOTE(review): mangled extraction — the `worker` parameter, the search
// loop header and the function tail (past the last visible line) are
// missing; code kept byte-identical, comments only. Scans the media for
// catalog archives and restores matching media catalogs.
799 /// Try to restore media catalogs (form catalog_archives)
800 pub fn fast_catalog_restore(
802 drive
: &mut Box
<dyn TapeDriver
>,
803 media_set
: &MediaSet
,
804 uuid
: &Uuid
, // current media Uuid
805 ) -> Result
<bool
, Error
> {
807 let status_path
= Path
::new(TAPE_STATUS_DIR
);
// the drive must be positioned at file 2 here, else it's an internal error
809 let current_file_number
= drive
.current_file_number()?
;
810 if current_file_number
!= 2 {
811 bail
!("fast_catalog_restore: wrong media position - internal error");
814 let mut found_catalog
= false;
816 let mut moved_to_eom
= false;
// search loop (loop header on a missing line)
819 let current_file_number
= drive
.current_file_number()?
;
821 { // limit reader scope
822 let mut reader
= match drive
.read_next_file() {
823 Err(BlockReadError
::EndOfFile
) => {
824 task_log
!(worker
, "skip unexpected filemark at pos {}", current_file_number
);
827 Err(BlockReadError
::EndOfStream
) => {
828 task_log
!(worker
, "detected EOT after {} files", current_file_number
);
831 Err(BlockReadError
::Error(err
)) => {
832 return Err(err
.into());
834 Ok(reader
) => reader
,
837 let header
: MediaContentHeader
= unsafe { reader.read_le_value()? }
;
838 if header
.magic
!= PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0
{
839 bail
!("missing MediaContentHeader");
842 if header
.content_magic
== PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
{
843 task_log
!(worker
, "found catalog at pos {}", current_file_number
);
845 let header_data
= reader
.read_exact_allocated(header
.size
as usize)?
;
847 let archive_header
: CatalogArchiveHeader
= serde_json
::from_slice(&header_data
)
848 .map_err(|err
| format_err
!("unable to parse catalog archive header - {}", err
))?
;
// ignore catalogs belonging to other media sets
850 if &archive_header
.media_set_uuid
!= media_set
.uuid() {
851 task_log
!(worker
, "skipping unrelated catalog at pos {}", current_file_number
);
852 reader
.skip_data()?
; // read all data
856 let catalog_uuid
= &archive_header
.uuid
;
858 let wanted
= media_set
864 Some(uuid
) => uuid
== catalog_uuid
,
870 task_log
!(worker
, "skip catalog because media '{}' not inventarized", catalog_uuid
);
871 reader
.skip_data()?
; // read all data
875 if catalog_uuid
== uuid
{
876 // always restore and overwrite catalog
878 // only restore if catalog does not exist
879 if MediaCatalog
::exists(status_path
, catalog_uuid
) {
880 task_log
!(worker
, "catalog for media '{}' already exists", catalog_uuid
);
881 reader
.skip_data()?
; // read all data
// copy the catalog data into a temporary database file and validate it
886 let mut file
= MediaCatalog
::create_temporary_database_file(status_path
, catalog_uuid
)?
;
888 std
::io
::copy(&mut reader
, &mut file
)?
;
890 file
.seek(SeekFrom
::Start(0))?
;
892 match MediaCatalog
::parse_catalog_header(&mut file
)?
{
893 (true, Some(media_uuid
), Some(media_set_uuid
)) => {
894 if &media_uuid
!= catalog_uuid
{
895 task_log
!(worker
, "catalog uuid missmatch at pos {}", current_file_number
);
898 if media_set_uuid
!= archive_header
.media_set_uuid
{
899 task_log
!(worker
, "catalog media_set missmatch at pos {}", current_file_number
);
903 MediaCatalog
::finish_temporary_database(status_path
, &media_uuid
, true)?
;
905 if catalog_uuid
== uuid
{
906 task_log
!(worker
, "successfully restored catalog");
909 task_log
!(worker
, "successfully restored related catalog {}", media_uuid
);
913 task_warn
!(worker
, "got incomplete catalog header - skip file");
923 break; // already done - stop
// nothing at the current position: seek to the last file and retry
927 task_log
!(worker
, "searching for catalog at EOT (moving to EOT)");
928 drive
.move_to_last_file()?
;
930 let new_file_number
= drive
.current_file_number()?
;
932 if new_file_number
< (current_file_number
+ 1) {
933 break; // no new content - stop