]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/restore.rs
api2/tape/restore: enable restore mapping of datastores
[proxmox-backup.git] / src / api2 / tape / restore.rs
1 use std::path::Path;
2 use std::ffi::OsStr;
3 use std::collections::{HashMap, HashSet};
4 use std::convert::TryFrom;
5 use std::io::{Seek, SeekFrom};
6 use std::sync::Arc;
7
8 use anyhow::{bail, format_err, Error};
9 use serde_json::Value;
10
11 use proxmox::{
12 api::{
13 api,
14 RpcEnvironment,
15 RpcEnvironmentType,
16 Router,
17 Permission,
18 schema::parse_property_string,
19 section_config::SectionConfigData,
20 },
21 tools::{
22 Uuid,
23 io::ReadExt,
24 fs::{
25 replace_file,
26 CreateOptions,
27 },
28 },
29 };
30
31 use crate::{
32 task_log,
33 task_warn,
34 task::TaskState,
35 tools::compute_file_csum,
36 api2::types::{
37 DATASTORE_MAP_ARRAY_SCHEMA,
38 DATASTORE_MAP_LIST_SCHEMA,
39 DRIVE_NAME_SCHEMA,
40 UPID_SCHEMA,
41 Authid,
42 Userid,
43 },
44 config::{
45 self,
46 cached_user_info::CachedUserInfo,
47 acl::{
48 PRIV_DATASTORE_BACKUP,
49 PRIV_DATASTORE_MODIFY,
50 PRIV_TAPE_READ,
51 },
52 },
53 backup::{
54 archive_type,
55 MANIFEST_BLOB_NAME,
56 CryptMode,
57 DataStore,
58 BackupDir,
59 DataBlob,
60 BackupManifest,
61 ArchiveType,
62 IndexFile,
63 DynamicIndexReader,
64 FixedIndexReader,
65 },
66 server::{
67 lookup_user_email,
68 WorkerTask,
69 },
70 tape::{
71 TAPE_STATUS_DIR,
72 TapeRead,
73 MediaId,
74 MediaSet,
75 MediaCatalog,
76 Inventory,
77 lock_media_set,
78 file_formats::{
79 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
80 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
81 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
82 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
83 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
84 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
85 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
86 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
87 MediaContentHeader,
88 ChunkArchiveHeader,
89 ChunkArchiveDecoder,
90 SnapshotArchiveHeader,
91 CatalogArchiveHeader,
92 },
93 drive::{
94 TapeDriver,
95 request_and_load_media,
96 lock_tape_device,
97 set_tape_device_state,
98 },
99 },
100 };
101
/// Maps datastore names found on tape (sources) to local target datastores.
///
/// Built from a `source=target` property list; a list entry without `=`
/// becomes the default target used for any source without an explicit mapping.
pub struct DataStoreMap {
    // explicit source-name -> target-datastore mappings
    map: HashMap<String, Arc<DataStore>>,
    // fallback target used when no explicit mapping matches
    default: Option<Arc<DataStore>>,
}
106
107 impl TryFrom<String> for DataStoreMap {
108 type Error = Error;
109
110 fn try_from(value: String) -> Result<Self, Error> {
111 let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?;
112 let mut mapping: Vec<String> = value
113 .as_array()
114 .unwrap()
115 .iter()
116 .map(|v| v.as_str().unwrap().to_string())
117 .collect();
118
119 let mut map = HashMap::new();
120 let mut default = None;
121 while let Some(mut store) = mapping.pop() {
122 if let Some(index) = store.find('=') {
123 let mut target = store.split_off(index);
124 target.remove(0); // remove '='
125 let datastore = DataStore::lookup_datastore(&target)?;
126 map.insert(store, datastore);
127 } else if default.is_none() {
128 default = Some(DataStore::lookup_datastore(&store)?);
129 } else {
130 bail!("multiple default stores given");
131 }
132 }
133
134 Ok(Self { map, default })
135 }
136 }
137
138 impl DataStoreMap {
139 fn used_datastores<'a>(&self) -> HashSet<&str> {
140 let mut set = HashSet::new();
141 for store in self.map.values() {
142 set.insert(store.name());
143 }
144
145 if let Some(ref store) = self.default {
146 set.insert(store.name());
147 }
148
149 set
150 }
151
152 fn get_datastore(&self, source: &str) -> Option<&DataStore> {
153 if let Some(store) = self.map.get(source) {
154 return Some(&store);
155 }
156 if let Some(ref store) = self.default {
157 return Some(&store);
158 }
159
160 return None;
161 }
162 }
163
164 pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
165
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
///
/// Parses the datastore mapping, checks the required privileges
/// (Datastore.Backup on every target store, Tape.Read on drive and pool),
/// locks the media set and the drive, then spawns a worker task that
/// restores every media of the set in sequence. Returns the worker UPID.
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let store_map = DataStoreMap::try_from(store)
        .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
    let used_datastores = store_map.used_datastores();
    if used_datastores.len() == 0 {
        bail!("no datastores given");
    }

    // check Datastore.Backup on every datastore the mapping writes to;
    // restoring for a different owner additionally needs Datastore.Modify
    for store in used_datastores.iter() {
        let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        if (privs & PRIV_DATASTORE_BACKUP) == 0 {
            bail!("no permissions on /datastore/{}", store);
        }

        if let Some(ref owner) = owner {
            let correct_owner = owner == &auth_id
                || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());

            // same permission as changing ownership after syncing
            if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
                bail!("no permission to restore as '{}'", owner);
            }
        }
    }

    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    // lock the media set so no concurrent task modifies it while we restore
    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // worker task id lists all target datastores
    let taskid = used_datastores
        .iter()
        .map(|s| s.to_string())
        .collect::<Vec<String>>()
        .join(", ");
    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(taskid),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            // resolve all set members up-front; bail if the set is incomplete
            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            // remember the first encryption key fingerprint found (for logging)
            let mut encryption_key_fingerprint = None;

            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore(s):");
            store_map
                .used_datastores()
                .iter()
                .for_each(|store| task_log!(worker, "\t{}", store));
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

            // restore each media of the set in sequence
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &store_map,
                    &auth_id,
                    &notify_user,
                    &owner,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);

            // best effort: clear the drive state; a failure here is only logged
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(
                    worker,
                    "could not unset drive state for {}: {}",
                    drive,
                    err
                );
            }

            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
352
353 /// Request and restore complete media without using existing catalog (create catalog instead)
354 pub fn request_and_restore_media(
355 worker: &WorkerTask,
356 media_id: &MediaId,
357 drive_config: &SectionConfigData,
358 drive_name: &str,
359 store_map: &DataStoreMap,
360 authid: &Authid,
361 notify_user: &Option<Userid>,
362 owner: &Option<Authid>,
363 ) -> Result<(), Error> {
364 let media_set_uuid = match media_id.media_set_label {
365 None => bail!("restore_media: no media set - internal error"),
366 Some(ref set) => &set.uuid,
367 };
368
369 let email = notify_user
370 .as_ref()
371 .and_then(|userid| lookup_user_email(userid))
372 .or_else(|| lookup_user_email(&authid.clone().into()));
373
374 let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;
375
376 match info.media_set_label {
377 None => {
378 bail!("missing media set label on media {} ({})",
379 media_id.label.label_text, media_id.label.uuid);
380 }
381 Some(ref set) => {
382 if &set.uuid != media_set_uuid {
383 bail!("wrong media set label on media {} ({} != {})",
384 media_id.label.label_text, media_id.label.uuid,
385 media_set_uuid);
386 }
387 let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
388 .map(|fp| (fp, set.uuid.clone()));
389
390 drive.set_encryption(encrypt_fingerprint)?;
391 }
392 }
393
394 let restore_owner = owner.as_ref().unwrap_or(authid);
395
396 restore_media(
397 worker,
398 &mut drive,
399 &info,
400 Some((&store_map, restore_owner)),
401 false,
402 )
403 }
404
405 /// Restore complete media content and catalog
406 ///
407 /// Only create the catalog if target is None.
408 pub fn restore_media(
409 worker: &WorkerTask,
410 drive: &mut Box<dyn TapeDriver>,
411 media_id: &MediaId,
412 target: Option<(&DataStoreMap, &Authid)>,
413 verbose: bool,
414 ) -> Result<(), Error> {
415
416 let status_path = Path::new(TAPE_STATUS_DIR);
417 let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
418
419 loop {
420 let current_file_number = drive.current_file_number()?;
421 let reader = match drive.read_next_file()? {
422 None => {
423 task_log!(worker, "detected EOT after {} files", current_file_number);
424 break;
425 }
426 Some(reader) => reader,
427 };
428
429 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
430 }
431
432 MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
433
434 Ok(())
435 }
436
/// Restore a single tape file, dispatching on its content magic.
///
/// Reads the `MediaContentHeader` and handles snapshot archives, chunk
/// archives and catalog archives. When `target` is `None`, no data is
/// written to any datastore - only the media catalog is updated.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {
    // SAFETY: MediaContentHeader is a plain, fixed-layout on-tape struct
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels only appear at the start of the media, never here
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_dir)?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker, reader, &path) {
                            Err(err) => {
                                // clean up the partial restore before bailing out
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                // incomplete/aborted archive - remove and keep going
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        return Ok(());
                    }
                } else {
                    // target given, but no mapping for this source datastore
                    task_log!(worker, "skipping...");
                }
            }

            // not restored (no target, unmapped store, or snapshot already
            // exists locally): consume the archive, and still register it
            // in the catalog if the stream is complete
            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            // restore chunks when a target datastore is mapped, or run in
            // scan-only (catalog rebuild) mode when there is no target at all
            if datastore.is_some() || target.is_none() {
                if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                    catalog.start_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                    )?;
                    for digest in chunks.iter() {
                        catalog.register_chunk(&digest)?;
                    }
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.end_chunk_archive()?;
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                // target given, but no mapping for this source datastore
                task_log!(worker, "skipping...");
            }

            reader.skip_to_end()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            // catalogs are rebuilt from the restored data, so skip them here
            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);

            reader.skip_to_end()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
580
/// Restore the chunks of a single chunk archive from tape.
///
/// Returns `Ok(Some(digests))` with all chunk digests read so far when
/// the archive was fully read or the stream is marked incomplete,
/// `Ok(None)` for an aborted stream without end marker, and `Err` when
/// the archive is corrupt. When `datastore` is `None` the chunks are
/// only enumerated (catalog-only mode), not written.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {

            worker.check_abort()?;

            if let Some(datastore) = datastore {
                // touch existing chunks instead of re-writing them
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
            } else if verbose {
                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
641
642 fn restore_snapshot_archive<'a>(
643 worker: &WorkerTask,
644 reader: Box<dyn 'a + TapeRead>,
645 snapshot_path: &Path,
646 ) -> Result<bool, Error> {
647
648 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
649 match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
650 Ok(()) => Ok(true),
651 Err(err) => {
652 let reader = decoder.input();
653
654 // check if this stream is marked incomplete
655 if let Ok(true) = reader.is_incomplete() {
656 return Ok(false);
657 }
658
659 // check if this is an aborted stream without end marker
660 if let Ok(false) = reader.has_end_marker() {
661 return Ok(false);
662 }
663
664 // else the archive is corrupt
665 Err(err)
666 }
667 }
668 }
669
/// Restore all files of a snapshot archive into `snapshot_path`.
///
/// The archive must be flat: plain files directly below the root
/// directory. Every file is written to a "*.tmp" path first; index and
/// blob files are renamed into place immediately, while the manifest is
/// renamed only at the very end, after all restored files were verified
/// against its checksums - so a snapshot directory containing a manifest
/// is guaranteed to be complete.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // the first entry must be the root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only plain files directly below "/" are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // keep the manifest as "*.tmp" for now; it is committed last,
            // after all files were verified
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify every restored file against the manifest checksums
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}
790
/// Try to restore media catalogs (from catalog archives)
///
/// Expects the drive positioned at file number 2 (right after the media
/// labels). Since catalog archives are written at the end of the media,
/// the function seeks to EOT on the first non-catalog file and retries
/// from there. Every catalog belonging to the given media set is
/// restored (the one for `uuid`, the currently loaded media, is always
/// overwritten; others only if missing). Returns `true` when the
/// catalog for `uuid` was restored.
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        { // limit reader scope
            let mut reader = match drive.read_next_file()? {
                None => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Some(reader) => reader,
            };

            // SAFETY: MediaContentHeader is a plain, fixed-layout on-tape struct
            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
                    reader.skip_to_end()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                // only restore catalogs for media we actually know about
                let wanted = media_set
                    .media_list()
                    .iter()
                    .find(|e| {
                        match e {
                            None => false,
                            Some(uuid) => uuid == catalog_uuid,
                        }
                    })
                    .is_some();

                if !wanted {
                    task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
                    reader.skip_to_end()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
                        reader.skip_to_end()?; // read all data
                        continue;
                    }
                }

                let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                // sanity-check the copied catalog before committing it
                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(worker, "successfully restored related catalog {}", media_uuid);
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        // first non-catalog file: catalogs live at the end of the media,
        // so move there and try again
        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}