]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/restore.rs
bump version to 1.1.0-1
[proxmox-backup.git] / src / api2 / tape / restore.rs
1 use std::path::Path;
2 use std::ffi::OsStr;
3 use std::collections::{HashMap, HashSet};
4 use std::convert::TryFrom;
5 use std::io::{Seek, SeekFrom};
6 use std::sync::Arc;
7
8 use anyhow::{bail, format_err, Error};
9 use serde_json::Value;
10
11 use proxmox::{
12 api::{
13 api,
14 RpcEnvironment,
15 RpcEnvironmentType,
16 Router,
17 Permission,
18 schema::parse_property_string,
19 section_config::SectionConfigData,
20 },
21 tools::{
22 Uuid,
23 io::ReadExt,
24 fs::{
25 replace_file,
26 CreateOptions,
27 },
28 },
29 };
30
31 use crate::{
32 task_log,
33 task_warn,
34 task::TaskState,
35 tools::compute_file_csum,
36 api2::types::{
37 DATASTORE_MAP_ARRAY_SCHEMA,
38 DATASTORE_MAP_LIST_SCHEMA,
39 DRIVE_NAME_SCHEMA,
40 UPID_SCHEMA,
41 Authid,
42 Userid,
43 },
44 config::{
45 self,
46 cached_user_info::CachedUserInfo,
47 acl::{
48 PRIV_DATASTORE_BACKUP,
49 PRIV_DATASTORE_MODIFY,
50 PRIV_TAPE_READ,
51 },
52 },
53 backup::{
54 archive_type,
55 MANIFEST_BLOB_NAME,
56 CryptMode,
57 DataStore,
58 BackupDir,
59 DataBlob,
60 BackupManifest,
61 ArchiveType,
62 IndexFile,
63 DynamicIndexReader,
64 FixedIndexReader,
65 },
66 server::{
67 lookup_user_email,
68 WorkerTask,
69 },
70 tape::{
71 TAPE_STATUS_DIR,
72 TapeRead,
73 BlockReadError,
74 MediaId,
75 MediaSet,
76 MediaCatalog,
77 Inventory,
78 lock_media_set,
79 file_formats::{
80 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
81 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
82 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
83 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
84 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
85 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
86 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
87 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
88 MediaContentHeader,
89 ChunkArchiveHeader,
90 ChunkArchiveDecoder,
91 SnapshotArchiveHeader,
92 CatalogArchiveHeader,
93 },
94 drive::{
95 TapeDriver,
96 request_and_load_media,
97 lock_tape_device,
98 set_tape_device_state,
99 },
100 },
101 };
102
/// Maps source datastore names (as found on tape) to target [`DataStore`] instances.
pub struct DataStoreMap {
    // explicit "source=target" mappings, keyed by the source datastore name
    map: HashMap<String, Arc<DataStore>>,
    // fallback target used when no explicit mapping matches
    default: Option<Arc<DataStore>>,
}
107
108 impl TryFrom<String> for DataStoreMap {
109 type Error = Error;
110
111 fn try_from(value: String) -> Result<Self, Error> {
112 let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?;
113 let mut mapping: Vec<String> = value
114 .as_array()
115 .unwrap()
116 .iter()
117 .map(|v| v.as_str().unwrap().to_string())
118 .collect();
119
120 let mut map = HashMap::new();
121 let mut default = None;
122 while let Some(mut store) = mapping.pop() {
123 if let Some(index) = store.find('=') {
124 let mut target = store.split_off(index);
125 target.remove(0); // remove '='
126 let datastore = DataStore::lookup_datastore(&target)?;
127 map.insert(store, datastore);
128 } else if default.is_none() {
129 default = Some(DataStore::lookup_datastore(&store)?);
130 } else {
131 bail!("multiple default stores given");
132 }
133 }
134
135 Ok(Self { map, default })
136 }
137 }
138
139 impl DataStoreMap {
140 fn used_datastores<'a>(&self) -> HashSet<&str> {
141 let mut set = HashSet::new();
142 for store in self.map.values() {
143 set.insert(store.name());
144 }
145
146 if let Some(ref store) = self.default {
147 set.insert(store.name());
148 }
149
150 set
151 }
152
153 fn get_datastore(&self, source: &str) -> Option<&DataStore> {
154 if let Some(store) = self.map.get(source) {
155 return Some(&store);
156 }
157 if let Some(ref store) = self.default {
158 return Some(&store);
159 }
160
161 return None;
162 }
163 }
164
165 pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
166
167 #[api(
168 input: {
169 properties: {
170 store: {
171 schema: DATASTORE_MAP_LIST_SCHEMA,
172 },
173 drive: {
174 schema: DRIVE_NAME_SCHEMA,
175 },
176 "media-set": {
177 description: "Media set UUID.",
178 type: String,
179 },
180 "notify-user": {
181 type: Userid,
182 optional: true,
183 },
184 owner: {
185 type: Authid,
186 optional: true,
187 },
188 },
189 },
190 returns: {
191 schema: UPID_SCHEMA,
192 },
193 access: {
194 // Note: parameters are no uri parameter, so we need to test inside function body
195 description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
196 and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
197 permission: &Permission::Anybody,
198 },
199 )]
200 /// Restore data from media-set
201 pub fn restore(
202 store: String,
203 drive: String,
204 media_set: String,
205 notify_user: Option<Userid>,
206 owner: Option<Authid>,
207 rpcenv: &mut dyn RpcEnvironment,
208 ) -> Result<Value, Error> {
209 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
210 let user_info = CachedUserInfo::new()?;
211
212 let store_map = DataStoreMap::try_from(store)
213 .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
214 let used_datastores = store_map.used_datastores();
215 if used_datastores.len() == 0 {
216 bail!("no datastores given");
217 }
218
219 for store in used_datastores.iter() {
220 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
221 if (privs & PRIV_DATASTORE_BACKUP) == 0 {
222 bail!("no permissions on /datastore/{}", store);
223 }
224
225 if let Some(ref owner) = owner {
226 let correct_owner = owner == &auth_id
227 || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());
228
229 // same permission as changing ownership after syncing
230 if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
231 bail!("no permission to restore as '{}'", owner);
232 }
233 }
234 }
235
236 let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
237 if (privs & PRIV_TAPE_READ) == 0 {
238 bail!("no permissions on /tape/drive/{}", drive);
239 }
240
241 let media_set_uuid = media_set.parse()?;
242
243 let status_path = Path::new(TAPE_STATUS_DIR);
244
245 let _lock = lock_media_set(status_path, &media_set_uuid, None)?;
246
247 let inventory = Inventory::load(status_path)?;
248
249 let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
250
251 let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
252 if (privs & PRIV_TAPE_READ) == 0 {
253 bail!("no permissions on /tape/pool/{}", pool);
254 }
255
256 let (drive_config, _digest) = config::drive::config()?;
257
258 // early check/lock before starting worker
259 let drive_lock = lock_tape_device(&drive_config, &drive)?;
260
261 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
262
263 let taskid = used_datastores
264 .iter()
265 .map(|s| s.to_string())
266 .collect::<Vec<String>>()
267 .join(", ");
268 let upid_str = WorkerTask::new_thread(
269 "tape-restore",
270 Some(taskid),
271 auth_id.clone(),
272 to_stdout,
273 move |worker| {
274 let _drive_lock = drive_lock; // keep lock guard
275
276 set_tape_device_state(&drive, &worker.upid().to_string())?;
277
278 let members = inventory.compute_media_set_members(&media_set_uuid)?;
279
280 let media_list = members.media_list();
281
282 let mut media_id_list = Vec::new();
283
284 let mut encryption_key_fingerprint = None;
285
286 for (seq_nr, media_uuid) in media_list.iter().enumerate() {
287 match media_uuid {
288 None => {
289 bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
290 }
291 Some(media_uuid) => {
292 let media_id = inventory.lookup_media(media_uuid).unwrap();
293 if let Some(ref set) = media_id.media_set_label { // always true here
294 if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
295 encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
296 }
297 }
298 media_id_list.push(media_id);
299 }
300 }
301 }
302
303 task_log!(worker, "Restore mediaset '{}'", media_set);
304 if let Some(fingerprint) = encryption_key_fingerprint {
305 task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
306 }
307 task_log!(worker, "Pool: {}", pool);
308 task_log!(worker, "Datastore(s):");
309 store_map
310 .used_datastores()
311 .iter()
312 .for_each(|store| task_log!(worker, "\t{}", store));
313 task_log!(worker, "Drive: {}", drive);
314 task_log!(
315 worker,
316 "Required media list: {}",
317 media_id_list.iter()
318 .map(|media_id| media_id.label.label_text.as_str())
319 .collect::<Vec<&str>>()
320 .join(";")
321 );
322
323 for media_id in media_id_list.iter() {
324 request_and_restore_media(
325 &worker,
326 media_id,
327 &drive_config,
328 &drive,
329 &store_map,
330 &auth_id,
331 &notify_user,
332 &owner,
333 )?;
334 }
335
336 task_log!(worker, "Restore mediaset '{}' done", media_set);
337
338 if let Err(err) = set_tape_device_state(&drive, "") {
339 task_log!(
340 worker,
341 "could not unset drive state for {}: {}",
342 drive,
343 err
344 );
345 }
346
347 Ok(())
348 }
349 )?;
350
351 Ok(upid_str.into())
352 }
353
354 /// Request and restore complete media without using existing catalog (create catalog instead)
355 pub fn request_and_restore_media(
356 worker: &WorkerTask,
357 media_id: &MediaId,
358 drive_config: &SectionConfigData,
359 drive_name: &str,
360 store_map: &DataStoreMap,
361 authid: &Authid,
362 notify_user: &Option<Userid>,
363 owner: &Option<Authid>,
364 ) -> Result<(), Error> {
365 let media_set_uuid = match media_id.media_set_label {
366 None => bail!("restore_media: no media set - internal error"),
367 Some(ref set) => &set.uuid,
368 };
369
370 let email = notify_user
371 .as_ref()
372 .and_then(|userid| lookup_user_email(userid))
373 .or_else(|| lookup_user_email(&authid.clone().into()));
374
375 let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;
376
377 match info.media_set_label {
378 None => {
379 bail!("missing media set label on media {} ({})",
380 media_id.label.label_text, media_id.label.uuid);
381 }
382 Some(ref set) => {
383 if &set.uuid != media_set_uuid {
384 bail!("wrong media set label on media {} ({} != {})",
385 media_id.label.label_text, media_id.label.uuid,
386 media_set_uuid);
387 }
388 let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
389 .map(|fp| (fp, set.uuid.clone()));
390
391 drive.set_encryption(encrypt_fingerprint)?;
392 }
393 }
394
395 let restore_owner = owner.as_ref().unwrap_or(authid);
396
397 restore_media(
398 worker,
399 &mut drive,
400 &info,
401 Some((&store_map, restore_owner)),
402 false,
403 )
404 }
405
406 /// Restore complete media content and catalog
407 ///
408 /// Only create the catalog if target is None.
409 pub fn restore_media(
410 worker: &WorkerTask,
411 drive: &mut Box<dyn TapeDriver>,
412 media_id: &MediaId,
413 target: Option<(&DataStoreMap, &Authid)>,
414 verbose: bool,
415 ) -> Result<(), Error> {
416
417 let status_path = Path::new(TAPE_STATUS_DIR);
418 let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
419
420 loop {
421 let current_file_number = drive.current_file_number()?;
422 let reader = match drive.read_next_file() {
423 Err(BlockReadError::EndOfFile) => {
424 task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
425 continue;
426 }
427 Err(BlockReadError::EndOfStream) => {
428 task_log!(worker, "detected EOT after {} files", current_file_number);
429 break;
430 }
431 Err(BlockReadError::Error(err)) => {
432 return Err(err.into());
433 }
434 Ok(reader) => reader,
435 };
436
437 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
438 }
439
440 MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
441
442 Ok(())
443 }
444
/// Restore a single tape file (one archive) from `reader`.
///
/// Dispatches on the archive's content magic: v1.1 snapshot archives are
/// extracted into the mapped target datastore, v1.1 chunk archives have
/// their chunks inserted, catalog archives are skipped. Every processed
/// archive is also registered in the temporary media `catalog`. If `target`
/// is `None`, archive data is skipped and only the catalog is built.
/// Fails on label magics and on deprecated v1.0 archive formats.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {
    // every tape file starts with a MediaContentHeader
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels live at fixed positions, not inside the data area
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            // v1.1 carries a JSON header (store + snapshot) of `header.size` bytes
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_dir)?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    // only restore if the snapshot dir did not already exist
                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker, reader, &path) {
                            Err(err) => {
                                // hard failure: remove partial snapshot dir and abort
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                // stream was incomplete: drop partial data, but continue
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        // data fully consumed by restore_snapshot_archive
                        return Ok(());
                    }
                } else {
                    // no mapping for this source datastore
                    task_log!(worker, "skipping...");
                }
            }

            // not restored (no target / mapping / already exists): skip over the
            // data, but still register complete snapshots in the catalog
            reader.skip_data()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            // v1.1 carries a JSON header naming the source datastore
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            // restore chunks when we have a target store, or scan them for the
            // catalog in catalog-only mode (target.is_none())
            if datastore.is_some() || target.is_none() {
                if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                    catalog.start_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                    )?;
                    for digest in chunks.iter() {
                        catalog.register_chunk(&digest)?;
                    }
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.end_chunk_archive()?;
                    catalog.commit_if_large()?;
                }
                // restore_chunk_archive consumed the data stream
                return Ok(());
            } else if target.is_some() {
                // target given, but no mapping for this source datastore
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            // catalogs are rebuilt during restore, so only log and skip them here
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    // persist catalog updates made for this archive
    catalog.commit()?;

    Ok(())
}
588
/// Decode a chunk archive and insert its chunks into `datastore`.
///
/// With `datastore == None` the chunks are only scanned (digest collection
/// for the catalog). Returns:
/// - `Ok(Some(digests))` on success, or when the stream is explicitly
///   marked incomplete (partial digest list);
/// - `Ok(None)` for an aborted stream without end marker (caller must not
///   register these chunks);
/// - `Err(_)` if the archive is corrupt.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {

            // allow the user to abort a long-running restore
            worker.check_abort()?;

            if let Some(datastore) = datastore {
                // touch (not create) - tells us whether the chunk already exists
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    // unencrypted blobs can additionally be checked against the digest
                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
            } else if verbose {
                // catalog-only mode: just record the digest
                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
649
650 fn restore_snapshot_archive<'a>(
651 worker: &WorkerTask,
652 reader: Box<dyn 'a + TapeRead>,
653 snapshot_path: &Path,
654 ) -> Result<bool, Error> {
655
656 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
657 match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
658 Ok(()) => Ok(true),
659 Err(err) => {
660 let reader = decoder.input();
661
662 // check if this stream is marked incomplete
663 if let Ok(true) = reader.is_incomplete() {
664 return Ok(false);
665 }
666
667 // check if this is an aborted stream without end marker
668 if let Ok(false) = reader.has_end_marker() {
669 return Ok(false);
670 }
671
672 // else the archive is corrupt
673 Err(err)
674 }
675 }
676 }
677
/// Extract a snapshot pxar archive into `snapshot_path` and verify it.
///
/// Expects a flat archive: a root directory containing only regular files.
/// The manifest blob is written to a `.tmp` file first; all other files are
/// written to `.tmp` and renamed into place immediately. After extraction,
/// every file listed in the manifest is checksum-verified, and only then is
/// the manifest `.tmp` file renamed into place to commit the snapshot.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // the first entry must be the archive's root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only regular files directly below the root are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // keep the manifest as .tmp until verification succeeds (see below)
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            // data files are renamed into place right away
            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify the checksum of every restored file against the manifest
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    // renaming the manifest into place marks the snapshot as complete
    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}
798
/// Try to restore media catalogs (from catalog_archives)
///
/// Expects the drive positioned at file number 2 (directly after the media
/// label and media set label). Scans forward for catalog archives belonging
/// to `media_set`, restores them, then seeks to end-of-media once to pick up
/// the most recent catalogs written there. Returns `Ok(true)` if the catalog
/// for the current media (`uuid`) was restored.
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    // sanity check: we rely on being positioned right after the labels
    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    // whether we already did the one-time seek to end-of-media
    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        { // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

                // catalog must belong to the media set we are restoring
                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                // only restore catalogs for media that are part of the set's inventory
                let wanted = media_set
                    .media_list()
                    .iter()
                    .find(|e| {
                        match e {
                            None => false,
                            Some(uuid) => uuid == catalog_uuid,
                        }
                    })
                    .is_some();

                if !wanted {
                    task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                // copy the catalog archive payload into a temporary database file
                let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                // validate the copied catalog before committing it
                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(worker, "successfully restored related catalog {}", media_uuid);
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        // non-catalog file found: jump to end-of-media, where the newest
        // catalogs are written; do this at most once
        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}