]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/restore.rs
3008aec8a6b318e51c891df084468be741e2d499
[proxmox-backup.git] / src / api2 / tape / restore.rs
1 use std::collections::{BTreeMap, HashMap, HashSet};
2 use std::convert::TryFrom;
3 use std::ffi::OsStr;
4 use std::io::{Seek, SeekFrom};
5 use std::path::{Path, PathBuf};
6 use std::sync::Arc;
7
8 use anyhow::{bail, format_err, Error};
9 use serde_json::Value;
10
11 use proxmox_io::ReadExt;
12 use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
13 use proxmox_schema::api;
14 use proxmox_section_config::SectionConfigData;
15 use proxmox_sys::fs::{replace_file, CreateOptions};
16 use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
17 use proxmox_uuid::Uuid;
18
19 use pbs_api_types::{
20 Authid, CryptMode, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
21 DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ,
22 TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
23 };
24 use pbs_config::CachedUserInfo;
25 use pbs_datastore::backup_info::BackupDir;
26 use pbs_datastore::dynamic_index::DynamicIndexReader;
27 use pbs_datastore::fixed_index::FixedIndexReader;
28 use pbs_datastore::index::IndexFile;
29 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
30 use pbs_datastore::{DataBlob, DataStore};
31 use pbs_tape::{
32 BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
33 };
34 use proxmox_rest_server::WorkerTask;
35
36 use crate::{
37 server::lookup_user_email,
38 tape::{
39 drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver},
40 file_formats::{
41 CatalogArchiveHeader, ChunkArchiveDecoder, ChunkArchiveHeader, SnapshotArchiveHeader,
42 PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
43 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
44 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
45 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
46 },
47 lock_media_set, Inventory, MediaCatalog, MediaId, MediaSet, MediaSetCatalog,
48 TAPE_STATUS_DIR,
49 },
50 tools::parallel_handler::ParallelHandler,
51 };
52
53 const RESTORE_TMP_DIR: &str = "/var/tmp/proxmox-backup";
54
/// Maps source datastore names (as recorded on tape) to target datastores.
pub struct DataStoreMap {
    // explicit "source=target" mappings, keyed by source datastore name
    map: HashMap<String, Arc<DataStore>>,
    // fallback target for any source without an explicit mapping
    default: Option<Arc<DataStore>>,
}
59
60 impl TryFrom<String> for DataStoreMap {
61 type Error = Error;
62
63 fn try_from(value: String) -> Result<Self, Error> {
64 let value = DATASTORE_MAP_ARRAY_SCHEMA.parse_property_string(&value)?;
65 let mut mapping: Vec<String> = value
66 .as_array()
67 .unwrap()
68 .iter()
69 .map(|v| v.as_str().unwrap().to_string())
70 .collect();
71
72 let mut map = HashMap::new();
73 let mut default = None;
74 while let Some(mut store) = mapping.pop() {
75 if let Some(index) = store.find('=') {
76 let mut target = store.split_off(index);
77 target.remove(0); // remove '='
78 let datastore = DataStore::lookup_datastore(&target)?;
79 map.insert(store, datastore);
80 } else if default.is_none() {
81 default = Some(DataStore::lookup_datastore(&store)?);
82 } else {
83 bail!("multiple default stores given");
84 }
85 }
86
87 Ok(Self { map, default })
88 }
89 }
90
91 impl DataStoreMap {
92 fn used_datastores<'a>(&self) -> HashSet<&str> {
93 let mut set = HashSet::new();
94 for store in self.map.values() {
95 set.insert(store.name());
96 }
97
98 if let Some(ref store) = self.default {
99 set.insert(store.name());
100 }
101
102 set
103 }
104
105 fn get_datastore(&self, source: &str) -> Option<Arc<DataStore>> {
106 if let Some(store) = self.map.get(source) {
107 return Some(Arc::clone(store));
108 }
109 if let Some(ref store) = self.default {
110 return Some(Arc::clone(store));
111 }
112
113 return None;
114 }
115 }
116
117 fn check_datastore_privs(
118 user_info: &CachedUserInfo,
119 store: &str,
120 auth_id: &Authid,
121 owner: &Option<Authid>,
122 ) -> Result<(), Error> {
123 let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
124 if (privs & PRIV_DATASTORE_BACKUP) == 0 {
125 bail!("no permissions on /datastore/{}", store);
126 }
127
128 if let Some(ref owner) = owner {
129 let correct_owner = owner == auth_id
130 || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());
131
132 // same permission as changing ownership after syncing
133 if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
134 bail!("no permission to restore as '{}'", owner);
135 }
136 }
137
138 Ok(())
139 }
140
/// API sub-router: POST triggers a tape restore ([`restore`]).
pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
142
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            "snapshots": {
                description: "List of snapshots.",
                type: Array,
                optional: true,
                items: {
                    schema: TAPE_RESTORE_SNAPSHOT_SCHEMA,
                },
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
///
/// Checks all required permissions, locks the media set and the tape drive,
/// then spawns a worker task that either restores the full media set or only
/// the selected snapshots (when `snapshots` is given). Returns the worker UPID.
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    snapshots: Option<Vec<String>>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // parse the "source=target" mapping list (with optional default store)
    let store_map = DataStoreMap::try_from(store)
        .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
    let used_datastores = store_map.used_datastores();
    if used_datastores.is_empty() {
        bail!("no datastores given");
    }

    // Datastore.Backup (and possibly ownership) on every target datastore
    for store in used_datastores.iter() {
        check_datastore_privs(&user_info, store, &auth_id, &owner)?;
    }

    // Tape.Read on the drive
    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    // lock the media set for the whole restore
    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    // Tape.Read on the pool the media set belongs to
    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // task id shows all target datastores
    let taskid = used_datastores
        .iter()
        .map(|s| s.to_string())
        .collect::<Vec<String>>()
        .join(", ");

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(taskid),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            // restore as the requesting user unless an explicit owner was given
            let restore_owner = owner.as_ref().unwrap_or(&auth_id);

            // notification address: explicit notify-user, else the requester
            let email = notify_user
                .as_ref()
                .and_then(|userid| lookup_user_email(userid))
                .or_else(|| lookup_user_email(&auth_id.clone().into()));

            task_log!(worker, "Mediaset '{}'", media_set);
            task_log!(worker, "Pool: {}", pool);

            // snapshot list given -> selective restore, otherwise full restore
            let res = if let Some(snapshots) = snapshots {
                restore_list_worker(
                    worker.clone(),
                    snapshots,
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            } else {
                restore_full_worker(
                    worker.clone(),
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            };

            if res.is_ok() {
                task_log!(worker, "Restore mediaset '{}' done", media_set);
            }

            // best effort: clear the drive state even if the restore failed
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(worker, "could not unset drive state for {}: {}", drive, err);
            }

            res
        },
    )?;

    Ok(upid_str.into())
}
300
301 fn restore_full_worker(
302 worker: Arc<WorkerTask>,
303 inventory: Inventory,
304 media_set_uuid: Uuid,
305 drive_config: SectionConfigData,
306 drive_name: &str,
307 store_map: DataStoreMap,
308 restore_owner: &Authid,
309 email: Option<String>,
310 ) -> Result<(), Error> {
311 let members = inventory.compute_media_set_members(&media_set_uuid)?;
312
313 let media_list = members.media_list();
314
315 let mut media_id_list = Vec::new();
316
317 let mut encryption_key_fingerprint = None;
318
319 for (seq_nr, media_uuid) in media_list.iter().enumerate() {
320 match media_uuid {
321 None => {
322 bail!(
323 "media set {} is incomplete (missing member {}).",
324 media_set_uuid,
325 seq_nr
326 );
327 }
328 Some(media_uuid) => {
329 let media_id = inventory.lookup_media(media_uuid).unwrap();
330 if let Some(ref set) = media_id.media_set_label {
331 // always true here
332 if encryption_key_fingerprint.is_none()
333 && set.encryption_key_fingerprint.is_some()
334 {
335 encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
336 }
337 }
338 media_id_list.push(media_id);
339 }
340 }
341 }
342
343 if let Some(fingerprint) = encryption_key_fingerprint {
344 task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
345 }
346
347 task_log!(
348 worker,
349 "Datastore(s): {}",
350 store_map
351 .used_datastores()
352 .into_iter()
353 .map(String::from)
354 .collect::<Vec<String>>()
355 .join(", "),
356 );
357
358 task_log!(worker, "Drive: {}", drive_name);
359 task_log!(
360 worker,
361 "Required media list: {}",
362 media_id_list
363 .iter()
364 .map(|media_id| media_id.label.label_text.as_str())
365 .collect::<Vec<&str>>()
366 .join(";")
367 );
368
369 let mut datastore_locks = Vec::new();
370 for store_name in store_map.used_datastores() {
371 // explicit create shared lock to prevent GC on newly created chunks
372 if let Some(store) = store_map.get_datastore(store_name) {
373 let shared_store_lock = store.try_shared_chunk_store_lock()?;
374 datastore_locks.push(shared_store_lock);
375 }
376 }
377
378 let mut checked_chunks_map = HashMap::new();
379
380 for media_id in media_id_list.iter() {
381 request_and_restore_media(
382 worker.clone(),
383 media_id,
384 &drive_config,
385 drive_name,
386 &store_map,
387 &mut checked_chunks_map,
388 restore_owner,
389 &email,
390 )?;
391 }
392
393 Ok(())
394 }
395
/// Restores only the given `snapshots` ("source_datastore:snapshot" strings)
/// from a media set, in three phases:
///
/// 1. restore the snapshot archives (indexes/blobs) into a temp dir
/// 2. restore all still-missing chunks into the target datastores
/// 3. copy the snapshot files from the temp dir into their final place
///
/// On error, partially restored snapshots are NOT cleaned up (only the temp
/// dir is removed).
fn restore_list_worker(
    worker: Arc<WorkerTask>,
    snapshots: Vec<String>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    // temp dir for phase 1; removed again at the end (success or failure)
    let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
    std::fs::create_dir_all(&base_path)?;

    let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?;

    let mut datastore_locks = Vec::new();
    let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new();
    let mut snapshot_locks = HashMap::new();

    let res = proxmox_lang::try_block!({
        // assemble snapshot files/locks
        for store_snapshot in snapshots.iter() {
            // entries have the form "source_datastore:snapshot"
            let mut split = store_snapshot.splitn(2, ':');
            let source_datastore = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
            let snapshot = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
            let backup_dir: BackupDir = snapshot.parse()?;

            let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;

            let (owner, _group_lock) =
                datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
            if restore_owner != &owner {
                // only the owner is allowed to create additional snapshots
                bail!(
                    "restore '{}' failed - owner check failed ({} != {})",
                    snapshot,
                    restore_owner,
                    owner
                );
            }

            // locate the snapshot archive on tape via the media set catalog
            let (media_id, file_num) = if let Some((media_uuid, file_num)) =
                catalog.lookup_snapshot(source_datastore, snapshot)
            {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                (media_id, file_num)
            } else {
                task_warn!(
                    worker,
                    "did not find snapshot '{}' in media set {}",
                    snapshot,
                    media_set_uuid
                );
                continue;
            };

            let (_rel_path, is_new, snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;

            if !is_new {
                task_log!(
                    worker,
                    "found snapshot {} on target datastore, skipping...",
                    snapshot
                );
                continue;
            }

            // keep the snapshot dir locked until phase 3 copied the files
            snapshot_locks.insert(store_snapshot.to_string(), snap_lock);

            // prevent GC from removing chunks we are about to restore
            let shared_store_lock = datastore.try_shared_chunk_store_lock()?;
            datastore_locks.push(shared_store_lock);

            let file_list = snapshot_file_hash
                .entry(media_id.label.uuid.clone())
                .or_insert_with(Vec::new);
            file_list.push(file_num);

            task_log!(
                worker,
                "found snapshot {} on {}: file {}",
                snapshot,
                media_id.label.label_text,
                file_num
            );
        }

        if snapshot_file_hash.is_empty() {
            task_log!(worker, "nothing to restore, skipping remaining phases...");
            return Ok(());
        }

        task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
        let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
        for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (drive, info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            // sort so the tape is read in forward order
            file_list.sort_unstable();
            restore_snapshots_to_tmpdir(
                worker.clone(),
                &base_path,
                file_list,
                drive,
                &info,
                &media_set_uuid,
                &mut datastore_chunk_map,
            )
            .map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?;
        }

        // sorted media_uuid => (sorted file_num => (set of digests)))
        let mut media_file_chunk_map: BTreeMap<Uuid, BTreeMap<u64, HashSet<[u8; 32]>>> =
            BTreeMap::new();

        for (source_datastore, chunks) in datastore_chunk_map.into_iter() {
            let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;
            for digest in chunks.into_iter() {
                // we only want to restore chunks that we do not have yet
                if !datastore.cond_touch_chunk(&digest, false)? {
                    if let Some((uuid, nr)) = catalog.lookup_chunk(&source_datastore, &digest) {
                        let file = media_file_chunk_map
                            .entry(uuid.clone())
                            .or_insert_with(BTreeMap::new);
                        let chunks = file.entry(nr).or_insert_with(HashSet::new);
                        chunks.insert(digest);
                    }
                }
            }
        }

        // we do not need it anymore, saves memory
        drop(catalog);

        if !media_file_chunk_map.is_empty() {
            task_log!(worker, "Phase 2: restore chunks to datastores");
        } else {
            task_log!(worker, "all chunks exist already, skipping phase 2...");
        }

        for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (mut drive, _info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?;
        }

        task_log!(
            worker,
            "Phase 3: copy snapshots from temp dir to datastores"
        );
        for (store_snapshot, _lock) in snapshot_locks.into_iter() {
            proxmox_lang::try_block!({
                // parse "source_datastore:snapshot" again (same as above)
                let mut split = store_snapshot.splitn(2, ':');
                let source_datastore = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
                let snapshot = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
                let backup_dir: BackupDir = snapshot.parse()?;

                let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                    format_err!("unexpected source datastore: {}", source_datastore)
                })?;

                let mut tmp_path = base_path.clone();
                tmp_path.push(&source_datastore);
                tmp_path.push(snapshot);

                let path = datastore.snapshot_path(&backup_dir);

                // copy all snapshot files from the temp dir into place
                for entry in std::fs::read_dir(tmp_path)? {
                    let entry = entry?;
                    let mut new_path = path.clone();
                    new_path.push(entry.file_name());
                    std::fs::copy(entry.path(), new_path)?;
                }
                task_log!(worker, "Restore snapshot '{}' done", snapshot);
                Ok(())
            })
            .map_err(|err: Error| format_err!("could not copy {}: {}", store_snapshot, err))?;
        }
        Ok(())
    });

    if res.is_err() {
        task_warn!(
            worker,
            "Error during restore, partially restored snapshots will NOT be cleaned up"
        );
    }

    // best effort removal of the temp dir
    match std::fs::remove_dir_all(&base_path) {
        Ok(()) => {}
        Err(err) => task_warn!(worker, "error cleaning up: {}", err),
    }

    res
}
620
621 fn get_media_set_catalog(
622 inventory: &Inventory,
623 media_set_uuid: &Uuid,
624 ) -> Result<MediaSetCatalog, Error> {
625 let status_path = Path::new(TAPE_STATUS_DIR);
626
627 let members = inventory.compute_media_set_members(media_set_uuid)?;
628 let media_list = members.media_list();
629 let mut catalog = MediaSetCatalog::new();
630
631 for (seq_nr, media_uuid) in media_list.iter().enumerate() {
632 match media_uuid {
633 None => {
634 bail!(
635 "media set {} is incomplete (missing member {}).",
636 media_set_uuid,
637 seq_nr
638 );
639 }
640 Some(media_uuid) => {
641 let media_id = inventory.lookup_media(media_uuid).unwrap();
642 let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
643 catalog.append_catalog(media_catalog)?;
644 }
645 }
646 }
647
648 Ok(catalog)
649 }
650
651 fn restore_snapshots_to_tmpdir(
652 worker: Arc<WorkerTask>,
653 path: &PathBuf,
654 file_list: &[u64],
655 mut drive: Box<dyn TapeDriver>,
656 media_id: &MediaId,
657 media_set_uuid: &Uuid,
658 chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>,
659 ) -> Result<(), Error> {
660 match media_id.media_set_label {
661 None => {
662 bail!(
663 "missing media set label on media {} ({})",
664 media_id.label.label_text,
665 media_id.label.uuid
666 );
667 }
668 Some(ref set) => {
669 if set.uuid != *media_set_uuid {
670 bail!(
671 "wrong media set label on media {} ({} != {})",
672 media_id.label.label_text,
673 media_id.label.uuid,
674 media_set_uuid
675 );
676 }
677 let encrypt_fingerprint = set.encryption_key_fingerprint.clone().map(|fp| {
678 task_log!(worker, "Encryption key fingerprint: {}", fp);
679 (fp, set.uuid.clone())
680 });
681
682 drive.set_encryption(encrypt_fingerprint)?;
683 }
684 }
685
686 for file_num in file_list {
687 let current_file_number = drive.current_file_number()?;
688 if current_file_number != *file_num {
689 task_log!(
690 worker,
691 "was at file {}, moving to {}",
692 current_file_number,
693 file_num
694 );
695 drive.move_to_file(*file_num)?;
696 let current_file_number = drive.current_file_number()?;
697 task_log!(worker, "now at file {}", current_file_number);
698 }
699 let mut reader = drive.read_next_file()?;
700
701 let header: MediaContentHeader = unsafe { reader.read_le_value()? };
702 if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
703 bail!("missing MediaContentHeader");
704 }
705
706 match header.content_magic {
707 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
708 let header_data = reader.read_exact_allocated(header.size as usize)?;
709
710 let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
711 .map_err(|err| {
712 format_err!("unable to parse snapshot archive header - {}", err)
713 })?;
714
715 let source_datastore = archive_header.store;
716 let snapshot = archive_header.snapshot;
717
718 task_log!(
719 worker,
720 "File {}: snapshot archive {}:{}",
721 file_num,
722 source_datastore,
723 snapshot
724 );
725
726 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
727
728 let mut tmp_path = path.clone();
729 tmp_path.push(&source_datastore);
730 tmp_path.push(snapshot);
731 std::fs::create_dir_all(&tmp_path)?;
732
733 let chunks = chunks_list
734 .entry(source_datastore)
735 .or_insert_with(HashSet::new);
736 let manifest =
737 try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?;
738 for item in manifest.files() {
739 let mut archive_path = tmp_path.to_owned();
740 archive_path.push(&item.filename);
741
742 let index: Box<dyn IndexFile> = match archive_type(&item.filename)? {
743 ArchiveType::DynamicIndex => {
744 Box::new(DynamicIndexReader::open(&archive_path)?)
745 }
746 ArchiveType::FixedIndex => Box::new(FixedIndexReader::open(&archive_path)?),
747 ArchiveType::Blob => continue,
748 };
749 for i in 0..index.index_count() {
750 if let Some(digest) = index.index_digest(i) {
751 chunks.insert(*digest);
752 }
753 }
754 }
755 }
756 other => bail!("unexpected file type: {:?}", other),
757 }
758 }
759
760 Ok(())
761 }
762
/// Restores the requested chunks from tape, file by file.
///
/// `file_chunk_map` maps tape file numbers to the set of chunk digests that
/// should be restored from that chunk archive; each archive names its source
/// datastore, which is resolved through `store_map`.
fn restore_file_chunk_map(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    store_map: &DataStoreMap,
    file_chunk_map: &mut BTreeMap<u64, HashSet<[u8; 32]>>,
) -> Result<(), Error> {
    // BTreeMap iteration is sorted, so the tape is read in forward order
    for (nr, chunk_map) in file_chunk_map.iter_mut() {
        // position the drive at the requested file if not already there
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *nr {
            task_log!(
                worker,
                "was at file {}, moving to {}",
                current_file_number,
                nr
            );
            drive.move_to_file(*nr)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;
        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("missing MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

                let source_datastore = archive_header.store;

                task_log!(
                    worker,
                    "File {}: chunk archive for datastore '{}'",
                    nr,
                    source_datastore
                );

                let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                    format_err!("unexpected chunk archive for store: {}", source_datastore)
                })?;

                // restore only the chunks requested for this file
                let count = restore_partial_chunk_archive(
                    worker.clone(),
                    reader,
                    datastore.clone(),
                    chunk_map,
                )?;
                task_log!(worker, "restored {} chunks", count);
            }
            _ => bail!("unexpected content magic {:?}", header.content_magic),
        }
    }

    Ok(())
}
822
823 fn restore_partial_chunk_archive<'a>(
824 worker: Arc<WorkerTask>,
825 reader: Box<dyn 'a + TapeRead>,
826 datastore: Arc<DataStore>,
827 chunk_list: &mut HashSet<[u8; 32]>,
828 ) -> Result<usize, Error> {
829 let mut decoder = ChunkArchiveDecoder::new(reader);
830
831 let mut count = 0;
832
833 let start_time = std::time::SystemTime::now();
834 let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
835 let bytes2 = bytes.clone();
836
837 let writer_pool = ParallelHandler::new(
838 "tape restore chunk writer",
839 4,
840 move |(chunk, digest): (DataBlob, [u8; 32])| {
841 if !datastore.cond_touch_chunk(&digest, false)? {
842 bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
843 chunk.verify_crc()?;
844 if chunk.crypt_mode()? == CryptMode::None {
845 chunk.decode(None, Some(&digest))?; // verify digest
846 }
847
848 datastore.insert_chunk(&chunk, &digest)?;
849 }
850 Ok(())
851 },
852 );
853
854 let verify_and_write_channel = writer_pool.channel();
855
856 loop {
857 let (digest, blob) = match decoder.next_chunk()? {
858 Some((digest, blob)) => (digest, blob),
859 None => break,
860 };
861
862 worker.check_abort()?;
863
864 if chunk_list.remove(&digest) {
865 verify_and_write_channel.send((blob, digest.clone()))?;
866 count += 1;
867 }
868
869 if chunk_list.is_empty() {
870 break;
871 }
872 }
873
874 drop(verify_and_write_channel);
875
876 writer_pool.complete()?;
877
878 let elapsed = start_time.elapsed()?.as_secs_f64();
879
880 let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);
881
882 task_log!(
883 worker,
884 "restored {} bytes ({:.2} MB/s)",
885 bytes,
886 (bytes as f64) / (1_000_000.0 * elapsed)
887 );
888
889 Ok(count)
890 }
891
/// Request and restore complete media without using existing catalog (create catalog instead)
///
/// Requests/loads the medium, verifies its media set label (and enables
/// decryption from the set's key fingerprint), then delegates to
/// [`restore_media`] with a restore target.
pub fn request_and_restore_media(
    worker: Arc<WorkerTask>,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    store_map: &DataStoreMap,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    restore_owner: &Authid,
    email: &Option<String>,
) -> Result<(), Error> {
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    // may prompt the operator (via email) to insert the right medium
    let (mut drive, info) =
        request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

    // verify that the loaded medium really belongs to our media set
    match info.media_set_label {
        None => {
            bail!(
                "missing media set label on media {} ({})",
                media_id.label.label_text,
                media_id.label.uuid
            );
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!(
                    "wrong media set label on media {} ({} != {})",
                    media_id.label.label_text,
                    media_id.label.uuid,
                    media_set_uuid
                );
            }
            // enable decryption when the media set carries a key fingerprint
            let encrypt_fingerprint = set
                .encryption_key_fingerprint
                .clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(
        worker,
        &mut drive,
        &info,
        Some((store_map, restore_owner)),
        checked_chunks_map,
        false,
    )
}
946
/// Restore complete media content and catalog
///
/// Only create the catalog if target is None.
///
/// Reads the tape file by file until EOT, passing each file to
/// [`restore_archive`], while building a temporary media catalog that is
/// committed and finalized at the end.
pub fn restore_media(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStoreMap, &Authid)>,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
) -> Result<(), Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file() {
            Err(BlockReadError::EndOfFile) => {
                // stray filemark - tolerated, just skip it
                task_log!(
                    worker,
                    "skip unexpected filemark at pos {}",
                    current_file_number
                );
                continue;
            }
            Err(BlockReadError::EndOfStream) => {
                // end of tape reached - we are done
                task_log!(worker, "detected EOT after {} files", current_file_number);
                break;
            }
            Err(BlockReadError::Error(err)) => {
                return Err(err.into());
            }
            Ok(reader) => reader,
        };

        restore_archive(
            worker.clone(),
            reader,
            current_file_number,
            target,
            &mut catalog,
            checked_chunks_map,
            verbose,
        )?;
    }

    catalog.commit()?;

    // promote the temporary catalog to the real one
    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}
999
/// Restores (or just catalogs) a single archive file from tape.
///
/// Dispatches on the content magic: snapshot archives are restored into the
/// mapped datastore (when a `target` mapping exists), chunk archives have
/// their chunks restored or merely scanned, and catalog archives are skipped.
/// In every case the media catalog is updated so it can be rebuilt from the
/// tape content.
fn restore_archive<'a>(
    worker: Arc<WorkerTask>,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
) -> Result<(), Error> {
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(
                worker,
                "File {}: snapshot archive {}:{}",
                current_file_number,
                datastore_name,
                snapshot
            );

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_dir)?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker.clone(), reader, &path) {
                            Err(err) => {
                                // clean up the partially restored snapshot dir
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                // incomplete stream: discard, do not register
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        return Ok(());
                    }
                    // snapshot already exists: fall through to skip + catalog
                } else {
                    task_log!(worker, "skipping...");
                }
            }

            reader.skip_data()?; // read all data
            // register complete (but not restored) snapshots in the catalog
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(
                    Uuid::from(header.uuid),
                    current_file_number,
                    &datastore_name,
                    &snapshot,
                )?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(
                worker,
                "File {}: chunk archive for datastore '{}'",
                current_file_number,
                source_datastore
            );
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            // restore when we have a target datastore; scan-only when
            // running without a target (catalog rebuild)
            if datastore.is_some() || target.is_none() {
                let checked_chunks = checked_chunks_map
                    .entry(
                        datastore
                            .as_ref()
                            .map(|d| d.name())
                            .unwrap_or("_unused_")
                            .to_string(),
                    )
                    .or_insert(HashSet::new());

                let chunks = if let Some(datastore) = datastore {
                    restore_chunk_archive(
                        worker.clone(),
                        reader,
                        datastore,
                        checked_chunks,
                        verbose,
                    )?
                } else {
                    scan_chunk_archive(worker.clone(), reader, verbose)?
                };

                if let Some(chunks) = chunks {
                    catalog.register_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                        &chunks[..],
                    )?;
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(
                worker,
                "File {}: skip catalog '{}'",
                current_file_number,
                archive_header.uuid
            );

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    Ok(())
}
1182
1183 // Read chunk archive without restoring data - just record contained chunks
1184 fn scan_chunk_archive<'a>(
1185 worker: Arc<WorkerTask>,
1186 reader: Box<dyn 'a + TapeRead>,
1187 verbose: bool,
1188 ) -> Result<Option<Vec<[u8; 32]>>, Error> {
1189 let mut chunks = Vec::new();
1190
1191 let mut decoder = ChunkArchiveDecoder::new(reader);
1192
1193 loop {
1194 let digest = match decoder.next_chunk() {
1195 Ok(Some((digest, _blob))) => digest,
1196 Ok(None) => break,
1197 Err(err) => {
1198 let reader = decoder.reader();
1199
1200 // check if this stream is marked incomplete
1201 if let Ok(true) = reader.is_incomplete() {
1202 return Ok(Some(chunks));
1203 }
1204
1205 // check if this is an aborted stream without end marker
1206 if let Ok(false) = reader.has_end_marker() {
1207 task_log!(worker, "missing stream end marker");
1208 return Ok(None);
1209 }
1210
1211 // else the archive is corrupt
1212 return Err(err);
1213 }
1214 };
1215
1216 worker.check_abort()?;
1217
1218 if verbose {
1219 task_log!(worker, "Found chunk: {}", hex::encode(&digest));
1220 }
1221
1222 chunks.push(digest);
1223 }
1224
1225 Ok(Some(chunks))
1226 }
1227
1228 fn restore_chunk_archive<'a>(
1229 worker: Arc<WorkerTask>,
1230 reader: Box<dyn 'a + TapeRead>,
1231 datastore: Arc<DataStore>,
1232 checked_chunks: &mut HashSet<[u8; 32]>,
1233 verbose: bool,
1234 ) -> Result<Option<Vec<[u8; 32]>>, Error> {
1235 let mut chunks = Vec::new();
1236
1237 let mut decoder = ChunkArchiveDecoder::new(reader);
1238
1239 let start_time = std::time::SystemTime::now();
1240 let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
1241 let bytes2 = bytes.clone();
1242
1243 let worker2 = worker.clone();
1244
1245 let writer_pool = ParallelHandler::new(
1246 "tape restore chunk writer",
1247 4,
1248 move |(chunk, digest): (DataBlob, [u8; 32])| {
1249 let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
1250 if !chunk_exists {
1251 if verbose {
1252 task_log!(worker2, "Insert chunk: {}", hex::encode(&digest));
1253 }
1254 bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
1255 // println!("verify and write {}", hex::encode(&digest));
1256 chunk.verify_crc()?;
1257 if chunk.crypt_mode()? == CryptMode::None {
1258 chunk.decode(None, Some(&digest))?; // verify digest
1259 }
1260
1261 datastore.insert_chunk(&chunk, &digest)?;
1262 } else if verbose {
1263 task_log!(worker2, "Found existing chunk: {}", hex::encode(&digest));
1264 }
1265 Ok(())
1266 },
1267 );
1268
1269 let verify_and_write_channel = writer_pool.channel();
1270
1271 loop {
1272 let (digest, blob) = match decoder.next_chunk() {
1273 Ok(Some((digest, blob))) => (digest, blob),
1274 Ok(None) => break,
1275 Err(err) => {
1276 let reader = decoder.reader();
1277
1278 // check if this stream is marked incomplete
1279 if let Ok(true) = reader.is_incomplete() {
1280 return Ok(Some(chunks));
1281 }
1282
1283 // check if this is an aborted stream without end marker
1284 if let Ok(false) = reader.has_end_marker() {
1285 task_log!(worker, "missing stream end marker");
1286 return Ok(None);
1287 }
1288
1289 // else the archive is corrupt
1290 return Err(err);
1291 }
1292 };
1293
1294 worker.check_abort()?;
1295
1296 if !checked_chunks.contains(&digest) {
1297 verify_and_write_channel.send((blob, digest.clone()))?;
1298 checked_chunks.insert(digest.clone());
1299 }
1300 chunks.push(digest);
1301 }
1302
1303 drop(verify_and_write_channel);
1304
1305 writer_pool.complete()?;
1306
1307 let elapsed = start_time.elapsed()?.as_secs_f64();
1308
1309 let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);
1310
1311 task_log!(
1312 worker,
1313 "restored {} bytes ({:.2} MB/s)",
1314 bytes,
1315 (bytes as f64) / (1_000_000.0 * elapsed)
1316 );
1317
1318 Ok(Some(chunks))
1319 }
1320
1321 fn restore_snapshot_archive<'a>(
1322 worker: Arc<WorkerTask>,
1323 reader: Box<dyn 'a + TapeRead>,
1324 snapshot_path: &Path,
1325 ) -> Result<bool, Error> {
1326 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
1327 match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
1328 Ok(_) => Ok(true),
1329 Err(err) => {
1330 let reader = decoder.input();
1331
1332 // check if this stream is marked incomplete
1333 if let Ok(true) = reader.is_incomplete() {
1334 return Ok(false);
1335 }
1336
1337 // check if this is an aborted stream without end marker
1338 if let Ok(false) = reader.has_end_marker() {
1339 return Ok(false);
1340 }
1341
1342 // else the archive is corrupt
1343 Err(err)
1344 }
1345 }
1346 }
1347
1348 fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
1349 worker: Arc<WorkerTask>,
1350 decoder: &mut pxar::decoder::sync::Decoder<R>,
1351 snapshot_path: &Path,
1352 ) -> Result<BackupManifest, Error> {
1353 let _root = match decoder.next() {
1354 None => bail!("missing root entry"),
1355 Some(root) => {
1356 let root = root?;
1357 match root.kind() {
1358 pxar::EntryKind::Directory => { /* Ok */ }
1359 _ => bail!("wrong root entry type"),
1360 }
1361 root
1362 }
1363 };
1364
1365 let root_path = Path::new("/");
1366 let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
1367
1368 let mut manifest = None;
1369
1370 loop {
1371 worker.check_abort()?;
1372
1373 let entry = match decoder.next() {
1374 None => break,
1375 Some(entry) => entry?,
1376 };
1377 let entry_path = entry.path();
1378
1379 match entry.kind() {
1380 pxar::EntryKind::File { .. } => { /* Ok */ }
1381 _ => bail!("wrong entry type for {:?}", entry_path),
1382 }
1383 match entry_path.parent() {
1384 None => bail!("wrong parent for {:?}", entry_path),
1385 Some(p) => {
1386 if p != root_path {
1387 bail!("wrong parent for {:?}", entry_path);
1388 }
1389 }
1390 }
1391
1392 let filename = entry.file_name();
1393 let mut contents = match decoder.contents() {
1394 None => bail!("missing file content"),
1395 Some(contents) => contents,
1396 };
1397
1398 let mut archive_path = snapshot_path.to_owned();
1399 archive_path.push(&filename);
1400
1401 let mut tmp_path = archive_path.clone();
1402 tmp_path.set_extension("tmp");
1403
1404 if filename == manifest_file_name {
1405 let blob = DataBlob::load_from_reader(&mut contents)?;
1406 let mut old_manifest = BackupManifest::try_from(blob)?;
1407
1408 // Remove verify_state to indicate that this snapshot is not verified
1409 old_manifest
1410 .unprotected
1411 .as_object_mut()
1412 .map(|m| m.remove("verify_state"));
1413
1414 let old_manifest = serde_json::to_string_pretty(&old_manifest)?;
1415 let blob = DataBlob::encode(old_manifest.as_bytes(), None, true)?;
1416
1417 let options = CreateOptions::new();
1418 replace_file(&tmp_path, blob.raw_data(), options, false)?;
1419
1420 manifest = Some(BackupManifest::try_from(blob)?);
1421 } else {
1422 let mut tmpfile = std::fs::OpenOptions::new()
1423 .write(true)
1424 .create(true)
1425 .read(true)
1426 .open(&tmp_path)
1427 .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;
1428
1429 std::io::copy(&mut contents, &mut tmpfile)?;
1430
1431 if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
1432 bail!("Atomic rename file {:?} failed - {}", archive_path, err);
1433 }
1434 }
1435 }
1436
1437 let manifest = match manifest {
1438 None => bail!("missing manifest"),
1439 Some(manifest) => manifest,
1440 };
1441
1442 // Do not verify anything here, because this would be to slow (causes tape stops).
1443
1444 // commit manifest
1445 let mut manifest_path = snapshot_path.to_owned();
1446 manifest_path.push(MANIFEST_BLOB_NAME);
1447 let mut tmp_manifest_path = manifest_path.clone();
1448 tmp_manifest_path.set_extension("tmp");
1449
1450 if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
1451 bail!(
1452 "Atomic rename manifest {:?} failed - {}",
1453 manifest_path,
1454 err
1455 );
1456 }
1457
1458 Ok(manifest)
1459 }
1460
/// Try to restore media catalogs (from catalog archives)
///
/// Expects the drive positioned right behind the media-set label
/// (file number 2). Scans forward over the files on tape looking for
/// catalog archives of this `media_set`; when a non-catalog file is
/// hit, it seeks once to the last file (catalogs are appended at the
/// end of a media set) and checks the remaining files there.
///
/// Each matching catalog is copied into a temporary database file,
/// its header verified, and then moved into place as the media's
/// on-disk catalog.
///
/// Returns `Ok(true)` if the catalog of the currently loaded media
/// (`uuid`) was restored, `Ok(false)` otherwise.
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);

    // sanity check: caller must have positioned us past the media-set label
    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    // set after the one-time seek to EOT below; once set, the next
    // non-catalog file ends the loop instead of seeking again
    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        {
            // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(
                        worker,
                        "skip unexpected filemark at pos {}",
                        current_file_number
                    );
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            // every content file starts with a MediaContentHeader
            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| {
                        format_err!("unable to parse catalog archive header - {}", err)
                    })?;

                // catalog belongs to a different media set - ignore
                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(
                        worker,
                        "skipping unrelated catalog at pos {}",
                        current_file_number
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                // only restore catalogs of media listed in this media
                // set's inventory
                let wanted = media_set
                    .media_list()
                    .iter()
                    .find(|e| match e {
                        None => false,
                        Some(uuid) => uuid == catalog_uuid,
                    })
                    .is_some();

                if !wanted {
                    task_log!(
                        worker,
                        "skip catalog because media '{}' not inventarized",
                        catalog_uuid
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(
                            worker,
                            "catalog for media '{}' already exists",
                            catalog_uuid
                        );
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                // copy the raw catalog data into a temporary database
                // file; it is only committed after the header checks below
                let mut file =
                    MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        // archive header and catalog content must agree
                        if &media_uuid != catalog_uuid {
                            task_log!(
                                worker,
                                "catalog uuid missmatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(
                                worker,
                                "catalog media_set missmatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }

                        // move the temporary database into place
                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(
                                worker,
                                "successfully restored related catalog {}",
                                media_uuid
                            );
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        // current file is no catalog archive - catalogs are written at
        // the end of the media set, so seek to the last file once and
        // check the files there
        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}