use std::collections::{BTreeMap, HashMap, HashSet};
use std::ffi::OsStr;
use std::io::{Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox_human_byte::HumanByte;
use proxmox_io::ReadExt;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, ApiType};
use proxmox_section_config::SectionConfigData;
use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid;

use pbs_api_types::{
    parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
    Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
    DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
    PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_datastore::{DataBlob, DataStore};
use pbs_tape::{
    BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
};
use proxmox_rest_server::WorkerTask;

use crate::backup::check_ns_modification_privs;
use crate::{
    server::lookup_user_email,
    tape::{
        drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver},
        file_formats::{
            CatalogArchiveHeader, ChunkArchiveDecoder, ChunkArchiveHeader, SnapshotArchiveHeader,
            PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
            PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
        },
        lock_media_set, Inventory, MediaCatalog, MediaId, MediaSet, MediaSetCatalog,
        TAPE_STATUS_DIR,
    },
    tools::parallel_handler::ParallelHandler,
};

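/// Maps source namespaces to target namespaces, per source datastore.
///
/// Built from `TapeRestoreNamespace` property strings. An illustrative input
/// sketch (names are placeholders; the exact key syntax is assumed from the
/// fields used below and defined by `TAPE_RESTORE_NAMESPACE_SCHEMA`):
///
/// ```text
/// store=tapestore,source=prod,target=restored/prod,max-depth=2
/// ```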
struct NamespaceMap {
    map: HashMap<String, HashMap<BackupNamespace, (BackupNamespace, usize)>>,
}

impl TryFrom<Vec<String>> for NamespaceMap {
    type Error = Error;

    fn try_from(mappings: Vec<String>) -> Result<Self, Error> {
        let mut map = HashMap::new();

        let mappings = mappings.into_iter().map(|s| {
            let value = TapeRestoreNamespace::API_SCHEMA.parse_property_string(&s)?;
            let value: TapeRestoreNamespace = serde_json::from_value(value)?;
            Ok::<_, Error>(value)
        });

        for mapping in mappings {
            let mapping = mapping?;
            let source = mapping.source.unwrap_or_default();
            let target = mapping.target.unwrap_or_default();
            let max_depth = mapping.max_depth.unwrap_or(MAX_NAMESPACE_DEPTH);

            let ns_map: &mut HashMap<BackupNamespace, (BackupNamespace, usize)> =
                map.entry(mapping.store).or_insert_with(HashMap::new);

            if ns_map.insert(source, (target, max_depth)).is_some() {
                bail!("duplicate mapping found");
            }
        }

        Ok(Self { map })
    }
}

impl NamespaceMap {
    fn used_namespaces(&self, datastore: &str) -> HashSet<BackupNamespace> {
        let mut set = HashSet::new();
        if let Some(mapping) = self.map.get(datastore) {
            for (ns, _) in mapping.values() {
                set.insert(ns.clone());
            }
        }

        set
    }

    fn get_namespaces(&self, source_ds: &str, source_ns: &BackupNamespace) -> Vec<BackupNamespace> {
        if let Some(mapping) = self.map.get(source_ds) {
            return mapping
                .iter()
                .filter_map(|(ns, (target_ns, max_depth))| {
                    // filter out prefixes which are too long
                    if ns.depth() > source_ns.depth() || source_ns.depth() - ns.depth() > *max_depth
                    {
                        return None;
                    }
                    source_ns.map_prefix(ns, target_ns).ok()
                })
                .collect();
        }

        vec![]
    }
}

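/// Maps source datastores to target datastores, plus optional namespace
/// mappings (see [`NamespaceMap`]).
///
/// Illustrative input, derived from the parsing code below (store names are
/// placeholders): each list entry is either `source=target`, or a bare store
/// name which becomes the default target:
///
/// ```text
/// tapestore=local,otherstore
/// ```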
pub struct DataStoreMap {
    map: HashMap<String, Arc<DataStore>>,
    default: Option<Arc<DataStore>>,
    ns_map: Option<NamespaceMap>,
}

impl TryFrom<String> for DataStoreMap {
    type Error = Error;

    fn try_from(value: String) -> Result<Self, Error> {
        let value = DATASTORE_MAP_ARRAY_SCHEMA.parse_property_string(&value)?;
        let mut mapping: Vec<String> = value
            .as_array()
            .unwrap()
            .iter()
            .map(|v| v.as_str().unwrap().to_string())
            .collect();

        let mut map = HashMap::new();
        let mut default = None;
        while let Some(mut store) = mapping.pop() {
            if let Some(index) = store.find('=') {
                let mut target = store.split_off(index);
                target.remove(0); // remove '='
                let datastore = DataStore::lookup_datastore(&target, Some(Operation::Write))?;
                map.insert(store, datastore);
            } else if default.is_none() {
                default = Some(DataStore::lookup_datastore(&store, Some(Operation::Write))?);
            } else {
                bail!("multiple default stores given");
            }
        }

        Ok(Self {
            map,
            default,
            ns_map: None,
        })
    }
}

impl DataStoreMap {
    fn add_namespaces_maps(&mut self, mappings: Vec<String>) -> Result<bool, Error> {
        let count = mappings.len();
        let ns_map = NamespaceMap::try_from(mappings)?;
        self.ns_map = Some(ns_map);
        Ok(count > 0)
    }

    fn used_datastores(&self) -> HashMap<&str, (Arc<DataStore>, Option<HashSet<BackupNamespace>>)> {
        let mut map = HashMap::new();
        for (source, target) in self.map.iter() {
            let ns = self.ns_map.as_ref().map(|map| map.used_namespaces(source));
            map.insert(source.as_str(), (Arc::clone(target), ns));
        }

        if let Some(ref store) = self.default {
            map.insert("", (Arc::clone(store), None));
        }

        map
    }

    fn target_ns(&self, datastore: &str, ns: &BackupNamespace) -> Option<Vec<BackupNamespace>> {
        self.ns_map
            .as_ref()
            .map(|mapping| mapping.get_namespaces(datastore, ns))
    }

    fn target_store(&self, source_datastore: &str) -> Option<Arc<DataStore>> {
        self.map
            .get(source_datastore)
            .or(self.default.as_ref())
            .map(Arc::clone)
    }

    fn get_targets(
        &self,
        source_datastore: &str,
        source_ns: &BackupNamespace,
    ) -> Option<(Arc<DataStore>, Option<Vec<BackupNamespace>>)> {
        self.target_store(source_datastore)
            .map(|store| (store, self.target_ns(source_datastore, source_ns)))
    }

    /// Returns true if there's both a datastore and namespace mapping from a source datastore/ns
    fn has_full_mapping(&self, datastore: &str, ns: &BackupNamespace) -> bool {
        self.target_store(datastore).is_some() && self.target_ns(datastore, ns).is_some()
    }
}

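// Permission model for restores (summarized from the checks below): restoring
// into a namespace requires Datastore.Backup on its ACL path; restoring on
// behalf of a different owner additionally requires Datastore.Modify - the
// same permission needed to change ownership after a sync.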
fn check_datastore_privs(
    user_info: &CachedUserInfo,
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    owner: Option<&Authid>,
) -> Result<(), Error> {
    let acl_path = ns.acl_path(store);
    let privs = user_info.lookup_privs(auth_id, &acl_path);
    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
        bail!("no permissions on /{}", acl_path.join("/"));
    }

    if let Some(ref owner) = owner {
        let correct_owner = *owner == auth_id
            || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());

        // same permission as changing ownership after syncing
        if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
            bail!("no permission to restore as '{}'", owner);
        }
    }

    Ok(())
}

fn check_and_create_namespaces(
    user_info: &CachedUserInfo,
    store: &Arc<DataStore>,
    ns: &BackupNamespace,
    auth_id: &Authid,
    owner: Option<&Authid>,
) -> Result<(), Error> {
    // check normal restore privs first
    check_datastore_privs(user_info, store.name(), ns, auth_id, owner)?;

    // try create recursively if it does not exist
    if !store.namespace_exists(ns) {
        let mut tmp_ns = BackupNamespace::root();

        for comp in ns.components() {
            tmp_ns.push(comp.to_string())?;
            if !store.namespace_exists(&tmp_ns) {
                check_ns_modification_privs(store.name(), &tmp_ns, auth_id).map_err(|_err| {
                    format_err!("no permission to create namespace '{}'", tmp_ns)
                })?;

                store.create_namespace(&tmp_ns.parent(), comp.to_string())?;
            }
        }
    }
    Ok(())
}

pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            "namespaces": {
                description: "List of namespaces to restore.",
                type: Array,
                optional: true,
                items: {
                    schema: TAPE_RESTORE_NAMESPACE_SCHEMA,
                },
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            "snapshots": {
                description: "List of snapshots.",
                type: Array,
                optional: true,
                items: {
                    schema: TAPE_RESTORE_SNAPSHOT_SCHEMA,
                },
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to test inside the function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} and \
            /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}/[{namespace}], \
            Datastore.Modify privileges to create namespaces (if they don't exist).",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set. Namespaces will be automatically created if necessary.
#[allow(clippy::too_many_arguments)]
pub fn restore(
    store: String,
    drive: String,
    namespaces: Option<Vec<String>>,
    media_set: String,
    notify_user: Option<Userid>,
    snapshots: Option<Vec<String>>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut store_map = DataStoreMap::try_from(store)
        .map_err(|err| format_err!("cannot parse store mapping: {err}"))?;
    let namespaces = if let Some(maps) = namespaces {
        store_map
            .add_namespaces_maps(maps)
            .map_err(|err| format_err!("cannot parse namespace mapping: {err}"))?
    } else {
        false
    };

    let used_datastores = store_map.used_datastores();
    if used_datastores.is_empty() {
        bail!("no datastores given");
    }

    for (target, namespaces) in used_datastores.values() {
        check_datastore_privs(
            &user_info,
            target.name(),
            &BackupNamespace::root(),
            &auth_id,
            owner.as_ref(),
        )?;
        if let Some(namespaces) = namespaces {
            for ns in namespaces {
                check_and_create_namespaces(&user_info, target, ns, &auth_id, owner.as_ref())?;
            }
        }
    }
    user_info.check_privs(&auth_id, &["tape", "drive", &drive], PRIV_TAPE_READ, false)?;

    let media_set_uuid = media_set.parse()?;

    let _lock = lock_media_set(TAPE_STATUS_DIR, &media_set_uuid, None)?;

    let inventory = Inventory::load(TAPE_STATUS_DIR)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
    user_info.check_privs(&auth_id, &["tape", "pool", &pool], PRIV_TAPE_READ, false)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let taskid = used_datastores
        .values()
        .map(|(t, _)| t.name().to_string())
        .collect::<Vec<String>>()
        .join(", ");

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(taskid),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            let restore_owner = owner.as_ref().unwrap_or(&auth_id);

            let email = notify_user
                .as_ref()
                .and_then(lookup_user_email)
                .or_else(|| lookup_user_email(&auth_id.clone().into()));

            task_log!(worker, "Mediaset '{media_set}'");
            task_log!(worker, "Pool: {pool}");

            let res = if snapshots.is_some() || namespaces {
                restore_list_worker(
                    worker.clone(),
                    snapshots.unwrap_or_default(),
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                    user_info,
                    &auth_id,
                )
            } else {
                restore_full_worker(
                    worker.clone(),
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                    &auth_id,
                )
            };
            if res.is_ok() {
                task_log!(worker, "Restore mediaset '{media_set}' done");
            }
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(worker, "could not unset drive state for {drive}: {err}");
            }

            res
        },
    )?;

    Ok(upid_str.into())
}

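// Full-restore path (summary derived from the code): used when neither a
// snapshot list nor a namespace mapping was given. Walks all members of the
// media set in sequence and restores everything on them, (re)creating the
// media catalogs on the fly via request_and_restore_media / restore_media.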
#[allow(clippy::too_many_arguments)]
fn restore_full_worker(
    worker: Arc<WorkerTask>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
    auth_id: &Authid,
) -> Result<(), Error> {
    let members = inventory.compute_media_set_members(&media_set_uuid)?;

    let media_list = members.media_list();

    let mut media_id_list = Vec::new();

    let mut encryption_key_fingerprint = None;

    for (seq_nr, media_uuid) in media_list.iter().enumerate() {
        match media_uuid {
            None => {
                bail!("media set {media_set_uuid} is incomplete (missing member {seq_nr}).");
            }
            Some(media_uuid) => {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                if let Some(ref set) = media_id.media_set_label {
                    // always true here
                    if encryption_key_fingerprint.is_none()
                        && set.encryption_key_fingerprint.is_some()
                    {
                        encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                    }
                }
                media_id_list.push(media_id);
            }
        }
    }

    if let Some(fingerprint) = encryption_key_fingerprint {
        task_log!(worker, "Encryption key fingerprint: {fingerprint}");
    }

    let used_datastores = store_map.used_datastores();
    let datastore_list = used_datastores
        .values()
        .map(|(t, _)| String::from(t.name()))
        .collect::<Vec<String>>()
        .join(", ");
    task_log!(worker, "Datastore(s): {datastore_list}");
    task_log!(worker, "Drive: {drive_name}");
    log_required_tapes(
        &worker,
        &inventory,
        media_id_list.iter().map(|id| &id.label.uuid),
    );

    let mut datastore_locks = Vec::new();
    for (target, _) in used_datastores.values() {
        // explicitly create a shared lock to prevent GC of newly created chunks
        let shared_store_lock = target.try_shared_chunk_store_lock()?;
        datastore_locks.push(shared_store_lock);
    }

    let mut checked_chunks_map = HashMap::new();

    for media_id in media_id_list.iter() {
        request_and_restore_media(
            worker.clone(),
            media_id,
            &drive_config,
            drive_name,
            &store_map,
            &mut checked_chunks_map,
            restore_owner,
            &email,
            auth_id,
        )?;
    }

    Ok(())
}

#[allow(clippy::too_many_arguments)]
fn check_snapshot_restorable(
    worker: &WorkerTask,
    store_map: &DataStoreMap,
    store: &str,
    snapshot: &str,
    ns: &BackupNamespace,
    dir: &BackupDir,
    required: bool,
    user_info: &CachedUserInfo,
    auth_id: &Authid,
    restore_owner: &Authid,
) -> Result<bool, Error> {
    let (datastore, namespaces) = if required {
        let (datastore, namespaces) = match store_map.get_targets(store, ns) {
            Some((target_ds, Some(target_ns))) => (target_ds, target_ns),
            Some((target_ds, None)) => (target_ds, vec![ns.clone()]),
            None => bail!("could not find target datastore for {store}:{snapshot}"),
        };
        if namespaces.is_empty() {
            bail!("could not find target namespace for {store}:{snapshot}");
        }

        (datastore, namespaces)
    } else {
        match store_map.get_targets(store, ns) {
            Some((_, Some(ns))) if ns.is_empty() => return Ok(false),
            Some((datastore, Some(ns))) => (datastore, ns),
            Some((_, None)) | None => return Ok(false),
        }
    };

    let mut have_some_permissions = false;
    let mut can_restore_some = false;
    for ns in namespaces {
        // only simple check, ns creation comes later
        if let Err(err) = check_datastore_privs(
            user_info,
            datastore.name(),
            &ns,
            auth_id,
            Some(restore_owner),
        ) {
            task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'");
            continue;
        }

        // rechecked when we create the group!
        if let Ok(owner) = datastore.get_owner(&ns, dir.as_ref()) {
            if restore_owner != &owner {
                // only the owner is allowed to create additional snapshots
                task_warn!(
                    worker,
                    "restore of '{snapshot}' to {ns} failed, owner check failed ({restore_owner} \
                        != {owner})",
                );
                continue;
            }
        }

        have_some_permissions = true;

        if datastore.snapshot_path(&ns, dir).exists() {
            task_warn!(
                worker,
                "found snapshot {snapshot} on target datastore/namespace, skipping...",
            );
            continue;
        }
        can_restore_some = true;
    }

    if !have_some_permissions {
        bail!("cannot restore {snapshot} to any target namespace due to permissions");
    }

    Ok(can_restore_some)
}

fn log_required_tapes<'a>(
    worker: &WorkerTask,
    inventory: &Inventory,
    list: impl Iterator<Item = &'a Uuid>,
) {
    let mut tape_list = list
        .map(|uuid| {
            inventory
                .lookup_media(uuid)
                .unwrap()
                .label
                .label_text
                .as_str()
        })
        .collect::<Vec<&str>>();
    tape_list.sort_unstable();
    task_log!(worker, "Required media list: {}", tape_list.join(";"));
}

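// Overview of the phases below (as logged by the function body):
//
// Phase 0: determine restorable snapshots (catalog lookup plus permission
//          and owner checks) and the tape files they live on
// Phase 1: temporarily restore the selected snapshot archives to a temp dir
// Phase 2: restore the still-missing chunks directly into the target stores
// Phase 3: copy the snapshot metadata from the temp dir into place, then
//          clean up the temp dirs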
#[allow(clippy::too_many_arguments)]
fn restore_list_worker(
    worker: Arc<WorkerTask>,
    snapshots: Vec<String>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
    user_info: Arc<CachedUserInfo>,
    auth_id: &Authid,
) -> Result<(), Error> {
    let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?;

    let mut datastore_locks = Vec::new();
    let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new();
    let mut skipped = Vec::new();

    let res = proxmox_lang::try_block!({
        // phase 0
        let snapshots = if snapshots.is_empty() {
            let mut restorable = Vec::new();
            // restore source namespaces
            for (store, snapshot) in catalog.list_snapshots() {
                let (ns, dir) = match parse_ns_and_snapshot(snapshot) {
                    Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir),
                    Err(err) => {
                        task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}");
                        continue;
                    }
                    _ => continue,
                };
                let snapshot = print_ns_and_snapshot(&ns, &dir);
                match check_snapshot_restorable(
                    &worker,
                    &store_map,
                    store,
                    &snapshot,
                    &ns,
                    &dir,
                    false,
                    &user_info,
                    auth_id,
                    restore_owner,
                ) {
                    Ok(true) => restorable.push((store.to_string(), snapshot.to_string(), ns, dir)),
                    Ok(false) => {}
                    Err(err) => {
                        task_warn!(worker, "{err}");
                        skipped.push(format!("{store}:{snapshot}"));
                    }
                }
            }
            restorable
        } else {
            snapshots
                .into_iter()
                .filter_map(|store_snapshot| {
                    // we can unwrap here because of the api format
                    let idx = store_snapshot.find(':').unwrap();
                    let (store, snapshot) = store_snapshot.split_at(idx + 1);
                    let store = &store[..idx]; // remove ':'

                    match parse_ns_and_snapshot(snapshot) {
                        Ok((ns, dir)) => {
                            match check_snapshot_restorable(
                                &worker,
                                &store_map,
                                store,
                                snapshot,
                                &ns,
                                &dir,
                                true,
                                &user_info,
                                auth_id,
                                restore_owner,
                            ) {
                                Ok(true) => {
                                    Some((store.to_string(), snapshot.to_string(), ns, dir))
                                }
                                Ok(false) => None,
                                Err(err) => {
                                    task_warn!(worker, "{err}");
                                    skipped.push(format!("{store}:{snapshot}"));
                                    None
                                }
                            }
                        }
                        Err(err) => {
                            task_warn!(worker, "could not restore {store_snapshot}: {err}");
                            skipped.push(store_snapshot);
                            None
                        }
                    }
                })
                .collect()
        };
        for (store, snapshot, _ns, _) in snapshots.iter() {
            let datastore = match store_map.target_store(store) {
                Some(store) => store,
                None => bail!("unexpected error"), // we already checked those
            };
            let (media_id, file_num) =
                if let Some((media_uuid, file_num)) = catalog.lookup_snapshot(store, snapshot) {
                    let media_id = inventory.lookup_media(media_uuid).unwrap();
                    (media_id, file_num)
                } else {
                    task_warn!(
                        worker,
                        "did not find snapshot '{store}:{snapshot}' in media set",
                    );
                    skipped.push(format!("{store}:{snapshot}"));
                    continue;
                };

            let shared_store_lock = datastore.try_shared_chunk_store_lock()?;
            datastore_locks.push(shared_store_lock);

            let file_list = snapshot_file_hash
                .entry(media_id.label.uuid.clone())
                .or_insert_with(Vec::new);
            file_list.push(file_num);

            task_log!(
                worker,
                "found snapshot {snapshot} on {}: file {file_num}",
                media_id.label.label_text,
            );
        }

        if snapshot_file_hash.is_empty() {
            task_log!(worker, "nothing to restore, skipping remaining phases...");
            if !skipped.is_empty() {
                task_log!(worker, "skipped the following snapshots:");
                for snap in skipped {
                    task_log!(worker, "  {snap}");
                }
            }
            return Ok(());
        }

        task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
        log_required_tapes(&worker, &inventory, snapshot_file_hash.keys());
        let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
        let mut tmp_paths = Vec::new();
        for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (drive, info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            file_list.sort_unstable();

            let tmp_path = restore_snapshots_to_tmpdir(
                worker.clone(),
                &store_map,
                file_list,
                drive,
                &info,
                &media_set_uuid,
                &mut datastore_chunk_map,
            )
            .map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?;
            tmp_paths.extend(tmp_path);
        }

        // sorted media_uuid => (sorted file_num => (set of digests))
        let mut media_file_chunk_map: BTreeMap<Uuid, BTreeMap<u64, HashSet<[u8; 32]>>> =
            BTreeMap::new();

        for (source_datastore, chunks) in datastore_chunk_map.into_iter() {
            let datastore = store_map.target_store(&source_datastore).ok_or_else(|| {
                format_err!("could not find mapping for source datastore: {source_datastore}")
            })?;
            for digest in chunks.into_iter() {
                // we only want to restore chunks that we do not have yet
                if !datastore.cond_touch_chunk(&digest, false)? {
                    if let Some((uuid, nr)) = catalog.lookup_chunk(&source_datastore, &digest) {
                        let file = media_file_chunk_map
                            .entry(uuid.clone())
                            .or_insert_with(BTreeMap::new);
                        let chunks = file.entry(nr).or_insert_with(HashSet::new);
                        chunks.insert(digest);
                    }
                }
            }
        }

        // we do not need it anymore, saves memory
        drop(catalog);

        if !media_file_chunk_map.is_empty() {
            task_log!(worker, "Phase 2: restore chunks to datastores");
            log_required_tapes(&worker, &inventory, media_file_chunk_map.keys());
        } else {
            task_log!(worker, "All chunks are already present, skip phase 2...");
        }

        for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (mut drive, _info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?;
        }

        task_log!(
            worker,
            "Phase 3: copy snapshots from temp dir to datastores"
        );
        let mut errors = false;
        for (source_datastore, snapshot, source_ns, backup_dir) in snapshots.into_iter() {
            if let Err(err) = proxmox_lang::try_block!({
                let (datastore, target_ns) = store_map
                    .get_targets(&source_datastore, &source_ns)
                    .ok_or_else(|| {
                        format_err!("unexpected source datastore: {}", source_datastore)
                    })?;

                for ns in target_ns.unwrap_or_else(|| vec![source_ns.clone()]) {
                    if let Err(err) = proxmox_lang::try_block!({
                        check_and_create_namespaces(
                            &user_info,
                            &datastore,
                            &ns,
                            auth_id,
                            Some(restore_owner),
                        )?;

                        let (owner, _group_lock) = datastore.create_locked_backup_group(
                            &ns,
                            backup_dir.as_ref(),
                            restore_owner,
                        )?;
                        if restore_owner != &owner {
                            bail!(
                                "cannot restore snapshot '{snapshot}' into group '{}', owner check \
                                    failed ({restore_owner} != {owner})",
                                backup_dir.group,
                            );
                        }

                        let (_rel_path, is_new, _snap_lock) =
                            datastore.create_locked_backup_dir(&ns, backup_dir.as_ref())?;

                        if !is_new {
                            bail!("snapshot {}/{} already exists", datastore.name(), &snapshot);
                        }

                        let path = datastore.snapshot_path(&ns, &backup_dir);
                        let tmp_path = snapshot_tmpdir(
                            &source_datastore,
                            &datastore,
                            &snapshot,
                            &media_set_uuid,
                        );

                        for entry in std::fs::read_dir(tmp_path)? {
                            let entry = entry?;
                            let mut new_path = path.clone();
                            new_path.push(entry.file_name());
                            std::fs::copy(entry.path(), new_path)?;
                        }

                        Ok(())
                    }) {
                        task_warn!(
                            worker,
                            "could not restore {source_datastore}:{snapshot}: '{err}'"
                        );
                        skipped.push(format!("{source_datastore}:{snapshot}"));
                    }
                }
                task_log!(worker, "Restore snapshot '{}' done", snapshot);
                Ok::<_, Error>(())
            }) {
                task_warn!(
                    worker,
                    "could not copy {source_datastore}:{snapshot}: {err}"
                );
                errors = true;
            }
        }

        for tmp_path in tmp_paths {
            if let Err(err) = proxmox_lang::try_block!({
                std::fs::remove_dir_all(&tmp_path)
                    .map_err(|err| format_err!("remove_dir_all failed - {err}"))
            }) {
                task_warn!(worker, "could not clean up temp dir {tmp_path:?}: {err}");
                errors = true;
            };
        }

        if errors {
            bail!("errors during copy occurred");
        }
        if !skipped.is_empty() {
            task_log!(worker, "(partially) skipped the following snapshots:");
            for snap in skipped {
                task_log!(worker, "  {snap}");
            }
        }
        Ok(())
    });

    if res.is_err() {
        task_warn!(
            worker,
            "Error during restore, partially restored snapshots will NOT be cleaned up"
        );
    }

    for (datastore, _) in store_map.used_datastores().values() {
        let tmp_path = media_set_tmpdir(datastore, &media_set_uuid);
        match std::fs::remove_dir_all(tmp_path) {
            Ok(()) => {}
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
            Err(err) => task_warn!(worker, "error cleaning up: {}", err),
        }
    }

    res
}

fn get_media_set_catalog(
    inventory: &Inventory,
    media_set_uuid: &Uuid,
) -> Result<MediaSetCatalog, Error> {
    let members = inventory.compute_media_set_members(media_set_uuid)?;
    let media_list = members.media_list();
    let mut catalog = MediaSetCatalog::new();

    for (seq_nr, media_uuid) in media_list.iter().enumerate() {
        match media_uuid {
            None => {
                bail!("media set {media_set_uuid} is incomplete (missing member {seq_nr}).");
            }
            Some(media_uuid) => {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                let media_catalog = MediaCatalog::open(TAPE_STATUS_DIR, media_id, false, false)?;
                catalog.append_catalog(media_catalog)?;
            }
        }
    }

    Ok(catalog)
}

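// Temp layout used by phases 1 and 3, as built by the two helpers below:
//
//   <datastore base>/.tmp/<media-set-uuid>/<source-datastore>/<snapshot>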
fn media_set_tmpdir(datastore: &DataStore, media_set_uuid: &Uuid) -> PathBuf {
    let mut path = datastore.base_path();
    path.push(".tmp");
    path.push(media_set_uuid.to_string());
    path
}

fn snapshot_tmpdir(
    source_datastore: &str,
    datastore: &DataStore,
    snapshot: &str,
    media_set_uuid: &Uuid,
) -> PathBuf {
    let mut path = media_set_tmpdir(datastore, media_set_uuid);
    path.push(source_datastore);
    path.push(snapshot);
    path
}

fn restore_snapshots_to_tmpdir(
    worker: Arc<WorkerTask>,
    store_map: &DataStoreMap,
    file_list: &[u64],
    mut drive: Box<dyn TapeDriver>,
    media_id: &MediaId,
    media_set_uuid: &Uuid,
    chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>,
) -> Result<Vec<PathBuf>, Error> {
    let mut tmp_paths = Vec::new();
    match media_id.media_set_label {
        None => {
            bail!(
                "missing media set label on media {} ({})",
                media_id.label.label_text,
                media_id.label.uuid
            );
        }
        Some(ref set) => {
            if set.uuid != *media_set_uuid {
                bail!(
                    "wrong media set label on media {} ({} != {})",
                    media_id.label.label_text,
                    media_id.label.uuid,
                    media_set_uuid
                );
            }
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone().map(|fp| {
                task_log!(worker, "Encryption key fingerprint: {}", fp);
                (fp, set.uuid.clone())
            });

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    for file_num in file_list {
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *file_num {
            task_log!(
                worker,
                "was at file {current_file_number}, moving to {file_num}"
            );
            drive.move_to_file(*file_num)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;

        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("missing MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
            | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| {
                        format_err!("unable to parse snapshot archive header - {err}")
                    })?;

                let source_datastore = archive_header.store;
                let snapshot = archive_header.snapshot;

                task_log!(
                    worker,
                    "File {file_num}: snapshot archive {source_datastore}:{snapshot}",
                );

                let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;

                let target_datastore = match store_map.target_store(&source_datastore) {
                    Some(datastore) => datastore,
                    None => {
                        task_warn!(
                            worker,
                            "could not find target datastore for {source_datastore}:{snapshot}",
                        );
                        continue;
                    }
                };

                let tmp_path = snapshot_tmpdir(
                    &source_datastore,
                    &target_datastore,
                    &snapshot,
                    media_set_uuid,
                );
                std::fs::create_dir_all(&tmp_path)?;

                let chunks = chunks_list
                    .entry(source_datastore)
                    .or_insert_with(HashSet::new);
                let manifest =
                    try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?;

                for item in manifest.files() {
                    let mut archive_path = tmp_path.to_owned();
                    archive_path.push(&item.filename);

                    let index: Box<dyn IndexFile> = match archive_type(&item.filename)? {
                        ArchiveType::DynamicIndex => {
                            Box::new(DynamicIndexReader::open(&archive_path)?)
                        }
                        ArchiveType::FixedIndex => Box::new(FixedIndexReader::open(&archive_path)?),
                        ArchiveType::Blob => continue,
                    };
                    for i in 0..index.index_count() {
                        if let Some(digest) = index.index_digest(i) {
                            chunks.insert(*digest);
                        }
                    }
                }
                tmp_paths.push(tmp_path);
            }
            other => bail!("unexpected file type: {other:?}"),
        }
    }

    Ok(tmp_paths)
}

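// Seeks to each recorded tape file (the BTreeMap keys are ascending file
// numbers, so the tape only ever moves forward) and restores just the chunk
// digests collected for that file during phase 1.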
fn restore_file_chunk_map(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    store_map: &DataStoreMap,
    file_chunk_map: &mut BTreeMap<u64, HashSet<[u8; 32]>>,
) -> Result<(), Error> {
    for (nr, chunk_map) in file_chunk_map.iter_mut() {
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *nr {
            task_log!(worker, "was at file {current_file_number}, moving to {nr}");
            drive.move_to_file(*nr)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;
        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("file is missing the MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse chunk archive header - {err}"))?;

                let source_datastore = archive_header.store;

                task_log!(
                    worker,
                    "File {nr}: chunk archive for datastore '{source_datastore}'",
                );

                let datastore = store_map.target_store(&source_datastore).ok_or_else(|| {
                    format_err!("unexpected chunk archive for store: {source_datastore}")
                })?;

                let count = restore_partial_chunk_archive(
                    worker.clone(),
                    reader,
                    datastore.clone(),
                    chunk_map,
                )?;
                task_log!(worker, "restored {count} chunks");
            }
            _ => bail!("unexpected content magic {:?}", header.content_magic),
        }
    }

    Ok(())
}

fn restore_partial_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Arc<DataStore>,
    chunk_list: &mut HashSet<[u8; 32]>,
) -> Result<usize, Error> {
    let mut decoder = ChunkArchiveDecoder::new(reader);

    let mut count = 0;

    let start_time = std::time::SystemTime::now();
    let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let bytes2 = bytes.clone();

    let writer_pool = ParallelHandler::new(
        "tape restore chunk writer",
        4,
        move |(chunk, digest): (DataBlob, [u8; 32])| {
            if !datastore.cond_touch_chunk(&digest, false)? {
                bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                chunk.verify_crc()?;
                if chunk.crypt_mode()? == CryptMode::None {
                    chunk.decode(None, Some(&digest))?; // verify digest
                }

                datastore.insert_chunk(&chunk, &digest)?;
            }
            Ok(())
        },
    );

    let verify_and_write_channel = writer_pool.channel();

    while let Some((digest, blob)) = decoder.next_chunk()? {
        worker.check_abort()?;

        if chunk_list.remove(&digest) {
            verify_and_write_channel.send((blob, digest))?;
            count += 1;
        }
        if chunk_list.is_empty() {
            break;
        }
    }

    drop(verify_and_write_channel);

    writer_pool.complete()?;

    let elapsed = start_time.elapsed()?.as_secs_f64();
    let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
    task_log!(
        worker,
        "restored {} ({:.2}/s)",
        HumanByte::new_decimal(bytes),
        HumanByte::new_decimal(bytes / elapsed),
    );

    Ok(count)
}

/// Request and restore complete media without using existing catalog (create catalog instead)
#[allow(clippy::too_many_arguments)]
pub fn request_and_restore_media(
    worker: Arc<WorkerTask>,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    store_map: &DataStoreMap,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    restore_owner: &Authid,
    email: &Option<String>,
    auth_id: &Authid,
) -> Result<(), Error> {
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    let (mut drive, info) =
        request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

    match info.media_set_label {
        None => {
            bail!(
                "missing media set label on media {} ({})",
                media_id.label.label_text,
                media_id.label.uuid
            );
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!(
                    "wrong media set label on media {} ({} != {})",
                    media_id.label.label_text,
                    media_id.label.uuid,
                    media_set_uuid
                );
            }
            let encrypt_fingerprint = set
                .encryption_key_fingerprint
                .clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(
        worker,
        &mut drive,
        &info,
        Some((store_map, restore_owner)),
        checked_chunks_map,
        false,
        auth_id,
    )
}

/// Restore complete media content and catalog
///
/// Only create the catalog if target is None.
pub fn restore_media(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStoreMap, &Authid)>,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
    auth_id: &Authid,
) -> Result<(), Error> {
    let mut catalog = MediaCatalog::create_temporary_database(TAPE_STATUS_DIR, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file() {
            Err(BlockReadError::EndOfFile) => {
                task_log!(
                    worker,
                    "skip unexpected filemark at pos {}",
                    current_file_number
                );
                continue;
            }
            Err(BlockReadError::EndOfStream) => {
                task_log!(worker, "detected EOT after {} files", current_file_number);
                break;
            }
            Err(BlockReadError::Error(err)) => {
                return Err(err.into());
            }
            Ok(reader) => reader,
        };

        restore_archive(
            worker.clone(),
            reader,
            current_file_number,
            target,
            &mut catalog,
            checked_chunks_map,
            verbose,
            auth_id,
        )?;
    }

    catalog.commit()?;

    MediaCatalog::finish_temporary_database(TAPE_STATUS_DIR, &media_id.label.uuid, true)?;

    Ok(())
}

#[allow(clippy::too_many_arguments)]
fn restore_archive<'a>(
    worker: Arc<WorkerTask>,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
    auth_id: &Authid,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;

    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(
                worker,
                "File {}: snapshot archive {}:{}",
                current_file_number,
                datastore_name,
                snapshot
            );

            let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?;

            if let Some((store_map, restore_owner)) = target.as_ref() {
                if let Some(datastore) = store_map.target_store(&datastore_name) {
                    check_and_create_namespaces(
                        &user_info,
                        &datastore,
                        &backup_ns,
                        auth_id,
                        Some(restore_owner),
                    )?;
                    let (owner, _group_lock) = datastore.create_locked_backup_group(
                        &backup_ns,
                        backup_dir.as_ref(),
                        restore_owner,
                    )?;
                    if *restore_owner != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            restore_owner,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_ns, backup_dir.as_ref())?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker.clone(), reader, &path) {
                            Err(err) => {
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &backup_ns,
                                    &backup_dir,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        return Ok(());
                    }
                } else {
                    task_log!(worker, "skipping...");
                }
            }

            reader.skip_data()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(
                    Uuid::from(header.uuid),
                    current_file_number,
                    &datastore_name,
                    &backup_ns,
                    &backup_dir,
                )?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(
                worker,
                "File {}: chunk archive for datastore '{}'",
                current_file_number,
                source_datastore
            );
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.target_store(&source_datastore));

            if datastore.is_some() || target.is_none() {
                let checked_chunks = checked_chunks_map
                    .entry(
                        datastore
                            .as_ref()
                            .map(|d| d.name())
                            .unwrap_or("_unused_")
                            .to_string(),
                    )
                    .or_default();

                let chunks = if let Some(datastore) = datastore {
                    restore_chunk_archive(
                        worker.clone(),
                        reader,
                        datastore,
                        checked_chunks,
                        verbose,
                    )?
                } else {
                    scan_chunk_archive(worker.clone(), reader, verbose)?
                };

                if let Some(chunks) = chunks {
                    catalog.register_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                        &chunks[..],
                    )?;
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 | PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(
                worker,
                "File {}: skip catalog '{}'",
                current_file_number,
                archive_header.uuid
            );

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    Ok(())
}

// Read chunk archive without restoring data - just record contained chunks
fn scan_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    verbose: bool,
) -> Result<Option<Vec<[u8; 32]>>, Error> {
    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    loop {
        let digest = match decoder.next_chunk() {
            Ok(Some((digest, _blob))) => digest,
            Ok(None) => break,
            Err(err) => {
                let reader = decoder.reader();

                // check if this stream is marked incomplete
                if let Ok(true) = reader.is_incomplete() {
                    return Ok(Some(chunks));
                }

                // check if this is an aborted stream without end marker
                if let Ok(false) = reader.has_end_marker() {
                    task_log!(worker, "missing stream end marker");
                    return Ok(None);
                }

                // else the archive is corrupt
                return Err(err);
            }
        };

        worker.check_abort()?;

        if verbose {
            task_log!(worker, "Found chunk: {}", hex::encode(digest));
        }

        chunks.push(digest);
    }

    Ok(Some(chunks))
}

fn restore_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Arc<DataStore>,
    checked_chunks: &mut HashSet<[u8; 32]>,
    verbose: bool,
) -> Result<Option<Vec<[u8; 32]>>, Error> {
    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let start_time = std::time::SystemTime::now();
    let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let bytes2 = bytes.clone();

    let worker2 = worker.clone();

    let writer_pool = ParallelHandler::new(
        "tape restore chunk writer",
        4,
        move |(chunk, digest): (DataBlob, [u8; 32])| {
            let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
            if !chunk_exists {
                if verbose {
                    task_log!(worker2, "Insert chunk: {}", hex::encode(digest));
                }
                bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                // println!("verify and write {}", hex::encode(&digest));
                chunk.verify_crc()?;
                if chunk.crypt_mode()? == CryptMode::None {
                    chunk.decode(None, Some(&digest))?; // verify digest
                }

                datastore.insert_chunk(&chunk, &digest)?;
            } else if verbose {
                task_log!(worker2, "Found existing chunk: {}", hex::encode(digest));
            }
            Ok(())
        },
    );

    let verify_and_write_channel = writer_pool.channel();

    loop {
        let (digest, blob) = match decoder.next_chunk() {
            Ok(Some((digest, blob))) => (digest, blob),
            Ok(None) => break,
            Err(err) => {
                let reader = decoder.reader();

                // check if this stream is marked incomplete
                if let Ok(true) = reader.is_incomplete() {
                    return Ok(Some(chunks));
                }

                // check if this is an aborted stream without end marker
                if let Ok(false) = reader.has_end_marker() {
                    task_log!(worker, "missing stream end marker");
                    return Ok(None);
                }

                // else the archive is corrupt
                return Err(err);
            }
        };

        worker.check_abort()?;

        if !checked_chunks.contains(&digest) {
            verify_and_write_channel.send((blob, digest))?;
            checked_chunks.insert(digest);
        }
        chunks.push(digest);
    }

    drop(verify_and_write_channel);

    writer_pool.complete()?;

    let elapsed = start_time.elapsed()?.as_secs_f64();
    let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
    task_log!(
        worker,
        "restored {} ({:.2}/s)",
        HumanByte::new_decimal(bytes),
        HumanByte::new_decimal(bytes / elapsed),
    );

    Ok(Some(chunks))
}

fn restore_snapshot_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {
    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
        Ok(_) => Ok(true),
        Err(err) => {
            let reader = decoder.input();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(false);
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                return Ok(false);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

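// Restores a flat pxar snapshot archive into `snapshot_path` (summary of the
// code below): every file is written via a ".tmp" name first; the manifest is
// stripped of its verify_state (the restored snapshot counts as unverified)
// and only renamed into place at the very end, so an aborted restore never
// leaves a snapshot with a valid manifest behind.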
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: Arc<WorkerTask>,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<BackupManifest, Error> {
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let mut old_manifest = BackupManifest::try_from(blob)?;

            // Remove verify_state to indicate that this snapshot is not verified
            old_manifest
                .unprotected
                .as_object_mut()
                .map(|m| m.remove("verify_state"));

            let old_manifest = serde_json::to_string_pretty(&old_manifest)?;
            let blob = DataBlob::encode(old_manifest.as_bytes(), None, true)?;

            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options, false)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // Do not verify anything here, because this would be too slow (causes tape stops).

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!(
            "Atomic rename manifest {:?} failed - {}",
            manifest_path,
            err
        );
    }

    Ok(manifest)
}

/// Try to restore media catalogs (from catalog_archives)
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {
    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        {
            // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(
                        worker,
                        "skip unexpected filemark at pos {current_file_number}"
                    );
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {current_file_number} files");
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
                || header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
            {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| {
                        format_err!("unable to parse catalog archive header - {}", err)
                    })?;

                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(
                        worker,
                        "skipping unrelated catalog at pos {}",
                        current_file_number
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                let wanted = media_set.media_list().iter().any(|e| match e {
                    None => false,
                    Some(uuid) => uuid == catalog_uuid,
                });

                if !wanted {
                    task_log!(
                        worker,
                        "skip catalog because media '{}' is not inventoried",
                        catalog_uuid
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(TAPE_STATUS_DIR, catalog_uuid) {
                        task_log!(
                            worker,
                            "catalog for media '{}' already exists",
                            catalog_uuid
                        );
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                let mut file =
                    MediaCatalog::create_temporary_database_file(TAPE_STATUS_DIR, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(
                                worker,
                                "catalog uuid mismatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(
                                worker,
                                "catalog media_set mismatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(
                            TAPE_STATUS_DIR,
                            &media_uuid,
                            true,
                        )?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(
                                worker,
                                "successfully restored related catalog {}",
                                media_uuid
                            );
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}