use std::collections::{BTreeMap, HashMap, HashSet};
use std::convert::TryFrom;
use std::ffi::OsStr;
use std::io::{Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox_io::ReadExt;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_section_config::SectionConfigData;
use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid;

use pbs_api_types::{
    Authid, BackupNamespace, CryptMode, Operation, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
    DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
    PRIV_TAPE_READ, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_datastore::{DataBlob, DataStore};
use pbs_tape::{
    BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
};
use proxmox_rest_server::WorkerTask;

use crate::{
    server::lookup_user_email,
    tape::{
        drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver},
        file_formats::{
            CatalogArchiveHeader, ChunkArchiveDecoder, ChunkArchiveHeader, SnapshotArchiveHeader,
            PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
        },
        lock_media_set, Inventory, MediaCatalog, MediaId, MediaSet, MediaSetCatalog,
        TAPE_STATUS_DIR,
    },
    tools::parallel_handler::ParallelHandler,
};

const RESTORE_TMP_DIR: &str = "/var/tmp/proxmox-backup";

pub struct DataStoreMap {
    map: HashMap<String, Arc<DataStore>>,
    default: Option<Arc<DataStore>>,
}

impl TryFrom<String> for DataStoreMap {
    type Error = Error;

    fn try_from(value: String) -> Result<Self, Error> {
        let value = DATASTORE_MAP_ARRAY_SCHEMA.parse_property_string(&value)?;
        let mut mapping: Vec<String> = value
            .as_array()
            .unwrap()
            .iter()
            .map(|v| v.as_str().unwrap().to_string())
            .collect();

        let mut map = HashMap::new();
        let mut default = None;
        while let Some(mut store) = mapping.pop() {
            if let Some(index) = store.find('=') {
                let mut target = store.split_off(index);
                target.remove(0); // remove '='
                let datastore = DataStore::lookup_datastore(&target, Some(Operation::Write))?;
                map.insert(store, datastore);
            } else if default.is_none() {
                default = Some(DataStore::lookup_datastore(&store, Some(Operation::Write))?);
            } else {
                bail!("multiple default stores given");
            }
        }

        Ok(Self { map, default })
    }
}

impl DataStoreMap {
    fn used_datastores(&self) -> HashMap<&str, Arc<DataStore>> {
        let mut map = HashMap::new();
        for (source, target) in self.map.iter() {
            map.insert(source.as_str(), Arc::clone(target));
        }

        if let Some(ref store) = self.default {
            map.insert("", Arc::clone(store));
        }

        map
    }

    fn get_datastore(&self, source: &str) -> Option<Arc<DataStore>> {
        if let Some(store) = self.map.get(source) {
            return Some(Arc::clone(store));
        }
        if let Some(ref store) = self.default {
            return Some(Arc::clone(store));
        }

        None
    }
}

fn check_datastore_privs(
    user_info: &CachedUserInfo,
    store: &str,
    auth_id: &Authid,
    owner: &Option<Authid>,
) -> Result<(), Error> {
    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    if let Some(ref owner) = owner {
        let correct_owner = owner == auth_id
            || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user());

        // same permission as changing ownership after syncing
        if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
            bail!("no permission to restore as '{}'", owner);
        }
    }

    Ok(())
}

pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            "snapshots": {
                description: "List of snapshots.",
                type: Array,
                optional: true,
                items: {
                    schema: TAPE_RESTORE_SNAPSHOT_SCHEMA,
                },
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to check them inside the function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    snapshots: Option<Vec<String>>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let store_map = DataStoreMap::try_from(store)
        .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
    let used_datastores = store_map.used_datastores();
    if used_datastores.is_empty() {
        bail!("no datastores given");
    }

    for (_, target) in used_datastores.iter() {
        check_datastore_privs(&user_info, target.name(), &auth_id, &owner)?;
    }

    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let taskid = used_datastores
        .iter()
        .map(|(_, t)| t.name().to_string())
        .collect::<Vec<String>>()
        .join(", ");

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(taskid),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            let restore_owner = owner.as_ref().unwrap_or(&auth_id);

            let email = notify_user
                .as_ref()
                .and_then(|userid| lookup_user_email(userid))
                .or_else(|| lookup_user_email(&auth_id.clone().into()));

            task_log!(worker, "Mediaset '{}'", media_set);
            task_log!(worker, "Pool: {}", pool);

            let res = if let Some(snapshots) = snapshots {
                restore_list_worker(
                    worker.clone(),
                    snapshots,
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            } else {
                restore_full_worker(
                    worker.clone(),
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            };

            if res.is_ok() {
                task_log!(worker, "Restore mediaset '{}' done", media_set);
            }

            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(worker, "could not unset drive state for {}: {}", drive, err);
            }

            res
        },
    )?;

    Ok(upid_str.into())
}

fn restore_full_worker(
    worker: Arc<WorkerTask>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    let members = inventory.compute_media_set_members(&media_set_uuid)?;

    let media_list = members.media_list();

    let mut media_id_list = Vec::new();

    let mut encryption_key_fingerprint = None;

    for (seq_nr, media_uuid) in media_list.iter().enumerate() {
        match media_uuid {
            None => {
                bail!(
                    "media set {} is incomplete (missing member {}).",
                    media_set_uuid,
                    seq_nr
                );
            }
            Some(media_uuid) => {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                if let Some(ref set) = media_id.media_set_label {
                    // always true here
                    if encryption_key_fingerprint.is_none()
                        && set.encryption_key_fingerprint.is_some()
                    {
                        encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                    }
                }
                media_id_list.push(media_id);
            }
        }
    }

    if let Some(fingerprint) = encryption_key_fingerprint {
        task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
    }

    task_log!(
        worker,
        "Datastore(s): {}",
        store_map
            .used_datastores()
            .into_iter()
            .map(|(_, t)| String::from(t.name()))
            .collect::<Vec<String>>()
            .join(", "),
    );

    task_log!(worker, "Drive: {}", drive_name);
    task_log!(
        worker,
        "Required media list: {}",
        media_id_list
            .iter()
            .map(|media_id| media_id.label.label_text.as_str())
            .collect::<Vec<&str>>()
            .join(";")
    );

    let mut datastore_locks = Vec::new();
    for (_, target) in store_map.used_datastores() {
        // explicitly create a shared lock to prevent GC from removing newly created chunks
        let shared_store_lock = target.try_shared_chunk_store_lock()?;
        datastore_locks.push(shared_store_lock);
    }

    let mut checked_chunks_map = HashMap::new();

    for media_id in media_id_list.iter() {
        request_and_restore_media(
            worker.clone(),
            media_id,
            &drive_config,
            drive_name,
            &store_map,
            &mut checked_chunks_map,
            restore_owner,
            &email,
        )?;
    }

    Ok(())
}

fn restore_list_worker(
    worker: Arc<WorkerTask>,
    snapshots: Vec<String>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    // FIXME: Namespace needs to come from somewhere, `snapshots` is just a snapshot string list
    // here.
    let ns = BackupNamespace::root();

    let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
    std::fs::create_dir_all(&base_path)?;

    let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?;

    let mut datastore_locks = Vec::new();
    let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new();
    let mut snapshot_locks = HashMap::new();

    let res = proxmox_lang::try_block!({
        // assemble snapshot files/locks
        for store_snapshot in snapshots.iter() {
            let mut split = store_snapshot.splitn(2, ':');
            let source_datastore = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
            let snapshot = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

            let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;

            let (owner, _group_lock) =
                datastore.create_locked_backup_group(&ns, backup_dir.as_ref(), restore_owner)?;
            if restore_owner != &owner {
                // only the owner is allowed to create additional snapshots
                task_warn!(
                    worker,
                    "restore '{}' failed - owner check failed ({} != {})",
                    snapshot,
                    restore_owner,
                    owner
                );
                continue;
            }

            let (media_id, file_num) = if let Some((media_uuid, file_num)) =
                catalog.lookup_snapshot(source_datastore, snapshot)
            {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                (media_id, file_num)
            } else {
                task_warn!(
                    worker,
                    "did not find snapshot '{}' in media set {}",
                    snapshot,
                    media_set_uuid
                );
                continue;
            };

            let (_rel_path, is_new, snap_lock) =
                datastore.create_locked_backup_dir(&ns, &backup_dir)?;

            if !is_new {
                task_log!(
                    worker,
                    "found snapshot {} on target datastore, skipping...",
                    snapshot
                );
                continue;
            }

            snapshot_locks.insert(store_snapshot.to_string(), snap_lock);

            let shared_store_lock = datastore.try_shared_chunk_store_lock()?;
            datastore_locks.push(shared_store_lock);

            let file_list = snapshot_file_hash
                .entry(media_id.label.uuid.clone())
                .or_insert_with(Vec::new);
            file_list.push(file_num);

            task_log!(
                worker,
                "found snapshot {} on {}: file {}",
                snapshot,
                media_id.label.label_text,
                file_num
            );
        }

        if snapshot_file_hash.is_empty() {
            task_log!(worker, "nothing to restore, skipping remaining phases...");
            return Ok(());
        }

        task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
        let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
        for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (drive, info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            file_list.sort_unstable();
            restore_snapshots_to_tmpdir(
                worker.clone(),
                &base_path,
                file_list,
                drive,
                &info,
                &media_set_uuid,
                &mut datastore_chunk_map,
            )
            .map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?;
        }

        // sorted media_uuid => (sorted file_num => (set of digests))
        let mut media_file_chunk_map: BTreeMap<Uuid, BTreeMap<u64, HashSet<[u8; 32]>>> =
            BTreeMap::new();

        for (source_datastore, chunks) in datastore_chunk_map.into_iter() {
            let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;
            for digest in chunks.into_iter() {
                // we only want to restore chunks that we do not have yet
                if !datastore.cond_touch_chunk(&digest, false)? {
                    if let Some((uuid, nr)) = catalog.lookup_chunk(&source_datastore, &digest) {
                        let file = media_file_chunk_map
                            .entry(uuid.clone())
                            .or_insert_with(BTreeMap::new);
                        let chunks = file.entry(nr).or_insert_with(HashSet::new);
                        chunks.insert(digest);
                    }
                }
            }
        }

        // we do not need it anymore, saves memory
        drop(catalog);

        if !media_file_chunk_map.is_empty() {
            task_log!(worker, "Phase 2: restore chunks to datastores");
        } else {
            task_log!(worker, "all chunks exist already, skipping phase 2...");
        }

        for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (mut drive, _info) = request_and_load_media(
                &worker,
                &drive_config,
                drive_name,
                &media_id.label,
                &email,
            )?;
            restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?;
        }

        task_log!(
            worker,
            "Phase 3: copy snapshots from temp dir to datastores"
        );
        for (store_snapshot, _lock) in snapshot_locks.into_iter() {
            proxmox_lang::try_block!({
                let mut split = store_snapshot.splitn(2, ':');
                let source_datastore = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
                let snapshot = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
                let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

                let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                    format_err!("unexpected source datastore: {}", source_datastore)
                })?;

                let mut tmp_path = base_path.clone();
                tmp_path.push(&source_datastore);
                tmp_path.push(snapshot);

                let path = datastore.snapshot_path(&ns, &backup_dir);

                for entry in std::fs::read_dir(tmp_path)? {
                    let entry = entry?;
                    let mut new_path = path.clone();
                    new_path.push(entry.file_name());
                    std::fs::copy(entry.path(), new_path)?;
                }
                task_log!(worker, "Restore snapshot '{}' done", snapshot);
                Ok(())
            })
            .map_err(|err: Error| format_err!("could not copy {}: {}", store_snapshot, err))?;
        }
        Ok(())
    });

    if res.is_err() {
        task_warn!(
            worker,
            "Error during restore, partially restored snapshots will NOT be cleaned up"
        );
    }

    match std::fs::remove_dir_all(&base_path) {
        Ok(()) => {}
        Err(err) => task_warn!(worker, "error cleaning up: {}", err),
    }

    res
}

fn get_media_set_catalog(
    inventory: &Inventory,
    media_set_uuid: &Uuid,
) -> Result<MediaSetCatalog, Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);

    let members = inventory.compute_media_set_members(media_set_uuid)?;
    let media_list = members.media_list();
    let mut catalog = MediaSetCatalog::new();

    for (seq_nr, media_uuid) in media_list.iter().enumerate() {
        match media_uuid {
            None => {
                bail!(
                    "media set {} is incomplete (missing member {}).",
                    media_set_uuid,
                    seq_nr
                );
            }
            Some(media_uuid) => {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
                catalog.append_catalog(media_catalog)?;
            }
        }
    }

    Ok(catalog)
}

fn restore_snapshots_to_tmpdir(
    worker: Arc<WorkerTask>,
    path: &PathBuf,
    file_list: &[u64],
    mut drive: Box<dyn TapeDriver>,
    media_id: &MediaId,
    media_set_uuid: &Uuid,
    chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>,
) -> Result<(), Error> {
    match media_id.media_set_label {
        None => {
            bail!(
                "missing media set label on media {} ({})",
                media_id.label.label_text,
                media_id.label.uuid
            );
        }
        Some(ref set) => {
            if set.uuid != *media_set_uuid {
                bail!(
                    "wrong media set label on media {} ({} != {})",
                    media_id.label.label_text,
                    media_id.label.uuid,
                    media_set_uuid
                );
            }
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone().map(|fp| {
                task_log!(worker, "Encryption key fingerprint: {}", fp);
                (fp, set.uuid.clone())
            });

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    for file_num in file_list {
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *file_num {
            task_log!(
                worker,
                "was at file {}, moving to {}",
                current_file_number,
                file_num
            );
            drive.move_to_file(*file_num)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;

        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("missing MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| {
                        format_err!("unable to parse snapshot archive header - {}", err)
                    })?;

                let source_datastore = archive_header.store;
                let snapshot = archive_header.snapshot;

                task_log!(
                    worker,
                    "File {}: snapshot archive {}:{}",
                    file_num,
                    source_datastore,
                    snapshot
                );

                let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;

                let mut tmp_path = path.clone();
                tmp_path.push(&source_datastore);
                tmp_path.push(snapshot);
                std::fs::create_dir_all(&tmp_path)?;

                let chunks = chunks_list
                    .entry(source_datastore)
                    .or_insert_with(HashSet::new);
                let manifest =
                    try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?;
                for item in manifest.files() {
                    let mut archive_path = tmp_path.to_owned();
                    archive_path.push(&item.filename);

                    let index: Box<dyn IndexFile> = match archive_type(&item.filename)? {
                        ArchiveType::DynamicIndex => {
                            Box::new(DynamicIndexReader::open(&archive_path)?)
                        }
                        ArchiveType::FixedIndex => Box::new(FixedIndexReader::open(&archive_path)?),
                        ArchiveType::Blob => continue,
                    };
                    for i in 0..index.index_count() {
                        if let Some(digest) = index.index_digest(i) {
                            chunks.insert(*digest);
                        }
                    }
                }
            }
            other => bail!("unexpected file type: {:?}", other),
        }
    }

    Ok(())
}

fn restore_file_chunk_map(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    store_map: &DataStoreMap,
    file_chunk_map: &mut BTreeMap<u64, HashSet<[u8; 32]>>,
) -> Result<(), Error> {
    for (nr, chunk_map) in file_chunk_map.iter_mut() {
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *nr {
            task_log!(
                worker,
                "was at file {}, moving to {}",
                current_file_number,
                nr
            );
            drive.move_to_file(*nr)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;
        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("missing MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

                let source_datastore = archive_header.store;

                task_log!(
                    worker,
                    "File {}: chunk archive for datastore '{}'",
                    nr,
                    source_datastore
                );

                let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                    format_err!("unexpected chunk archive for store: {}", source_datastore)
                })?;

                let count = restore_partial_chunk_archive(
                    worker.clone(),
                    reader,
                    datastore.clone(),
                    chunk_map,
                )?;
                task_log!(worker, "restored {} chunks", count);
            }
            _ => bail!("unexpected content magic {:?}", header.content_magic),
        }
    }

    Ok(())
}

fn restore_partial_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Arc<DataStore>,
    chunk_list: &mut HashSet<[u8; 32]>,
) -> Result<usize, Error> {
    let mut decoder = ChunkArchiveDecoder::new(reader);

    let mut count = 0;

    let start_time = std::time::SystemTime::now();
    let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let bytes2 = bytes.clone();

    let writer_pool = ParallelHandler::new(
        "tape restore chunk writer",
        4,
        move |(chunk, digest): (DataBlob, [u8; 32])| {
            if !datastore.cond_touch_chunk(&digest, false)? {
                bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                chunk.verify_crc()?;
                if chunk.crypt_mode()? == CryptMode::None {
                    chunk.decode(None, Some(&digest))?; // verify digest
                }

                datastore.insert_chunk(&chunk, &digest)?;
            }
            Ok(())
        },
    );

    let verify_and_write_channel = writer_pool.channel();

    loop {
        let (digest, blob) = match decoder.next_chunk()? {
            Some((digest, blob)) => (digest, blob),
            None => break,
        };

        worker.check_abort()?;

        if chunk_list.remove(&digest) {
            verify_and_write_channel.send((blob, digest.clone()))?;
            count += 1;
        }

        if chunk_list.is_empty() {
            break;
        }
    }

    drop(verify_and_write_channel);

    writer_pool.complete()?;

    let elapsed = start_time.elapsed()?.as_secs_f64();

    let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);

    task_log!(
        worker,
        "restored {} bytes ({:.2} MB/s)",
        bytes,
        (bytes as f64) / (1_000_000.0 * elapsed)
    );

    Ok(count)
}

/// Request and restore complete media without using existing catalog (create catalog instead)
pub fn request_and_restore_media(
    worker: Arc<WorkerTask>,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    store_map: &DataStoreMap,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    restore_owner: &Authid,
    email: &Option<String>,
) -> Result<(), Error> {
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    let (mut drive, info) =
        request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

    match info.media_set_label {
        None => {
            bail!(
                "missing media set label on media {} ({})",
                media_id.label.label_text,
                media_id.label.uuid
            );
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!(
                    "wrong media set label on media {} ({} != {})",
                    media_id.label.label_text,
                    media_id.label.uuid,
                    media_set_uuid
                );
            }
            let encrypt_fingerprint = set
                .encryption_key_fingerprint
                .clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(
        worker,
        &mut drive,
        &info,
        Some((store_map, restore_owner)),
        checked_chunks_map,
        false,
    )
}

/// Restore complete media content and catalog
///
/// Only create the catalog if target is None.
pub fn restore_media(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStoreMap, &Authid)>,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
) -> Result<(), Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file() {
            Err(BlockReadError::EndOfFile) => {
                task_log!(
                    worker,
                    "skip unexpected filemark at pos {}",
                    current_file_number
                );
                continue;
            }
            Err(BlockReadError::EndOfStream) => {
                task_log!(worker, "detected EOT after {} files", current_file_number);
                break;
            }
            Err(BlockReadError::Error(err)) => {
                return Err(err.into());
            }
            Ok(reader) => reader,
        };

        restore_archive(
            worker.clone(),
            reader,
            current_file_number,
            target,
            &mut catalog,
            checked_chunks_map,
            verbose,
        )?;
    }

    catalog.commit()?;

    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}

fn restore_archive<'a>(
    worker: Arc<WorkerTask>,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8; 32]>>,
    verbose: bool,
) -> Result<(), Error> {
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(
                worker,
                "File {}: snapshot archive {}:{}",
                current_file_number,
                datastore_name,
                snapshot
            );

            // FIXME: Namespace
            let backup_ns = BackupNamespace::root();
            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) = datastore.create_locked_backup_group(
                        &backup_ns,
                        backup_dir.as_ref(),
                        authid,
                    )?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_ns, backup_dir.as_ref())?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker.clone(), reader, &path) {
                            Err(err) => {
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        return Ok(());
                    }
                } else {
                    task_log!(worker, "skipping...");
                }
            }

            reader.skip_data()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(
                    Uuid::from(header.uuid),
                    current_file_number,
                    &datastore_name,
                    &snapshot,
                )?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(
                worker,
                "File {}: chunk archive for datastore '{}'",
                current_file_number,
                source_datastore
            );
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            if datastore.is_some() || target.is_none() {
                let checked_chunks = checked_chunks_map
                    .entry(
                        datastore
                            .as_ref()
                            .map(|d| d.name())
                            .unwrap_or("_unused_")
                            .to_string(),
                    )
                    .or_insert_with(HashSet::new);

                let chunks = if let Some(datastore) = datastore {
                    restore_chunk_archive(
                        worker.clone(),
                        reader,
                        datastore,
                        checked_chunks,
                        verbose,
                    )?
                } else {
                    scan_chunk_archive(worker.clone(), reader, verbose)?
                };

                if let Some(chunks) = chunks {
                    catalog.register_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                        &chunks[..],
                    )?;
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(
                worker,
                "File {}: skip catalog '{}'",
                current_file_number,
                archive_header.uuid
            );

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    Ok(())
}

// Read chunk archive without restoring data - just record contained chunks
fn scan_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    verbose: bool,
) -> Result<Option<Vec<[u8; 32]>>, Error> {
    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    loop {
        let digest = match decoder.next_chunk() {
            Ok(Some((digest, _blob))) => digest,
            Ok(None) => break,
            Err(err) => {
                let reader = decoder.reader();

                // check if this stream is marked incomplete
                if let Ok(true) = reader.is_incomplete() {
                    return Ok(Some(chunks));
                }

                // check if this is an aborted stream without end marker
                if let Ok(false) = reader.has_end_marker() {
                    task_log!(worker, "missing stream end marker");
                    return Ok(None);
                }

                // else the archive is corrupt
                return Err(err);
            }
        };

        worker.check_abort()?;

        if verbose {
            task_log!(worker, "Found chunk: {}", hex::encode(&digest));
        }

        chunks.push(digest);
    }

    Ok(Some(chunks))
}

fn restore_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Arc<DataStore>,
    checked_chunks: &mut HashSet<[u8; 32]>,
    verbose: bool,
) -> Result<Option<Vec<[u8; 32]>>, Error> {
    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let start_time = std::time::SystemTime::now();
    let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let bytes2 = bytes.clone();

    let worker2 = worker.clone();

    let writer_pool = ParallelHandler::new(
        "tape restore chunk writer",
        4,
        move |(chunk, digest): (DataBlob, [u8; 32])| {
            let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
            if !chunk_exists {
                if verbose {
                    task_log!(worker2, "Insert chunk: {}", hex::encode(&digest));
                }
                bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                // println!("verify and write {}", hex::encode(&digest));
                chunk.verify_crc()?;
                if chunk.crypt_mode()? == CryptMode::None {
                    chunk.decode(None, Some(&digest))?; // verify digest
                }

                datastore.insert_chunk(&chunk, &digest)?;
            } else if verbose {
                task_log!(worker2, "Found existing chunk: {}", hex::encode(&digest));
            }
            Ok(())
        },
    );

    let verify_and_write_channel = writer_pool.channel();

    loop {
        let (digest, blob) = match decoder.next_chunk() {
            Ok(Some((digest, blob))) => (digest, blob),
            Ok(None) => break,
            Err(err) => {
                let reader = decoder.reader();

                // check if this stream is marked incomplete
                if let Ok(true) = reader.is_incomplete() {
                    return Ok(Some(chunks));
                }

                // check if this is an aborted stream without end marker
                if let Ok(false) = reader.has_end_marker() {
                    task_log!(worker, "missing stream end marker");
                    return Ok(None);
                }

                // else the archive is corrupt
                return Err(err);
            }
        };

        worker.check_abort()?;

        if !checked_chunks.contains(&digest) {
            verify_and_write_channel.send((blob, digest.clone()))?;
            checked_chunks.insert(digest.clone());
        }
        chunks.push(digest);
    }

    drop(verify_and_write_channel);

    writer_pool.complete()?;

    let elapsed = start_time.elapsed()?.as_secs_f64();

    let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);

    task_log!(
        worker,
        "restored {} bytes ({:.2} MB/s)",
        bytes,
        (bytes as f64) / (1_000_000.0 * elapsed)
    );

    Ok(Some(chunks))
}

fn restore_snapshot_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {
    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
        Ok(_) => Ok(true),
        Err(err) => {
            let reader = decoder.input();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(false);
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                return Ok(false);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: Arc<WorkerTask>,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<BackupManifest, Error> {
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let mut old_manifest = BackupManifest::try_from(blob)?;

            // Remove verify_state to indicate that this snapshot is not verified
            old_manifest
                .unprotected
                .as_object_mut()
                .map(|m| m.remove("verify_state"));

            let old_manifest = serde_json::to_string_pretty(&old_manifest)?;
            let blob = DataBlob::encode(old_manifest.as_bytes(), None, true)?;

            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options, false)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // Do not verify anything here, because this would be too slow (causes tape stops).

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!(
            "Atomic rename manifest {:?} failed - {}",
            manifest_path,
            err
        );
    }

    Ok(manifest)
}

/// Try to restore media catalogs (from catalog archives)
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);

    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        {
            // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(
                        worker,
                        "skip unexpected filemark at pos {}",
                        current_file_number
                    );
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| {
                        format_err!("unable to parse catalog archive header - {}", err)
                    })?;

                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(
                        worker,
                        "skipping unrelated catalog at pos {}",
                        current_file_number
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                let wanted = media_set.media_list().iter().any(|e| match e {
                    None => false,
                    Some(uuid) => uuid == catalog_uuid,
                });

                if !wanted {
                    task_log!(
                        worker,
                        "skip catalog because media '{}' not inventoried",
                        catalog_uuid
                    );
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(
                            worker,
                            "catalog for media '{}' already exists",
                            catalog_uuid
                        );
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                let mut file =
                    MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(
                                worker,
                                "catalog uuid mismatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(
                                worker,
                                "catalog media_set mismatch at pos {}",
                                current_file_number
                            );
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true;
                        } else {
                            task_log!(
                                worker,
                                "successfully restored related catalog {}",
                                media_uuid
                            );
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}