]>
Commit | Line | Data |
---|---|---|
ff997803 | 1 | use std::path::{Path, PathBuf}; |
b017bbc4 | 2 | use std::ffi::OsStr; |
ff997803 | 3 | use std::collections::{HashMap, HashSet, BTreeMap}; |
b017bbc4 | 4 | use std::convert::TryFrom; |
074503f2 | 5 | use std::io::{Seek, SeekFrom}; |
4c4e5c2b | 6 | use std::sync::Arc; |
b017bbc4 DM |
7 | |
8 | use anyhow::{bail, format_err, Error}; | |
b9b4b312 | 9 | use serde_json::Value; |
b017bbc4 DM |
10 | |
11 | use proxmox::{ | |
b9b4b312 DM |
12 | api::{ |
13 | api, | |
14 | RpcEnvironment, | |
15 | RpcEnvironmentType, | |
16 | Router, | |
b4975d31 | 17 | Permission, |
4c4e5c2b | 18 | schema::parse_property_string, |
b9b4b312 DM |
19 | section_config::SectionConfigData, |
20 | }, | |
b017bbc4 DM |
21 | tools::{ |
22 | Uuid, | |
23 | io::ReadExt, | |
24 | fs::{ | |
25 | replace_file, | |
26 | CreateOptions, | |
27 | }, | |
28 | }, | |
b017bbc4 DM |
29 | }; |
30 | ||
8cc3760e DM |
31 | use pbs_api_types::{ |
32 | Authid, Userid, CryptMode, | |
33 | DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, | |
34 | UPID_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, | |
35 | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, | |
36 | }; | |
b2065dc7 WB |
37 | use pbs_datastore::{task_log, task_warn, DataBlob}; |
38 | use pbs_datastore::backup_info::BackupDir; | |
39 | use pbs_datastore::dynamic_index::DynamicIndexReader; | |
40 | use pbs_datastore::fixed_index::FixedIndexReader; | |
41 | use pbs_datastore::index::IndexFile; | |
42 | use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; | |
c23192d3 WB |
43 | use pbs_datastore::task::TaskState; |
44 | ||
b017bbc4 | 45 | use crate::{ |
603aa09d | 46 | tools::ParallelHandler, |
8cc3760e | 47 | config::cached_user_info::CachedUserInfo, |
b2065dc7 | 48 | backup::DataStore, |
c9793d47 DC |
49 | server::{ |
50 | lookup_user_email, | |
51 | WorkerTask, | |
52 | }, | |
b017bbc4 DM |
53 | tape::{ |
54 | TAPE_STATUS_DIR, | |
55 | TapeRead, | |
318b3106 | 56 | BlockReadError, |
b017bbc4 | 57 | MediaId, |
074503f2 | 58 | MediaSet, |
b017bbc4 | 59 | MediaCatalog, |
ff997803 | 60 | MediaSetCatalog, |
b9b4b312 | 61 | Inventory, |
30316192 | 62 | lock_media_set, |
b017bbc4 DM |
63 | file_formats::{ |
64 | PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, | |
65 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, | |
54722aca | 66 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, |
b017bbc4 DM |
67 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, |
68 | PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, | |
69 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, | |
54722aca | 70 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, |
074503f2 | 71 | PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, |
b017bbc4 | 72 | MediaContentHeader, |
54722aca | 73 | ChunkArchiveHeader, |
f47e0357 | 74 | ChunkArchiveDecoder, |
54722aca | 75 | SnapshotArchiveHeader, |
074503f2 | 76 | CatalogArchiveHeader, |
b017bbc4 | 77 | }, |
37796ff7 DM |
78 | drive::{ |
79 | TapeDriver, | |
80 | request_and_load_media, | |
25aa55b5 | 81 | lock_tape_device, |
926d05ef | 82 | set_tape_device_state, |
25aa55b5 | 83 | }, |
b017bbc4 DM |
84 | }, |
85 | }; | |
86 | ||
ff997803 DC |
/// Scratch directory used to stage snapshot metadata (phase 1 of the
/// list-restore) before it is copied into the target datastores.
const RESTORE_TMP_DIR: &str = "/var/tmp/proxmox-backup";
88 | ||
4c4e5c2b DC |
/// Maps source datastore names (as recorded on tape) to target datastores.
pub struct DataStoreMap {
    // explicit "source=target" mappings
    map: HashMap<String, Arc<DataStore>>,
    // fallback target for any source without an explicit mapping
    default: Option<Arc<DataStore>>,
}
93 | ||
94 | impl TryFrom<String> for DataStoreMap { | |
95 | type Error = Error; | |
96 | ||
97 | fn try_from(value: String) -> Result<Self, Error> { | |
98 | let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?; | |
99 | let mut mapping: Vec<String> = value | |
100 | .as_array() | |
101 | .unwrap() | |
102 | .iter() | |
103 | .map(|v| v.as_str().unwrap().to_string()) | |
104 | .collect(); | |
105 | ||
106 | let mut map = HashMap::new(); | |
107 | let mut default = None; | |
108 | while let Some(mut store) = mapping.pop() { | |
109 | if let Some(index) = store.find('=') { | |
110 | let mut target = store.split_off(index); | |
111 | target.remove(0); // remove '=' | |
112 | let datastore = DataStore::lookup_datastore(&target)?; | |
113 | map.insert(store, datastore); | |
114 | } else if default.is_none() { | |
115 | default = Some(DataStore::lookup_datastore(&store)?); | |
116 | } else { | |
117 | bail!("multiple default stores given"); | |
118 | } | |
119 | } | |
120 | ||
121 | Ok(Self { map, default }) | |
122 | } | |
123 | } | |
124 | ||
125 | impl DataStoreMap { | |
126 | fn used_datastores<'a>(&self) -> HashSet<&str> { | |
127 | let mut set = HashSet::new(); | |
128 | for store in self.map.values() { | |
129 | set.insert(store.name()); | |
130 | } | |
131 | ||
132 | if let Some(ref store) = self.default { | |
133 | set.insert(store.name()); | |
134 | } | |
135 | ||
136 | set | |
137 | } | |
138 | ||
87bf9f56 | 139 | fn get_datastore(&self, source: &str) -> Option<Arc<DataStore>> { |
4c4e5c2b | 140 | if let Some(store) = self.map.get(source) { |
87bf9f56 | 141 | return Some(Arc::clone(store)); |
4c4e5c2b DC |
142 | } |
143 | if let Some(ref store) = self.default { | |
87bf9f56 | 144 | return Some(Arc::clone(store)); |
4c4e5c2b DC |
145 | } |
146 | ||
147 | return None; | |
148 | } | |
149 | } | |
150 | ||
c4a04b7c DC |
151 | fn check_datastore_privs( |
152 | user_info: &CachedUserInfo, | |
153 | store: &str, | |
154 | auth_id: &Authid, | |
155 | owner: &Option<Authid>, | |
156 | ) -> Result<(), Error> { | |
157 | let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
158 | if (privs & PRIV_DATASTORE_BACKUP) == 0 { | |
159 | bail!("no permissions on /datastore/{}", store); | |
160 | } | |
161 | ||
162 | if let Some(ref owner) = owner { | |
163 | let correct_owner = owner == auth_id | |
164 | || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user()); | |
165 | ||
166 | // same permission as changing ownership after syncing | |
167 | if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 { | |
168 | bail!("no permission to restore as '{}'", owner); | |
169 | } | |
170 | } | |
171 | ||
172 | Ok(()) | |
173 | } | |
174 | ||
/// Tape restore API router (POST only).
pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
b9b4b312 | 176 | |
b9b4b312 DM |
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_MAP_LIST_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            "snapshots": {
                description: "List of snapshots.",
                type: Array,
                optional: true,
                items: {
                    schema: TAPE_RESTORE_SNAPSHOT_SCHEMA,
                },
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    snapshots: Option<Vec<String>>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // "store" is a mapping list ("source=target,..."); resolve it up front
    let store_map = DataStoreMap::try_from(store)
        .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
    let used_datastores = store_map.used_datastores();
    if used_datastores.len() == 0 {
        bail!("no datastores given");
    }

    // permission checks happen here because none of the parameters are
    // URI parameters (see `access` note above)
    for store in used_datastores.iter() {
        check_datastore_privs(&user_info, &store, &auth_id, &owner)?;
    }

    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    // prevent concurrent modification of the media set while restoring
    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // the worker task id lists all affected target datastores
    let taskid = used_datastores
        .iter()
        .map(|s| s.to_string())
        .collect::<Vec<String>>()
        .join(", ");

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(taskid),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            // restored snapshots belong to the explicit owner, or the caller
            let restore_owner = owner.as_ref().unwrap_or(&auth_id);

            // notification target: explicit user's email, else the caller's
            let email = notify_user
                .as_ref()
                .and_then(|userid| lookup_user_email(userid))
                .or_else(|| lookup_user_email(&auth_id.clone().into()));

            task_log!(worker, "Mediaset '{}'", media_set);
            task_log!(worker, "Pool: {}", pool);

            // with an explicit snapshot list only those snapshots are
            // restored; otherwise the full media set is restored
            let res = if let Some(snapshots) = snapshots {
                restore_list_worker(
                    worker.clone(),
                    snapshots,
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            } else {
                restore_full_worker(
                    worker.clone(),
                    inventory,
                    media_set_uuid,
                    drive_config,
                    &drive,
                    store_map,
                    restore_owner,
                    email,
                )
            };

            if res.is_ok() {
                task_log!(worker, "Restore mediaset '{}' done", media_set);
            }

            // best effort: clear the drive state even if the restore failed
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(
                    worker,
                    "could not unset drive state for {}: {}",
                    drive,
                    err
                );
            }

            res
        }
    )?;

    Ok(upid_str.into())
}
339 | ||
/// Restore a complete media set: request each member tape in sequence and
/// restore its entire content into the mapped datastores.
///
/// Fails up front if any member tape is missing from the inventory.
fn restore_full_worker(
    worker: Arc<WorkerTask>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    let members = inventory.compute_media_set_members(&media_set_uuid)?;

    let media_list = members.media_list();

    let mut media_id_list = Vec::new();

    let mut encryption_key_fingerprint = None;

    for (seq_nr, media_uuid) in media_list.iter().enumerate() {
        match media_uuid {
            None => {
                // a full restore needs every member tape of the set
                bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
            }
            Some(media_uuid) => {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                if let Some(ref set) = media_id.media_set_label { // always true here
                    // remember the first encryption fingerprint for the log line below
                    if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                        encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                    }
                }
                media_id_list.push(media_id);
            }
        }
    }

    if let Some(fingerprint) = encryption_key_fingerprint {
        task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
    }

    task_log!(
        worker,
        "Datastore(s): {}",
        store_map
            .used_datastores()
            .into_iter()
            .map(String::from)
            .collect::<Vec<String>>()
            .join(", "),
    );

    task_log!(worker, "Drive: {}", drive_name);
    task_log!(
        worker,
        "Required media list: {}",
        media_id_list.iter()
            .map(|media_id| media_id.label.label_text.as_str())
            .collect::<Vec<&str>>()
            .join(";")
    );

    let mut datastore_locks = Vec::new();
    for store_name in store_map.used_datastores() {
        // explicit create shared lock to prevent GC on newly created chunks
        if let Some(store) = store_map.get_datastore(store_name) {
            let shared_store_lock = store.try_shared_chunk_store_lock()?;
            datastore_locks.push(shared_store_lock);
        }
    }

    // per-datastore set of chunk digests already seen during this restore
    let mut checked_chunks_map = HashMap::new();

    for media_id in media_id_list.iter() {
        request_and_restore_media(
            worker.clone(),
            media_id,
            &drive_config,
            drive_name,
            &store_map,
            &mut checked_chunks_map,
            restore_owner,
            &email,
        )?;
    }

    Ok(())
}
426 | ||
ff997803 DC |
/// Restore a selected list of snapshots ("store:snapshot" entries) from a
/// media set, in three phases:
///  1. restore snapshot metadata (indexes/blobs) into a temp dir
///  2. restore only the chunks missing from the target datastores
///  3. copy the staged snapshot files to their final location
///
/// The temp dir is removed afterwards regardless of success; partially
/// restored snapshots are NOT cleaned up on error.
fn restore_list_worker(
    worker: Arc<WorkerTask>,
    snapshots: Vec<String>,
    inventory: Inventory,
    media_set_uuid: Uuid,
    drive_config: SectionConfigData,
    drive_name: &str,
    store_map: DataStoreMap,
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
    std::fs::create_dir_all(&base_path)?;

    let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?;

    // shared chunk-store locks, held until the end to keep GC away
    let mut datastore_locks = Vec::new();
    // media uuid => file numbers to read from that tape
    let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new();
    // "store:snapshot" => lock on the newly created target snapshot dir
    let mut snapshot_locks = HashMap::new();

    let res = proxmox::try_block!({
        // assemble snapshot files/locks
        for store_snapshot in snapshots.iter() {
            // entries have the form "<source-datastore>:<snapshot-path>"
            let mut split = store_snapshot.splitn(2, ':');
            let source_datastore = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
            let snapshot = split
                .next()
                .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
            let backup_dir: BackupDir = snapshot.parse()?;

            let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;

            let (owner, _group_lock) =
                datastore.create_locked_backup_group(backup_dir.group(), &restore_owner)?;
            if restore_owner != &owner {
                // only the owner is allowed to create additional snapshots
                bail!(
                    "restore '{}' failed - owner check failed ({} != {})",
                    snapshot,
                    restore_owner,
                    owner
                );
            }

            // find the tape and file position holding this snapshot
            let (media_id, file_num) = if let Some((media_uuid, file_num)) =
                catalog.lookup_snapshot(&source_datastore, &snapshot)
            {
                let media_id = inventory.lookup_media(media_uuid).unwrap();
                (media_id, file_num)
            } else {
                task_warn!(
                    worker,
                    "did not find snapshot '{}' in media set {}",
                    snapshot,
                    media_set_uuid
                );
                continue;
            };

            let (_rel_path, is_new, snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;

            if !is_new {
                task_log!(
                    worker,
                    "found snapshot {} on target datastore, skipping...",
                    snapshot
                );
                continue;
            }

            snapshot_locks.insert(store_snapshot.to_string(), snap_lock);

            // shared lock prevents GC from removing newly restored chunks
            let shared_store_lock = datastore.try_shared_chunk_store_lock()?;
            datastore_locks.push(shared_store_lock);

            let file_list = snapshot_file_hash
                .entry(media_id.label.uuid.clone())
                .or_insert_with(Vec::new);
            file_list.push(file_num);

            task_log!(
                worker,
                "found snapshot {} on {}: file {}",
                snapshot,
                media_id.label.label_text,
                file_num
            );
        }

        if snapshot_file_hash.is_empty() {
            task_log!(worker, "nothing to restore, skipping remaining phases...");
            return Ok(());
        }

        task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
        let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
        for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (drive, info) = request_and_load_media(
                &worker,
                &drive_config,
                &drive_name,
                &media_id.label,
                &email,
            )?;
            // read files in tape order to avoid seeking back and forth
            file_list.sort_unstable();
            restore_snapshots_to_tmpdir(
                worker.clone(),
                &base_path,
                file_list,
                drive,
                &info,
                &media_set_uuid,
                &mut datastore_chunk_map,
            ).map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?;
        }

        // sorted media_uuid => (sorted file_num => (set of digests)))
        let mut media_file_chunk_map: BTreeMap<Uuid, BTreeMap<u64, HashSet<[u8; 32]>>> = BTreeMap::new();

        for (source_datastore, chunks) in datastore_chunk_map.into_iter() {
            let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                format_err!(
                    "could not find mapping for source datastore: {}",
                    source_datastore
                )
            })?;
            for digest in chunks.into_iter() {
                // we only want to restore chunks that we do not have yet
                if !datastore.cond_touch_chunk(&digest, false)? {
                    if let Some((uuid, nr)) = catalog.lookup_chunk(&source_datastore, &digest) {
                        let file = media_file_chunk_map.entry(uuid.clone()).or_insert_with(BTreeMap::new);
                        let chunks = file.entry(nr).or_insert_with(HashSet::new);
                        chunks.insert(digest);
                    }
                }
            }
        }

        // we do not need it anymore, saves memory
        drop(catalog);

        if !media_file_chunk_map.is_empty() {
            task_log!(worker, "Phase 2: restore chunks to datastores");
        } else {
            task_log!(worker, "all chunks exist already, skipping phase 2...");
        }

        for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
            let (mut drive, _info) = request_and_load_media(
                &worker,
                &drive_config,
                &drive_name,
                &media_id.label,
                &email,
            )?;
            restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?;
        }

        task_log!(
            worker,
            "Phase 3: copy snapshots from temp dir to datastores"
        );
        for (store_snapshot, _lock) in snapshot_locks.into_iter() {
            proxmox::try_block!({
                // re-split the entry (same format as in phase 0 above)
                let mut split = store_snapshot.splitn(2, ':');
                let source_datastore = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?;
                let snapshot = split
                    .next()
                    .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
                let backup_dir: BackupDir = snapshot.parse()?;

                let datastore = store_map
                    .get_datastore(&source_datastore)
                    .ok_or_else(|| format_err!("unexpected source datastore: {}", source_datastore))?;

                let mut tmp_path = base_path.clone();
                tmp_path.push(&source_datastore);
                tmp_path.push(snapshot);

                let path = datastore.snapshot_path(&backup_dir);

                // copy every staged file into the final snapshot dir
                for entry in std::fs::read_dir(tmp_path)? {
                    let entry = entry?;
                    let mut new_path = path.clone();
                    new_path.push(entry.file_name());
                    std::fs::copy(entry.path(), new_path)?;
                }
                task_log!(worker, "Restore snapshot '{}' done", snapshot);
                Ok(())
            }).map_err(|err: Error| format_err!("could not copy {}: {}", store_snapshot, err))?;
        }
        Ok(())
    });

    if res.is_err() {
        task_warn!(worker, "Error during restore, partially restored snapshots will NOT be cleaned up");
    }

    // always try to remove the temp dir, success or not
    match std::fs::remove_dir_all(&base_path) {
        Ok(()) => {}
        Err(err) => task_warn!(worker, "error cleaning up: {}", err),
    }

    res
}
643 | ||
644 | fn get_media_set_catalog( | |
645 | inventory: &Inventory, | |
646 | media_set_uuid: &Uuid, | |
647 | ) -> Result<MediaSetCatalog, Error> { | |
648 | let status_path = Path::new(TAPE_STATUS_DIR); | |
649 | ||
650 | let members = inventory.compute_media_set_members(media_set_uuid)?; | |
651 | let media_list = members.media_list(); | |
652 | let mut catalog = MediaSetCatalog::new(); | |
653 | ||
654 | for (seq_nr, media_uuid) in media_list.iter().enumerate() { | |
655 | match media_uuid { | |
656 | None => { | |
657 | bail!( | |
658 | "media set {} is incomplete (missing member {}).", | |
659 | media_set_uuid, | |
660 | seq_nr | |
661 | ); | |
662 | } | |
663 | Some(media_uuid) => { | |
664 | let media_id = inventory.lookup_media(media_uuid).unwrap(); | |
665 | let media_catalog = MediaCatalog::open(status_path, &media_id, false, false)?; | |
666 | catalog.append_catalog(media_catalog)?; | |
667 | } | |
668 | } | |
669 | } | |
670 | ||
671 | Ok(catalog) | |
672 | } | |
673 | ||
674 | fn restore_snapshots_to_tmpdir( | |
675 | worker: Arc<WorkerTask>, | |
676 | path: &PathBuf, | |
677 | file_list: &[u64], | |
678 | mut drive: Box<dyn TapeDriver>, | |
679 | media_id: &MediaId, | |
680 | media_set_uuid: &Uuid, | |
681 | chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>, | |
682 | ) -> Result<(), Error> { | |
683 | match media_id.media_set_label { | |
684 | None => { | |
685 | bail!( | |
686 | "missing media set label on media {} ({})", | |
687 | media_id.label.label_text, | |
688 | media_id.label.uuid | |
689 | ); | |
690 | } | |
691 | Some(ref set) => { | |
692 | if set.uuid != *media_set_uuid { | |
693 | bail!( | |
694 | "wrong media set label on media {} ({} != {})", | |
695 | media_id.label.label_text, | |
696 | media_id.label.uuid, | |
697 | media_set_uuid | |
698 | ); | |
699 | } | |
700 | let encrypt_fingerprint = set.encryption_key_fingerprint.clone().map(|fp| { | |
701 | task_log!(worker, "Encryption key fingerprint: {}", fp); | |
702 | (fp, set.uuid.clone()) | |
703 | }); | |
704 | ||
705 | drive.set_encryption(encrypt_fingerprint)?; | |
706 | } | |
707 | } | |
708 | ||
709 | for file_num in file_list { | |
710 | let current_file_number = drive.current_file_number()?; | |
711 | if current_file_number != *file_num { | |
712 | task_log!(worker, "was at file {}, moving to {}", current_file_number, file_num); | |
713 | drive.move_to_file(*file_num)?; | |
714 | let current_file_number = drive.current_file_number()?; | |
715 | task_log!(worker, "now at file {}", current_file_number); | |
716 | } | |
717 | let mut reader = drive.read_next_file()?; | |
718 | ||
719 | let header: MediaContentHeader = unsafe { reader.read_le_value()? }; | |
720 | if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 { | |
721 | bail!("missing MediaContentHeader"); | |
722 | } | |
723 | ||
724 | match header.content_magic { | |
725 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => { | |
726 | let header_data = reader.read_exact_allocated(header.size as usize)?; | |
727 | ||
728 | let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data) | |
729 | .map_err(|err| { | |
730 | format_err!("unable to parse snapshot archive header - {}", err) | |
731 | })?; | |
732 | ||
733 | let source_datastore = archive_header.store; | |
734 | let snapshot = archive_header.snapshot; | |
735 | ||
736 | task_log!( | |
737 | worker, | |
738 | "File {}: snapshot archive {}:{}", | |
739 | file_num, | |
740 | source_datastore, | |
741 | snapshot | |
742 | ); | |
743 | ||
744 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
745 | ||
746 | let mut tmp_path = path.clone(); | |
747 | tmp_path.push(&source_datastore); | |
748 | tmp_path.push(snapshot); | |
749 | std::fs::create_dir_all(&tmp_path)?; | |
750 | ||
751 | let chunks = chunks_list | |
752 | .entry(source_datastore) | |
753 | .or_insert_with(HashSet::new); | |
754 | let manifest = try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?; | |
755 | for item in manifest.files() { | |
756 | let mut archive_path = tmp_path.to_owned(); | |
757 | archive_path.push(&item.filename); | |
758 | ||
759 | let index: Box<dyn IndexFile> = match archive_type(&item.filename)? { | |
760 | ArchiveType::DynamicIndex => { | |
761 | Box::new(DynamicIndexReader::open(&archive_path)?) | |
762 | } | |
763 | ArchiveType::FixedIndex => { | |
764 | Box::new(FixedIndexReader::open(&archive_path)?) | |
765 | } | |
766 | ArchiveType::Blob => continue, | |
767 | }; | |
768 | for i in 0..index.index_count() { | |
769 | if let Some(digest) = index.index_digest(i) { | |
770 | chunks.insert(*digest); | |
771 | } | |
772 | } | |
773 | } | |
774 | } | |
775 | other => bail!("unexpected file type: {:?}", other), | |
776 | } | |
777 | } | |
778 | ||
779 | Ok(()) | |
780 | } | |
781 | ||
/// Restore the chunk archives listed in `file_chunk_map` from `drive`,
/// inserting only the requested digests into the mapped datastores.
///
/// `file_chunk_map` maps tape file numbers (in ascending order, since it
/// is a `BTreeMap`) to the set of chunk digests wanted from that archive.
fn restore_file_chunk_map(
    worker: Arc<WorkerTask>,
    drive: &mut Box<dyn TapeDriver>,
    store_map: &DataStoreMap,
    file_chunk_map: &mut BTreeMap<u64, HashSet<[u8; 32]>>,
) -> Result<(), Error> {
    for (nr, chunk_map) in file_chunk_map.iter_mut() {
        // seek to the archive's file position if necessary
        let current_file_number = drive.current_file_number()?;
        if current_file_number != *nr {
            task_log!(worker, "was at file {}, moving to {}", current_file_number, nr);
            drive.move_to_file(*nr)?;
            let current_file_number = drive.current_file_number()?;
            task_log!(worker, "now at file {}", current_file_number);
        }
        let mut reader = drive.read_next_file()?;
        let header: MediaContentHeader = unsafe { reader.read_le_value()? };
        if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("missing MediaContentHeader");
        }

        match header.content_magic {
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

                let source_datastore = archive_header.store;

                task_log!(
                    worker,
                    "File {}: chunk archive for datastore '{}'",
                    nr,
                    source_datastore
                );

                let datastore = store_map.get_datastore(&source_datastore).ok_or_else(|| {
                    format_err!("unexpected chunk archive for store: {}", source_datastore)
                })?;

                let count = restore_partial_chunk_archive(worker.clone(), reader, datastore.clone(), chunk_map)?;
                task_log!(worker, "restored {} chunks", count);
            }
            _ => bail!("unexpected content magic {:?}", header.content_magic),
        }
    }

    Ok(())
}
831 | ||
/// Read a chunk archive from `reader` and insert exactly the chunks listed
/// in `chunk_list` into `datastore`; returns the number of chunks handed
/// off for restore.
///
/// Verification and insertion run on a small worker-thread pool while this
/// thread keeps decoding from tape; digests found in `chunk_list` are
/// removed from it, and decoding stops early once the set is empty.
fn restore_partial_chunk_archive<'a>(
    worker: Arc<WorkerTask>,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Arc<DataStore>,
    chunk_list: &mut HashSet<[u8; 32]>,
) -> Result<usize, Error> {
    let mut decoder = ChunkArchiveDecoder::new(reader);

    let mut count = 0;

    let start_time = std::time::SystemTime::now();
    // bytes written, shared with the writer pool for the throughput log below
    let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let bytes2 = bytes.clone();

    let writer_pool = ParallelHandler::new(
        "tape restore chunk writer",
        4,
        move |(chunk, digest): (DataBlob, [u8; 32])| {
            // skip chunks that appeared in the datastore in the meantime
            if !datastore.cond_touch_chunk(&digest, false)? {
                bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                chunk.verify_crc()?;
                if chunk.crypt_mode()? == CryptMode::None {
                    chunk.decode(None, Some(&digest))?; // verify digest
                }

                datastore.insert_chunk(&chunk, &digest)?;
            }
            Ok(())
        },
    );

    let verify_and_write_channel = writer_pool.channel();

    loop {
        let (digest, blob) = match decoder.next_chunk()? {
            Some((digest, blob)) => (digest, blob),
            None => break,
        };

        worker.check_abort()?;

        // hand only the wanted chunks to the writer pool
        if chunk_list.remove(&digest) {
            verify_and_write_channel.send((blob, digest.clone()))?;
            count += 1;
        }

        // stop reading early once every wanted chunk was found
        if chunk_list.is_empty() {
            break;
        }
    }

    // closing the channel lets the pool drain and finish
    drop(verify_and_write_channel);

    writer_pool.complete()?;

    let elapsed = start_time.elapsed()?.as_secs_f64();

    let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);

    task_log!(
        worker,
        "restored {} bytes ({:.2} MB/s)",
        bytes,
        (bytes as f64) / (1_000_000.0 * elapsed)
    );

    Ok(count)
}
900 | ||
901 | ||
b017bbc4 DM |
/// Request and restore complete media without using existing catalog (create catalog instead)
pub fn request_and_restore_media(
    worker: Arc<WorkerTask>,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    store_map: &DataStoreMap,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
    restore_owner: &Authid,
    email: &Option<String>,
) -> Result<(), Error> {
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    // ask the operator (or changer) to load the expected tape
    let (mut drive, info) = request_and_load_media(&worker, &drive_config, &drive_name, &media_id.label, email)?;

    // sanity check: the loaded tape must belong to the expected media set
    match info.media_set_label {
        None => {
            bail!("missing media set label on media {} ({})",
                  media_id.label.label_text, media_id.label.uuid);
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!("wrong media set label on media {} ({} != {})",
                      media_id.label.label_text, media_id.label.uuid,
                      media_set_uuid);
            }
            // enable decryption if the media set was written encrypted
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(
        worker,
        &mut drive,
        &info,
        Some((&store_map, restore_owner)),
        checked_chunks_map,
        false, // verbose
    )
}
947 | ||
948 | /// Restore complete media content and catalog | |
949 | /// | |
950 | /// Only create the catalog if target is None. | |
951 | pub fn restore_media( | |
49f9aca6 | 952 | worker: Arc<WorkerTask>, |
b017bbc4 DM |
953 | drive: &mut Box<dyn TapeDriver>, |
954 | media_id: &MediaId, | |
4c4e5c2b | 955 | target: Option<(&DataStoreMap, &Authid)>, |
28570d19 | 956 | checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>, |
b017bbc4 DM |
957 | verbose: bool, |
958 | ) -> Result<(), Error> { | |
959 | ||
960 | let status_path = Path::new(TAPE_STATUS_DIR); | |
961 | let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?; | |
962 | ||
963 | loop { | |
964 | let current_file_number = drive.current_file_number()?; | |
318b3106 DM |
965 | let reader = match drive.read_next_file() { |
966 | Err(BlockReadError::EndOfFile) => { | |
967 | task_log!(worker, "skip unexpected filemark at pos {}", current_file_number); | |
968 | continue; | |
969 | } | |
970 | Err(BlockReadError::EndOfStream) => { | |
8de9a991 | 971 | task_log!(worker, "detected EOT after {} files", current_file_number); |
b017bbc4 DM |
972 | break; |
973 | } | |
318b3106 DM |
974 | Err(BlockReadError::Error(err)) => { |
975 | return Err(err.into()); | |
976 | } | |
977 | Ok(reader) => reader, | |
b017bbc4 DM |
978 | }; |
979 | ||
49f9aca6 | 980 | restore_archive(worker.clone(), reader, current_file_number, target, &mut catalog, checked_chunks_map, verbose)?; |
b017bbc4 DM |
981 | } |
982 | ||
76e85650 DC |
983 | catalog.commit()?; |
984 | ||
b017bbc4 DM |
985 | MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?; |
986 | ||
987 | Ok(()) | |
988 | } | |
989 | ||
/// Restore (or just catalog) a single content file read from tape.
///
/// Reads the `MediaContentHeader` from `reader` and dispatches on its
/// content magic: snapshot archives (v1.1), chunk archives (v1.1) and
/// catalog archives are handled; v1.0 archives and label magics bail.
///
/// When `target` is `Some((store_map, authid))`, data is restored into
/// the mapped datastores; when `target` is `None`, the data is only
/// scanned and registered into `catalog` (catalog-rebuild mode).
fn restore_archive<'a>(
    worker: Arc<WorkerTask>,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    checked_chunks_map: &mut HashMap<String, HashSet<[u8;32]>>,
    verbose: bool,
) -> Result<(), Error> {
    // every content file starts with a fixed MediaContentHeader
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels are handled before restore starts - finding one here is an error
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            // the JSON header tells us which datastore/snapshot this archive contains
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    // take the group lock and check ownership before writing anything
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_dir)?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    // only restore if the snapshot directory did not exist yet;
                    // an existing snapshot is left untouched (falls through to skip below)
                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker.clone(), reader, &path) {
                            Err(err) => {
                                // remove the partially restored snapshot dir on failure
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                // truncated/aborted stream - drop the partial snapshot, not fatal
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        return Ok(());
                    }
                } else {
                    // restore requested, but this source datastore is not mapped
                    task_log!(worker, "skipping...");
                }
            }

            // catalog-only mode, unmapped store, or snapshot already exists:
            // consume the data and register the snapshot if the stream is complete
            reader.skip_data()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            // restore (store mapped) or scan (catalog-only mode, target == None)
            if datastore.is_some() || target.is_none() {
                // per-datastore set of digests already verified in this run;
                // "_unused_" bucket is used in scan-only mode (no datastore)
                let checked_chunks = checked_chunks_map
                    .entry(datastore.as_ref().map(|d| d.name()).unwrap_or("_unused_").to_string())
                    .or_insert(HashSet::new());

                let chunks = if let Some(datastore) = datastore {
                    restore_chunk_archive(worker.clone(), reader, datastore, checked_chunks, verbose)?
                } else {
                    scan_chunk_archive(worker.clone(), reader, verbose)?
                };

                // chunks == None means aborted stream - do not register in catalog
                if let Some(chunks) = chunks {
                    catalog.register_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                        &chunks[..],
                    )?;
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                // restore requested, but this source datastore is not mapped
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            // catalogs are restored separately (see fast_catalog_restore) - skip here
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    Ok(())
}
1139 | ||
87bf9f56 DM |
1140 | // Read chunk archive without restoring data - just record contained chunks |
1141 | fn scan_chunk_archive<'a>( | |
49f9aca6 | 1142 | worker: Arc<WorkerTask>, |
87bf9f56 DM |
1143 | reader: Box<dyn 'a + TapeRead>, |
1144 | verbose: bool, | |
1145 | ) -> Result<Option<Vec<[u8;32]>>, Error> { | |
1146 | ||
1147 | let mut chunks = Vec::new(); | |
1148 | ||
1149 | let mut decoder = ChunkArchiveDecoder::new(reader); | |
1150 | ||
1151 | loop { | |
1152 | let digest = match decoder.next_chunk() { | |
1153 | Ok(Some((digest, _blob))) => digest, | |
1154 | Ok(None) => break, | |
1155 | Err(err) => { | |
1156 | let reader = decoder.reader(); | |
1157 | ||
1158 | // check if this stream is marked incomplete | |
1159 | if let Ok(true) = reader.is_incomplete() { | |
1160 | return Ok(Some(chunks)); | |
1161 | } | |
1162 | ||
1163 | // check if this is an aborted stream without end marker | |
1164 | if let Ok(false) = reader.has_end_marker() { | |
49f9aca6 | 1165 | task_log!(worker, "missing stream end marker"); |
87bf9f56 DM |
1166 | return Ok(None); |
1167 | } | |
1168 | ||
1169 | // else the archive is corrupt | |
1170 | return Err(err); | |
1171 | } | |
1172 | }; | |
1173 | ||
1174 | worker.check_abort()?; | |
1175 | ||
1176 | if verbose { | |
1177 | task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest)); | |
1178 | } | |
1179 | ||
1180 | chunks.push(digest); | |
1181 | } | |
1182 | ||
1183 | Ok(Some(chunks)) | |
1184 | } | |
1185 | ||
b017bbc4 | 1186 | fn restore_chunk_archive<'a>( |
49f9aca6 | 1187 | worker: Arc<WorkerTask>, |
b017bbc4 | 1188 | reader: Box<dyn 'a + TapeRead>, |
87bf9f56 | 1189 | datastore: Arc<DataStore>, |
28570d19 | 1190 | checked_chunks: &mut HashSet<[u8;32]>, |
b017bbc4 DM |
1191 | verbose: bool, |
1192 | ) -> Result<Option<Vec<[u8;32]>>, Error> { | |
1193 | ||
28570d19 | 1194 | let mut chunks = Vec::new(); |
b017bbc4 DM |
1195 | |
1196 | let mut decoder = ChunkArchiveDecoder::new(reader); | |
1197 | ||
5400fe17 | 1198 | let datastore2 = datastore.clone(); |
49f9aca6 DC |
1199 | let start_time = std::time::SystemTime::now(); |
1200 | let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0)); | |
1201 | let bytes2 = bytes.clone(); | |
1202 | ||
1203 | let worker2 = worker.clone(); | |
1204 | ||
5400fe17 DM |
1205 | let writer_pool = ParallelHandler::new( |
1206 | "tape restore chunk writer", | |
1207 | 4, | |
1208 | move |(chunk, digest): (DataBlob, [u8; 32])| { | |
49f9aca6 DC |
1209 | let chunk_exists = datastore2.cond_touch_chunk(&digest, false)?; |
1210 | if !chunk_exists { | |
1211 | if verbose { | |
1212 | task_log!(worker2, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)); | |
1213 | } | |
1214 | bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst); | |
1215 | // println!("verify and write {}", proxmox::tools::digest_to_hex(&digest)); | |
1216 | chunk.verify_crc()?; | |
1217 | if chunk.crypt_mode()? == CryptMode::None { | |
1218 | chunk.decode(None, Some(&digest))?; // verify digest | |
1219 | } | |
5400fe17 | 1220 | |
49f9aca6 DC |
1221 | datastore2.insert_chunk(&chunk, &digest)?; |
1222 | } else if verbose { | |
1223 | task_log!(worker2, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)); | |
1224 | } | |
5400fe17 DM |
1225 | Ok(()) |
1226 | }, | |
1227 | ); | |
1228 | ||
1229 | let verify_and_write_channel = writer_pool.channel(); | |
1230 | ||
88aa3076 | 1231 | |
1dd1c9eb DC |
1232 | loop { |
1233 | let (digest, blob) = match decoder.next_chunk() { | |
1234 | Ok(Some((digest, blob))) => (digest, blob), | |
1235 | Ok(None) => break, | |
1236 | Err(err) => { | |
1237 | let reader = decoder.reader(); | |
1238 | ||
1239 | // check if this stream is marked incomplete | |
1240 | if let Ok(true) = reader.is_incomplete() { | |
1241 | return Ok(Some(chunks)); | |
1242 | } | |
0d2133db | 1243 | |
1dd1c9eb DC |
1244 | // check if this is an aborted stream without end marker |
1245 | if let Ok(false) = reader.has_end_marker() { | |
49f9aca6 | 1246 | task_log!(worker, "missing stream end marker"); |
1dd1c9eb | 1247 | return Ok(None); |
b017bbc4 | 1248 | } |
1dd1c9eb DC |
1249 | |
1250 | // else the archive is corrupt | |
1251 | return Err(err); | |
b017bbc4 | 1252 | } |
1dd1c9eb | 1253 | }; |
b017bbc4 | 1254 | |
1dd1c9eb | 1255 | worker.check_abort()?; |
b017bbc4 | 1256 | |
49f9aca6 | 1257 | if !checked_chunks.contains(&digest) { |
5400fe17 | 1258 | verify_and_write_channel.send((blob, digest.clone()))?; |
49f9aca6 | 1259 | checked_chunks.insert(digest.clone()); |
b017bbc4 | 1260 | } |
1dd1c9eb | 1261 | chunks.push(digest); |
b017bbc4 | 1262 | } |
1dd1c9eb | 1263 | |
5400fe17 DM |
1264 | drop(verify_and_write_channel); |
1265 | ||
1266 | writer_pool.complete()?; | |
1267 | ||
88aa3076 DM |
1268 | let elapsed = start_time.elapsed()?.as_secs_f64(); |
1269 | ||
49f9aca6 DC |
1270 | let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst); |
1271 | ||
88aa3076 DM |
1272 | task_log!( |
1273 | worker, | |
1274 | "restored {} bytes ({:.2} MB/s)", | |
1275 | bytes, | |
1276 | (bytes as f64) / (1_000_000.0 * elapsed) | |
1277 | ); | |
1278 | ||
1dd1c9eb | 1279 | Ok(Some(chunks)) |
b017bbc4 DM |
1280 | } |
1281 | ||
1282 | fn restore_snapshot_archive<'a>( | |
49f9aca6 | 1283 | worker: Arc<WorkerTask>, |
b017bbc4 DM |
1284 | reader: Box<dyn 'a + TapeRead>, |
1285 | snapshot_path: &Path, | |
1286 | ) -> Result<bool, Error> { | |
1287 | ||
1288 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
b7b9a574 DC |
1289 | match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) { |
1290 | Ok(_) => Ok(true), | |
b017bbc4 DM |
1291 | Err(err) => { |
1292 | let reader = decoder.input(); | |
1293 | ||
1294 | // check if this stream is marked incomplete | |
1295 | if let Ok(true) = reader.is_incomplete() { | |
1296 | return Ok(false); | |
1297 | } | |
1298 | ||
1299 | // check if this is an aborted stream without end marker | |
1300 | if let Ok(false) = reader.has_end_marker() { | |
1301 | return Ok(false); | |
1302 | } | |
1303 | ||
1304 | // else the archive is corrupt | |
38556bf6 | 1305 | Err(err) |
b017bbc4 DM |
1306 | } |
1307 | } | |
1308 | } | |
1309 | ||
1310 | fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>( | |
49f9aca6 | 1311 | worker: Arc<WorkerTask>, |
b017bbc4 DM |
1312 | decoder: &mut pxar::decoder::sync::Decoder<R>, |
1313 | snapshot_path: &Path, | |
82a4bb5e | 1314 | ) -> Result<BackupManifest, Error> { |
b017bbc4 DM |
1315 | |
1316 | let _root = match decoder.next() { | |
1317 | None => bail!("missing root entry"), | |
1318 | Some(root) => { | |
1319 | let root = root?; | |
1320 | match root.kind() { | |
1321 | pxar::EntryKind::Directory => { /* Ok */ } | |
1322 | _ => bail!("wrong root entry type"), | |
1323 | } | |
1324 | root | |
1325 | } | |
1326 | }; | |
1327 | ||
1328 | let root_path = Path::new("/"); | |
1329 | let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME); | |
1330 | ||
1331 | let mut manifest = None; | |
1332 | ||
1333 | loop { | |
a80d72f9 DM |
1334 | worker.check_abort()?; |
1335 | ||
b017bbc4 DM |
1336 | let entry = match decoder.next() { |
1337 | None => break, | |
1338 | Some(entry) => entry?, | |
1339 | }; | |
1340 | let entry_path = entry.path(); | |
1341 | ||
1342 | match entry.kind() { | |
1343 | pxar::EntryKind::File { .. } => { /* Ok */ } | |
1344 | _ => bail!("wrong entry type for {:?}", entry_path), | |
1345 | } | |
1346 | match entry_path.parent() { | |
1347 | None => bail!("wrong parent for {:?}", entry_path), | |
1348 | Some(p) => { | |
1349 | if p != root_path { | |
1350 | bail!("wrong parent for {:?}", entry_path); | |
1351 | } | |
1352 | } | |
1353 | } | |
1354 | ||
1355 | let filename = entry.file_name(); | |
1356 | let mut contents = match decoder.contents() { | |
1357 | None => bail!("missing file content"), | |
1358 | Some(contents) => contents, | |
1359 | }; | |
1360 | ||
1361 | let mut archive_path = snapshot_path.to_owned(); | |
1362 | archive_path.push(&filename); | |
1363 | ||
1364 | let mut tmp_path = archive_path.clone(); | |
1365 | tmp_path.set_extension("tmp"); | |
1366 | ||
1367 | if filename == manifest_file_name { | |
1368 | ||
1369 | let blob = DataBlob::load_from_reader(&mut contents)?; | |
603aa09d DM |
1370 | let mut old_manifest = BackupManifest::try_from(blob)?; |
1371 | ||
1372 | // Remove verify_state to indicate that this snapshot is not verified | |
1373 | old_manifest.unprotected | |
1374 | .as_object_mut() | |
1375 | .map(|m| m.remove("verify_state")); | |
1376 | ||
1377 | let old_manifest = serde_json::to_string_pretty(&old_manifest)?; | |
1378 | let blob = DataBlob::encode(old_manifest.as_bytes(), None, true)?; | |
1379 | ||
b017bbc4 DM |
1380 | let options = CreateOptions::new(); |
1381 | replace_file(&tmp_path, blob.raw_data(), options)?; | |
1382 | ||
1383 | manifest = Some(BackupManifest::try_from(blob)?); | |
1384 | } else { | |
1385 | let mut tmpfile = std::fs::OpenOptions::new() | |
1386 | .write(true) | |
1387 | .create(true) | |
1388 | .read(true) | |
1389 | .open(&tmp_path) | |
1390 | .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?; | |
1391 | ||
1392 | std::io::copy(&mut contents, &mut tmpfile)?; | |
1393 | ||
1394 | if let Err(err) = std::fs::rename(&tmp_path, &archive_path) { | |
1395 | bail!("Atomic rename file {:?} failed - {}", archive_path, err); | |
1396 | } | |
1397 | } | |
1398 | } | |
1399 | ||
82a4bb5e DC |
1400 | let manifest = match manifest { |
1401 | None => bail!("missing manifest"), | |
1402 | Some(manifest) => manifest, | |
1403 | }; | |
b017bbc4 | 1404 | |
603aa09d DM |
1405 | // Do not verify anything here, because this would be to slow (causes tape stops). |
1406 | ||
b017bbc4 DM |
1407 | // commit manifest |
1408 | let mut manifest_path = snapshot_path.to_owned(); | |
1409 | manifest_path.push(MANIFEST_BLOB_NAME); | |
1410 | let mut tmp_manifest_path = manifest_path.clone(); | |
1411 | tmp_manifest_path.set_extension("tmp"); | |
1412 | ||
1413 | if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) { | |
1414 | bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err); | |
1415 | } | |
1416 | ||
82a4bb5e | 1417 | Ok(manifest) |
b017bbc4 | 1418 | } |
074503f2 DM |
1419 | |
/// Try to restore media catalogs (from catalog_archives)
///
/// Scans the media (starting at file position 2, then once more after
/// moving to EOT) for catalog archives belonging to `media_set` and
/// restores them into the tape status directory. The catalog for the
/// currently loaded media (`uuid`) is always overwritten; catalogs for
/// other media of the set are only restored if not already present.
///
/// Returns `Ok(true)` if the catalog for the current media (`uuid`)
/// was found and restored.
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    // the caller must have positioned the drive right after the labels
    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    // catalogs are usually written near EOT; after scanning the current
    // position once, we jump to the last file and scan again (only once)
    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        { // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            // only catalog archives are of interest here; any other
            // content type below falls through to the EOT jump
            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

                // ignore catalogs from other media sets
                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                // only restore catalogs for media that are part of the inventory
                let wanted = media_set
                    .media_list()
                    .iter()
                    .find(|e| {
                        match e {
                            None => false,
                            Some(uuid) => uuid == catalog_uuid,
                        }
                    })
                    .is_some();

                if !wanted {
                    task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                // copy the raw catalog data into a temporary database file
                let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                // sanity-check the copied catalog before making it permanent
                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(worker, "successfully restored related catalog {}", media_uuid);
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        // nothing after the current position - no catalog to find
        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}