]>
Commit | Line | Data |
---|---|---|
b017bbc4 DM |
1 | use std::path::Path; |
2 | use std::ffi::OsStr; | |
4c4e5c2b | 3 | use std::collections::{HashMap, HashSet}; |
b017bbc4 | 4 | use std::convert::TryFrom; |
074503f2 | 5 | use std::io::{Seek, SeekFrom}; |
4c4e5c2b | 6 | use std::sync::Arc; |
b017bbc4 DM |
7 | |
8 | use anyhow::{bail, format_err, Error}; | |
b9b4b312 | 9 | use serde_json::Value; |
b017bbc4 DM |
10 | |
11 | use proxmox::{ | |
b9b4b312 DM |
12 | api::{ |
13 | api, | |
14 | RpcEnvironment, | |
15 | RpcEnvironmentType, | |
16 | Router, | |
b4975d31 | 17 | Permission, |
4c4e5c2b | 18 | schema::parse_property_string, |
b9b4b312 DM |
19 | section_config::SectionConfigData, |
20 | }, | |
b017bbc4 DM |
21 | tools::{ |
22 | Uuid, | |
23 | io::ReadExt, | |
24 | fs::{ | |
25 | replace_file, | |
26 | CreateOptions, | |
27 | }, | |
28 | }, | |
b017bbc4 DM |
29 | }; |
30 | ||
31 | use crate::{ | |
8de9a991 | 32 | task_log, |
074503f2 | 33 | task_warn, |
a80d72f9 | 34 | task::TaskState, |
b017bbc4 | 35 | tools::compute_file_csum, |
b9b4b312 | 36 | api2::types::{ |
4c4e5c2b DC |
37 | DATASTORE_MAP_ARRAY_SCHEMA, |
38 | DATASTORE_MAP_LIST_SCHEMA, | |
9883b54c | 39 | DRIVE_NAME_SCHEMA, |
b9b4b312 DM |
40 | UPID_SCHEMA, |
41 | Authid, | |
c9793d47 | 42 | Userid, |
b9b4b312 | 43 | }, |
b4975d31 DM |
44 | config::{ |
45 | self, | |
46 | cached_user_info::CachedUserInfo, | |
47 | acl::{ | |
48 | PRIV_DATASTORE_BACKUP, | |
e3613503 | 49 | PRIV_DATASTORE_MODIFY, |
b4975d31 DM |
50 | PRIV_TAPE_READ, |
51 | }, | |
52 | }, | |
b017bbc4 DM |
53 | backup::{ |
54 | archive_type, | |
55 | MANIFEST_BLOB_NAME, | |
56 | CryptMode, | |
57 | DataStore, | |
58 | BackupDir, | |
59 | DataBlob, | |
60 | BackupManifest, | |
61 | ArchiveType, | |
62 | IndexFile, | |
63 | DynamicIndexReader, | |
64 | FixedIndexReader, | |
65 | }, | |
c9793d47 DC |
66 | server::{ |
67 | lookup_user_email, | |
68 | WorkerTask, | |
69 | }, | |
b017bbc4 DM |
70 | tape::{ |
71 | TAPE_STATUS_DIR, | |
72 | TapeRead, | |
318b3106 | 73 | BlockReadError, |
b017bbc4 | 74 | MediaId, |
074503f2 | 75 | MediaSet, |
b017bbc4 | 76 | MediaCatalog, |
b9b4b312 | 77 | Inventory, |
30316192 | 78 | lock_media_set, |
b017bbc4 DM |
79 | file_formats::{ |
80 | PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, | |
81 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, | |
54722aca | 82 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, |
b017bbc4 DM |
83 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, |
84 | PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, | |
85 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, | |
54722aca | 86 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, |
074503f2 | 87 | PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, |
b017bbc4 | 88 | MediaContentHeader, |
54722aca | 89 | ChunkArchiveHeader, |
f47e0357 | 90 | ChunkArchiveDecoder, |
54722aca | 91 | SnapshotArchiveHeader, |
074503f2 | 92 | CatalogArchiveHeader, |
b017bbc4 | 93 | }, |
37796ff7 DM |
94 | drive::{ |
95 | TapeDriver, | |
96 | request_and_load_media, | |
25aa55b5 | 97 | lock_tape_device, |
926d05ef | 98 | set_tape_device_state, |
25aa55b5 | 99 | }, |
b017bbc4 DM |
100 | }, |
101 | }; | |
102 | ||
/// Maps source datastore names to target datastores for a tape restore.
pub struct DataStoreMap {
    // explicit "source=target" mappings, keyed by source store name
    map: HashMap<String, Arc<DataStore>>,
    // fallback target used for sources without an explicit mapping
    default: Option<Arc<DataStore>>,
}
107 | ||
108 | impl TryFrom<String> for DataStoreMap { | |
109 | type Error = Error; | |
110 | ||
111 | fn try_from(value: String) -> Result<Self, Error> { | |
112 | let value = parse_property_string(&value, &DATASTORE_MAP_ARRAY_SCHEMA)?; | |
113 | let mut mapping: Vec<String> = value | |
114 | .as_array() | |
115 | .unwrap() | |
116 | .iter() | |
117 | .map(|v| v.as_str().unwrap().to_string()) | |
118 | .collect(); | |
119 | ||
120 | let mut map = HashMap::new(); | |
121 | let mut default = None; | |
122 | while let Some(mut store) = mapping.pop() { | |
123 | if let Some(index) = store.find('=') { | |
124 | let mut target = store.split_off(index); | |
125 | target.remove(0); // remove '=' | |
126 | let datastore = DataStore::lookup_datastore(&target)?; | |
127 | map.insert(store, datastore); | |
128 | } else if default.is_none() { | |
129 | default = Some(DataStore::lookup_datastore(&store)?); | |
130 | } else { | |
131 | bail!("multiple default stores given"); | |
132 | } | |
133 | } | |
134 | ||
135 | Ok(Self { map, default }) | |
136 | } | |
137 | } | |
138 | ||
139 | impl DataStoreMap { | |
140 | fn used_datastores<'a>(&self) -> HashSet<&str> { | |
141 | let mut set = HashSet::new(); | |
142 | for store in self.map.values() { | |
143 | set.insert(store.name()); | |
144 | } | |
145 | ||
146 | if let Some(ref store) = self.default { | |
147 | set.insert(store.name()); | |
148 | } | |
149 | ||
150 | set | |
151 | } | |
152 | ||
153 | fn get_datastore(&self, source: &str) -> Option<&DataStore> { | |
154 | if let Some(store) = self.map.get(source) { | |
155 | return Some(&store); | |
156 | } | |
157 | if let Some(ref store) = self.default { | |
158 | return Some(&store); | |
159 | } | |
160 | ||
161 | return None; | |
162 | } | |
163 | } | |
164 | ||
/// Tape restore API entry point (POST dispatches to `restore`).
pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
b9b4b312 | 166 | |
b9b4b312 DM |
167 | #[api( |
168 | input: { | |
169 | properties: { | |
170 | store: { | |
4c4e5c2b | 171 | schema: DATASTORE_MAP_LIST_SCHEMA, |
b9b4b312 | 172 | }, |
9883b54c DM |
173 | drive: { |
174 | schema: DRIVE_NAME_SCHEMA, | |
175 | }, | |
b9b4b312 DM |
176 | "media-set": { |
177 | description: "Media set UUID.", | |
178 | type: String, | |
179 | }, | |
c9793d47 DC |
180 | "notify-user": { |
181 | type: Userid, | |
182 | optional: true, | |
183 | }, | |
e3613503 DC |
184 | owner: { |
185 | type: Authid, | |
186 | optional: true, | |
187 | }, | |
b9b4b312 DM |
188 | }, |
189 | }, | |
190 | returns: { | |
191 | schema: UPID_SCHEMA, | |
192 | }, | |
b4975d31 DM |
193 | access: { |
194 | // Note: parameters are no uri parameter, so we need to test inside function body | |
195 | description: "The user needs Tape.Read privilege on /tape/pool/{pool} \ | |
196 | and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.", | |
197 | permission: &Permission::Anybody, | |
198 | }, | |
b9b4b312 DM |
199 | )] |
200 | /// Restore data from media-set | |
201 | pub fn restore( | |
202 | store: String, | |
9883b54c | 203 | drive: String, |
b9b4b312 | 204 | media_set: String, |
c9793d47 | 205 | notify_user: Option<Userid>, |
e3613503 | 206 | owner: Option<Authid>, |
b9b4b312 DM |
207 | rpcenv: &mut dyn RpcEnvironment, |
208 | ) -> Result<Value, Error> { | |
b9b4b312 | 209 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
b4975d31 DM |
210 | let user_info = CachedUserInfo::new()?; |
211 | ||
4c4e5c2b DC |
212 | let store_map = DataStoreMap::try_from(store) |
213 | .map_err(|err| format_err!("cannot parse store mapping: {}", err))?; | |
214 | let used_datastores = store_map.used_datastores(); | |
215 | if used_datastores.len() == 0 { | |
216 | bail!("no datastores given"); | |
b4975d31 DM |
217 | } |
218 | ||
4c4e5c2b DC |
219 | for store in used_datastores.iter() { |
220 | let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
221 | if (privs & PRIV_DATASTORE_BACKUP) == 0 { | |
222 | bail!("no permissions on /datastore/{}", store); | |
223 | } | |
224 | ||
225 | if let Some(ref owner) = owner { | |
226 | let correct_owner = owner == &auth_id | |
227 | || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user()); | |
e3613503 | 228 | |
4c4e5c2b DC |
229 | // same permission as changing ownership after syncing |
230 | if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 { | |
231 | bail!("no permission to restore as '{}'", owner); | |
232 | } | |
e3613503 DC |
233 | } |
234 | } | |
235 | ||
b4975d31 DM |
236 | let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]); |
237 | if (privs & PRIV_TAPE_READ) == 0 { | |
238 | bail!("no permissions on /tape/drive/{}", drive); | |
239 | } | |
b9b4b312 | 240 | |
30316192 DM |
241 | let media_set_uuid = media_set.parse()?; |
242 | ||
b9b4b312 | 243 | let status_path = Path::new(TAPE_STATUS_DIR); |
b9b4b312 | 244 | |
30316192 DM |
245 | let _lock = lock_media_set(status_path, &media_set_uuid, None)?; |
246 | ||
247 | let inventory = Inventory::load(status_path)?; | |
b9b4b312 DM |
248 | |
249 | let pool = inventory.lookup_media_set_pool(&media_set_uuid)?; | |
250 | ||
b4975d31 DM |
251 | let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]); |
252 | if (privs & PRIV_TAPE_READ) == 0 { | |
253 | bail!("no permissions on /tape/pool/{}", pool); | |
254 | } | |
255 | ||
b9b4b312 | 256 | let (drive_config, _digest) = config::drive::config()?; |
25aa55b5 DM |
257 | |
258 | // early check/lock before starting worker | |
259 | let drive_lock = lock_tape_device(&drive_config, &drive)?; | |
b9b4b312 | 260 | |
39735609 | 261 | let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; |
b9b4b312 | 262 | |
4c4e5c2b DC |
263 | let taskid = used_datastores |
264 | .iter() | |
265 | .map(|s| s.to_string()) | |
266 | .collect::<Vec<String>>() | |
267 | .join(", "); | |
b9b4b312 DM |
268 | let upid_str = WorkerTask::new_thread( |
269 | "tape-restore", | |
4c4e5c2b | 270 | Some(taskid), |
b9b4b312 DM |
271 | auth_id.clone(), |
272 | to_stdout, | |
273 | move |worker| { | |
25aa55b5 | 274 | let _drive_lock = drive_lock; // keep lock guard |
b9b4b312 | 275 | |
926d05ef DC |
276 | set_tape_device_state(&drive, &worker.upid().to_string())?; |
277 | ||
b9b4b312 DM |
278 | let members = inventory.compute_media_set_members(&media_set_uuid)?; |
279 | ||
44288184 | 280 | let media_list = members.media_list(); |
b9b4b312 DM |
281 | |
282 | let mut media_id_list = Vec::new(); | |
283 | ||
8e6459a8 DM |
284 | let mut encryption_key_fingerprint = None; |
285 | ||
b9b4b312 DM |
286 | for (seq_nr, media_uuid) in media_list.iter().enumerate() { |
287 | match media_uuid { | |
288 | None => { | |
289 | bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr); | |
290 | } | |
291 | Some(media_uuid) => { | |
8e6459a8 DM |
292 | let media_id = inventory.lookup_media(media_uuid).unwrap(); |
293 | if let Some(ref set) = media_id.media_set_label { // always true here | |
294 | if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() { | |
295 | encryption_key_fingerprint = set.encryption_key_fingerprint.clone(); | |
296 | } | |
297 | } | |
298 | media_id_list.push(media_id); | |
b9b4b312 DM |
299 | } |
300 | } | |
301 | } | |
302 | ||
8de9a991 | 303 | task_log!(worker, "Restore mediaset '{}'", media_set); |
8e6459a8 | 304 | if let Some(fingerprint) = encryption_key_fingerprint { |
8de9a991 | 305 | task_log!(worker, "Encryption key fingerprint: {}", fingerprint); |
8e6459a8 | 306 | } |
8de9a991 | 307 | task_log!(worker, "Pool: {}", pool); |
4c4e5c2b DC |
308 | task_log!(worker, "Datastore(s):"); |
309 | store_map | |
310 | .used_datastores() | |
311 | .iter() | |
312 | .for_each(|store| task_log!(worker, "\t{}", store)); | |
8de9a991 DM |
313 | task_log!(worker, "Drive: {}", drive); |
314 | task_log!( | |
315 | worker, | |
b9b4b312 DM |
316 | "Required media list: {}", |
317 | media_id_list.iter() | |
8446fbca | 318 | .map(|media_id| media_id.label.label_text.as_str()) |
b9b4b312 DM |
319 | .collect::<Vec<&str>>() |
320 | .join(";") | |
8de9a991 | 321 | ); |
b9b4b312 DM |
322 | |
323 | for media_id in media_id_list.iter() { | |
324 | request_and_restore_media( | |
325 | &worker, | |
326 | media_id, | |
327 | &drive_config, | |
9883b54c | 328 | &drive, |
4c4e5c2b | 329 | &store_map, |
b9b4b312 | 330 | &auth_id, |
c9793d47 | 331 | ¬ify_user, |
e3613503 | 332 | &owner, |
b9b4b312 DM |
333 | )?; |
334 | } | |
335 | ||
8de9a991 | 336 | task_log!(worker, "Restore mediaset '{}' done", media_set); |
926d05ef DC |
337 | |
338 | if let Err(err) = set_tape_device_state(&drive, "") { | |
339 | task_log!( | |
340 | worker, | |
341 | "could not unset drive state for {}: {}", | |
342 | drive, | |
343 | err | |
344 | ); | |
345 | } | |
346 | ||
b9b4b312 DM |
347 | Ok(()) |
348 | } | |
349 | )?; | |
350 | ||
351 | Ok(upid_str.into()) | |
352 | } | |
353 | ||
b017bbc4 DM |
354 | /// Request and restore complete media without using existing catalog (create catalog instead) |
355 | pub fn request_and_restore_media( | |
356 | worker: &WorkerTask, | |
357 | media_id: &MediaId, | |
358 | drive_config: &SectionConfigData, | |
359 | drive_name: &str, | |
4c4e5c2b | 360 | store_map: &DataStoreMap, |
b017bbc4 | 361 | authid: &Authid, |
c9793d47 | 362 | notify_user: &Option<Userid>, |
e3613503 | 363 | owner: &Option<Authid>, |
b017bbc4 | 364 | ) -> Result<(), Error> { |
b017bbc4 DM |
365 | let media_set_uuid = match media_id.media_set_label { |
366 | None => bail!("restore_media: no media set - internal error"), | |
367 | Some(ref set) => &set.uuid, | |
368 | }; | |
369 | ||
c9793d47 DC |
370 | let email = notify_user |
371 | .as_ref() | |
372 | .and_then(|userid| lookup_user_email(userid)) | |
373 | .or_else(|| lookup_user_email(&authid.clone().into())); | |
374 | ||
375 | let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?; | |
b017bbc4 DM |
376 | |
377 | match info.media_set_label { | |
378 | None => { | |
379 | bail!("missing media set label on media {} ({})", | |
8446fbca | 380 | media_id.label.label_text, media_id.label.uuid); |
b017bbc4 DM |
381 | } |
382 | Some(ref set) => { | |
383 | if &set.uuid != media_set_uuid { | |
384 | bail!("wrong media set label on media {} ({} != {})", | |
8446fbca | 385 | media_id.label.label_text, media_id.label.uuid, |
b017bbc4 DM |
386 | media_set_uuid); |
387 | } | |
8e6459a8 DM |
388 | let encrypt_fingerprint = set.encryption_key_fingerprint.clone() |
389 | .map(|fp| (fp, set.uuid.clone())); | |
390 | ||
391 | drive.set_encryption(encrypt_fingerprint)?; | |
b017bbc4 DM |
392 | } |
393 | } | |
394 | ||
e3613503 DC |
395 | let restore_owner = owner.as_ref().unwrap_or(authid); |
396 | ||
4c4e5c2b DC |
397 | restore_media( |
398 | worker, | |
399 | &mut drive, | |
400 | &info, | |
401 | Some((&store_map, restore_owner)), | |
402 | false, | |
403 | ) | |
b017bbc4 DM |
404 | } |
405 | ||
406 | /// Restore complete media content and catalog | |
407 | /// | |
408 | /// Only create the catalog if target is None. | |
409 | pub fn restore_media( | |
410 | worker: &WorkerTask, | |
411 | drive: &mut Box<dyn TapeDriver>, | |
412 | media_id: &MediaId, | |
4c4e5c2b | 413 | target: Option<(&DataStoreMap, &Authid)>, |
b017bbc4 DM |
414 | verbose: bool, |
415 | ) -> Result<(), Error> { | |
416 | ||
417 | let status_path = Path::new(TAPE_STATUS_DIR); | |
418 | let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?; | |
419 | ||
420 | loop { | |
421 | let current_file_number = drive.current_file_number()?; | |
318b3106 DM |
422 | let reader = match drive.read_next_file() { |
423 | Err(BlockReadError::EndOfFile) => { | |
424 | task_log!(worker, "skip unexpected filemark at pos {}", current_file_number); | |
425 | continue; | |
426 | } | |
427 | Err(BlockReadError::EndOfStream) => { | |
8de9a991 | 428 | task_log!(worker, "detected EOT after {} files", current_file_number); |
b017bbc4 DM |
429 | break; |
430 | } | |
318b3106 DM |
431 | Err(BlockReadError::Error(err)) => { |
432 | return Err(err.into()); | |
433 | } | |
434 | Ok(reader) => reader, | |
b017bbc4 DM |
435 | }; |
436 | ||
b017bbc4 DM |
437 | restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?; |
438 | } | |
439 | ||
440 | MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?; | |
441 | ||
442 | Ok(()) | |
443 | } | |
444 | ||
/// Restore a single content archive from tape and record it in the catalog.
///
/// Dispatches on the content magic: snapshot archives (v1.1) are extracted
/// into the mapped datastore, chunk archives (v1.1) have their chunks
/// inserted, catalog archives are skipped. When `target` is `None`, data is
/// only read (to build the catalog), never written to a datastore.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStoreMap, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels live at the start of the tape, not among content archives
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            // v1.0 snapshot archives are no longer supported
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
                            "restore '{}' failed - owner check failed ({} != {})",
                            snapshot,
                            authid,
                            owner
                        );
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(&backup_dir)?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

                    // only extract if the snapshot does not already exist;
                    // an existing snapshot falls through to the skip/catalog
                    // path below
                    if is_new {
                        task_log!(worker, "restore snapshot {}", backup_dir);

                        match restore_snapshot_archive(worker, reader, &path) {
                            Err(err) => {
                                // hard failure: remove the partial snapshot dir
                                std::fs::remove_dir_all(&path)?;
                                bail!("restore snapshot {} failed - {}", backup_dir, err);
                            }
                            Ok(false) => {
                                // truncated stream: drop the partial snapshot,
                                // but keep going with the rest of the media
                                std::fs::remove_dir_all(&path)?;
                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                            }
                            Ok(true) => {
                                catalog.register_snapshot(
                                    Uuid::from(header.uuid),
                                    current_file_number,
                                    &datastore_name,
                                    &snapshot,
                                )?;
                                catalog.commit_if_large()?;
                            }
                        }
                        // reader fully consumed by the extraction above
                        return Ok(());
                    }
                } else {
                    // no mapping for this source datastore
                    task_log!(worker, "skipping...");
                }
            }

            // catalog-only path: consume the archive and register it if complete
            reader.skip_data()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            // v1.0 chunk archives are no longer supported
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target
                .as_ref()
                .and_then(|t| t.0.get_datastore(&source_datastore));

            // restore chunks when we have a mapped target, or read-only when
            // building the catalog (target is None)
            if datastore.is_some() || target.is_none() {
                if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                    catalog.start_chunk_archive(
                        Uuid::from(header.uuid),
                        current_file_number,
                        &source_datastore,
                    )?;
                    for digest in chunks.iter() {
                        catalog.register_chunk(&digest)?;
                    }
                    task_log!(worker, "register {} chunks", chunks.len());
                    catalog.end_chunk_archive()?;
                    catalog.commit_if_large()?;
                }
                return Ok(());
            } else if target.is_some() {
                // target given, but no mapping for this source datastore
                task_log!(worker, "skipping...");
            }

            reader.skip_data()?; // read all data
        }
        PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
            // catalog archives are not needed for a full restore
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

            task_log!(worker, "File {}: skip catalog '{}'", current_file_number, archive_header.uuid);

            reader.skip_data()?; // read all data
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
588 | ||
/// Read a chunk archive from tape, inserting chunks into `datastore`.
///
/// Returns `Ok(Some(digests))` with the digests of all chunks read on
/// success or when the stream is marked incomplete, `Ok(None)` for an
/// aborted stream without end marker, and `Err` for a corrupt archive.
/// With `datastore == None` the chunks are read but not stored.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    // decode chunks until the stream ends; errors are examined afterwards
    // to distinguish "incomplete but usable" from "corrupt"
    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {

            worker.check_abort()?;

            if let Some(datastore) = datastore {
                // touching an existing chunk also refreshes its atime,
                // protecting it from garbage collection
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    // unencrypted blobs can be fully verified against the digest
                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
            } else if verbose {
                // catalog-only mode: just record the digest
                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
649 | ||
650 | fn restore_snapshot_archive<'a>( | |
a80d72f9 | 651 | worker: &WorkerTask, |
b017bbc4 DM |
652 | reader: Box<dyn 'a + TapeRead>, |
653 | snapshot_path: &Path, | |
654 | ) -> Result<bool, Error> { | |
655 | ||
656 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
a80d72f9 | 657 | match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) { |
38556bf6 | 658 | Ok(()) => Ok(true), |
b017bbc4 DM |
659 | Err(err) => { |
660 | let reader = decoder.input(); | |
661 | ||
662 | // check if this stream is marked incomplete | |
663 | if let Ok(true) = reader.is_incomplete() { | |
664 | return Ok(false); | |
665 | } | |
666 | ||
667 | // check if this is an aborted stream without end marker | |
668 | if let Ok(false) = reader.has_end_marker() { | |
669 | return Ok(false); | |
670 | } | |
671 | ||
672 | // else the archive is corrupt | |
38556bf6 | 673 | Err(err) |
b017bbc4 DM |
674 | } |
675 | } | |
676 | } | |
677 | ||
/// Decode a snapshot pxar stream into `snapshot_path` and verify it.
///
/// Expects a flat archive: a root directory containing only regular files.
/// Non-manifest files are written via a `.tmp` file and atomically renamed;
/// the manifest itself is kept as `.tmp` until all files listed in it have
/// been checksum-verified, then renamed last as the commit point.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // the first entry must be the archive's root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only regular files directly below the root are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // keep the manifest as .tmp — renamed only after verification below
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            // write to .tmp, then atomically rename into place
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify every restored file against the checksums in the manifest
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}
074503f2 DM |
798 | |
/// Try to restore media catalogs (form catalog_archives)
///
/// Scans the media for catalog archives (starting at file position 2,
/// then once more after seeking to EOT) and restores every catalog that
/// belongs to `media_set`. The catalog for the current media (`uuid`) is
/// always overwritten; catalogs of other set members are only restored if
/// missing. Returns whether the catalog for `uuid` itself was found.
pub fn fast_catalog_restore(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_set: &MediaSet,
    uuid: &Uuid, // current media Uuid
) -> Result<bool, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    // caller must have positioned the drive right behind the media labels
    let current_file_number = drive.current_file_number()?;
    if current_file_number != 2 {
        bail!("fast_catalog_restore: wrong media position - internal error");
    }

    let mut found_catalog = false;

    let mut moved_to_eom = false;

    loop {
        let current_file_number = drive.current_file_number()?;

        { // limit reader scope
            let mut reader = match drive.read_next_file() {
                Err(BlockReadError::EndOfFile) => {
                    task_log!(worker, "skip unexpected filemark at pos {}", current_file_number);
                    continue;
                }
                Err(BlockReadError::EndOfStream) => {
                    task_log!(worker, "detected EOT after {} files", current_file_number);
                    break;
                }
                Err(BlockReadError::Error(err)) => {
                    return Err(err.into());
                }
                Ok(reader) => reader,
            };

            let header: MediaContentHeader = unsafe { reader.read_le_value()? };
            if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                bail!("missing MediaContentHeader");
            }

            if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
                task_log!(worker, "found catalog at pos {}", current_file_number);

                let header_data = reader.read_exact_allocated(header.size as usize)?;

                let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                    .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;

                // ignore catalogs from other media sets
                if &archive_header.media_set_uuid != media_set.uuid() {
                    task_log!(worker, "skipping unrelated catalog at pos {}", current_file_number);
                    reader.skip_data()?; // read all data
                    continue;
                }

                let catalog_uuid = &archive_header.uuid;

                // only restore catalogs of media that are members of this set
                let wanted = media_set
                    .media_list()
                    .iter()
                    .find(|e| {
                        match e {
                            None => false,
                            Some(uuid) => uuid == catalog_uuid,
                        }
                    })
                    .is_some();

                if !wanted {
                    task_log!(worker, "skip catalog because media '{}' not inventarized", catalog_uuid);
                    reader.skip_data()?; // read all data
                    continue;
                }

                if catalog_uuid == uuid {
                    // always restore and overwrite catalog
                } else {
                    // only restore if catalog does not exist
                    if MediaCatalog::exists(status_path, catalog_uuid) {
                        task_log!(worker, "catalog for media '{}' already exists", catalog_uuid);
                        reader.skip_data()?; // read all data
                        continue;
                    }
                }

                // copy the catalog to a temporary database file, then
                // sanity-check its header before committing it
                let mut file = MediaCatalog::create_temporary_database_file(status_path, catalog_uuid)?;

                std::io::copy(&mut reader, &mut file)?;

                file.seek(SeekFrom::Start(0))?;

                match MediaCatalog::parse_catalog_header(&mut file)? {
                    (true, Some(media_uuid), Some(media_set_uuid)) => {
                        if &media_uuid != catalog_uuid {
                            task_log!(worker, "catalog uuid missmatch at pos {}", current_file_number);
                            continue;
                        }
                        if media_set_uuid != archive_header.media_set_uuid {
                            task_log!(worker, "catalog media_set missmatch at pos {}", current_file_number);
                            continue;
                        }

                        MediaCatalog::finish_temporary_database(status_path, &media_uuid, true)?;

                        if catalog_uuid == uuid {
                            task_log!(worker, "successfully restored catalog");
                            found_catalog = true
                        } else {
                            task_log!(worker, "successfully restored related catalog {}", media_uuid);
                        }
                    }
                    _ => {
                        task_warn!(worker, "got incomplete catalog header - skip file");
                        continue;
                    }
                }

                continue;
            }
        }

        // not a catalog archive: catalogs are written at the end of the
        // media, so seek to the last file and scan once more from there
        if moved_to_eom {
            break; // already done - stop
        }
        moved_to_eom = true;

        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
        drive.move_to_last_file()?;

        let new_file_number = drive.current_file_number()?;

        if new_file_number < (current_file_number + 1) {
            break; // no new content - stop
        }
    }

    Ok(found_catalog)
}