]>
Commit | Line | Data |
---|---|---|
b017bbc4 DM |
1 | use std::path::Path; |
2 | use std::ffi::OsStr; | |
3 | use std::convert::TryFrom; | |
4 | ||
5 | use anyhow::{bail, format_err, Error}; | |
b9b4b312 | 6 | use serde_json::Value; |
b017bbc4 DM |
7 | |
8 | use proxmox::{ | |
b9b4b312 DM |
9 | api::{ |
10 | api, | |
11 | RpcEnvironment, | |
12 | RpcEnvironmentType, | |
13 | Router, | |
14 | section_config::SectionConfigData, | |
15 | }, | |
b017bbc4 DM |
16 | tools::{ |
17 | Uuid, | |
18 | io::ReadExt, | |
19 | fs::{ | |
20 | replace_file, | |
21 | CreateOptions, | |
22 | }, | |
23 | }, | |
b017bbc4 DM |
24 | }; |
25 | ||
26 | use crate::{ | |
27 | tools::compute_file_csum, | |
b9b4b312 DM |
28 | api2::types::{ |
29 | DATASTORE_SCHEMA, | |
30 | UPID_SCHEMA, | |
31 | Authid, | |
32 | MediaPoolConfig, | |
33 | }, | |
34 | config::{ | |
35 | self, | |
36 | drive::check_drive_exists, | |
37 | }, | |
b017bbc4 DM |
38 | backup::{ |
39 | archive_type, | |
40 | MANIFEST_BLOB_NAME, | |
41 | CryptMode, | |
42 | DataStore, | |
43 | BackupDir, | |
44 | DataBlob, | |
45 | BackupManifest, | |
46 | ArchiveType, | |
47 | IndexFile, | |
48 | DynamicIndexReader, | |
49 | FixedIndexReader, | |
50 | }, | |
51 | server::WorkerTask, | |
52 | tape::{ | |
53 | TAPE_STATUS_DIR, | |
54 | TapeRead, | |
55 | MediaId, | |
56 | MediaCatalog, | |
57 | ChunkArchiveDecoder, | |
58 | TapeDriver, | |
b9b4b312 DM |
59 | MediaPool, |
60 | Inventory, | |
b017bbc4 DM |
61 | request_and_load_media, |
62 | file_formats::{ | |
63 | PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, | |
64 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, | |
65 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, | |
66 | PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, | |
67 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, | |
68 | MediaContentHeader, | |
69 | }, | |
70 | }, | |
71 | }; | |
72 | ||
b9b4b312 DM |
73 | pub const ROUTER: Router = Router::new() |
74 | .post(&API_METHOD_RESTORE); | |
75 | ||
76 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Restore data from media-set
///
/// Validates the request (datastore, media-set, pool, drive) up front, then
/// spawns a `tape-restore` worker task that loads every member tape of the
/// media-set in sequence and restores its content into the datastore.
/// Returns the worker task's UPID string.
pub fn restore(
    store: String,
    media_set: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // fails early if the target datastore does not exist
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let media_set_uuid = media_set.parse()?;

    // the pool owning this media-set determines which drive we use
    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &pool_config.drive)?;

    // CLI callers get worker output echoed to stdout
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {

            // serialize against other tasks using this pool's media
            let _lock = MediaPool::lock(status_path, &pool)?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            // a media-set is ordered by sequence number; a None slot means a
            // member tape is unknown to the inventory, so we cannot restore
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        // NOTE(review): unwrap assumes the inventory stays
                        // consistent with compute_media_set_members() above —
                        // the uuid was just produced from this same inventory
                        media_id_list.push(inventory.lookup_media(media_uuid).unwrap());
                    }
                }
            }

            let drive = &pool_config.drive;

            worker.log(format!("Restore mediaset '{}'", media_set));
            worker.log(format!("Pool: {}", pool));
            worker.log(format!("Datastore: {}", store));
            worker.log(format!("Drive: {}", drive));
            worker.log(format!(
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            ));

            // restore tape by tape, in media-set sequence order; the first
            // failure aborts the whole task
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    drive,
                    &datastore,
                    &auth_id,
                )?;
            }

            worker.log(format!("Restore mediaset '{}' done", media_set));
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
178 | ||
b017bbc4 DM |
/// Request and restore complete media without using existing catalog (create catalog instead)
///
/// Loads the tape identified by `media_id` into the drive (prompting /
/// requesting as needed), verifies its media-set label matches what the
/// inventory expects, then delegates to [`restore_media`] with the given
/// datastore as restore target.
pub fn request_and_restore_media(
    worker: &WorkerTask,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
) -> Result<(), Error> {

    // a tape that belongs to no media-set cannot be a set member, so the
    // caller handed us inconsistent inventory data
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    // loads the tape and reads the on-tape label for verification
    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;

    // ensure the tape physically in the drive really is the requested
    // member of the requested media-set
    match info.media_set_label {
        None => {
            bail!("missing media set label on media {} ({})",
                  media_id.label.label_text, media_id.label.uuid);
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!("wrong media set label on media {} ({} != {})",
                      media_id.label.label_text, media_id.label.uuid,
                      media_set_uuid);
            }
        }
    }

    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
}
212 | ||
/// Restore complete media content and catalog
///
/// Iterates over every file on the tape until EOT, restoring each archive
/// via [`restore_archive`] while rebuilding the media catalog as it goes.
/// If `target` is `None`, no data is written to a datastore — only the
/// catalog is (re)created.
pub fn restore_media(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStore, &Authid)>,
    verbose: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    // build into a temporary catalog; only promoted on success below
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file()? {
            None => {
                // end of recorded data on this tape
                worker.log(format!("detected EOT after {} files", current_file_number));
                break;
            }
            Some(reader) => reader,
        };

        restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
    }

    // atomically replace any previous catalog for this media
    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}
244 | ||
/// Restore a single on-tape archive file.
///
/// Reads and validates the `MediaContentHeader`, then dispatches on the
/// content magic: snapshot archives are unpacked into the datastore (when a
/// `target` is given), chunk archives have their chunks inserted, and each
/// successfully read archive is registered in `catalog`.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    // raw little-endian struct read from the tape stream (hence unsafe);
    // the magic check right below rejects anything that is not a header
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels live at the start of the tape, not inside the data area
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            // payload starts with the snapshot name (e.g. "vm/100/...")
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    worker.log(format!("restore snapshot {}", backup_dir));

                    match restore_snapshot_archive(reader, &path) {
                        Err(err) => {
                            // remove the partially restored snapshot dir
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            // archive was incomplete/aborted on tape: skip it
                            std::fs::remove_dir_all(&path)?;
                            worker.log(format!("skip incomplete snapshot {}", backup_dir));
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
                // snapshot already exists locally: fall through and only
                // record it in the catalog (after consuming the stream)
            }

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            worker.log(format!("Found chunk archive: {}", current_file_number));
            let datastore = target.as_ref().map(|t| t.0);

            // None return means an aborted stream without end marker; in
            // that case the archive is not registered in the catalog
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                worker.log(format!("register {} chunks", chunks.len()));
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    // persist catalog progress after each archive
    catalog.commit()?;

    Ok(())
}
333 | ||
/// Decode a chunk archive from tape, inserting chunks into `datastore`.
///
/// Returns `Ok(Some(digests))` with all chunk digests seen (even for a
/// stream marked incomplete), `Ok(None)` for an aborted stream without end
/// marker, and `Err` for a corrupt archive. When `datastore` is `None` the
/// chunks are only enumerated, not stored.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        loop {
            match decoder.next_chunk()? {
                Some((digest, blob)) => {

                    if let Some(datastore) = datastore {
                        // touch instead of insert when the chunk already
                        // exists (also refreshes it for garbage collection)
                        let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                        if !chunk_exists {
                            blob.verify_crc()?;

                            // only unencrypted blobs can be decoded here to
                            // cross-check the digest
                            if blob.crypt_mode()? == CryptMode::None {
                                blob.decode(None, Some(&digest))?; // verify digest
                            }
                            if verbose {
                                worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                            }
                            datastore.insert_chunk(&blob, &digest)?;
                        } else if verbose {
                            worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                        }
                    } else if verbose {
                        worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                    }
                    chunks.push(digest);
                }
                None => break,
            }
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                // keep whatever chunks we managed to read
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
397 | ||
398 | fn restore_snapshot_archive<'a>( | |
399 | reader: Box<dyn 'a + TapeRead>, | |
400 | snapshot_path: &Path, | |
401 | ) -> Result<bool, Error> { | |
402 | ||
403 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
404 | match try_restore_snapshot_archive(&mut decoder, snapshot_path) { | |
38556bf6 | 405 | Ok(()) => Ok(true), |
b017bbc4 DM |
406 | Err(err) => { |
407 | let reader = decoder.input(); | |
408 | ||
409 | // check if this stream is marked incomplete | |
410 | if let Ok(true) = reader.is_incomplete() { | |
411 | return Ok(false); | |
412 | } | |
413 | ||
414 | // check if this is an aborted stream without end marker | |
415 | if let Ok(false) = reader.has_end_marker() { | |
416 | return Ok(false); | |
417 | } | |
418 | ||
419 | // else the archive is corrupt | |
38556bf6 | 420 | Err(err) |
b017bbc4 DM |
421 | } |
422 | } | |
423 | } | |
424 | ||
/// Decode a snapshot pxar archive into `snapshot_path` and verify it.
///
/// Expects a flat archive: a root directory containing only regular files.
/// Each file is written next to its final name with a `.tmp` extension and
/// renamed into place — except the manifest, which is only renamed at the
/// very end, after every file listed in it has been checksum-verified, so a
/// partially restored snapshot never carries a valid manifest.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // first entry must be the archive root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    // extract every file entry
    loop {
        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only plain files directly below the root are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // manifest stays under its .tmp name until final verification
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            // move into final place; rename on the same filesystem is atomic
            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify checksum and size of every restored file against the manifest
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}