]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/restore.rs
clippy: remove unnecessary clones
[proxmox-backup.git] / src / api2 / tape / restore.rs
1 use std::path::Path;
2 use std::ffi::OsStr;
3 use std::convert::TryFrom;
4
5 use anyhow::{bail, format_err, Error};
6 use serde_json::Value;
7
8 use proxmox::{
9 api::{
10 api,
11 RpcEnvironment,
12 RpcEnvironmentType,
13 Router,
14 section_config::SectionConfigData,
15 },
16 tools::{
17 Uuid,
18 io::ReadExt,
19 fs::{
20 replace_file,
21 CreateOptions,
22 },
23 },
24 };
25
26 use crate::{
27 tools::compute_file_csum,
28 api2::types::{
29 DATASTORE_SCHEMA,
30 UPID_SCHEMA,
31 Authid,
32 MediaPoolConfig,
33 },
34 config::{
35 self,
36 drive::check_drive_exists,
37 },
38 backup::{
39 archive_type,
40 MANIFEST_BLOB_NAME,
41 CryptMode,
42 DataStore,
43 BackupDir,
44 DataBlob,
45 BackupManifest,
46 ArchiveType,
47 IndexFile,
48 DynamicIndexReader,
49 FixedIndexReader,
50 },
51 server::WorkerTask,
52 tape::{
53 TAPE_STATUS_DIR,
54 TapeRead,
55 MediaId,
56 MediaCatalog,
57 ChunkArchiveDecoder,
58 TapeDriver,
59 MediaPool,
60 Inventory,
61 request_and_load_media,
62 file_formats::{
63 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
64 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
65 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
66 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
67 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
68 MediaContentHeader,
69 },
70 },
71 };
72
/// API router for this module: POST dispatches to the tape restore endpoint.
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
75
76
77 #[api(
78 input: {
79 properties: {
80 store: {
81 schema: DATASTORE_SCHEMA,
82 },
83 "media-set": {
84 description: "Media set UUID.",
85 type: String,
86 },
87 },
88 },
89 returns: {
90 schema: UPID_SCHEMA,
91 },
92 )]
93 /// Restore data from media-set
94 pub fn restore(
95 store: String,
96 media_set: String,
97 rpcenv: &mut dyn RpcEnvironment,
98 ) -> Result<Value, Error> {
99
100 let datastore = DataStore::lookup_datastore(&store)?;
101
102 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
103
104 let status_path = Path::new(TAPE_STATUS_DIR);
105 let inventory = Inventory::load(status_path)?;
106
107 let media_set_uuid = media_set.parse()?;
108
109 let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
110
111 let (config, _digest) = config::media_pool::config()?;
112 let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
113
114 let (drive_config, _digest) = config::drive::config()?;
115 // early check before starting worker
116 check_drive_exists(&drive_config, &pool_config.drive)?;
117
118 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
119
120 let upid_str = WorkerTask::new_thread(
121 "tape-restore",
122 Some(store.clone()),
123 auth_id.clone(),
124 to_stdout,
125 move |worker| {
126
127 let _lock = MediaPool::lock(status_path, &pool)?;
128
129 let members = inventory.compute_media_set_members(&media_set_uuid)?;
130
131 let media_list = members.media_list();
132
133 let mut media_id_list = Vec::new();
134
135 for (seq_nr, media_uuid) in media_list.iter().enumerate() {
136 match media_uuid {
137 None => {
138 bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
139 }
140 Some(media_uuid) => {
141 media_id_list.push(inventory.lookup_media(media_uuid).unwrap());
142 }
143 }
144 }
145
146 let drive = &pool_config.drive;
147
148 worker.log(format!("Restore mediaset '{}'", media_set));
149 worker.log(format!("Pool: {}", pool));
150 worker.log(format!("Datastore: {}", store));
151 worker.log(format!("Drive: {}", drive));
152 worker.log(format!(
153 "Required media list: {}",
154 media_id_list.iter()
155 .map(|media_id| media_id.label.label_text.as_str())
156 .collect::<Vec<&str>>()
157 .join(";")
158 ));
159
160 for media_id in media_id_list.iter() {
161 request_and_restore_media(
162 &worker,
163 media_id,
164 &drive_config,
165 drive,
166 &datastore,
167 &auth_id,
168 )?;
169 }
170
171 worker.log(format!("Restore mediaset '{}' done", media_set));
172 Ok(())
173 }
174 )?;
175
176 Ok(upid_str.into())
177 }
178
179 /// Request and restore complete media without using existing catalog (create catalog instead)
180 pub fn request_and_restore_media(
181 worker: &WorkerTask,
182 media_id: &MediaId,
183 drive_config: &SectionConfigData,
184 drive_name: &str,
185 datastore: &DataStore,
186 authid: &Authid,
187 ) -> Result<(), Error> {
188
189 let media_set_uuid = match media_id.media_set_label {
190 None => bail!("restore_media: no media set - internal error"),
191 Some(ref set) => &set.uuid,
192 };
193
194 let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;
195
196 match info.media_set_label {
197 None => {
198 bail!("missing media set label on media {} ({})",
199 media_id.label.label_text, media_id.label.uuid);
200 }
201 Some(ref set) => {
202 if &set.uuid != media_set_uuid {
203 bail!("wrong media set label on media {} ({} != {})",
204 media_id.label.label_text, media_id.label.uuid,
205 media_set_uuid);
206 }
207 }
208 }
209
210 restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
211 }
212
213 /// Restore complete media content and catalog
214 ///
215 /// Only create the catalog if target is None.
216 pub fn restore_media(
217 worker: &WorkerTask,
218 drive: &mut Box<dyn TapeDriver>,
219 media_id: &MediaId,
220 target: Option<(&DataStore, &Authid)>,
221 verbose: bool,
222 ) -> Result<(), Error> {
223
224 let status_path = Path::new(TAPE_STATUS_DIR);
225 let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
226
227 loop {
228 let current_file_number = drive.current_file_number()?;
229 let reader = match drive.read_next_file()? {
230 None => {
231 worker.log(format!("detected EOT after {} files", current_file_number));
232 break;
233 }
234 Some(reader) => reader,
235 };
236
237 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
238 }
239
240 MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
241
242 Ok(())
243 }
244
245 fn restore_archive<'a>(
246 worker: &WorkerTask,
247 mut reader: Box<dyn 'a + TapeRead>,
248 current_file_number: u64,
249 target: Option<(&DataStore, &Authid)>,
250 catalog: &mut MediaCatalog,
251 verbose: bool,
252 ) -> Result<(), Error> {
253
254 let header: MediaContentHeader = unsafe { reader.read_le_value()? };
255 if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
256 bail!("missing MediaContentHeader");
257 }
258
259 //println!("Found MediaContentHeader: {:?}", header);
260
261 match header.content_magic {
262 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
263 bail!("unexpected content magic (label)");
264 }
265 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
266 let snapshot = reader.read_exact_allocated(header.size as usize)?;
267 let snapshot = std::str::from_utf8(&snapshot)
268 .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
269 worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));
270
271 let backup_dir: BackupDir = snapshot.parse()?;
272
273 if let Some((datastore, authid)) = target.as_ref() {
274
275 let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
276 if *authid != &owner { // only the owner is allowed to create additional snapshots
277 bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
278 }
279
280 let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
281 let mut path = datastore.base_path();
282 path.push(rel_path);
283
284 if is_new {
285 worker.log(format!("restore snapshot {}", backup_dir));
286
287 match restore_snapshot_archive(reader, &path) {
288 Err(err) => {
289 std::fs::remove_dir_all(&path)?;
290 bail!("restore snapshot {} failed - {}", backup_dir, err);
291 }
292 Ok(false) => {
293 std::fs::remove_dir_all(&path)?;
294 worker.log(format!("skip incomplete snapshot {}", backup_dir));
295 }
296 Ok(true) => {
297 catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
298 catalog.commit_if_large()?;
299 }
300 }
301 return Ok(());
302 }
303 }
304
305 reader.skip_to_end()?; // read all data
306 if let Ok(false) = reader.is_incomplete() {
307 catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
308 catalog.commit_if_large()?;
309 }
310 }
311 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
312
313 worker.log(format!("Found chunk archive: {}", current_file_number));
314 let datastore = target.as_ref().map(|t| t.0);
315
316 if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
317 catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
318 for digest in chunks.iter() {
319 catalog.register_chunk(&digest)?;
320 }
321 worker.log(format!("register {} chunks", chunks.len()));
322 catalog.end_chunk_archive()?;
323 catalog.commit_if_large()?;
324 }
325 }
326 _ => bail!("unknown content magic {:?}", header.content_magic),
327 }
328
329 catalog.commit()?;
330
331 Ok(())
332 }
333
334 fn restore_chunk_archive<'a>(
335 worker: &WorkerTask,
336 reader: Box<dyn 'a + TapeRead>,
337 datastore: Option<&DataStore>,
338 verbose: bool,
339 ) -> Result<Option<Vec<[u8;32]>>, Error> {
340
341 let mut chunks = Vec::new();
342
343 let mut decoder = ChunkArchiveDecoder::new(reader);
344
345 let result: Result<_, Error> = proxmox::try_block!({
346 loop {
347 match decoder.next_chunk()? {
348 Some((digest, blob)) => {
349
350 if let Some(datastore) = datastore {
351 let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
352 if !chunk_exists {
353 blob.verify_crc()?;
354
355 if blob.crypt_mode()? == CryptMode::None {
356 blob.decode(None, Some(&digest))?; // verify digest
357 }
358 if verbose {
359 worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
360 }
361 datastore.insert_chunk(&blob, &digest)?;
362 } else {
363 if verbose {
364 worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
365 }
366 }
367 } else {
368 if verbose {
369 worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
370 }
371 }
372 chunks.push(digest);
373 }
374 None => break,
375 }
376 }
377 Ok(())
378 });
379
380 match result {
381 Ok(()) => Ok(Some(chunks)),
382 Err(err) => {
383 let reader = decoder.reader();
384
385 // check if this stream is marked incomplete
386 if let Ok(true) = reader.is_incomplete() {
387 return Ok(Some(chunks));
388 }
389
390 // check if this is an aborted stream without end marker
391 if let Ok(false) = reader.has_end_marker() {
392 worker.log(format!("missing stream end marker"));
393 return Ok(None);
394 }
395
396 // else the archive is corrupt
397 Err(err)
398 }
399 }
400 }
401
402 fn restore_snapshot_archive<'a>(
403 reader: Box<dyn 'a + TapeRead>,
404 snapshot_path: &Path,
405 ) -> Result<bool, Error> {
406
407 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
408 match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
409 Ok(()) => return Ok(true),
410 Err(err) => {
411 let reader = decoder.input();
412
413 // check if this stream is marked incomplete
414 if let Ok(true) = reader.is_incomplete() {
415 return Ok(false);
416 }
417
418 // check if this is an aborted stream without end marker
419 if let Ok(false) = reader.has_end_marker() {
420 return Ok(false);
421 }
422
423 // else the archive is corrupt
424 return Err(err);
425 }
426 }
427 }
428
429 fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
430 decoder: &mut pxar::decoder::sync::Decoder<R>,
431 snapshot_path: &Path,
432 ) -> Result<(), Error> {
433
434 let _root = match decoder.next() {
435 None => bail!("missing root entry"),
436 Some(root) => {
437 let root = root?;
438 match root.kind() {
439 pxar::EntryKind::Directory => { /* Ok */ }
440 _ => bail!("wrong root entry type"),
441 }
442 root
443 }
444 };
445
446 let root_path = Path::new("/");
447 let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
448
449 let mut manifest = None;
450
451 loop {
452 let entry = match decoder.next() {
453 None => break,
454 Some(entry) => entry?,
455 };
456 let entry_path = entry.path();
457
458 match entry.kind() {
459 pxar::EntryKind::File { .. } => { /* Ok */ }
460 _ => bail!("wrong entry type for {:?}", entry_path),
461 }
462 match entry_path.parent() {
463 None => bail!("wrong parent for {:?}", entry_path),
464 Some(p) => {
465 if p != root_path {
466 bail!("wrong parent for {:?}", entry_path);
467 }
468 }
469 }
470
471 let filename = entry.file_name();
472 let mut contents = match decoder.contents() {
473 None => bail!("missing file content"),
474 Some(contents) => contents,
475 };
476
477 let mut archive_path = snapshot_path.to_owned();
478 archive_path.push(&filename);
479
480 let mut tmp_path = archive_path.clone();
481 tmp_path.set_extension("tmp");
482
483 if filename == manifest_file_name {
484
485 let blob = DataBlob::load_from_reader(&mut contents)?;
486 let options = CreateOptions::new();
487 replace_file(&tmp_path, blob.raw_data(), options)?;
488
489 manifest = Some(BackupManifest::try_from(blob)?);
490 } else {
491 let mut tmpfile = std::fs::OpenOptions::new()
492 .write(true)
493 .create(true)
494 .read(true)
495 .open(&tmp_path)
496 .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;
497
498 std::io::copy(&mut contents, &mut tmpfile)?;
499
500 if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
501 bail!("Atomic rename file {:?} failed - {}", archive_path, err);
502 }
503 }
504 }
505
506 let manifest = match manifest {
507 None => bail!("missing manifest"),
508 Some(manifest) => manifest,
509 };
510
511 for item in manifest.files() {
512 let mut archive_path = snapshot_path.to_owned();
513 archive_path.push(&item.filename);
514
515 match archive_type(&item.filename)? {
516 ArchiveType::DynamicIndex => {
517 let index = DynamicIndexReader::open(&archive_path)?;
518 let (csum, size) = index.compute_csum();
519 manifest.verify_file(&item.filename, &csum, size)?;
520 }
521 ArchiveType::FixedIndex => {
522 let index = FixedIndexReader::open(&archive_path)?;
523 let (csum, size) = index.compute_csum();
524 manifest.verify_file(&item.filename, &csum, size)?;
525 }
526 ArchiveType::Blob => {
527 let mut tmpfile = std::fs::File::open(&archive_path)?;
528 let (csum, size) = compute_file_csum(&mut tmpfile)?;
529 manifest.verify_file(&item.filename, &csum, size)?;
530 }
531 }
532 }
533
534 // commit manifest
535 let mut manifest_path = snapshot_path.to_owned();
536 manifest_path.push(MANIFEST_BLOB_NAME);
537 let mut tmp_manifest_path = manifest_path.clone();
538 tmp_manifest_path.set_extension("tmp");
539
540 if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
541 bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
542 }
543
544 Ok(())
545 }