]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/restore.rs
tape: improve locking (lock media-sets)
[proxmox-backup.git] / src / api2 / tape / restore.rs
1 use std::path::Path;
2 use std::ffi::OsStr;
3 use std::convert::TryFrom;
4
5 use anyhow::{bail, format_err, Error};
6 use serde_json::Value;
7
8 use proxmox::{
9 api::{
10 api,
11 RpcEnvironment,
12 RpcEnvironmentType,
13 Router,
14 Permission,
15 section_config::SectionConfigData,
16 },
17 tools::{
18 Uuid,
19 io::ReadExt,
20 fs::{
21 replace_file,
22 CreateOptions,
23 },
24 },
25 };
26
27 use crate::{
28 task_log,
29 task::TaskState,
30 tools::compute_file_csum,
31 api2::types::{
32 DATASTORE_SCHEMA,
33 DRIVE_NAME_SCHEMA,
34 UPID_SCHEMA,
35 Authid,
36 Userid,
37 },
38 config::{
39 self,
40 cached_user_info::CachedUserInfo,
41 acl::{
42 PRIV_DATASTORE_BACKUP,
43 PRIV_DATASTORE_MODIFY,
44 PRIV_TAPE_READ,
45 },
46 },
47 backup::{
48 archive_type,
49 MANIFEST_BLOB_NAME,
50 CryptMode,
51 DataStore,
52 BackupDir,
53 DataBlob,
54 BackupManifest,
55 ArchiveType,
56 IndexFile,
57 DynamicIndexReader,
58 FixedIndexReader,
59 },
60 server::{
61 lookup_user_email,
62 WorkerTask,
63 },
64 tape::{
65 TAPE_STATUS_DIR,
66 TapeRead,
67 MediaId,
68 MediaCatalog,
69 Inventory,
70 lock_media_set,
71 file_formats::{
72 PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
73 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
74 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
75 PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
76 PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
77 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
78 PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
79 MediaContentHeader,
80 ChunkArchiveHeader,
81 ChunkArchiveDecoder,
82 SnapshotArchiveHeader,
83 },
84 drive::{
85 TapeDriver,
86 request_and_load_media,
87 lock_tape_device,
88 set_tape_device_state,
89 },
90 },
91 };
92
/// Router for the tape restore API (accepts POST only, dispatching to [`restore`]).
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
95
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
            "notify-user": {
                type: Userid,
                optional: true,
            },
            owner: {
                type: Authid,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // Datastore.Backup on the target datastore is always required.
    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
    if (privs & PRIV_DATASTORE_BACKUP) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    if let Some(ref owner) = owner {
        // Restoring with an explicit owner is allowed when that owner is the
        // requesting auth id itself, or one of the requesting user's own tokens.
        let correct_owner = owner == &auth_id
            || (owner.is_token()
                && !auth_id.is_token()
                && owner.user() == auth_id.user());

        // same permission as changing ownership after syncing
        if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
            bail!("no permission to restore as '{}'", owner);
        }
    }

    // Tape.Read on the drive used for reading.
    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let media_set_uuid = media_set.parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    // Lock the media set before loading the inventory, so it cannot change
    // underneath us while we inspect it.
    // NOTE(review): this guard is not moved into the worker closure, so it is
    // released when restore() returns (after spawning the worker) - confirm
    // that only protecting the setup phase is intended.
    let _lock = lock_media_set(status_path, &media_set_uuid, None)?;

    let inventory = Inventory::load(status_path)?;

    // The pool is needed for the Tape.Read permission check below.
    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    let datastore = DataStore::lookup_datastore(&store)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&drive, &worker.upid().to_string())?;

            // Fail early if the media set is incomplete, and pick up the
            // encryption key fingerprint from the first member that has one.
            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            let mut encryption_key_fingerprint = None;

            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore: {}", store);
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

            // Restore the member media one after the other, in set order.
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &datastore,
                    &auth_id,
                    &notify_user,
                    &owner,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);

            // Best effort: failure to clear the drive state is only logged.
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(
                    worker,
                    "could not unset drive state for {}: {}",
                    drive,
                    err
                );
            }

            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
269
270 /// Request and restore complete media without using existing catalog (create catalog instead)
271 pub fn request_and_restore_media(
272 worker: &WorkerTask,
273 media_id: &MediaId,
274 drive_config: &SectionConfigData,
275 drive_name: &str,
276 datastore: &DataStore,
277 authid: &Authid,
278 notify_user: &Option<Userid>,
279 owner: &Option<Authid>,
280 ) -> Result<(), Error> {
281
282 let media_set_uuid = match media_id.media_set_label {
283 None => bail!("restore_media: no media set - internal error"),
284 Some(ref set) => &set.uuid,
285 };
286
287 let email = notify_user
288 .as_ref()
289 .and_then(|userid| lookup_user_email(userid))
290 .or_else(|| lookup_user_email(&authid.clone().into()));
291
292 let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label, &email)?;
293
294 match info.media_set_label {
295 None => {
296 bail!("missing media set label on media {} ({})",
297 media_id.label.label_text, media_id.label.uuid);
298 }
299 Some(ref set) => {
300 if &set.uuid != media_set_uuid {
301 bail!("wrong media set label on media {} ({} != {})",
302 media_id.label.label_text, media_id.label.uuid,
303 media_set_uuid);
304 }
305 let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
306 .map(|fp| (fp, set.uuid.clone()));
307
308 drive.set_encryption(encrypt_fingerprint)?;
309 }
310 }
311
312 let restore_owner = owner.as_ref().unwrap_or(authid);
313
314 restore_media(worker, &mut drive, &info, Some((datastore, restore_owner)), false)
315 }
316
317 /// Restore complete media content and catalog
318 ///
319 /// Only create the catalog if target is None.
320 pub fn restore_media(
321 worker: &WorkerTask,
322 drive: &mut Box<dyn TapeDriver>,
323 media_id: &MediaId,
324 target: Option<(&DataStore, &Authid)>,
325 verbose: bool,
326 ) -> Result<(), Error> {
327
328 let status_path = Path::new(TAPE_STATUS_DIR);
329 let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
330
331 loop {
332 let current_file_number = drive.current_file_number()?;
333 let reader = match drive.read_next_file()? {
334 None => {
335 task_log!(worker, "detected EOT after {} files", current_file_number);
336 break;
337 }
338 Some(reader) => reader,
339 };
340
341 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
342 }
343
344 MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
345
346 Ok(())
347 }
348
/// Restore a single tape file and record its content in the catalog.
///
/// Reads the `MediaContentHeader` and dispatches on its content magic:
/// snapshot archives (v1.1) are extracted into the target datastore (if
/// any), chunk archives (v1.1) get their chunks inserted into the target
/// datastore. Old v1.0 archives and label magics are rejected.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    // SAFETY-relevant read of a packed on-tape header; the magic check below
    // guards against garbage.
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            // The archive header (JSON) tells us source datastore and snapshot.
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    // Freshly created snapshot dir: extract into it, and remove
                    // it again on error or incomplete data.
                    task_log!(worker, "restore snapshot {}", backup_dir);

                    match restore_snapshot_archive(worker, reader, &path) {
                        Err(err) => {
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            std::fs::remove_dir_all(&path)?;
                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    // Early return: the fall-through below is only for the
                    // "snapshot already exists" / "no target" cases.
                    return Ok(());
                }
                // Snapshot dir already exists: fall through, skip the data but
                // still register the snapshot in the catalog.
            }

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target.as_ref().map(|t| t.0);

            // A `None` result means an aborted stream without end marker -
            // nothing gets registered in that case.
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number, &source_datastore)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
454
455 fn restore_chunk_archive<'a>(
456 worker: &WorkerTask,
457 reader: Box<dyn 'a + TapeRead>,
458 datastore: Option<&DataStore>,
459 verbose: bool,
460 ) -> Result<Option<Vec<[u8;32]>>, Error> {
461
462 let mut chunks = Vec::new();
463
464 let mut decoder = ChunkArchiveDecoder::new(reader);
465
466 let result: Result<_, Error> = proxmox::try_block!({
467 while let Some((digest, blob)) = decoder.next_chunk()? {
468
469 worker.check_abort()?;
470
471 if let Some(datastore) = datastore {
472 let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
473 if !chunk_exists {
474 blob.verify_crc()?;
475
476 if blob.crypt_mode()? == CryptMode::None {
477 blob.decode(None, Some(&digest))?; // verify digest
478 }
479 if verbose {
480 task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
481 }
482 datastore.insert_chunk(&blob, &digest)?;
483 } else if verbose {
484 task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
485 }
486 } else if verbose {
487 task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
488 }
489 chunks.push(digest);
490 }
491 Ok(())
492 });
493
494 match result {
495 Ok(()) => Ok(Some(chunks)),
496 Err(err) => {
497 let reader = decoder.reader();
498
499 // check if this stream is marked incomplete
500 if let Ok(true) = reader.is_incomplete() {
501 return Ok(Some(chunks));
502 }
503
504 // check if this is an aborted stream without end marker
505 if let Ok(false) = reader.has_end_marker() {
506 worker.log("missing stream end marker".to_string());
507 return Ok(None);
508 }
509
510 // else the archive is corrupt
511 Err(err)
512 }
513 }
514 }
515
516 fn restore_snapshot_archive<'a>(
517 worker: &WorkerTask,
518 reader: Box<dyn 'a + TapeRead>,
519 snapshot_path: &Path,
520 ) -> Result<bool, Error> {
521
522 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
523 match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
524 Ok(()) => Ok(true),
525 Err(err) => {
526 let reader = decoder.input();
527
528 // check if this stream is marked incomplete
529 if let Ok(true) = reader.is_incomplete() {
530 return Ok(false);
531 }
532
533 // check if this is an aborted stream without end marker
534 if let Ok(false) = reader.has_end_marker() {
535 return Ok(false);
536 }
537
538 // else the archive is corrupt
539 Err(err)
540 }
541 }
542 }
543
544 fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
545 worker: &WorkerTask,
546 decoder: &mut pxar::decoder::sync::Decoder<R>,
547 snapshot_path: &Path,
548 ) -> Result<(), Error> {
549
550 let _root = match decoder.next() {
551 None => bail!("missing root entry"),
552 Some(root) => {
553 let root = root?;
554 match root.kind() {
555 pxar::EntryKind::Directory => { /* Ok */ }
556 _ => bail!("wrong root entry type"),
557 }
558 root
559 }
560 };
561
562 let root_path = Path::new("/");
563 let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
564
565 let mut manifest = None;
566
567 loop {
568 worker.check_abort()?;
569
570 let entry = match decoder.next() {
571 None => break,
572 Some(entry) => entry?,
573 };
574 let entry_path = entry.path();
575
576 match entry.kind() {
577 pxar::EntryKind::File { .. } => { /* Ok */ }
578 _ => bail!("wrong entry type for {:?}", entry_path),
579 }
580 match entry_path.parent() {
581 None => bail!("wrong parent for {:?}", entry_path),
582 Some(p) => {
583 if p != root_path {
584 bail!("wrong parent for {:?}", entry_path);
585 }
586 }
587 }
588
589 let filename = entry.file_name();
590 let mut contents = match decoder.contents() {
591 None => bail!("missing file content"),
592 Some(contents) => contents,
593 };
594
595 let mut archive_path = snapshot_path.to_owned();
596 archive_path.push(&filename);
597
598 let mut tmp_path = archive_path.clone();
599 tmp_path.set_extension("tmp");
600
601 if filename == manifest_file_name {
602
603 let blob = DataBlob::load_from_reader(&mut contents)?;
604 let options = CreateOptions::new();
605 replace_file(&tmp_path, blob.raw_data(), options)?;
606
607 manifest = Some(BackupManifest::try_from(blob)?);
608 } else {
609 let mut tmpfile = std::fs::OpenOptions::new()
610 .write(true)
611 .create(true)
612 .read(true)
613 .open(&tmp_path)
614 .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;
615
616 std::io::copy(&mut contents, &mut tmpfile)?;
617
618 if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
619 bail!("Atomic rename file {:?} failed - {}", archive_path, err);
620 }
621 }
622 }
623
624 let manifest = match manifest {
625 None => bail!("missing manifest"),
626 Some(manifest) => manifest,
627 };
628
629 for item in manifest.files() {
630 let mut archive_path = snapshot_path.to_owned();
631 archive_path.push(&item.filename);
632
633 match archive_type(&item.filename)? {
634 ArchiveType::DynamicIndex => {
635 let index = DynamicIndexReader::open(&archive_path)?;
636 let (csum, size) = index.compute_csum();
637 manifest.verify_file(&item.filename, &csum, size)?;
638 }
639 ArchiveType::FixedIndex => {
640 let index = FixedIndexReader::open(&archive_path)?;
641 let (csum, size) = index.compute_csum();
642 manifest.verify_file(&item.filename, &csum, size)?;
643 }
644 ArchiveType::Blob => {
645 let mut tmpfile = std::fs::File::open(&archive_path)?;
646 let (csum, size) = compute_file_csum(&mut tmpfile)?;
647 manifest.verify_file(&item.filename, &csum, size)?;
648 }
649 }
650 }
651
652 // commit manifest
653 let mut manifest_path = snapshot_path.to_owned();
654 manifest_path.push(MANIFEST_BLOB_NAME);
655 let mut tmp_manifest_path = manifest_path.clone();
656 tmp_manifest_path.set_extension("tmp");
657
658 if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
659 bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
660 }
661
662 Ok(())
663 }