proxmox-backup.git: src/api2/tape/restore.rs
Commit: tape: allow to abort restore tasks
use std::path::Path;
use std::ffi::OsStr;
use std::convert::TryFrom;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
        section_config::SectionConfigData,
    },
    tools::{
        Uuid,
        io::ReadExt,
        fs::{
            replace_file,
            CreateOptions,
        },
    },
};

use crate::{
    task_log,
    task::TaskState,
    tools::compute_file_csum,
    api2::types::{
        DATASTORE_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        Authid,
        MediaPoolConfig,
    },
    config::{
        self,
        drive::check_drive_exists,
    },
    backup::{
        archive_type,
        MANIFEST_BLOB_NAME,
        CryptMode,
        DataStore,
        BackupDir,
        DataBlob,
        BackupManifest,
        ArchiveType,
        IndexFile,
        DynamicIndexReader,
        FixedIndexReader,
    },
    server::WorkerTask,
    tape::{
        TAPE_STATUS_DIR,
        TapeRead,
        MediaId,
        MediaCatalog,
        ChunkArchiveDecoder,
        MediaPool,
        Inventory,
        file_formats::{
            PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
            PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
            MediaContentHeader,
        },
        drive::{
            TapeDriver,
            request_and_load_media,
        }
    },
};
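
// Tape restore API: read a complete media set back from tape and write the
// contained snapshots and chunk archives into a datastore, rebuilding the
// media catalog along the way.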

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);

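// Usage sketch (an assumption, not defined in this file): with the standard
// api2 routing this handler should answer POST requests on
// `/api2/json/tape/restore` with the parameters `store`, `drive` and
// `media-set` (a media set UUID), and it returns the UPID of the spawned
// "tape-restore" worker task.
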
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let media_set_uuid = media_set.parse()?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    // check if pool exists
    let (config, _digest) = config::media_pool::config()?;
    let _pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {

            let _lock = MediaPool::lock(status_path, &pool)?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            let mut encryption_key_fingerprint = None;

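            // The media set must be complete: every sequence number needs a
            // known medium. While collecting the media ids, remember the
            // encryption key fingerprint (if any) so it can be logged below.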
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore: {}", store);
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

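            // Restore the members one after the other; request_and_restore_media()
            // requests/loads each tape and replays its content into the datastore.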
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &datastore,
                    &auth_id,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

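// Before anything is restored from a loaded tape, its media-set label is
// checked against the expected media-set UUID, and drive encryption is
// configured from the key fingerprint stored in that label, so encrypted
// media sets restore transparently.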
/// Request and restore complete media without using existing catalog (create catalog instead)
pub fn request_and_restore_media(
    worker: &WorkerTask,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
) -> Result<(), Error> {

    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;

    match info.media_set_label {
        None => {
            bail!("missing media set label on media {} ({})",
                  media_id.label.label_text, media_id.label.uuid);
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!("wrong media set label on media {} ({} != {})",
                      media_id.label.label_text, media_id.label.uuid,
                      media_set_uuid);
            }
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
}

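// On tape, content is stored as a sequence of files; each file starts with a
// MediaContentHeader and contains either a snapshot archive (pxar) or a chunk
// archive. restore_media() walks these files until end-of-tape and rebuilds a
// temporary media catalog as a side effect.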
/// Restore complete media content and catalog
///
/// If target is None, only the media catalog is created (no data is restored).
pub fn restore_media(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStore, &Authid)>,
    verbose: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file()? {
            None => {
                task_log!(worker, "detected EOT after {} files", current_file_number);
                break;
            }
            Some(reader) => reader,
        };

        restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
    }

    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}

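// Dispatch on the content magic of the current tape file: label archives are
// unexpected at this point, snapshot archives are unpacked into the datastore
// (or skipped when only a catalog is built), and chunk archives are fed into
// the chunk store.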
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    task_log!(worker, "restore snapshot {}", backup_dir);

                    match restore_snapshot_archive(worker, reader, &path) {
                        Err(err) => {
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            std::fs::remove_dir_all(&path)?;
                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
            }

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            task_log!(worker, "Found chunk archive: {}", current_file_number);
            let datastore = target.as_ref().map(|t| t.0);

            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}

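// Chunk archives are streamed chunk by chunk. The worker abort flag is checked
// between chunks (this is what makes long-running restores cancellable),
// chunks that already exist in the datastore are only touched instead of
// rewritten, and a stream that is properly marked incomplete is treated as a
// successful partial read rather than an error.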
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {

            worker.check_abort()?;

            if let Some(datastore) = datastore {
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
                }
            } else if verbose {
                task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

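// A snapshot archive is a flat pxar archive containing the snapshot's manifest
// blob plus its index and blob files. Returns Ok(false) for streams that were
// cut short on tape (marked incomplete or missing the end marker) so the
// caller can clean up and skip the snapshot instead of failing the restore.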
fn restore_snapshot_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
        Ok(()) => Ok(true),
        Err(err) => {
            let reader = decoder.input();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(false);
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                return Ok(false);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

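// Unpacking a snapshot archive: every entry must be a regular file directly
// below the archive root. Files are written to "*.tmp" paths and renamed into
// place; the manifest is verified against all restored indexes/blobs and only
// renamed last, so an interrupted restore never leaves a snapshot that looks
// complete. The abort flag is checked once per extracted file.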
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}