proxmox-backup.git: src/api2/tape/restore.rs
tape: set encryption key on restore
use std::path::Path;
use std::ffi::OsStr;
use std::convert::TryFrom;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
        section_config::SectionConfigData,
    },
    tools::{
        Uuid,
        io::ReadExt,
        fs::{
            replace_file,
            CreateOptions,
        },
    },
};

use crate::{
    tools::compute_file_csum,
    api2::types::{
        DATASTORE_SCHEMA,
        UPID_SCHEMA,
        Authid,
        MediaPoolConfig,
    },
    config::{
        self,
        drive::check_drive_exists,
    },
    backup::{
        archive_type,
        MANIFEST_BLOB_NAME,
        CryptMode,
        DataStore,
        BackupDir,
        DataBlob,
        BackupManifest,
        ArchiveType,
        IndexFile,
        DynamicIndexReader,
        FixedIndexReader,
    },
    server::WorkerTask,
    tape::{
        TAPE_STATUS_DIR,
        TapeRead,
        MediaId,
        MediaCatalog,
        ChunkArchiveDecoder,
        MediaPool,
        Inventory,
        file_formats::{
            PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
            PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
            PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
            PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
            MediaContentHeader,
        },
        drive::{
            TapeDriver,
            request_and_load_media,
        }
    },
};

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
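
// Note: this router registers a single POST handler; given the module path
// (src/api2/tape/restore.rs) it is presumably exposed as `POST /api2/json/tape/restore`,
// taking the target `store` and the `media-set` UUID as parameters.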

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Restore data from media-set
pub fn restore(
    store: String,
    media_set: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let media_set_uuid = media_set.parse()?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &pool_config.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {

            let _lock = MediaPool::lock(status_path, &pool)?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            let mut encryption_key_fingerprint = None;
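
            // Walk the media set members: bail out if any sequence number is missing,
            // and remember the first encryption key fingerprint found on a media set
            // label (logged below).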
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            let drive = &pool_config.drive;

            worker.log(format!("Restore mediaset '{}'", media_set));
            if let Some(fingerprint) = encryption_key_fingerprint {
                worker.log(format!("Encryption key fingerprint: {}", fingerprint));
            }
            worker.log(format!("Pool: {}", pool));
            worker.log(format!("Datastore: {}", store));
            worker.log(format!("Drive: {}", drive));
            worker.log(format!(
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            ));

            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    drive,
                    &datastore,
                    &auth_id,
                )?;
            }

            worker.log(format!("Restore mediaset '{}' done", media_set));
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

/// Request and restore complete media without using existing catalog (create catalog instead)
pub fn request_and_restore_media(
    worker: &WorkerTask,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
) -> Result<(), Error> {

    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;

    match info.media_set_label {
        None => {
            bail!("missing media set label on media {} ({})",
                  media_id.label.label_text, media_id.label.uuid);
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!("wrong media set label on media {} ({} != {})",
                      media_id.label.label_text, media_id.label.uuid,
                      media_set_uuid);
            }
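
            // Activate the encryption key referenced by the media set label on the
            // drive (or disable encryption if the label carries no fingerprint), so
            // encrypted data can be read back.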
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
}

/// Restore complete media content and catalog
///
/// If target is None, only the catalog is created (no data is restored).
pub fn restore_media(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStore, &Authid)>,
    verbose: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
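
    // Read files from the tape one by one until EOT and hand each of them to
    // restore_archive().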
    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file()? {
            None => {
                worker.log(format!("detected EOT after {} files", current_file_number));
                break;
            }
            Some(reader) => reader,
        };

        restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
    }

    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}

fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);
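
    // Every file on tape starts with a MediaContentHeader; the content magic
    // decides how the rest is handled: label archives are never expected here,
    // snapshot archives carry a pxar stream with the snapshot files, and chunk
    // archives carry a sequence of (digest, blob) pairs.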
    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    worker.log(format!("restore snapshot {}", backup_dir));

                    match restore_snapshot_archive(reader, &path) {
                        Err(err) => {
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            std::fs::remove_dir_all(&path)?;
                            worker.log(format!("skip incomplete snapshot {}", backup_dir));
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
            }
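
            // No target datastore, or the snapshot already exists on disk: skip over
            // the archive data, but still register the snapshot in the catalog if the
            // stream is complete.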
            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            worker.log(format!("Found chunk archive: {}", current_file_number));
            let datastore = target.as_ref().map(|t| t.0);

            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                worker.log(format!("register {} chunks", chunks.len()));
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}

fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);
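
    // Decode (digest, blob) pairs from the archive. Chunks already present in the
    // datastore are only touched, not written again; new chunks get a CRC check
    // and, for unencrypted blobs, a digest verification before insertion.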
    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {
            if let Some(datastore) = datastore {
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                }
            } else if verbose {
                worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

fn restore_snapshot_archive<'a>(
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
        Ok(()) => Ok(true),
        Err(err) => {
            let reader = decoder.input();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(false);
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                return Ok(false);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };
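
    // The snapshot archive is expected to be a flat pxar directory: regular files
    // directly below the root, one of them being the backup manifest
    // (MANIFEST_BLOB_NAME).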
    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");
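
        // The manifest is only written to its temporary (.tmp) path here and renamed
        // into place after all other files have been restored and verified (see the
        // commit step below), so a partially restored snapshot never looks complete.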
        if filename == manifest_file_name {

            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };
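
    // Verify each restored archive against the checksums recorded in the manifest
    // before committing the manifest itself.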
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}