// proxmox-backup.git: proxmox-file-restore/src/main.rs
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::StreamExt;
use serde_json::{json, Value};
use tokio::io::AsyncWriteExt;

use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::cli::{
    complete_file_name, default_table_format_options, format_and_print_result_full,
    get_output_format, init_cli_logger, run_cli_command, CliCommand, CliCommandMap, CliEnvironment,
    ColumnConfig, OUTPUT_FORMAT,
};
use proxmox_router::{http_err, HttpError};
use proxmox_schema::api;
use proxmox_sys::fs::{create_path, CreateOptions};
use pxar::accessor::aio::Accessor;
use pxar::decoder::aio::Decoder;

use pbs_api_types::{file_restore::FileRestoreFormat, BackupDir, BackupNamespace, CryptMode};
use pbs_client::pxar::{create_tar, create_zip, extract_sub_dir, extract_sub_dir_seq};
use pbs_client::tools::{
    complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
    key_source::{
        crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA,
    },
    REPO_URL_SCHEMA,
};
use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader};
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute};
use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
use pbs_datastore::index::IndexFile;
use pbs_datastore::CATALOG_NAME;
use pbs_key_config::decrypt_key;
use pbs_tools::crypt_config::CryptConfig;

pub mod block_driver;
pub use block_driver::*;

pub mod cpio;

mod block_driver_qemu;
mod qemu_helper;

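/// Parsed form of the user-supplied restore path (see `parse_path` below): either the
/// archive listing of a snapshot, a sub-path inside a `.pxar.didx` file archive, or a
/// sub-path inside a `.img.fidx` block-device image that is accessed via the VM-based
/// block driver.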
enum ExtractPath {
    ListArchives,
    Pxar(String, Vec<u8>),
    VM(String, Vec<u8>),
}

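// Maps the (possibly base64-encoded) path string onto an `ExtractPath`. Illustrative
// archive names, derived from the rules below:
//   "/"                          -> ExtractPath::ListArchives
//   "/root.pxar.didx/etc/hosts"  -> ExtractPath::Pxar("root.pxar.didx", b"/etc/hosts")
//   "/drive-scsi0.img.fidx"      -> ExtractPath::VM("drive-scsi0.img.fidx", b"")
// Anything not ending in ".pxar.didx" or ".img.fidx" is rejected.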
fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
    let mut bytes = if base64 {
        base64::decode(&path)
            .map_err(|err| format_err!("Failed base64-decoding path '{path}' - {err}"))?
    } else {
        path.into_bytes()
    };

    if bytes == b"/" {
        return Ok(ExtractPath::ListArchives);
    }

    while !bytes.is_empty() && bytes[0] == b'/' {
        bytes.remove(0);
    }

    let (file, path) = {
        let slash_pos = bytes.iter().position(|c| *c == b'/').unwrap_or(bytes.len());
        let path = bytes.split_off(slash_pos);
        let file = String::from_utf8(bytes)?;
        (file, path)
    };

    if file.ends_with(".pxar.didx") {
        Ok(ExtractPath::Pxar(file, path))
    } else if file.ends_with(".img.fidx") {
        Ok(ExtractPath::VM(file, path))
    } else {
        bail!("'{file}' is not supported for file-restore");
    }
}

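// Picks the key location from the CLI parameters without reading it: an explicit
// `keyfile` path takes precedence, otherwise a passed `keyfd` file descriptor is
// addressed through its "/dev/fd/<fd>" path.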
fn keyfile_path(param: &Value) -> Option<String> {
    if let Some(Value::String(keyfile)) = param.get("keyfile") {
        return Some(keyfile.to_owned());
    }

    if let Some(Value::Number(keyfd)) = param.get("keyfd") {
        return Some(format!("/dev/fd/{keyfd}"));
    }

    None
}

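// Shared implementation behind the `list` command: start a reader session on the
// snapshot, verify the manifest fingerprint, then either list the top-level archives
// ("/"), resolve the sub-path through the snapshot catalog (pxar archives), or delegate
// to the block driver for paths inside `.img.fidx` block-device images.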
async fn list_files(
    repo: BackupRepository,
    namespace: BackupNamespace,
    snapshot: BackupDir,
    path: ExtractPath,
    crypt_config: Option<Arc<CryptConfig>>,
    keyfile: Option<String>,
    driver: Option<BlockDriverType>,
) -> Result<Vec<ArchiveEntry>, Error> {
    let client = connect(&repo)?;
    let client = BackupReader::start(
        &client,
        crypt_config.clone(),
        repo.store(),
        &namespace,
        &snapshot,
        true,
    )
    .await?;

    let (manifest, _) = client.download_manifest().await?;
    manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    match path {
        ExtractPath::ListArchives => {
            let mut entries = vec![];
            for file in manifest.files() {
                if !file.filename.ends_with(".pxar.didx") && !file.filename.ends_with(".img.fidx") {
                    continue;
                }
                let path = format!("/{}", file.filename);
                let attr = if file.filename.ends_with(".pxar.didx") {
                    // a pxar file is a file archive, so its root is also a directory root
                    Some(&DirEntryAttribute::Directory { start: 0 })
                } else {
                    None
                };
                entries.push(ArchiveEntry::new_with_size(
                    path.as_bytes(),
                    attr,
                    Some(file.size),
                ));
            }

            Ok(entries)
        }
        ExtractPath::Pxar(file, mut path) => {
            let index = client
                .download_dynamic_index(&manifest, CATALOG_NAME)
                .await?;
            let most_used = index.find_most_used_chunks(8);
            let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
            let chunk_reader = RemoteChunkReader::new(
                client.clone(),
                crypt_config,
                file_info.chunk_crypt_mode(),
                most_used,
            );
            let reader = BufferedDynamicReader::new(index, chunk_reader);
            let mut catalog_reader = CatalogReader::new(reader);

            let mut fullpath = file.into_bytes();
            fullpath.append(&mut path);

            catalog_reader.list_dir_contents(&fullpath)
        }
        ExtractPath::VM(file, path) => {
            let details = SnapRestoreDetails {
                manifest,
                repo,
                namespace,
                snapshot,
                keyfile,
            };
            data_list(driver, details, file, path).await
        }
    }
}

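// Typical CLI use (repository and snapshot values are illustrative, not taken from this
// file); "snapshot" and "path" are positional arguments, see `main` below:
//   proxmox-file-restore list host/backup-vm/2023-11-01T12:00:00Z / \
//       --repository root@pam@pbs.example.com:store1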
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "path": {
                description: "(Sub-)Path to list.",
                type: String,
            },
            "base64": {
                type: Boolean,
                description: "If set, 'path' will be interpreted as base64 encoded.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "driver": {
                type: BlockDriverType,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            "timeout": {
                type: Integer,
                description: "Defines the maximum time the call should take.",
                minimum: 1,
                optional: true,
            },
        }
    },
    returns: {
        description: "A list of elements under the given path",
        type: Array,
        items: {
            type: ArchiveEntry,
        }
    }
)]
/// List a directory from a backup snapshot.
async fn list(
    ns: Option<BackupNamespace>,
    snapshot: String,
    path: String,
    base64: bool,
    timeout: Option<u64>,
    param: Value,
) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let ns = ns.unwrap_or_default();
    let snapshot: BackupDir = snapshot.parse()?;
    let path = parse_path(path, base64)?;

    let keyfile = keyfile_path(&param);
    let crypto = crypto_parameters_keep_fd(&param)?;
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    log::error!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let driver: Option<BlockDriverType> = match param.get("driver") {
        Some(drv) => Some(serde::Deserialize::deserialize(drv)?),
        None => None,
    };

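    // When a timeout is given, bound the whole listing with tokio::time::timeout and map
    // expiry to an HTTP 503-style error, so API callers can tell "took too long" apart
    // from a real failure; without a timeout the listing simply runs to completion.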
    let result = if let Some(timeout) = timeout {
        match tokio::time::timeout(
            std::time::Duration::from_secs(timeout),
            list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver),
        )
        .await
        {
            Ok(res) => res,
            Err(_) => Err(http_err!(SERVICE_UNAVAILABLE, "list not finished in time")),
        }
    } else {
        list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver).await
    };

    let output_format = get_output_format(&param);

    if let Err(err) = result {
        if &output_format == "text" {
            return Err(err);
        }
        let (msg, code) = match err.downcast_ref::<HttpError>() {
            Some(HttpError { code, message }) => (message.clone(), Some(code)),
            None => (err.to_string(), None),
        };
        let mut json_err = json!({
            "message": msg,
        });
        if let Some(code) = code {
            json_err["code"] = Value::from(code.as_u16());
        }
        match output_format.as_ref() {
            "json-pretty" => println!("{}", serde_json::to_string_pretty(&json_err)?),
            _ => println!("{}", serde_json::to_string(&json_err)?),
        }
        return Ok(());
    }

    let options = default_table_format_options()
        .sortby("type", false)
        .sortby("text", false)
        .column(ColumnConfig::new("type"))
        .column(ColumnConfig::new("text").header("name"))
        .column(ColumnConfig::new("mtime").header("last modified"))
        .column(ColumnConfig::new("size"));

    let output_format = get_output_format(&param);
    format_and_print_result_full(
        &mut json!(result.unwrap()),
        &API_METHOD_LIST.returns,
        &output_format,
        &options,
    );

    Ok(())
}

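// Typical CLI use (illustrative values): restore a sub-directory of a file-level archive
// into a local directory, or stream it to stdout by passing "-" as target:
//   proxmox-file-restore extract host/backup-vm/2023-11-01T12:00:00Z /root.pxar.didx/etc /tmp/restore
//   proxmox-file-restore extract host/backup-vm/2023-11-01T12:00:00Z /root.pxar.didx/etc -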
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "path": {
                description: "Path to restore. Directories will be restored as archive files if extracted to stdout.",
                type: String,
            },
            "format": {
                type: FileRestoreFormat,
                optional: true,
            },
            "zstd": {
                type: bool,
                description: "If true, output will be zstd compressed.",
                optional: true,
                default: false,
            },
            "base64": {
                type: Boolean,
                description: "If set, 'path' will be interpreted as base64 encoded.",
                optional: true,
                default: false,
            },
            target: {
                type: String,
                optional: true,
                description: "Target directory path. Use '-' to write to standard output.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            verbose: {
                type: Boolean,
                description: "Print verbose information",
                optional: true,
                default: false,
            },
            "driver": {
                type: BlockDriverType,
                optional: true,
            },
        }
    }
)]
/// Restore files from a backup snapshot.
#[allow(clippy::too_many_arguments)]
async fn extract(
    ns: Option<BackupNamespace>,
    snapshot: String,
    path: String,
    base64: bool,
    target: Option<String>,
    format: Option<FileRestoreFormat>,
    zstd: bool,
    param: Value,
) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let namespace = ns.unwrap_or_default();
    let snapshot: BackupDir = snapshot.parse()?;
    let orig_path = path;
    let path = parse_path(orig_path.clone(), base64)?;

    let target = match target {
        Some(target) if target == "-" => None,
        Some(target) => Some(PathBuf::from(target)),
        None => Some(std::env::current_dir()?),
    };

    let keyfile = keyfile_path(&param);
    let crypto = crypto_parameters_keep_fd(&param)?;
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    log::error!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(&repo)?;
    let client = BackupReader::start(
        &client,
        crypt_config.clone(),
        repo.store(),
        &namespace,
        &snapshot,
        true,
    )
    .await?;
    let (manifest, _) = client.download_manifest().await?;

    match path {
        ExtractPath::Pxar(archive_name, path) => {
            let file_info = manifest.lookup_file_info(&archive_name)?;
            let index = client
                .download_dynamic_index(&manifest, &archive_name)
                .await?;
            let most_used = index.find_most_used_chunks(8);
            let chunk_reader = RemoteChunkReader::new(
                client.clone(),
                crypt_config,
                file_info.chunk_crypt_mode(),
                most_used,
            );
            let reader = BufferedDynamicReader::new(index, chunk_reader);

            let archive_size = reader.archive_size();
            let reader = LocalDynamicReadAt::new(reader);
            let decoder = Accessor::new(reader, archive_size).await?;
            extract_to_target(decoder, &path, target, format, zstd).await?;
        }
        ExtractPath::VM(file, path) => {
            let details = SnapRestoreDetails {
                manifest,
                repo,
                namespace,
                snapshot,
                keyfile,
            };
            let driver: Option<BlockDriverType> = match param.get("driver") {
                Some(drv) => Some(serde::Deserialize::deserialize(drv)?),
                None => None,
            };

            if let Some(mut target) = target {
                let reader = data_extract(
                    driver,
                    details,
                    file,
                    path.clone(),
                    Some(FileRestoreFormat::Pxar),
                    false,
                )
                .await?;
                let decoder = Decoder::from_tokio(reader).await?;
                extract_sub_dir_seq(&target, decoder).await?;

                // the VM auto-generates a .pxarexclude-cli file when encoding the
                // archive; it is of no use to the user, so try to remove it
                target.push(".pxarexclude-cli");
                std::fs::remove_file(target).map_err(|err| {
                    format_err!("unable to remove temporary .pxarexclude-cli file - {err}")
                })?;
            } else {
                let mut reader =
                    data_extract(driver, details, file, path.clone(), format, zstd).await?;
                tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await?;
            }
        }
        _ => {
            bail!("cannot extract '{orig_path}'");
        }
    }

    Ok(())
}

async fn extract_to_target<T>(
    decoder: Accessor<T>,
    path: &[u8],
    target: Option<PathBuf>,
    format: Option<FileRestoreFormat>,
    zstd: bool,
) -> Result<(), Error>
where
    T: pxar::accessor::ReadAt + Clone + Send + Sync + Unpin + 'static,
{
    let path = if path.is_empty() { b"/" } else { path };
    let path = OsStr::from_bytes(path);

    if let Some(target) = target {
        extract_sub_dir(target, decoder, path).await?;
    } else {
        extract_archive(decoder, path, format, zstd).await?;
    }

    Ok(())
}

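// Streams a single file or a whole directory of the pxar archive to stdout. A duplex
// pipe decouples production from output: a spawned task writes the plain file contents,
// a zip stream, or a tar stream into one end, while this function drains the other end
// to stdout, optionally pushing the data through a zstd encoder first.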
async fn extract_archive<T>(
    decoder: Accessor<T>,
    path: &OsStr,
    format: Option<FileRestoreFormat>,
    zstd: bool,
) -> Result<(), Error>
where
    T: pxar::accessor::ReadAt + Clone + Send + Sync + Unpin + 'static,
{
    let path = path.to_owned();
    let root = decoder.open_root().await?;
    let file = root
        .lookup(&path)
        .await?
        .ok_or_else(|| format_err!("error opening '{:?}'", &path))?;

    let (mut writer, mut reader) = tokio::io::duplex(1024 * 1024);
    if file.is_regular_file() {
        match format {
            Some(FileRestoreFormat::Plain) | None => {}
            _ => bail!("cannot extract single files as archive"),
        }
        tokio::spawn(
            async move { tokio::io::copy(&mut file.contents().await?, &mut writer).await },
        );
    } else {
        match format {
            Some(FileRestoreFormat::Pxar) => {
                bail!("pxar target not supported for pxar source");
            }
            Some(FileRestoreFormat::Plain) => {
                bail!("plain file not supported for non-regular files");
            }
            Some(FileRestoreFormat::Zip) | None => {
                tokio::spawn(create_zip(writer, decoder, path));
            }
            Some(FileRestoreFormat::Tar) => {
                tokio::spawn(create_tar(writer, decoder, path));
            }
        }
    }

    if zstd {
        let mut zstdstream = ZstdEncoder::new(tokio_util::io::ReaderStream::new(reader))?;
        let mut stdout = tokio::io::stdout();
        while let Some(buf) = zstdstream.next().await {
            let buf = buf?;
            stdout.write_all(&buf).await?;
        }
    } else {
        tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await?;
    }

    Ok(())
}

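// CLI entry point: wires up the `list`, `extract`, `status` and `stop` subcommands
// (status/stop are provided via the block-driver re-exports) and runs them on the
// proxmox-async runtime. Logging defaults to "info" and switches to "debug" when
// qemu_helper::debug_mode() reports debug mode.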
fn main() {
    let loglevel = match qemu_helper::debug_mode() {
        true => "debug",
        false => "info",
    };
    init_cli_logger("PBS_LOG", loglevel);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
        .arg_param(&["snapshot", "path"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let restore_cmd_def = CliCommand::new(&API_METHOD_EXTRACT)
        .arg_param(&["snapshot", "path", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("target", complete_file_name);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
    let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
        .arg_param(&["name"])
        .completion_cb("name", complete_block_driver_ids);

    let cmd_def = CliCommandMap::new()
        .insert("list", list_cmd_def)
        .insert("extract", restore_cmd_def)
        .insert("status", status_cmd_def)
        .insert("stop", stop_cmd_def);

    let rpcenv = CliEnvironment::new();
    run_cli_command(
        cmd_def,
        rpcenv,
        Some(|future| proxmox_async::runtime::main(future)),
    );
}

/// Returns a runtime dir owned by the current user.
/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
    let uid = nix::unistd::Uid::current();
    let mut path: std::path::PathBuf = pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
    path.push(uid.to_string());
    create_run_dir()?;
    std::fs::create_dir_all(&path)?;
    Ok(path)
}

/// FIXME: proxmox-file-restore should not depend on this!
fn create_run_dir() -> Result<(), Error> {
    let backup_user = backup_user()?;
    let opts = CreateOptions::new()
        .owner(backup_user.uid)
        .group(backup_user.gid);
    let _: bool = create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), None, Some(opts))?;
    Ok(())
}

/// Return User info for the 'backup' user (``getpwnam_r(3)``)
pub fn backup_user() -> Result<nix::unistd::User, Error> {
    nix::unistd::User::from_name(pbs_buildcfg::BACKUP_USER_NAME)?.ok_or_else(|| {
        format_err!(
            "Unable to lookup '{}' user.",
            pbs_buildcfg::BACKUP_USER_NAME
        )
    })
}