// src/bin/proxmox-backup-client.rs
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::io::{self, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::select;
use futures::stream::{StreamExt, TryStreamExt};
use nix::unistd::{fork, ForkResult, pipe};
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::sys::linux::tty;
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    archive_type,
    encrypt_key_with_passphrase,
    load_and_decrypt_key,
    store_key_config,
    verify_chunk_size,
    ArchiveType,
    AsyncReadChunk,
    BackupDir,
    BackupGroup,
    BackupManifest,
    BufferedDynamicReader,
    CatalogReader,
    CatalogWriter,
    CATALOG_NAME,
    ChunkStream,
    CryptConfig,
    DataBlob,
    DynamicIndexReader,
    FixedChunkStream,
    FixedIndexReader,
    IndexFile,
    KeyConfig,
    MANIFEST_BLOB_NAME,
    Shell,
};

const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";


const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}

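// Remember a repository in $XDG_CACHE_HOME/proxmox-backup/repo-list so shell
// completion can offer it later. A usage counter is kept per repository and
// only the ten most-used entries are retained.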
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

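// Build an authenticated HTTP client for the given server and user. The TLS
// fingerprint and the password may be supplied through the PBS_FINGERPRINT
// and PBS_PASSWORD environment variables; otherwise the client can prompt
// interactively and falls back to the cached ticket and fingerprint.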
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(true)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    HttpClient::new(server, userid, options)
}

async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}

async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

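// Resolve a backup group to its most recent snapshot by sorting the snapshot
// list in descending backup-time order and taking the first entry.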
async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}

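// Create a pxar archive from `dir_path` and upload it as a dynamically
// chunked index. The chunker runs in a separate task and feeds the upload
// through a bounded channel, so archive creation and upload run concurrently.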
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        exclude_pattern,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow buffering up to 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}

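// Upload a file or block device as a fixed index, splitting it into
// fixed-size chunks (4 MiB unless an explicit chunk size is given).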
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
        Some(path.parse()?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        }
        Ok(tools::format::render_backup_file_list(&filenames[..]))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to login. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

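// Start the catalog upload in a background task. The returned CatalogWriter
// feeds the upload through a bounded channel; the oneshot receiver yields
// the upload result once the catalog stream has been closed.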
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering up to 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}

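// Illustrative invocation (repository and paths are placeholders; the
// archive extension selects the backup type, e.g. '.pxar' for directories):
//
//   proxmox-backup-client backup root.pxar:/ etc.pxar:/etc \
//       --repository backup-server:store1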
#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with the same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let is_encrypted = Some(crypt_config.is_some());

    let client = BackupWriter::start(
        client,
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
                    catalog = Some(cat);
                    catalog_result_rx = Some(res);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    crypt_config.clone(),
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
        }
    }

    // finalize and upload the catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close the upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create the manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

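// Stream the raw image data of a fixed index directly to `writer`, reading
// the chunks sequentially through a RemoteChunkReader.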
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

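// Map a user-supplied archive name to the server-side file name and type:
// "data.pxar" becomes ("data.pxar.didx", DynamicIndex), "disk.img" becomes
// ("disk.img.fidx", FixedIndex), and anything else defaults to a ".blob".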
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

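// Restore dispatches on the archive type derived from the requested name:
// blobs are copied verbatim, dynamic indexes are extracted as pxar archives
// (or piped raw to stdout), and fixed indexes are dumped as raw images.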
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                &[],
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            logfile: {
                type: String,
                description: "The path to the log file you want to upload.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}

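// The prune method is declared by hand instead of via #[api] so that the
// shared prune parameters can be merged in through the
// add_common_prune_prameters! macro (the misspelling is the macro's actual name).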
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}

async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(false)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_expenstion(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

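// Offer the powers of two from 64 to 4096 KB, matching CHUNK_SIZE_SCHEMA.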
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}

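// Obtain the encryption key passphrase: prefer the PBS_ENCRYPTION_PASSWORD
// environment variable, then fall back to prompting on the TTY.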
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

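// Create a new random 32 byte encryption key. With kdf=scrypt the key is
// stored encrypted under a passphrase read from the TTY; with kdf=none it
// is stored unencrypted.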
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

1913 fn key_mgmt_cli() -> CliCommandMap {
1914
1915 const KDF_SCHEMA: Schema =
1916 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
1917 .format(&ApiStringFormat::Enum(&[
1918 EnumEntry::new("scrypt", "SCrypt"),
1919 EnumEntry::new("none", "Do not encrypt the key")]))
1920 .default("scrypt")
1921 .schema();
1922
1923 #[sortable]
1924 const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
1925 &ApiHandler::Sync(&key_create),
1926 &ObjectSchema::new(
1927 "Create a new encryption key.",
1928 &sorted!([
1929 ("path", false, &StringSchema::new("File system path.").schema()),
1930 ("kdf", true, &KDF_SCHEMA),
1931 ]),
1932 )
1933 );
1934
1935 let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
1936 .arg_param(&["path"])
1937 .completion_cb("path", tools::complete_file_name);
1938
1939 #[sortable]
1940 const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
1941 &ApiHandler::Sync(&key_change_passphrase),
1942 &ObjectSchema::new(
1943 "Change the passphrase required to decrypt the key.",
1944 &sorted!([
1945 ("path", false, &StringSchema::new("File system path.").schema()),
1946 ("kdf", true, &KDF_SCHEMA),
1947 ]),
1948 )
1949 );
1950
1951 let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
1952 .arg_param(&["path"])
1953 .completion_cb("path", tools::complete_file_name);
1954
1955 const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
1956 &ApiHandler::Sync(&key_create_master_key),
1957 &ObjectSchema::new("Create a new 4096-bit RSA master pub/priv key pair.", &[])
1958 );
1959
1960 let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
1961
1962 #[sortable]
1963 const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
1964 &ApiHandler::Sync(&key_import_master_pubkey),
1965 &ObjectSchema::new(
1966 "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
1967 &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
1968 )
1969 );
1970
1971 let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
1972 .arg_param(&["path"])
1973 .completion_cb("path", tools::complete_file_name);
1974
1975 CliCommandMap::new()
1976 .insert("create", key_create_cmd_def)
1977 .insert("create-master-key", key_create_master_key_cmd_def)
1978 .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
1979 .insert("change-passphrase", key_change_passphrase_cmd_def)
1980 }
1981
1982 fn mount(
1983 param: Value,
1984 _info: &ApiMethod,
1985 _rpcenv: &mut dyn RpcEnvironment,
1986 ) -> Result<Value, Error> {
1987 let verbose = param["verbose"].as_bool().unwrap_or(false);
1988 if verbose {
1989 // Stay in the foreground with debug output enabled, as `None` is
1990 // passed for the RawFd.
1991 return proxmox_backup::tools::runtime::main(mount_do(param, None));
1992 }
1993
1994 // The process should be daemonized.
1995 // Make sure to fork before the async runtime is instantiated to avoid troubles.
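// (fork() only duplicates the calling thread, so a multi-threaded tokio
// runtime created before the fork would be left broken in the child.)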
1996 let pipe = pipe()?;
1997 match fork() {
1998 Ok(ForkResult::Parent { .. }) => {
1999 nix::unistd::close(pipe.1).unwrap();
2000 // Block the parent process until the child signals that it is ready.
2001 let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
2002 Ok(Value::Null)
2003 }
2004 Ok(ForkResult::Child) => {
2005 nix::unistd::close(pipe.0).unwrap();
2006 nix::unistd::setsid().unwrap();
2007 proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
2008 }
2009 Err(_) => bail!("failed to daemonize process"),
2010 }
2011 }
2012
2013 use proxmox_backup::client::RemoteChunkReader;
2014 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
2015 /// async use!
2016 ///
2017 /// Ideally, BufferedDynamicReader gets replaced so that the LruCache maps to a
2018 /// `BroadcastFuture<Chunk>`, allowing multiple threads to access it simultaneously
2019 /// without issuing duplicate reads over HTTP.
2020 struct BufferedDynamicReadAt {
2021 inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
2022 }
2023
2024 impl BufferedDynamicReadAt {
2025 fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
2026 Self {
2027 inner: Mutex::new(inner),
2028 }
2029 }
2030 }
2031
2032 impl pxar::accessor::ReadAt for BufferedDynamicReadAt {
2033 fn poll_read_at(
2034 self: Pin<&Self>,
2035 _cx: &mut Context,
2036 buf: &mut [u8],
2037 offset: u64,
2038 ) -> Poll<io::Result<usize>> {
2039 use std::io::Read;
2040 tokio::task::block_in_place(move || {
2041 let mut reader = self.inner.lock().unwrap();
2042 reader.seek(SeekFrom::Start(offset))?;
2043 Poll::Ready(Ok(reader.read(buf)?))
2044 })
2045 }
2046 }
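// Note: `tokio::task::block_in_place` signals the runtime that this worker
// thread is about to block, so other tasks can migrate to different workers;
// the seek/read runs synchronously and the poll above always returns
// `Poll::Ready`, never `Pending` - acceptable as a stop-gap per the caveat above.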
2047
2048 async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
2049 let repo = extract_repository_from_value(&param)?;
2050 let archive_name = tools::required_string_param(&param, "archive-name")?;
2051 let target = tools::required_string_param(&param, "target")?;
2052 let client = connect(repo.host(), repo.user())?;
2053
2054 record_repository(&repo);
2055
2056 let path = tools::required_string_param(&param, "snapshot")?;
2057 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
2058 let group: BackupGroup = path.parse()?;
2059 api_datastore_latest_snapshot(&client, repo.store(), group).await?
2060 } else {
2061 let snapshot: BackupDir = path.parse()?;
2062 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
2063 };
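// The "snapshot" parameter accepts both forms, e.g. (illustrative values):
//   host/myhost                        -> latest snapshot of the group
//   host/myhost/2020-06-25T06:38:29Z   -> one specific snapshot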
2064
2065 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
2066 let crypt_config = match keyfile {
2067 None => None,
2068 Some(path) => {
2069 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
2070 Some(Arc::new(CryptConfig::new(key)?))
2071 }
2072 };
2073
2074 let server_archive_name = if archive_name.ends_with(".pxar") {
2075 format!("{}.didx", archive_name)
2076 } else {
2077 bail!("Can only mount pxar archives.");
2078 };
2079
2080 let client = BackupReader::start(
2081 client,
2082 crypt_config.clone(),
2083 repo.store(),
2084 &backup_type,
2085 &backup_id,
2086 backup_time,
2087 true,
2088 ).await?;
2089
2090 let manifest = client.download_manifest().await?;
2091
2092 if server_archive_name.ends_with(".didx") {
2093 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
2094 let most_used = index.find_most_used_chunks(8);
2095 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
2096 let reader = BufferedDynamicReader::new(index, chunk_reader);
2097 let archive_size = reader.archive_size();
2098 let reader: proxmox_backup::pxar::fuse::Reader =
2099 Arc::new(BufferedDynamicReadAt::new(reader));
2100 let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
2101 let options = OsStr::new("ro,default_permissions");
2102
2103 let session = proxmox_backup::pxar::fuse::Session::mount(
2104 decoder,
2105 &options,
2106 false,
2107 Path::new(target),
2108 )
2109 .map_err(|err| format_err!("pxar mount failed: {}", err))?;
2110
2111 if let Some(pipe) = pipe {
2112 nix::unistd::chdir(Path::new("/")).unwrap();
2113 // Finish creation of the daemon by redirecting the standard file descriptors.
2114 let nullfd = nix::fcntl::open(
2115 "/dev/null",
2116 nix::fcntl::OFlag::O_RDWR,
2117 nix::sys::stat::Mode::empty(),
2118 ).unwrap();
2119 nix::unistd::dup2(nullfd, 0).unwrap();
2120 nix::unistd::dup2(nullfd, 1).unwrap();
2121 nix::unistd::dup2(nullfd, 2).unwrap();
2122 if nullfd > 2 {
2123 nix::unistd::close(nullfd).unwrap();
2124 }
2125 // Signal the parent process that we are done with the setup and it can
2126 // terminate.
2127 nix::unistd::write(pipe, &[0u8])?;
2128 nix::unistd::close(pipe).unwrap();
2129 }
2130
2131 let mut interrupt = signal(SignalKind::interrupt())?;
2132 select! {
2133 res = session.fuse() => res?,
2134 _ = interrupt.recv().fuse() => {
2135 // exit on interrupt
2136 }
2137 }
2138 } else {
2139 bail!("unknown archive file extension (expected .pxar)");
2140 }
2141
2142 Ok(Value::Null)
2143 }
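// Example invocation (snapshot and mount point are assumptions):
//   proxmox-backup-client mount host/myhost/2020-06-25T06:38:29Z root.pxar /mnt/backup
// Without --verbose the command daemonizes; the parent only returns after the
// child has written to the pipe, i.e. once the FUSE session is set up.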
2144
2145 #[api(
2146 input: {
2147 properties: {
2148 "snapshot": {
2149 type: String,
2150 description: "Group/Snapshot path.",
2151 },
2152 "archive-name": {
2153 type: String,
2154 description: "Backup archive name.",
2155 },
2156 "repository": {
2157 optional: true,
2158 schema: REPO_URL_SCHEMA,
2159 },
2160 "keyfile": {
2161 optional: true,
2162 type: String,
2163 description: "Path to encryption key.",
2164 },
2165 },
2166 },
2167 )]
2168 /// Shell to interactively inspect and restore snapshots.
2169 async fn catalog_shell(param: Value) -> Result<(), Error> {
2170 let repo = extract_repository_from_value(&param)?;
2171 let client = connect(repo.host(), repo.user())?;
2172 let path = tools::required_string_param(&param, "snapshot")?;
2173 let archive_name = tools::required_string_param(&param, "archive-name")?;
2174
2175 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
2176 let group: BackupGroup = path.parse()?;
2177 api_datastore_latest_snapshot(&client, repo.store(), group).await?
2178 } else {
2179 let snapshot: BackupDir = path.parse()?;
2180 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
2181 };
2182
2183 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
2184 let crypt_config = match keyfile {
2185 None => None,
2186 Some(path) => {
2187 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
2188 Some(Arc::new(CryptConfig::new(key)?))
2189 }
2190 };
2191
2192 let server_archive_name = if archive_name.ends_with(".pxar") {
2193 format!("{}.didx", archive_name)
2194 } else {
2195 bail!("Can only mount pxar archives.");
2196 };
2197
2198 let client = BackupReader::start(
2199 client,
2200 crypt_config.clone(),
2201 repo.store(),
2202 &backup_type,
2203 &backup_id,
2204 backup_time,
2205 true,
2206 ).await?;
2207
2208 let mut tmpfile = std::fs::OpenOptions::new()
2209 .write(true)
2210 .read(true)
2211 .custom_flags(libc::O_TMPFILE)
2212 .open("/tmp")?;
2213
2214 let manifest = client.download_manifest().await?;
2215
2216 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
2217 let most_used = index.find_most_used_chunks(8);
2218 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
2219 let reader = BufferedDynamicReader::new(index, chunk_reader);
2220 let archive_size = reader.archive_size();
2221 let reader: proxmox_backup::pxar::fuse::Reader =
2222 Arc::new(BufferedDynamicReadAt::new(reader));
2223 let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
2224
2225 client.download(CATALOG_NAME, &mut tmpfile).await?;
2226 let index = DynamicIndexReader::new(tmpfile)
2227 .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
2228
2229 // Note: do not use values stored in the index (not trusted) - instead, compute them again
2230 let (csum, size) = index.compute_csum();
2231 manifest.verify_file(CATALOG_NAME, &csum, size)?;
2232
2233 let most_used = index.find_most_used_chunks(8);
2234 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
2235 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
2236 let mut catalogfile = std::fs::OpenOptions::new()
2237 .write(true)
2238 .read(true)
2239 .custom_flags(libc::O_TMPFILE)
2240 .open("/tmp")?;
2241
2242 std::io::copy(&mut reader, &mut catalogfile)
2243 .map_err(|err| format_err!("unable to download catalog - {}", err))?;
2244
2245 catalogfile.seek(SeekFrom::Start(0))?;
2246 let catalog_reader = CatalogReader::new(catalogfile);
2247 let state = Shell::new(
2248 catalog_reader,
2249 &server_archive_name,
2250 decoder,
2251 ).await?;
2252
2253 println!("Starting interactive shell");
2254 state.shell().await?;
2255
2256 record_repository(&repo);
2257
2258 Ok(())
2259 }
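// Example invocation (snapshot name is an assumption):
//   proxmox-backup-client catalog shell host/myhost/2020-06-25T06:38:29Z root.pxar
// Inside the shell, the downloaded catalog serves fast directory listings,
// while the pxar accessor fetches actual file contents on demand.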
2260
2261 fn catalog_mgmt_cli() -> CliCommandMap {
2262 let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
2263 .arg_param(&["snapshot", "archive-name"])
2264 .completion_cb("repository", complete_repository)
2265 .completion_cb("archive-name", complete_pxar_archive_name)
2266 .completion_cb("snapshot", complete_group_or_snapshot);
2267
2268 let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
2269 .arg_param(&["snapshot"])
2270 .completion_cb("repository", complete_repository)
2271 .completion_cb("snapshot", complete_backup_snapshot);
2272
2273 CliCommandMap::new()
2274 .insert("dump", catalog_dump_cmd_def)
2275 .insert("shell", catalog_shell_cmd_def)
2276 }
2277
2278 #[api(
2279 input: {
2280 properties: {
2281 repository: {
2282 schema: REPO_URL_SCHEMA,
2283 optional: true,
2284 },
2285 limit: {
2286 description: "The maximum number of tasks to list.",
2287 type: Integer,
2288 optional: true,
2289 minimum: 1,
2290 maximum: 1000,
2291 default: 50,
2292 },
2293 "output-format": {
2294 schema: OUTPUT_FORMAT,
2295 optional: true,
2296 },
2297 all: {
2298 type: Boolean,
2299 description: "Also list stopped tasks.",
2300 optional: true,
2301 },
2302 }
2303 }
2304 )]
2305 /// List running server tasks for this repository user.
2306 async fn task_list(param: Value) -> Result<Value, Error> {
2307
2308 let output_format = get_output_format(&param);
2309
2310 let repo = extract_repository_from_value(&param)?;
2311 let client = connect(repo.host(), repo.user())?;
2312
2313 let limit = param["limit"].as_u64().unwrap_or(50) as usize;
2314 let running = !param["all"].as_bool().unwrap_or(false);
2315
2316 let args = json!({
2317 "running": running,
2318 "start": 0,
2319 "limit": limit,
2320 "userfilter": repo.user(),
2321 "store": repo.store(),
2322 });
2323
2324 let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
2325 let mut data = result["data"].take();
2326
2327 let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
2328
2329 let options = default_table_format_options()
2330 .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
2331 .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
2332 .column(ColumnConfig::new("upid"))
2333 .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
2334
2335 format_and_print_result_full(&mut data, schema, &output_format, &options);
2336
2337 Ok(Value::Null)
2338 }
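// Example invocation (alternative output formats come from the common CLI
// layer via the OUTPUT_FORMAT schema above):
//   proxmox-backup-client task list --all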
2339
2340 #[api(
2341 input: {
2342 properties: {
2343 repository: {
2344 schema: REPO_URL_SCHEMA,
2345 optional: true,
2346 },
2347 upid: {
2348 schema: UPID_SCHEMA,
2349 },
2350 }
2351 }
2352 )]
2353 /// Display the task log.
2354 async fn task_log(param: Value) -> Result<Value, Error> {
2355
2356 let repo = extract_repository_from_value(&param)?;
2357 let upid = tools::required_string_param(&param, "upid")?;
2358
2359 let client = connect(repo.host(), repo.user())?;
2360
2361 display_task_log(client, upid, true).await?;
2362
2363 Ok(Value::Null)
2364 }
2365
2366 #[api(
2367 input: {
2368 properties: {
2369 repository: {
2370 schema: REPO_URL_SCHEMA,
2371 optional: true,
2372 },
2373 upid: {
2374 schema: UPID_SCHEMA,
2375 },
2376 }
2377 }
2378 )]
2379 /// Try to stop a specific task.
2380 async fn task_stop(param: Value) -> Result<Value, Error> {
2381
2382 let repo = extract_repository_from_value(&param)?;
2383 let upid_str = tools::required_string_param(&param, "upid")?;
2384
2385 let mut client = connect(repo.host(), repo.user())?;
2386
2387 let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
2388 let _ = client.delete(&path, None).await?;
2389
2390 Ok(Value::Null)
2391 }
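// Example (the UPID placeholder must be taken from `task list` output):
//   proxmox-backup-client task stop '<upid>'
// The DELETE on the task path asks the server to abort the task; hence
// "Try to stop" - termination is not guaranteed to be immediate.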
2392
2393 fn task_mgmt_cli() -> CliCommandMap {
2394
2395 let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
2396 .completion_cb("repository", complete_repository);
2397
2398 let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
2399 .arg_param(&["upid"]);
2400
2401 let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
2402 .arg_param(&["upid"]);
2403
2404 CliCommandMap::new()
2405 .insert("log", task_log_cmd_def)
2406 .insert("list", task_list_cmd_def)
2407 .insert("stop", task_stop_cmd_def)
2408 }
2409
2410 fn main() {
2411
2412 let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
2413 .arg_param(&["backupspec"])
2414 .completion_cb("repository", complete_repository)
2415 .completion_cb("backupspec", complete_backup_source)
2416 .completion_cb("keyfile", tools::complete_file_name)
2417 .completion_cb("chunk-size", complete_chunk_size);
2418
2419 let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
2420 .arg_param(&["snapshot", "logfile"])
2421 .completion_cb("snapshot", complete_backup_snapshot)
2422 .completion_cb("logfile", tools::complete_file_name)
2423 .completion_cb("keyfile", tools::complete_file_name)
2424 .completion_cb("repository", complete_repository);
2425
2426 let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
2427 .completion_cb("repository", complete_repository);
2428
2429 let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
2430 .arg_param(&["group"])
2431 .completion_cb("group", complete_backup_group)
2432 .completion_cb("repository", complete_repository);
2433
2434 let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
2435 .arg_param(&["snapshot"])
2436 .completion_cb("repository", complete_repository)
2437 .completion_cb("snapshot", complete_backup_snapshot);
2438
2439 let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
2440 .completion_cb("repository", complete_repository);
2441
2442 let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
2443 .arg_param(&["snapshot", "archive-name", "target"])
2444 .completion_cb("repository", complete_repository)
2445 .completion_cb("snapshot", complete_group_or_snapshot)
2446 .completion_cb("archive-name", complete_archive_name)
2447 .completion_cb("target", tools::complete_file_name);
2448
2449 let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
2450 .arg_param(&["snapshot"])
2451 .completion_cb("repository", complete_repository)
2452 .completion_cb("snapshot", complete_backup_snapshot);
2453
2454 let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
2455 .arg_param(&["group"])
2456 .completion_cb("group", complete_backup_group)
2457 .completion_cb("repository", complete_repository);
2458
2459 let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
2460 .completion_cb("repository", complete_repository);
2461
2462 let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
2463 .completion_cb("repository", complete_repository);
2464
2465 let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
2466 .completion_cb("repository", complete_repository);
2467
2468 #[sortable]
2469 const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
2470 &ApiHandler::Sync(&mount),
2471 &ObjectSchema::new(
2472 "Mount pxar archive.",
2473 &sorted!([
2474 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2475 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2476 ("target", false, &StringSchema::new("Target directory path.").schema()),
2477 ("repository", true, &REPO_URL_SCHEMA),
2478 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2479 ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
2480 ]),
2481 )
2482 );
2483
2484 let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
2485 .arg_param(&["snapshot", "archive-name", "target"])
2486 .completion_cb("repository", complete_repository)
2487 .completion_cb("snapshot", complete_group_or_snapshot)
2488 .completion_cb("archive-name", complete_pxar_archive_name)
2489 .completion_cb("target", tools::complete_file_name);
2490
2491
2492 let cmd_def = CliCommandMap::new()
2493 .insert("backup", backup_cmd_def)
2494 .insert("upload-log", upload_log_cmd_def)
2495 .insert("forget", forget_cmd_def)
2496 .insert("garbage-collect", garbage_collect_cmd_def)
2497 .insert("list", list_cmd_def)
2498 .insert("login", login_cmd_def)
2499 .insert("logout", logout_cmd_def)
2500 .insert("prune", prune_cmd_def)
2501 .insert("restore", restore_cmd_def)
2502 .insert("snapshots", snapshots_cmd_def)
2503 .insert("files", files_cmd_def)
2504 .insert("status", status_cmd_def)
2505 .insert("key", key_mgmt_cli())
2506 .insert("mount", mount_cmd_def)
2507 .insert("catalog", catalog_mgmt_cli())
2508 .insert("task", task_mgmt_cli());
2509
2510 let rpcenv = CliEnvironment::new();
2511 run_cli_command(cmd_def, rpcenv, Some(|future| {
2512 proxmox_backup::tools::runtime::main(future)
2513 }));
2514 }
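// A typical session against a repository (all values are assumptions;
// PBS_REPOSITORY is the fallback used by get_default_repository):
//   export PBS_REPOSITORY='root@pam@backup-server:store1'
//   proxmox-backup-client login
//   proxmox-backup-client backup root.pxar:/
//   proxmox-backup-client snapshots host/myhost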