// src/bin/proxmox-backup-client.rs
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::io::{self, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::select;
use futures::stream::{StreamExt, TryStreamExt};
use nix::unistd::{fork, ForkResult, pipe};
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::sys::linux::tty;
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    archive_type,
    encrypt_key_with_passphrase,
    load_and_decrypt_key,
    store_key_config,
    verify_chunk_size,
    ArchiveType,
    AsyncReadChunk,
    BackupDir,
    BackupGroup,
    BackupManifest,
    BufferedDynamicReader,
    CatalogReader,
    CatalogWriter,
    CATALOG_NAME,
    ChunkStream,
    CryptConfig,
    DataBlob,
    DynamicIndexReader,
    FixedChunkStream,
    FixedIndexReader,
    IndexFile,
    KeyConfig,
    MANIFEST_BLOB_NAME,
    Shell,
};

const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}

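/// Record a repository in the completion cache.
///
/// Bumps a per-repository usage counter in the XDG cache file and keeps only
/// the most frequently used entries. Errors are deliberately ignored, since
/// this cache only feeds shell completion.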
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() >= 10 { // store at most 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

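/// Build an authenticated HTTP client for the given server and user.
///
/// The TLS fingerprint and the password may be supplied via the
/// PBS_FINGERPRINT and PBS_PASSWORD environment variables; otherwise the
/// client falls back to interactive prompts and its ticket/fingerprint caches.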
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(true)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    HttpClient::new(server, userid, options)
}

async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}

async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}

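/// Back up a directory tree as a dynamically chunked pxar archive.
///
/// The pxar encoder output is fed through a content-defined chunker running
/// in its own task; a bounded channel connects chunker and uploader so both
/// can work in parallel.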
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        exclude_pattern,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
        .await?;

    Ok(stats)
}

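/// Back up a file or block device as a fixed-index image archive.
///
/// The input is split into fixed-size chunks (4 MiB unless an explicit chunk
/// size is given), which lets the server address the image by offset.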
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
        Some(path.parse()?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        }
        Ok(tools::format::render_backup_file_list(&filenames[..]))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

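/// Spawn the catalog upload as a background task.
///
/// Catalog writes go through a std channel into a chunk stream that is
/// uploaded concurrently with the archives; the returned oneshot receiver
/// yields the final upload statistics.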
fn spawn_catalog_upload(
    client: Arc<BackupWriter>
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of up to 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("stat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let is_encrypted = Some(crypt_config.is_some());

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
        Some(Arc::new(previous_manifest))
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone())?;
                    catalog = Some(cat);
                    catalog_result_rx = Some(res);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close the upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

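/// Write the raw contents of a fixed-index image archive to `writer`.
///
/// Chunks are fetched in index order via the RemoteChunkReader; with
/// `verbose` set, progress is reported on stderr.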
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional
    // buffer/copy and thus slows down reading. Instead, use RemoteChunkReader directly.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

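/// Map a user-facing archive name to its server-side file name and type.
///
/// Full names (".didx", ".fidx", ".blob") are passed through; ".pxar" and
/// ".img" get the matching index extension appended; anything else is
/// treated as a blob.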
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Restore a backup archive.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                &[],
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            logfile: {
                type: String,
                description: "The path to the log file you want to upload.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}

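// Note: prune is declared by hand (instead of via #[api]) so its schema can
// splice in the common prune parameters shared with the server via the
// add_common_prune_prameters! macro.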
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
             .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}

async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

// Like get, but simply ignores errors and returns Null instead.
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(false)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_expenstion(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}

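/// Obtain the password for the encryption key.
///
/// Reads PBS_ENCRYPTION_PASSWORD if set; otherwise prompts on the TTY.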
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn key_mgmt_cli() -> CliCommandMap {
    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
        .format(&ApiStringFormat::Enum(&[
            EnumEntry::new("scrypt", "SCrypt"),
            EnumEntry::new("none", "Do not encrypt the key")]))
        .default("scrypt")
        .schema();

    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}

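/// Mount a pxar archive as a FUSE filesystem.
///
/// Unless `--verbose` is given, the process detaches from the terminal:
/// it forks before the tokio runtime is started, and the parent only
/// returns once the child signals over a pipe that the mount is set up.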
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

use proxmox_backup::client::RemoteChunkReader;

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

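// Note: this ReadAt implementation always completes synchronously - the
// blocking seek+read on the buffered reader runs inside
// tokio::task::block_in_place, so poll_complete can never be reached.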
impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        use std::io::Read;
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

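/// Perform the actual FUSE mount.
///
/// Resolves the snapshot (or the latest one of a group), sets up an
/// (optionally encrypted) remote reader for the `.didx` archive, and runs
/// the FUSE session until the filesystem is unmounted or SIGINT arrives.
/// When `pipe` is set, the standard file descriptors are redirected to
/// /dev/null and the parent is signalled once the mount is ready (daemon mode).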
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: proxmox_backup::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
        let options = OsStr::new("ro,default_permissions");

        let session = proxmox_backup::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting file descriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let mut interrupt = signal(SignalKind::interrupt())?;
        select! {
            res = session.fuse() => res?,
            _ = interrupt.recv().fuse() => {
                // exit on interrupt
            }
        }
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "snapshot": {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            "repository": {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

    client.download(CATALOG_NAME, &mut tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in the index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}

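/// Assemble the `catalog` sub-command map (dump, shell).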
fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            limit: {
                description: "The maximum number of tasks to list.",
                type: Integer,
                optional: true,
                minimum: 1,
                maximum: 1000,
                default: 50,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repository user.
async fn task_list(param: Value) -> Result<Value, Error> {
    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = connect(repo.host(), repo.user())?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}

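/// Assemble the `task` sub-command map (list, log, stop).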
fn task_mgmt_cli() -> CliCommandMap {
    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}

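// Entry point: build the top-level CLI command tree and hand it to the
// generic CLI runner together with a tokio-backed executor.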
fn main() {
    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli());

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}