use std::collections::HashSet;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{
    tools::{
        time::{strftime_local, epoch_i64},
        fs::{file_get_json, replace_file, CreateOptions, image_size},
    },
    api::{
        api,
        ApiMethod,
        RpcEnvironment,
        cli::*,
    },
};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use pbs_api_types::{
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, Authid, CryptMode, GroupListItem,
    PruneListItem, SnapshotListItem, StorageStatus, Fingerprint,
};
use pbs_client::{
    BACKUP_SOURCE_SCHEMA,
    BackupReader,
    BackupRepository,
    BackupSpecificationType,
    BackupStats,
    BackupWriter,
    ChunkStream,
    FixedChunkStream,
    HttpClient,
    PxarBackupStream,
    RemoteChunkReader,
    UploadOptions,
    delete_ticket_info,
    parse_backup_specification,
    view_task_result,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
    extract_repository_from_value,
    key_source::{
        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
    },
    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_config::key_config::{KeyConfig, decrypt_key, rsa_encrypt_key_config};
use pbs_datastore::CATALOG_NAME;
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, archive_type,
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::prune::PruneOptions;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use pbs_tools::json;
use pbs_tools::crypt_config::CryptConfig;

mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;
mod snapshot;
pub use snapshot::*;
pub mod key;

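/// Best-effort bookkeeping of recently used repositories. The usage counts are
/// written to $HOME/.cache/proxmox-backup/repo-list (presumably consumed by the
/// repository shell completion helper); all errors are silently ignored.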
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

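/// Fetch the snapshot list of a datastore via the admin API, optionally
/// restricted to a single backup group.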
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

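/// Determine the most recent snapshot of a backup group, returned as a
/// `(backup_type, backup_id, backup_time)` tuple.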
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, i64), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = list[0].backup_time;

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}

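/// Stream a directory as a dynamically chunked pxar archive to the server.
/// The chunker runs in its own task, decoupled from the uploader through a
/// bounded mpsc channel, so chunking and uploading proceed in parallel.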
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        catalog,
        pxar_create_options,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = ReceiverStream::new(rx)
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

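/// Upload a file or block device as a fixed-size chunked image archive
/// (the chunk size defaults to 4 MiB).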
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(pbs_tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            "new-owner": {
                type: Authid,
            },
        }
    }
)]
/// Change owner of a backup group
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Log out (delete the stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show the client version, and the server version if a repository is reachable.
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
            "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
            "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!(
            "client version: {}.{}",
            pbs_buildcfg::PROXMOX_PKG_VERSION,
            pbs_buildcfg::PROXMOX_PKG_RELEASE,
        );
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&mut client, result, &output_format).await?;

    Ok(Value::Null)
}

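/// Handles returned by `spawn_catalog_upload()`: the writer side feeding the
/// catalog stream, plus a oneshot receiver that yields the upload result once
/// the stream has been closed.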
struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

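/// Spawn a background task uploading the backup catalog. Catalog writes are
/// passed through a sync channel into a 512 KiB chunk stream; on upload errors
/// the whole backup session is cancelled.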
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of up to 10 writes
    let catalog_stream = pbs_tools::blocking::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(
        StdChannelWriter::new(catalog_tx),
    ))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with the same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = json::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("stat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let client = connect(&repo)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = std::time::Instant::now();

    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key_with_source) => {
            println!(
                "{}",
                format_key_source(&key_with_source.source, "encryption")
            );

            let (key, created, fingerprint) =
                decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
            println!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_with_source) => {
                    println!("{}", format_key_source(&pem_with_source.source, "master"));

                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                },
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
        false
    ).await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            println!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            println!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        println!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                println!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    verbose,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{}' as {}", filename, repo, target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                };

                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        println!("Upload RSA encrypted key to '{}' as {}", repo, target);
        let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    if verbose { println!("Upload index.json to '{}'", repo) }
    let options = UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    println!("Duration: {:.2}s", elapsed.as_secs_f64());

    println!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}

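/// Sequentially fetch all chunks of a fixed index and write them to `writer`,
/// reporting progress on stderr when `verbose` is set.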
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, use RemoteChunkReader directly.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                    next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

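/// Map a user supplied archive name to the server-side file name plus its
/// archive type. Explicit extensions are taken as-is; otherwise the type is
/// derived from the suffix, for example:
///
///   "root.pxar" -> ("root.pxar.didx", ArchiveType::DynamicIndex)
///   "disk.img"  -> ("disk.img.fidx",  ArchiveType::FixedIndex)
///   "some.conf" -> ("some.conf.blob", ArchiveType::Blob)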
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
        }
    }
)]
/// Restore a backup archive.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = json::required_string_param(&param, "archive-name")?;

    let client = connect(&repo)?;

    record_repository(&repo);

    let path = json::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = json::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
                eprintln!("{}", format_key_source(&key.source, "encryption"));
            }
            if let Some(config) = &crypt_config {
                eprintln!("Fingerprint: {}", Fingerprint::new(config.fingerprint()));
            }
        }
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let options = pbs_client::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            on_error: None,
        };

        if let Some(target) = target {
            pbs_client::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                pbs_client::pxar::Flags::DEFAULT,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "dry-run": {
                type: bool,
                optional: true,
                description: "Just show what prune would do, but do not delete anything.",
            },
            group: {
                type: String,
                description: "Backup group",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            quiet: {
                type: bool,
                optional: true,
                default: false,
                description: "Minimal output - only show removals.",
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Prune a backup repository.
async fn prune(
    dry_run: Option<bool>,
    group: String,
    prune_options: PruneOptions,
    quiet: bool,
    mut param: Value,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group: BackupGroup = group.parse()?;

    let output_format = extract_output_format(&mut param);

    let mut api_param = serde_json::to_value(prune_options)?;
    if let Some(dry_run) = dry_run {
        api_param["dry-run"] = dry_run.into();
    }
    api_param["backup-type"] = group.backup_type().into();
    api_param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(api_param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(pbs_tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"));

    let return_type = &pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

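// Note: start_read_at answers the request synchronously via
// tokio::task::block_in_place, so poll_complete can never be reached with a
// pending operation - hence the panic below.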
impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", pbs_tools::fs::complete_file_name)
        .completion_cb("master-pubkey-file", pbs_tools::fs::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", pbs_tools::fs::complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", pbs_tools::fs::complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)
        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"]);

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        pbs_runtime::main(future)
    }));
}