use std::collections::{HashSet, HashMap};
use std::io::{self, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use nix::unistd::{fork, ForkResult, pipe};
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::sys::linux::tty;
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    encrypt_key_with_passphrase,
    BufferedDynamicReader,
    // ... (further backup imports elided)
};

mod proxmox_backup_client;
use proxmox_backup_client::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .schema();

pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .schema();
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {
    param.get("repository")
        .cloned()
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
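/// Remember the given repository in the cached "repo-list" file and bump its usage
/// counter; complete_repository() below reads the same file for shell completion.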
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    let mut max_used = 0;
    let mut max_repo = None;
    for (repo, count) in data.as_object().unwrap() {
        if map.contains_key(repo) { continue; }
        if let Some(count) = count.as_i64() {
            if count > max_used {
                max_used = count;
                max_repo = Some(repo);
            }
        }
    }
    if let Some(repo) = max_repo {
        map.insert(repo.to_owned(), json!(max_used));
    }
    if map.len() > 10 { // store max. 10 repos
        // ...
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .fingerprint(fingerprint)
        .fingerprint_cache(true);

    HttpClient::new(server, userid, options)
}
async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}
async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
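/// Stream a directory as a pxar archive, cut it into dynamically sized chunks and
/// upload it through the given BackupWriter as a "dynamic" index.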
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    skip_lost_and_found: bool,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(/* arguments elided */)?;

    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
        .await?;

    Ok(stats)
}
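/// Read a file or block device and upload it as a "fixed" index, using fixed-size
/// chunks (4 MiB unless a chunk size is given).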
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
        .await?;

    Ok(stats)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            group: {
                type: String,
                description: "Backup group.",
                optional: true,
            },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
        Some(path.parse()?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        }
        Ok(tools::format::render_backup_file_list(&filenames[..]))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
        }
    }
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Dump the catalog of a backup snapshot.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
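/// Set up catalog uploading: catalog data is written to a std channel, chunked, and
/// uploaded by a background tokio task; the upload result is reported back through a
/// oneshot channel.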
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
) -> Result<(
    Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
), Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: { schema: BACKUP_SOURCE_SCHEMA },
            },
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "include-dev": {
                type: Array,
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: { type: String, description: "Path to file." },
            },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": { schema: BACKUP_TYPE_SCHEMA, optional: true },
            "backup-id": { schema: BACKUP_ID_SCHEMA, optional: true },
            "backup-time": { schema: BACKUP_TIME_SCHEMA, optional: true },
            "chunk-size": { schema: CHUNK_SIZE_SCHEMA, optional: true },
            exclude: {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: { type: String, description: "Path or match pattern." },
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            verbose: {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let is_encrypted = Some(crypt_config.is_some());

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        // ... (datastore and snapshot parameters elided)
    ).await?;

    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
        Some(Arc::new(previous_manifest))
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_tx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone())?;
                    catalog = Some(cat);
                    catalog_result_tx = Some(res);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    previous_manifest.clone(),
                    // ... (path, chunk size, device set elided)
                    skip_lost_and_found,
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    previous_manifest.clone(),
                    // ... (path, image size, chunk size elided)
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_tx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
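/// Map a user-facing archive name to the server-side file name and archive type:
/// ".pxar" becomes a dynamic index (".didx"), ".img" a fixed index (".fidx"), and
/// everything else is stored as a blob.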
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
        }
    }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                // ...
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            logfile: {
                type: String,
                description: "The path to the log file you want to upload.",
            },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
        }
    }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
            ("group", false, &StringSchema::new("Backup group.").schema()),
            ("output-format", true, &OUTPUT_FORMAT),
            ("quiet", true, &BooleanSchema::new("Minimal output - only show removals.")
                .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"));

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).map(|v| v.clone()).collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({}%)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_expenstion(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
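/// Obtain the encryption key passphrase: prefer the PBS_ENCRYPTION_PASSWORD environment
/// variable, otherwise prompt on the TTY.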
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            created,
            // ... (remaining plaintext key config fields elided)
        })?;

        Ok(Value::Null)
    } else {
        bail!("unknown key derivation function '{}'", kdf);
    }
}
fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            created, // keep original value
            modified,
            // ... (remaining fields elided)
        })?;

        Ok(Value::Null)
    } else {
        bail!("unknown key derivation function '{}'", kdf);
    }
}
fn key_mgmt_cli() -> CliCommandMap {

    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(&ApiStringFormat::Enum(&[
                EnumEntry::new("scrypt", "SCrypt"),
                EnumEntry::new("none", "Do not encrypt the key")]))
            .schema();

    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
1995 ) -> Result<Value, Error> {
1996 let verbose = param["verbose
"].as_bool().unwrap_or(false);
1998 // This will stay in foreground with debug output enabled as None is
1999 // passed for the RawFd.
2000 return proxmox_backup::tools::runtime::main(mount_do(param, None));
2003 // Process should be deamonized.
2004 // Make sure to fork before the async runtime is instantiated to avoid troubles.
2007 Ok(ForkResult::Parent { .. }) => {
2008 nix::unistd::close(pipe.1).unwrap();
2009 // Blocks the parent process until we are ready to go in the child
2010 let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
2013 Ok(ForkResult::Child) => {
2014 nix::unistd::close(pipe.0).unwrap();
2015 nix::unistd::setsid().unwrap();
2016 proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
2018 Err(_) => bail!("failed to daemonize process
"),
use proxmox_backup::client::RemoteChunkReader;

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use.
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        use std::io::Read;
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("LocalDynamicReadAt::start_read_at returned Pending");
    }
}
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: proxmox_backup::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
        let options = OsStr::new("ro,default_permissions");

        let session = proxmox_backup::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let mut interrupt = signal(SignalKind::interrupt())?;
        futures::select! {
            res = session.fuse() => res?,
            _ = interrupt.recv().fuse() => {
                // exit on interrupted
            }
        }
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
2166 description: "Group
/Snapshot path
.",
2170 description: "Backup archive name
.",
2174 schema: REPO_URL_SCHEMA,
2179 description: "Path to encryption key
.",
2184 /// Shell to interactively inspect and restore snapshots.
2185 async fn catalog_shell(param: Value) -> Result<(), Error> {
2186 let repo = extract_repository_from_value(¶m)?;
2187 let client = connect(repo.host(), repo.user())?;
2188 let path = tools::required_string_param(¶m, "snapshot
")?;
2189 let archive_name = tools::required_string_param(¶m, "archive
-name
")?;
2191 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
2192 let group: BackupGroup = path.parse()?;
2193 api_datastore_latest_snapshot(&client, repo.store(), group).await?
2195 let snapshot: BackupDir = path.parse()?;
2196 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
2199 let keyfile = param["keyfile
"].as_str().map(|p| PathBuf::from(p));
2200 let crypt_config = match keyfile {
2203 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
2204 Some(Arc::new(CryptConfig::new(key)?))
2208 let server_archive_name = if archive_name.ends_with(".pxar
") {
2209 format!("{}
.didx
", archive_name)
2211 bail!("Can only mount pxar archives
.");
2214 let client = BackupReader::start(
2216 crypt_config.clone(),
2224 let mut tmpfile = std::fs::OpenOptions::new()
2227 .custom_flags(libc::O_TMPFILE)
2230 let manifest = client.download_manifest().await?;
2232 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
2233 let most_used = index.find_most_used_chunks(8);
2234 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
2235 let reader = BufferedDynamicReader::new(index, chunk_reader);
2236 let archive_size = reader.archive_size();
2237 let reader: proxmox_backup::pxar::fuse::Reader =
2238 Arc::new(BufferedDynamicReadAt::new(reader));
2239 let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
2241 client.download(CATALOG_NAME, &mut tmpfile).await?;
2242 let index = DynamicIndexReader::new(tmpfile)
2243 .map_err(|err| format_err!("unable to read catalog index
- {}
", err))?;
2245 // Note: do not use values stored in index (not trusted) - instead, computed them again
2246 let (csum, size) = index.compute_csum();
2247 manifest.verify_file(CATALOG_NAME, &csum, size)?;
2249 let most_used = index.find_most_used_chunks(8);
2250 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
2251 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
2252 let mut catalogfile = std::fs::OpenOptions::new()
2255 .custom_flags(libc::O_TMPFILE)
2258 std::io::copy(&mut reader, &mut catalogfile)
2259 .map_err(|err| format_err!("unable to download catalog
- {}
", err))?;
2261 catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}
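
// CLI wiring for the `catalog` subcommands (dump, shell).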
fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            limit: { type: Integer, description: "The maximal number of tasks to list.", default: 50, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
            all: { type: Boolean, description: "Also list stopped tasks.", optional: true },
        }
    }
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}
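
// `task log`: fetch and print the log of a server task identified by its UPID.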
#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            upid: { schema: UPID_SCHEMA },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = connect(repo.host(), repo.user())?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            upid: { schema: UPID_SCHEMA },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}
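
// CLI wiring for the `task` subcommands (list, log, stop).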
fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}

fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", tools::complete_file_name);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("benchmark", benchmark_cmd_def);

    let rpcenv = CliEnvironment::new();
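    // Async command handlers are driven by the proxmox-backup tokio runtime
    // via the closure passed to run_cli_command.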
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}