use std::collections::HashSet;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_human_byte::HumanByte;
use proxmox_io::StdChannelWriter;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
use proxmox_time::{epoch_i64, strftime_local};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use pbs_api_types::{
    Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
    Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem,
    StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::pxar::ErrorHandler as PxarErrorHandler;
use pbs_client::tools::{
    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
    complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository,
    connect, connect_rate_limited, extract_repository_from_value,
    key_source::{
        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
    },
    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_client::{
    delete_ticket_info, parse_backup_specification, view_task_result, BackupReader,
    BackupRepository, BackupSpecificationType, BackupStats, BackupWriter, ChunkStream,
    FixedChunkStream, HttpClient, PxarBackupStream, RemoteChunkReader, UploadOptions,
    BACKUP_SOURCE_SCHEMA,
};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    archive_type, ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::CATALOG_NAME;
use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json;

fn record_repository(repo: &BackupRepository) {
    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json! { data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) {
                continue;
            }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 {
            // store at most 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(
        path,
        new_data.to_string().as_bytes(),
        CreateOptions::new(),
        false,
    );
}
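
// Shape of the resulting cache file (illustrative contents only): a flat JSON
// object mapping repository strings to use counts, e.g.
// {"user@pbs@backup.example:store1": 12}. Only the ten most-used entries
// survive the loop above, so the file stays small no matter how many
// repositories a user has touched.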

async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    ns: &BackupNamespace,
    group: Option<&BackupGroup>,
) -> Result<Value, Error> {
    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = match group {
        Some(group) => serde_json::to_value(group)?,
        None => serde_json::json!({}),
    };

    if !ns.is_root() {
        args["ns"] = serde_json::to_value(ns)?;
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    ns: &BackupNamespace,
    group: BackupGroup,
) -> Result<BackupDir, Error> {
    let list = api_datastore_list_snapshots(client, store, ns, Some(&group)).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {} does not contain any snapshots.", group);
    }

    list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));

    Ok((group, list[0].backup.time).into())
}
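
// Note the comparator intentionally compares `b` against `a`, sorting the list
// newest-first, so `list[0]` is the most recent snapshot of the group.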

pub async fn dir_or_last_from_group(
    client: &HttpClient,
    repo: &BackupRepository,
    ns: &BackupNamespace,
    path: &str,
) -> Result<BackupDir, Error> {
    match path.parse::<BackupPart>()? {
        BackupPart::Dir(dir) => Ok(dir),
        BackupPart::Group(group) => {
            api_datastore_latest_snapshot(client, repo.store(), ns, group).await
        }
    }
}
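
// Illustration (paths are hypothetical): "host/www" parses as
// BackupPart::Group and resolves to that group's latest snapshot, while
// "host/www/2023-11-01T08:00:00Z" parses as BackupPart::Dir and is used
// verbatim.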

async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), catalog, pxar_create_options)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow buffering up to 10 chunks

    let stream = ReceiverStream::new(rx).map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
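
// Pipeline sketch: pxar encoder -> content-defined chunker (running in its own
// task) -> bounded channel -> upload stream. The 10-entry channel provides
// backpressure, so chunking can overlap the upload without running unboundedly
// ahead of it.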

async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4 * 1024 * 1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
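
// Unlike directories, images use fixed-size chunking (4 MiB unless a
// chunk-size override is given): stable chunk boundaries let unchanged regions
// of a block device deduplicate against earlier backups and keep the resulting
// fixed index seekable.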

pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
    Ok(match param.get("ns") {
        Some(Value::String(ns)) => ns.parse()?,
        Some(_) => bail!("invalid namespace parameter"),
        None => BackupNamespace::root(),
    })
}
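
// Usage sketch: `optional_ns_param(&json!({}))?` yields the root namespace,
// while `optional_ns_param(&json!({"ns": "dev/team-a"}))?` parses the given
// namespace path ("dev/team-a" is just an example value).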

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {
    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let backup_ns = optional_ns_param(&param)?;
    let mut result = client
        .get(
            &path,
            match backup_ns.is_root() {
                true => None,
                false => Some(json!({ "ns": backup_ns })),
            },
        )
        .await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        Ok(item.backup.to_string())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        let snapshot = BackupDir {
            group: item.backup,
            time: item.last_backup,
        };
        Ok(snapshot.to_string())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        Ok(pbs_tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(
            ColumnConfig::new("backup-id")
                .renderer(render_group_path)
                .header("group"),
        )
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot"),
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

fn merge_group_into(to: &mut serde_json::Map<String, Value>, group: BackupGroup) {
    match serde_json::to_value(group).unwrap() {
        Value::Object(group) => to.extend(group),
        _ => unreachable!(),
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
)]
/// Change owner of a backup group.
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let ns = optional_ns_param(&param)?;

    let client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    merge_group_into(param.as_object_mut().unwrap(), group);

    if !ns.is_root() {
        param["ns"] = serde_json::to_value(ns)?;
    }

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Show client and optional server version.
async fn api_version(param: Value) -> Result<(), Error> {
    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
            "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
            "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => log::error!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!(
            "client version: {}.{}",
            pbs_buildcfg::PROXMOX_PKG_VERSION,
            pbs_buildcfg::PROXMOX_PKG_RELEASE,
        );
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&client, result, &output_format).await?;

    Ok(Value::Null)
}

struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering up to 10 writes
    let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512 * 1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(
        StdChannelWriter::new(catalog_tx),
    ))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            log::error!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult {
        catalog_writer,
        result: catalog_result_rx,
    })
}
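
// Two channels cooperate here: a bounded std mpsc channel carries raw catalog
// bytes from the blocking CatalogWriter into the async chunker, and a oneshot
// channel hands the final upload result back to the caller once the catalog
// stream has been closed and fully uploaded.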

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                },
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                },
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
                default: false,
            },
            ns: {
                schema: BACKUP_NAMESPACE_SCHEMA,
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            exclude: {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                },
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "dry-run": {
                type: Boolean,
                description: "Just show what backup would do, but do not upload anything.",
                optional: true,
                default: false,
            },
            "skip-e2big-xattr": {
                type: Boolean,
                description: "Ignore the E2BIG error when retrieving xattrs. This includes the file, but discards the metadata.",
                optional: true,
                default: false,
            },
        },
    },
)]
/// Create (host) backup.
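///
/// A typical invocation (repository and paths are illustrative) is:
///
/// ```text
/// proxmox-backup-client backup root.pxar:/ fstab.conf:/etc/fstab --repository user@pbs@host:store
/// ```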
async fn create_backup(
    param: Value,
    all_file_systems: bool,
    skip_lost_and_found: bool,
    dry_run: bool,
    skip_e2big_xattr: bool,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = json::required_array_param(&param, "backupspec")?;

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v * 1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"]
        .as_str()
        .unwrap_or_else(|| proxmox_sys::nodename());

    let backup_ns = optional_ns_param(&param)?;

    let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"]
        .as_u64()
        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry
            .as_str()
            .ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?,
        );
    }

    let mut devices = if all_file_systems {
        None
    } else {
        Some(HashSet::new())
    };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((
                    BackupSpecificationType::PXAR,
                    filename.to_owned(),
                    format!("{}.didx", target),
                    0,
                ));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 {
                    bail!("got zero-sized file '{}'", filename);
                }

                upload_list.push((
                    BackupSpecificationType::IMAGE,
                    filename.to_owned(),
                    format!("{}.fidx", target),
                    size,
                ));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((
                    BackupSpecificationType::CONFIG,
                    filename.to_owned(),
                    format!("{}.blob", target),
                    metadata.len(),
                ));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((
                    BackupSpecificationType::LOGFILE,
                    filename.to_owned(),
                    format!("{}.blob", target),
                    metadata.len(),
                ));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let http_client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
    if backup_ns.is_root() {
        log::info!("Starting backup: {snapshot}");
    } else {
        log::info!("Starting backup: [{backup_ns}]:{snapshot}");
    }

    log::info!("Client name: {}", proxmox_sys::nodename());

    let start_time = std::time::Instant::now();

    log::info!(
        "Starting backup protocol: {}",
        strftime_local("%c", epoch_i64())?
    );

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key_with_source) => {
            log::info!(
                "{}",
                format_key_source(&key_with_source.source, "encryption")
            );

            let (key, created, fingerprint) =
                decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
            log::info!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_with_source) => {
                    log::info!("{}", format_key_source(&pem_with_source.source, "master"));

                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        &http_client,
        crypt_config.clone(),
        repo.store(),
        &backup_ns,
        &snapshot,
        true,
        false,
    )
    .await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            log::info!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            log::info!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        log::error!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                log::error!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    let log_file = |desc: &str, file: &str, target: &str| {
        let what = if dry_run { "Would upload" } else { "Upload" };
        log::info!("{} {} '{}' to '{}' as {}", what, desc, file, repo, target);
    };

    for (backup_type, filename, target, size) in upload_list {
        match (backup_type, dry_run) {
            // dry-run
            (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target),
            (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target),
            (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target),
            (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target),
            // no dry-run
            (BackupSpecificationType::CONFIG, false) => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                log_file("config file", &filename, &target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            (BackupSpecificationType::LOGFILE, false) => {
                // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                log_file("log file", &filename, &target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            (BackupSpecificationType::PXAR, false) => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res =
                        spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                log_file("directory", &filename, &target);
                catalog
                    .lock()
                    .unwrap()
                    .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    skip_e2big_xattr,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                )
                .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            (BackupSpecificationType::IMAGE, false) => {
                log_file("image", &filename, &target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats =
                    backup_image(&client, &filename, &target, chunk_size_opt, upload_options)
                        .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    if dry_run {
        log::info!("dry-run: no upload happened");
        return Ok(Value::Null);
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        log::info!("Upload RSA encoded key to '{}' as {}", repo, target);
        let options = UploadOptions {
            compress: false,
            encrypt: false,
            ..UploadOptions::default()
        };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }
    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest
        .to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    log::debug!("Upload index.json to '{}'", repo);

    let options = UploadOptions {
        compress: true,
        encrypt: false,
        ..UploadOptions::default()
    };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    log::info!("Duration: {:.2}s", elapsed.as_secs_f64());
    log::info!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}

async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
) -> Result<(), Error> {
    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        let next_per = ((pos + 1) * 100) / index.index_count();
        if per != next_per {
            log::debug!(
                "progress {}% (read {} bytes, duration {} sec)",
                next_per,
                bytes,
                start_time.elapsed().as_secs()
            );
            per = next_per;
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    log::info!(
        "restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64 / (1024.0 * 1024.0 * elapsed.as_secs_f64())
    );

    Ok(())
}

fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
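
// Quick sanity checks for the mapping above (assumes ArchiveType implements
// PartialEq/Debug; only the branches that do not consult `archive_type()` are
// exercised):
#[cfg(test)]
mod parse_archive_type_tests {
    use super::*;

    #[test]
    fn maps_user_facing_names_to_server_archive_names() {
        assert_eq!(
            parse_archive_type("root.pxar"),
            ("root.pxar.didx".to_string(), ArchiveType::DynamicIndex)
        );
        assert_eq!(
            parse_archive_type("disk.img"),
            ("disk.img.fidx".to_string(), ArchiveType::FixedIndex)
        );
        assert_eq!(
            parse_archive_type("notes.txt"),
            ("notes.txt.blob".to_string(), ArchiveType::Blob)
        );
    }
}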

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "ignore-acls": {
                type: Boolean,
                description: "ignore acl settings",
                optional: true,
                default: false,
            },
            "ignore-xattrs": {
                type: Boolean,
                description: "ignore xattr settings",
                optional: true,
                default: false,
            },
            "ignore-ownership": {
                type: Boolean,
                description: "ignore owner settings (no chown)",
                optional: true,
                default: false,
            },
            "ignore-permissions": {
                type: Boolean,
                description: "ignore permission settings (no chmod)",
                optional: true,
                default: false,
            },
            overwrite: {
                type: Boolean,
                description: "overwrite already existing files",
                optional: true,
                default: false,
            },
            "overwrite-files": {
                type: Boolean,
                description: "overwrite already existing files",
                optional: true,
                default: false,
            },
            "overwrite-symlinks": {
                type: Boolean,
                description: "overwrite already existing entries by archives symlink",
                optional: true,
                default: false,
            },
            "overwrite-hardlinks": {
                type: Boolean,
                description: "overwrite already existing entries by archives hardlink",
                optional: true,
                default: false,
            },
            "ignore-extract-device-errors": {
                type: Boolean,
                description: "ignore errors that occur during device node extraction",
                optional: true,
                default: false,
            },
        },
    },
)]
/// Restore backup repository.
async fn restore(
    param: Value,
    allow_existing_dirs: bool,
    ignore_acls: bool,
    ignore_xattrs: bool,
    ignore_ownership: bool,
    ignore_permissions: bool,
    overwrite: bool,
    overwrite_files: bool,
    overwrite_symlinks: bool,
    overwrite_hardlinks: bool,
    ignore_extract_device_errors: bool,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let archive_name = json::required_string_param(&param, "archive-name")?;

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    let ns = optional_ns_param(&param)?;
    let path = json::required_string_param(&param, "snapshot")?;

    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, path).await?;

    let target = json::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    log::error!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        &client,
        crypt_config.clone(),
        repo.store(),
        &ns,
        &backup_dir,
        true,
    )
    .await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
                log::info!("{}", format_key_source(&key.source, "encryption"));
            }
            if let Some(config) = &crypt_config {
                log::info!("Fingerprint: {}", Fingerprint::new(config.fingerprint()));
            }
        }
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer
                .write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {
        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| {
                    format_err!("unable to create target file {:?} - {}", target, err)
                })?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::DynamicIndex {
        let index = client
            .download_dynamic_index(&manifest, &archive_name)
            .await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(
            client.clone(),
            crypt_config,
            file_info.chunk_crypt_mode(),
            most_used,
        );

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let on_error = if ignore_extract_device_errors {
            let handler: PxarErrorHandler = Box::new(move |err: Error| {
                use pbs_client::pxar::PxarExtractContext;

                match err.downcast_ref::<PxarExtractContext>() {
                    Some(PxarExtractContext::ExtractDevice) => Ok(()),
                    _ => Err(err),
                }
            });

            Some(handler)
        } else {
            None
        };

        let mut overwrite_flags = pbs_client::pxar::OverwriteFlags::empty();
        overwrite_flags.set(pbs_client::pxar::OverwriteFlags::FILE, overwrite_files);
        overwrite_flags.set(
            pbs_client::pxar::OverwriteFlags::SYMLINK,
            overwrite_symlinks,
        );
        overwrite_flags.set(
            pbs_client::pxar::OverwriteFlags::HARDLINK,
            overwrite_hardlinks,
        );
        if overwrite {
            overwrite_flags.insert(pbs_client::pxar::OverwriteFlags::all());
        }

        let options = pbs_client::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            overwrite_flags,
            on_error,
        };

        let mut feature_flags = pbs_client::pxar::Flags::DEFAULT;

        if ignore_acls {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_ACL);
        }
        if ignore_xattrs {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_XATTRS);
        }
        if ignore_ownership {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_OWNER);
        }
        if ignore_permissions {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_PERMISSIONS);
        }

        if let Some(target) = target {
            pbs_client::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                feature_flags,
                |path| {
                    log::debug!("{:?}", path);
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {:#}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {
        let index = client
            .download_fixed_index(&manifest, &archive_name)
            .await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(
            client.clone(),
            crypt_config.clone(),
            file_info.chunk_crypt_mode(),
            index,
            &mut writer,
        )
        .await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "dry-run": {
                type: bool,
                optional: true,
                description: "Just show what prune would do, but do not delete anything.",
            },
            group: {
                type: String,
                description: "Backup group",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            quiet: {
                type: bool,
                optional: true,
                default: false,
                description: "Minimal output - only show removals.",
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Prune a backup repository.
async fn prune(
    dry_run: Option<bool>,
    group: String,
    prune_options: PruneJobOptions,
    quiet: bool,
    mut param: Value,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group: BackupGroup = group.parse()?;

    let output_format = extract_output_format(&mut param);

    let mut api_param = serde_json::to_value(prune_options)?;
    if let Some(dry_run) = dry_run {
        api_param["dry-run"] = dry_run.into();
    }
    merge_group_into(api_param.as_object_mut().unwrap(), group);

    let mut result = client.post(&path, Some(api_param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        Ok(item.backup.to_string())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }
        .to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(
            ColumnConfig::new("backup-id")
                .renderer(render_snapshot_path)
                .header("snapshot"),
        )
        .column(
            ColumnConfig::new("backup-time")
                .renderer(pbs_tools::format::render_epoch)
                .header("date"),
        )
        .column(
            ColumnConfig::new("keep")
                .renderer(render_prune_action)
                .header("action"),
        );

    let return_type = &pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data
            .as_array()
            .unwrap()
            .iter()
            .filter(|item| item["keep"].as_bool() == Some(false))
            .cloned()
            .collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total / 200;
        if let Some(per) = ((v + roundup) * 100).checked_div(total) {
            let info = format!(" ({} %)", per);
            Ok(format!("{} {:>8}", v, info))
        } else {
            bail!("Cannot render total percentage: denominator is zero");
        }
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            reader.read(buf)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

fn main() {
    pbs_tools::setup_libc_malloc_opts();
    init_cli_logger("PBS_LOG", "info");

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", complete_file_name)
        .completion_cb("master-pubkey-file", complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("ns", complete_namespace)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("ns", complete_namespace)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("ns", complete_namespace)
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def =
        CliCommand::new(&API_METHOD_STATUS).completion_cb("repository", complete_repository);

    let login_cmd_def =
        CliCommand::new(&API_METHOD_API_LOGIN).completion_cb("repository", complete_repository);

    let logout_cmd_def =
        CliCommand::new(&API_METHOD_API_LOGOUT).completion_cb("repository", complete_repository);

    let version_cmd_def =
        CliCommand::new(&API_METHOD_API_VERSION).completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("ns", complete_namespace)
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)
        .insert("namespace", namespace::cli_map())
        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"]);

    let rpcenv = CliEnvironment::new();
    run_cli_command(
        cmd_def,
        rpcenv,
        Some(|future| proxmox_async::runtime::main(future)),
    );
}