1 use std
::collections
::{HashSet, HashMap}
;
2 use std
::convert
::TryFrom
;
3 use std
::io
::{self, Read, Write, Seek, SeekFrom}
;
4 use std
::os
::unix
::io
::{FromRawFd, RawFd}
;
5 use std
::path
::{Path, PathBuf}
;
7 use std
::sync
::{Arc, Mutex}
;
8 use std
::task
::Context
;
10 use anyhow
::{bail, format_err, Error}
;
11 use futures
::future
::FutureExt
;
12 use futures
::stream
::{StreamExt, TryStreamExt}
;
13 use serde_json
::{json, Value}
;
14 use tokio
::sync
::mpsc
;
15 use tokio_stream
::wrappers
::ReceiverStream
;
16 use xdg
::BaseDirectories
;
18 use pathpatterns
::{MatchEntry, MatchType, PatternFlag}
;
21 time
::{strftime_local, epoch_i64}
,
22 fs
::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size}
,
33 use pxar
::accessor
::{MaybeReady, ReadAt, ReadAtOperation}
;
35 use proxmox_backup
::tools
;
36 use proxmox_backup
::api2
::access
::user
::UserWithTokens
;
37 use proxmox_backup
::api2
::types
::*;
38 use proxmox_backup
::api2
::version
;
39 use proxmox_backup
::client
::*;
40 use proxmox_backup
::pxar
::catalog
::*;
41 use proxmox_backup
::backup
::{
44 rsa_encrypt_key_config
,
51 BufferedDynamicReader
,
59 ENCRYPTED_KEY_BLOB_NAME
,
68 mod proxmox_backup_client
;
69 use proxmox_backup_client
::*;
71 const ENV_VAR_PBS_FINGERPRINT
: &str = "PBS_FINGERPRINT";
72 const ENV_VAR_PBS_PASSWORD
: &str = "PBS_PASSWORD";
75 pub const REPO_URL_SCHEMA
: Schema
= StringSchema
::new("Repository URL.")
76 .format(&BACKUP_REPO_URL
)
80 pub const KEYFILE_SCHEMA
: Schema
= StringSchema
::new(
81 "Path to encryption key. All data will be encrypted using this key.")
84 pub const KEYFD_SCHEMA
: Schema
= IntegerSchema
::new(
85 "Pass an encryption key via an already opened file descriptor.")
89 const CHUNK_SIZE_SCHEMA
: Schema
= IntegerSchema
::new(
90 "Chunk size in KB. Must be a power of 2.")
/// Look up the fallback repository from the `PBS_REPOSITORY` environment
/// variable. Returns `None` when the variable is unset (or not valid
/// unicode).
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
100 pub fn extract_repository_from_value(
102 ) -> Result
<BackupRepository
, Error
> {
104 let repo_url
= param
["repository"]
107 .or_else(get_default_repository
)
108 .ok_or_else(|| format_err
!("unable to get (default) repository"))?
;
110 let repo
: BackupRepository
= repo_url
.parse()?
;
115 fn extract_repository_from_map(
116 param
: &HashMap
<String
, String
>,
117 ) -> Option
<BackupRepository
> {
119 param
.get("repository")
121 .or_else(get_default_repository
)
122 .and_then(|repo_url
| repo_url
.parse
::<BackupRepository
>().ok())
/// Best-effort bookkeeping of recently used repositories.
///
/// Bumps the use counter for `repo` in the cache file and trims the stored
/// list to the 10 most-used entries. All errors are deliberately swallowed:
/// this exists only to feed `complete_repository`, so failure must never
/// abort the actual command.
fn record_repository(repo: &BackupRepository) {
    // NOTE(review): the error arms below were lost in extraction and are
    // reconstructed as silent early returns — confirm against upstream.
    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    // missing or unreadable cache file simply starts a fresh object
    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    // increment this repository's use counter (absent counts as 0)
    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    // repeatedly pick the highest remaining counter — a selection sort that
    // copies entries into `map` in descending use order
    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    // ignore write errors — cache only
    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
173 pub fn complete_repository(_arg
: &str, _param
: &HashMap
<String
, String
>) -> Vec
<String
> {
175 let mut result
= vec
![];
177 let base
= match BaseDirectories
::with_prefix("proxmox-backup") {
182 // usually $HOME/.cache/proxmox-backup/repo-list
183 let path
= match base
.place_cache_file("repo-list") {
188 let data
= file_get_json(&path
, None
).unwrap_or_else(|_
| json
!({}
));
190 if let Some(map
) = data
.as_object() {
191 for (repo
, _count
) in map
{
192 result
.push(repo
.to_owned());
199 fn connect(repo
: &BackupRepository
) -> Result
<HttpClient
, Error
> {
200 connect_do(repo
.host(), repo
.port(), repo
.auth_id())
201 .map_err(|err
| format_err
!("error building client for repository {} - {}", repo
, err
))
204 fn connect_do(server
: &str, port
: u16, auth_id
: &Authid
) -> Result
<HttpClient
, Error
> {
205 let fingerprint
= std
::env
::var(ENV_VAR_PBS_FINGERPRINT
).ok();
207 use std
::env
::VarError
::*;
208 let password
= match std
::env
::var(ENV_VAR_PBS_PASSWORD
) {
210 Err(NotUnicode(_
)) => bail
!(format
!("{} contains bad characters", ENV_VAR_PBS_PASSWORD
)),
211 Err(NotPresent
) => None
,
214 let options
= HttpClientOptions
::new()
215 .prefix(Some("proxmox-backup".to_string()))
218 .fingerprint(fingerprint
)
219 .fingerprint_cache(true)
222 HttpClient
::new(server
, port
, auth_id
, options
)
225 async
fn view_task_result(
229 ) -> Result
<(), Error
> {
230 let data
= &result
["data"];
231 if output_format
== "text" {
232 if let Some(upid
) = data
.as_str() {
233 display_task_log(client
, upid
, true).await?
;
236 format_and_print_result(&data
, &output_format
);
242 async
fn api_datastore_list_snapshots(
245 group
: Option
<BackupGroup
>,
246 ) -> Result
<Value
, Error
> {
248 let path
= format
!("api2/json/admin/datastore/{}/snapshots", store
);
250 let mut args
= json
!({}
);
251 if let Some(group
) = group
{
252 args
["backup-type"] = group
.backup_type().into();
253 args
["backup-id"] = group
.backup_id().into();
256 let mut result
= client
.get(&path
, Some(args
)).await?
;
258 Ok(result
["data"].take())
261 pub async
fn api_datastore_latest_snapshot(
265 ) -> Result
<(String
, String
, i64), Error
> {
267 let list
= api_datastore_list_snapshots(client
, store
, Some(group
.clone())).await?
;
268 let mut list
: Vec
<SnapshotListItem
> = serde_json
::from_value(list
)?
;
271 bail
!("backup group {:?} does not contain any snapshots.", group
.group_path());
274 list
.sort_unstable_by(|a
, b
| b
.backup_time
.cmp(&a
.backup_time
));
276 let backup_time
= list
[0].backup_time
;
278 Ok((group
.backup_type().to_owned(), group
.backup_id().to_owned(), backup_time
))
281 async
fn backup_directory
<P
: AsRef
<Path
>>(
282 client
: &BackupWriter
,
285 chunk_size
: Option
<usize>,
286 device_set
: Option
<HashSet
<u64>>,
288 skip_lost_and_found
: bool
,
289 catalog
: Arc
<Mutex
<CatalogWriter
<crate::tools
::StdChannelWriter
>>>,
290 exclude_pattern
: Vec
<MatchEntry
>,
292 upload_options
: UploadOptions
,
293 ) -> Result
<BackupStats
, Error
> {
295 let pxar_stream
= PxarBackupStream
::open(
304 let mut chunk_stream
= ChunkStream
::new(pxar_stream
, chunk_size
);
306 let (tx
, rx
) = mpsc
::channel(10); // allow to buffer 10 chunks
308 let stream
= ReceiverStream
::new(rx
)
309 .map_err(Error
::from
);
311 // spawn chunker inside a separate task so that it can run parallel
312 tokio
::spawn(async
move {
313 while let Some(v
) = chunk_stream
.next().await
{
314 let _
= tx
.send(v
).await
;
318 if upload_options
.fixed_size
.is_some() {
319 bail
!("cannot backup directory with fixed chunk size!");
323 .upload_stream(archive_name
, stream
, upload_options
)
/// Upload a fixed-chunk image archive from a local file or block device.
///
/// The file is streamed through a tokio `FramedRead`, cut into fixed-size
/// chunks (default 4 MiB when `chunk_size` is `None`) and uploaded as
/// `archive_name`. `upload_options.fixed_size` must be set — images always
/// use fixed chunking.
///
/// NOTE(review): the `image_path`/`archive_name` parameter lines were lost
/// in extraction; names are reconstructed from their uses in the body.
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    // byte stream over the file contents
    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
361 schema
: REPO_URL_SCHEMA
,
365 schema
: OUTPUT_FORMAT
,
371 /// List backup groups.
372 async
fn list_backup_groups(param
: Value
) -> Result
<Value
, Error
> {
374 let output_format
= get_output_format(¶m
);
376 let repo
= extract_repository_from_value(¶m
)?
;
378 let client
= connect(&repo
)?
;
380 let path
= format
!("api2/json/admin/datastore/{}/groups", repo
.store());
382 let mut result
= client
.get(&path
, None
).await?
;
384 record_repository(&repo
);
386 let render_group_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
387 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
388 let group
= BackupGroup
::new(item
.backup_type
, item
.backup_id
);
389 Ok(group
.group_path().to_str().unwrap().to_owned())
392 let render_last_backup
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
393 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
394 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.last_backup
)?
;
395 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
398 let render_files
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
399 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
400 Ok(tools
::format
::render_backup_file_list(&item
.files
))
403 let options
= default_table_format_options()
404 .sortby("backup-type", false)
405 .sortby("backup-id", false)
406 .column(ColumnConfig
::new("backup-id").renderer(render_group_path
).header("group"))
408 ColumnConfig
::new("last-backup")
409 .renderer(render_last_backup
)
410 .header("last snapshot")
413 .column(ColumnConfig
::new("backup-count"))
414 .column(ColumnConfig
::new("files").renderer(render_files
));
416 let mut data
: Value
= result
["data"].take();
418 let return_type
= &proxmox_backup
::api2
::admin
::datastore
::API_METHOD_LIST_GROUPS
.returns
;
420 format_and_print_result_full(&mut data
, return_type
, &output_format
, &options
);
429 schema
: REPO_URL_SCHEMA
,
434 description
: "Backup group.",
442 /// Change owner of a backup group
443 async
fn change_backup_owner(group
: String
, mut param
: Value
) -> Result
<(), Error
> {
445 let repo
= extract_repository_from_value(¶m
)?
;
447 let mut client
= connect(&repo
)?
;
449 param
.as_object_mut().unwrap().remove("repository");
451 let group
: BackupGroup
= group
.parse()?
;
453 param
["backup-type"] = group
.backup_type().into();
454 param
["backup-id"] = group
.backup_id().into();
456 let path
= format
!("api2/json/admin/datastore/{}/change-owner", repo
.store());
457 client
.post(&path
, Some(param
)).await?
;
459 record_repository(&repo
);
468 schema
: REPO_URL_SCHEMA
,
474 /// Try to login. If successful, store ticket.
475 async
fn api_login(param
: Value
) -> Result
<Value
, Error
> {
477 let repo
= extract_repository_from_value(¶m
)?
;
479 let client
= connect(&repo
)?
;
480 client
.login().await?
;
482 record_repository(&repo
);
491 schema
: REPO_URL_SCHEMA
,
497 /// Logout (delete stored ticket).
498 fn api_logout(param
: Value
) -> Result
<Value
, Error
> {
500 let repo
= extract_repository_from_value(¶m
)?
;
502 delete_ticket_info("proxmox-backup", repo
.host(), repo
.user())?
;
511 schema
: REPO_URL_SCHEMA
,
515 schema
: OUTPUT_FORMAT
,
521 /// Show client and optional server version
522 async
fn api_version(param
: Value
) -> Result
<(), Error
> {
524 let output_format
= get_output_format(¶m
);
526 let mut version_info
= json
!({
528 "version": version
::PROXMOX_PKG_VERSION
,
529 "release": version
::PROXMOX_PKG_RELEASE
,
530 "repoid": version
::PROXMOX_PKG_REPOID
,
534 let repo
= extract_repository_from_value(¶m
);
535 if let Ok(repo
) = repo
{
536 let client
= connect(&repo
)?
;
538 match client
.get("api2/json/version", None
).await
{
539 Ok(mut result
) => version_info
["server"] = result
["data"].take(),
540 Err(e
) => eprintln
!("could not connect to server - {}", e
),
543 if output_format
== "text" {
544 println
!("client version: {}.{}", version
::PROXMOX_PKG_VERSION
, version
::PROXMOX_PKG_RELEASE
);
545 if let Some(server
) = version_info
["server"].as_object() {
546 let server_version
= server
["version"].as_str().unwrap();
547 let server_release
= server
["release"].as_str().unwrap();
548 println
!("server version: {}.{}", server_version
, server_release
);
551 format_and_print_result(&version_info
, &output_format
);
561 schema
: REPO_URL_SCHEMA
,
565 schema
: OUTPUT_FORMAT
,
571 /// Start garbage collection for a specific repository.
572 async
fn start_garbage_collection(param
: Value
) -> Result
<Value
, Error
> {
574 let repo
= extract_repository_from_value(¶m
)?
;
576 let output_format
= get_output_format(¶m
);
578 let mut client
= connect(&repo
)?
;
580 let path
= format
!("api2/json/admin/datastore/{}/gc", repo
.store());
582 let result
= client
.post(&path
, None
).await?
;
584 record_repository(&repo
);
586 view_task_result(client
, result
, &output_format
).await?
;
/// Handles returned by `spawn_catalog_upload`.
struct CatalogUploadResult {
    // writer side: directory entries are appended here while archives are
    // backed up (see start_directory()/end_directory() calls in create_backup)
    catalog_writer: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    // resolves with the catalog upload statistics once the background
    // upload task finishes
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}
/// Set up the streaming catalog upload.
///
/// Creates a `CatalogWriter` backed by a bounded std channel and spawns a
/// tokio task that chunks and uploads everything written to it as the
/// catalog archive. Returns the writer handle plus a oneshot receiver for
/// the final upload result.
///
/// NOTE(review): the second parameter line was lost in extraction; it is
/// reconstructed from the call site
/// `spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)`.
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    // background task: drain the chunk stream into the upload until the
    // writer side is dropped (which closes the channel)
    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        // receiver may already be gone — ignore send failure
        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}
631 fn keyfile_parameters(param
: &Value
) -> Result
<(Option
<Vec
<u8>>, CryptMode
), Error
> {
632 let keyfile
= match param
.get("keyfile") {
633 Some(Value
::String(keyfile
)) => Some(keyfile
),
634 Some(_
) => bail
!("bad --keyfile parameter type"),
638 let key_fd
= match param
.get("keyfd") {
639 Some(Value
::Number(key_fd
)) => Some(
640 RawFd
::try_from(key_fd
642 .ok_or_else(|| format_err
!("bad key fd: {:?}", key_fd
))?
644 .map_err(|err
| format_err
!("bad key fd: {:?}: {}", key_fd
, err
))?
646 Some(_
) => bail
!("bad --keyfd parameter type"),
650 let crypt_mode
: Option
<CryptMode
> = match param
.get("crypt-mode") {
651 Some(mode
) => Some(serde_json
::from_value(mode
.clone())?
),
655 let keydata
= match (keyfile
, key_fd
) {
656 (None
, None
) => None
,
657 (Some(_
), Some(_
)) => bail
!("--keyfile and --keyfd are mutually exclusive"),
658 (Some(keyfile
), None
) => {
659 eprintln
!("Using encryption key file: {}", keyfile
);
660 Some(file_get_contents(keyfile
)?
)
662 (None
, Some(fd
)) => {
663 let input
= unsafe { std::fs::File::from_raw_fd(fd) }
;
664 let mut data
= Vec
::new();
665 let _len
: usize = { input }
.read_to_end(&mut data
)
667 format_err
!("error reading encryption key from fd {}: {}", fd
, err
)
669 eprintln
!("Using encryption key from file descriptor");
674 Ok(match (keydata
, crypt_mode
) {
676 (None
, None
) => match key
::read_optional_default_encryption_key()?
{
678 eprintln
!("Encrypting with default encryption key!");
679 (Some(key
), CryptMode
::Encrypt
)
681 None
=> (None
, CryptMode
::None
),
684 // just --crypt-mode=none
685 (None
, Some(CryptMode
::None
)) => (None
, CryptMode
::None
),
687 // just --crypt-mode other than none
688 (None
, Some(crypt_mode
)) => match key
::read_optional_default_encryption_key()?
{
689 None
=> bail
!("--crypt-mode without --keyfile and no default key file available"),
691 eprintln
!("Encrypting with default encryption key!");
692 (Some(key
), crypt_mode
)
697 (Some(key
), None
) => (Some(key
), CryptMode
::Encrypt
),
699 // --keyfile and --crypt-mode=none
700 (Some(_
), Some(CryptMode
::None
)) => {
701 bail
!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
704 // --keyfile and --crypt-mode other than none
705 (Some(key
), Some(crypt_mode
)) => (Some(key
), crypt_mode
),
714 description
: "List of backup source specifications ([<label.ext>:<path>] ...)",
716 schema
: BACKUP_SOURCE_SCHEMA
,
720 schema
: REPO_URL_SCHEMA
,
724 description
: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
728 description
: "Path to file.",
731 "all-file-systems": {
733 description
: "Include all mounted subdirectories.",
737 schema
: KEYFILE_SCHEMA
,
741 schema
: KEYFD_SCHEMA
,
748 "skip-lost-and-found": {
750 description
: "Skip lost+found directory.",
754 schema
: BACKUP_TYPE_SCHEMA
,
758 schema
: BACKUP_ID_SCHEMA
,
762 schema
: BACKUP_TIME_SCHEMA
,
766 schema
: CHUNK_SIZE_SCHEMA
,
771 description
: "List of paths or patterns for matching files to exclude.",
775 description
: "Path or match pattern.",
780 description
: "Max number of entries to hold in memory.",
782 default: proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as isize,
786 description
: "Verbose output.",
792 /// Create (host) backup.
793 async
fn create_backup(
796 _rpcenv
: &mut dyn RpcEnvironment
,
797 ) -> Result
<Value
, Error
> {
799 let repo
= extract_repository_from_value(¶m
)?
;
801 let backupspec_list
= tools
::required_array_param(¶m
, "backupspec")?
;
803 let all_file_systems
= param
["all-file-systems"].as_bool().unwrap_or(false);
805 let skip_lost_and_found
= param
["skip-lost-and-found"].as_bool().unwrap_or(false);
807 let verbose
= param
["verbose"].as_bool().unwrap_or(false);
809 let backup_time_opt
= param
["backup-time"].as_i64();
811 let chunk_size_opt
= param
["chunk-size"].as_u64().map(|v
| (v
*1024) as usize);
813 if let Some(size
) = chunk_size_opt
{
814 verify_chunk_size(size
)?
;
817 let (keydata
, crypt_mode
) = keyfile_parameters(¶m
)?
;
819 let backup_id
= param
["backup-id"].as_str().unwrap_or(&proxmox
::tools
::nodename());
821 let backup_type
= param
["backup-type"].as_str().unwrap_or("host");
823 let include_dev
= param
["include-dev"].as_array();
825 let entries_max
= param
["entries-max"].as_u64()
826 .unwrap_or(proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as u64);
828 let empty
= Vec
::new();
829 let exclude_args
= param
["exclude"].as_array().unwrap_or(&empty
);
831 let mut pattern_list
= Vec
::with_capacity(exclude_args
.len());
832 for entry
in exclude_args
{
833 let entry
= entry
.as_str().ok_or_else(|| format_err
!("Invalid pattern string slice"))?
;
835 MatchEntry
::parse_pattern(entry
, PatternFlag
::PATH_NAME
, MatchType
::Exclude
)
836 .map_err(|err
| format_err
!("invalid exclude pattern entry: {}", err
))?
840 let mut devices
= if all_file_systems { None }
else { Some(HashSet::new()) }
;
842 if let Some(include_dev
) = include_dev
{
843 if all_file_systems
{
844 bail
!("option 'all-file-systems' conflicts with option 'include-dev'");
847 let mut set
= HashSet
::new();
848 for path
in include_dev
{
849 let path
= path
.as_str().unwrap();
850 let stat
= nix
::sys
::stat
::stat(path
)
851 .map_err(|err
| format_err
!("fstat {:?} failed - {}", path
, err
))?
;
852 set
.insert(stat
.st_dev
);
857 let mut upload_list
= vec
![];
858 let mut target_set
= HashSet
::new();
860 for backupspec
in backupspec_list
{
861 let spec
= parse_backup_specification(backupspec
.as_str().unwrap())?
;
862 let filename
= &spec
.config_string
;
863 let target
= &spec
.archive_name
;
865 if target_set
.contains(target
) {
866 bail
!("got target twice: '{}'", target
);
868 target_set
.insert(target
.to_string());
870 use std
::os
::unix
::fs
::FileTypeExt
;
872 let metadata
= std
::fs
::metadata(filename
)
873 .map_err(|err
| format_err
!("unable to access '{}' - {}", filename
, err
))?
;
874 let file_type
= metadata
.file_type();
876 match spec
.spec_type
{
877 BackupSpecificationType
::PXAR
=> {
878 if !file_type
.is_dir() {
879 bail
!("got unexpected file type (expected directory)");
881 upload_list
.push((BackupSpecificationType
::PXAR
, filename
.to_owned(), format
!("{}.didx", target
), 0));
883 BackupSpecificationType
::IMAGE
=> {
884 if !(file_type
.is_file() || file_type
.is_block_device()) {
885 bail
!("got unexpected file type (expected file or block device)");
888 let size
= image_size(&PathBuf
::from(filename
))?
;
890 if size
== 0 { bail!("got zero-sized file '{}'
", filename); }
892 upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}
.fidx
", target), size));
894 BackupSpecificationType::CONFIG => {
895 if !file_type.is_file() {
896 bail!("got unexpected file
type (expected regular file
)");
898 upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
900 BackupSpecificationType::LOGFILE => {
901 if !file_type.is_file() {
902 bail!("got unexpected file
type (expected regular file
)");
904 upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
909 let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);
911 let client = connect(&repo)?;
912 record_repository(&repo);
914 println!("Starting backup
: {}
/{}
/{}
", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
916 println!("Client name
: {}
", proxmox::tools::nodename());
918 let start_time = std::time::Instant::now();
920 println!("Starting backup protocol
: {}
", strftime_local("%c
", epoch_i64())?);
922 let (crypt_config, rsa_encrypted_key) = match keydata {
923 None => (None, None),
925 let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
926 println!("Encryption key fingerprint
: {}
", fingerprint);
928 let crypt_config = CryptConfig::new(key)?;
930 match key::find_master_pubkey()? {
931 Some(ref path) if path.exists() => {
932 let pem_data = file_get_contents(path)?;
933 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
935 let mut key_config = KeyConfig::without_password(key)?;
936 key_config.created = created; // keep original value
938 let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;
939 println!("Master key '{:?}'
", path);
941 (Some(Arc::new(crypt_config)), Some(enc_key))
943 _ => (Some(Arc::new(crypt_config)), None),
948 let client = BackupWriter::start(
950 crypt_config.clone(),
959 let download_previous_manifest = match client.previous_backup_time().await {
960 Ok(Some(backup_time)) => {
962 "Downloading previous
manifest ({}
)",
963 strftime_local("%c
", backup_time)?
968 println!("No previous manifest available
.");
972 // Fallback for outdated server, TODO remove/bubble up with 2.0
977 let previous_manifest = if download_previous_manifest {
978 match client.download_previous_manifest().await {
979 Ok(previous_manifest) => {
980 match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
981 Ok(()) => Some(Arc::new(previous_manifest)),
983 println!("Couldn't re
-use previous manifest
- {}
", err);
989 println!("Couldn't download previous manifest
- {}
", err);
997 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
998 let mut manifest = BackupManifest::new(snapshot);
1000 let mut catalog = None;
1001 let mut catalog_result_rx = None;
1003 for (backup_type, filename, target, size) in upload_list {
1005 BackupSpecificationType::CONFIG => {
1006 let upload_options = UploadOptions {
1008 encrypt: crypt_mode == CryptMode::Encrypt,
1009 ..UploadOptions::default()
1012 println!("Upload config file '{}' to '{}'
as {}
", filename, repo, target);
1014 .upload_blob_from_file(&filename, &target, upload_options)
1016 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1018 BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
1019 let upload_options = UploadOptions {
1021 encrypt: crypt_mode == CryptMode::Encrypt,
1022 ..UploadOptions::default()
1025 println!("Upload log file '{}' to '{}'
as {}
", filename, repo, target);
1027 .upload_blob_from_file(&filename, &target, upload_options)
1029 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1031 BackupSpecificationType::PXAR => {
1032 // start catalog upload on first use
1033 if catalog.is_none() {
1034 let catalog_upload_res = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
1035 catalog = Some(catalog_upload_res.catalog_writer);
1036 catalog_result_rx = Some(catalog_upload_res.result);
1038 let catalog = catalog.as_ref().unwrap();
1040 println!("Upload directory '{}' to '{}'
as {}
", filename, repo, target);
1041 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
1042 let upload_options = UploadOptions {
1043 previous_manifest: previous_manifest.clone(),
1045 encrypt: crypt_mode == CryptMode::Encrypt,
1046 ..UploadOptions::default()
1049 let stats = backup_directory(
1056 skip_lost_and_found,
1058 pattern_list.clone(),
1059 entries_max as usize,
1062 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1063 catalog.lock().unwrap().end_directory()?;
1065 BackupSpecificationType::IMAGE => {
1066 println!("Upload image '{}' to '{:?}'
as {}
", filename, repo, target);
1068 let upload_options = UploadOptions {
1069 previous_manifest: previous_manifest.clone(),
1070 fixed_size: Some(size),
1072 encrypt: crypt_mode == CryptMode::Encrypt,
1075 let stats = backup_image(
1082 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1087 // finalize and upload catalog
1088 if let Some(catalog) = catalog {
1089 let mutex = Arc::try_unwrap(catalog)
1090 .map_err(|_| format_err!("unable to get
catalog (still used
)"))?;
1091 let mut catalog = mutex.into_inner().unwrap();
1095 drop(catalog); // close upload stream
1097 if let Some(catalog_result_rx) = catalog_result_rx {
1098 let stats = catalog_result_rx.await??;
1099 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
1103 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
1104 let target = ENCRYPTED_KEY_BLOB_NAME;
1105 println!("Upload RSA encoded key to '{:?}'
as {}
", repo, target);
1106 let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
1108 .upload_blob_from_data(rsa_encrypted_key, target, options)
1110 manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;
1113 // create manifest (index.json)
1114 // manifests are never encrypted, but include a signature
1115 let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
1116 .map_err(|err| format_err!("unable to format manifest
- {}
", err))?;
1119 if verbose { println!("Upload index.json to '{}'", repo
) };
1120 let options
= UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() }
;
1122 .upload_blob_from_data(manifest
.into_bytes(), MANIFEST_BLOB_NAME
, options
)
1125 client
.finish().await?
;
1127 let end_time
= std
::time
::Instant
::now();
1128 let elapsed
= end_time
.duration_since(start_time
);
1129 println
!("Duration: {:.2}s", elapsed
.as_secs_f64());
1131 println
!("End Time: {}", strftime_local("%c", epoch_i64())?
);
1136 fn complete_backup_source(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1138 let mut result
= vec
![];
1140 let data
: Vec
<&str> = arg
.splitn(2, '
:'
).collect();
1142 if data
.len() != 2 {
1143 result
.push(String
::from("root.pxar:/"));
1144 result
.push(String
::from("etc.pxar:/etc"));
1148 let files
= tools
::complete_file_name(data
[1], param
);
1151 result
.push(format
!("{}:{}", data
[0], file
));
1157 async
fn dump_image
<W
: Write
>(
1158 client
: Arc
<BackupReader
>,
1159 crypt_config
: Option
<Arc
<CryptConfig
>>,
1160 crypt_mode
: CryptMode
,
1161 index
: FixedIndexReader
,
1164 ) -> Result
<(), Error
> {
1166 let most_used
= index
.find_most_used_chunks(8);
1168 let chunk_reader
= RemoteChunkReader
::new(client
.clone(), crypt_config
, crypt_mode
, most_used
);
1170 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1171 // and thus slows down reading. Instead, directly use RemoteChunkReader
1174 let start_time
= std
::time
::Instant
::now();
1176 for pos
in 0..index
.index_count() {
1177 let digest
= index
.index_digest(pos
).unwrap();
1178 let raw_data
= chunk_reader
.read_chunk(&digest
).await?
;
1179 writer
.write_all(&raw_data
)?
;
1180 bytes
+= raw_data
.len();
1182 let next_per
= ((pos
+1)*100)/index
.index_count();
1183 if per
!= next_per
{
1184 eprintln
!("progress {}% (read {} bytes, duration {} sec)",
1185 next_per
, bytes
, start_time
.elapsed().as_secs());
1191 let end_time
= std
::time
::Instant
::now();
1192 let elapsed
= end_time
.duration_since(start_time
);
1193 eprintln
!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1195 elapsed
.as_secs_f64(),
1196 bytes
as f64/(1024.0*1024.0*elapsed
.as_secs_f64())
1203 fn parse_archive_type(name
: &str) -> (String
, ArchiveType
) {
1204 if name
.ends_with(".didx") || name
.ends_with(".fidx") || name
.ends_with(".blob") {
1205 (name
.into(), archive_type(name
).unwrap())
1206 } else if name
.ends_with(".pxar") {
1207 (format
!("{}.didx", name
), ArchiveType
::DynamicIndex
)
1208 } else if name
.ends_with(".img") {
1209 (format
!("{}.fidx", name
), ArchiveType
::FixedIndex
)
1211 (format
!("{}.blob", name
), ArchiveType
::Blob
)
// NOTE(review): the enclosing #[api( input: { properties: { ... } } )]
// attribute is only partially present in this copy of the source; the
// fragments below are what survives of the parameter schema.
schema: REPO_URL_SCHEMA,
description: "Group/Snapshot path.",
description: "Backup archive name.",
// TODO(review): typo in the user-visible text below: "extraxt" -> "extract"
// (cannot be fixed in a comments-only pass; it is a runtime string).
description: r###"Target directory path. Use '-' to write to standard output.

We do not extraxt '.pxar' archives when writing to standard output.
"allow-existing-dirs": {
description: "Do not fail if directories already exists.",
schema: KEYFILE_SCHEMA,
schema: KEYFD_SCHEMA,
/// Restore backup repository.
///
/// Downloads one archive of a snapshot and writes it to `target`
/// (or to standard output when target is "-"):
/// - the manifest blob is written verbatim,
/// - *.blob archives are copied verbatim,
/// - *.didx (pxar) archives are extracted to a directory, or piped raw,
/// - *.fidx (image) archives are dumped via `dump_image`.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(&repo)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // Exactly one '/' means a group path ("type/id"): resolve it to the
    // group's latest snapshot; otherwise parse a full snapshot path.
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
        // NOTE(review): the `} else {` of this if-expression is missing in this copy.
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())

    let target = tools::required_string_param(&param, "target")?;
    // "-" selects standard output (represented as `None`).
    let target = if target == "-" { None } else { Some(target) };

    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
        // NOTE(review): match arm headers (None / Some(key)) are missing in this copy.
        let (key, _, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
        eprintln!("Encryption key fingerprint: '{}'", fingerprint);
        Some(Arc::new(CryptConfig::new(key)?))

    // Open a read session for the selected snapshot.
    let client = BackupReader::start(
        // NOTE(review): most arguments of BackupReader::start are truncated
        // in this copy of the source.
        crypt_config.clone(),

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        // Restoring the encrypted key blob must work even without the key,
        // so the fingerprint check is skipped (with a loud warning).
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
        // NOTE(review): the `} else {` separating this from the check below is truncated.
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    // The manifest was already downloaded above - just write it out.
    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
            // NOTE(review): the `} else {` for the stdout path is truncated here.
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

        return Ok(Value::Null);

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                // NOTE(review): the OpenOptions flags (write/create_new) and
                // the .open(target) call are truncated in this copy.
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
            // NOTE(review): `} else {` for the stdout path is truncated here.
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        // Seed the chunk reader's cache with the hottest chunks.
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                // NOTE(review): target/feature-flag arguments truncated here.
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                // verbose callback body: print each extracted entry path
                println!("{:?}", path);
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
            // NOTE(review): `} else {` for the raw-stdout path is truncated here.
            let mut writer = std::fs::OpenOptions::new()
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                // NOTE(review): OpenOptions flags and .open(target) truncated.
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
            // NOTE(review): `} else {` truncated here.
            std::fs::OpenOptions::new()
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
// Hand-assembled API method definition for `prune`: the declarative #[api]
// macro is not used here because the prune options are shared with the
// server via the add_common_prune_prameters! macro.
// NOTE(review): "prameters" is a typo inherited from the macro's definition
// in the proxmox_backup crate - it must stay as-is to resolve.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    // NOTE(review): ObjectSchema constructor lines are truncated in this copy.
    "Prune a backup repository.",
    &proxmox_backup::add_common_prune_prameters!([
        ("dry-run", true, &BooleanSchema::new(
            "Just show what prune would do, but do not delete anything.")
        ("group", false, &StringSchema::new("Backup group.").schema()),
        ("output-format", true, &OUTPUT_FORMAT),
        // "quiet" flag:
        &BooleanSchema::new("Minimal output - only show removals.")
        ("repository", true, &REPO_URL_SCHEMA),

// Sync-signature wrapper required by ApiHandler::Async; forwards to the
// real async implementation `prune_async` below.
// NOTE(review): the fn header and `param` binding are truncated in this copy;
// presumably the body is wrapped in `async move { ... }.boxed()` - confirm.
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    prune_async(param).await
/// Async implementation of the `prune` CLI command.
///
/// Forwards the (cleaned-up) parameters to the per-datastore prune API
/// endpoint and renders the server's reply as a table.
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    // Strip client-only parameters before forwarding `param` to the server.
    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    // The server API takes the group split into its two components.
    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    // Table renderer: show the "backup-id" column as a full snapshot path.
    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())

    // Table renderer: show the boolean "keep" flag as a human-readable action.
    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            // NOTE(review): the fallback arm is truncated in this copy of the source.

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))

    // Use the server-side return schema for formatting.
    let return_type = &proxmox_backup::api2::admin::datastore::API_METHOD_PRUNE.returns;

    let mut data = result["data"].take();

    // With --quiet, only list the snapshots that will actually be removed.
    // NOTE(review): the `if quiet { ... data = list.into(); }` framing around
    // this filter appears truncated in this copy of the source.
    let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
        item["keep"].as_bool() == Some(false)
    }).cloned().collect();

    format_and_print_result_full(&mut data, return_type, &output_format, &options);
// NOTE(review): fragments of the #[api(...)] input/returns schema for
// `status` - the attribute itself is truncated in this copy of the source.
schema: REPO_URL_SCHEMA,
schema: OUTPUT_FORMAT,
type: StorageStatus,
/// Get repository status.
///
/// Fetches the datastore status (total/used/avail bytes) and prints it,
/// rendering each value together with its percentage of the total.
async fn status(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    // Renders a byte count plus its percentage of record["total"], e.g.
    // "12345    (42 %)". `roundup` = total/200 adds half a percent so the
    // integer division rounds to nearest instead of truncating.
    // NOTE(review): panics if "total" is missing or 0 - presumably the
    // server always reports a non-zero total; confirm.
    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);
// like get, but simply ignore errors and return Null instead
//
// Used by the shell-completion helpers below: completion must never fail,
// block on prompts, or print errors - on any problem it just returns
// Value::Null so the caller completes nothing.
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    // Reuse cached tickets/fingerprints and whatever the environment
    // provides; never prompt interactively from a completion handler.
    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        // NOTE(review): the .password(password)/.interactive(false) builder
        // calls appear truncated in this copy of the source (`password` is
        // otherwise unused).
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
        // NOTE(review): the Ok(...) arm is truncated in this copy.
        _ => return Value::Null,

    let mut resp = match client.get(url, None).await {
        // NOTE(review): the Ok(...) arm is truncated in this copy.
        _ => return Value::Null,

    // Unwrap the standard {"data": ...} API envelope if present.
    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
1593 fn complete_backup_group(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1594 proxmox_backup
::tools
::runtime
::main(async { complete_backup_group_do(param).await }
)
/// Async worker for [`complete_backup_group`]: queries the datastore's
/// group list and formats each entry as "type/id".
async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    // Without a usable repository there is nothing to complete.
    let repo = match extract_repository_from_map(param) {
        // NOTE(review): the Some/None match arms are truncated in this copy.

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    // Best-effort fetch; errors yield Value::Null and thus an empty list.
    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        // NOTE(review): the `for item in list` header is truncated in this copy.
        if let (Some(backup_id), Some(backup_type)) =
            (item["backup-id"].as_str(), item["backup-type"].as_str())
            result.push(format!("{}/{}", backup_type, backup_id));
1623 pub fn complete_group_or_snapshot(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1624 proxmox_backup
::tools
::runtime
::main(async { complete_group_or_snapshot_do(arg, param).await }
)
/// Async worker for [`complete_group_or_snapshot`].
async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    // Fewer than two '/' in the partial argument means the user is still
    // typing the group part: offer each group both as-is and with a
    // trailing '/' so the shell can keep extending it into a snapshot path.
    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        // NOTE(review): the closing brace and `return result;` of this
        // branch are truncated in this copy of the source.

    // Otherwise complete full snapshot paths.
    complete_backup_snapshot_do(param).await
1642 fn complete_backup_snapshot(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1643 proxmox_backup
::tools
::runtime
::main(async { complete_backup_snapshot_do(param).await }
)
/// Async worker for [`complete_backup_snapshot`]: queries the datastore's
/// snapshot list and formats each entry as a relative snapshot path.
async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        // NOTE(review): the Some/None match arms are truncated in this copy.

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    // Best-effort fetch; errors yield Value::Null and thus an empty list.
    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        // NOTE(review): the `for item in list` header is truncated in this copy.
        if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
            (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            // Re-validate the server-supplied triple before formatting it.
            if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
1674 fn complete_server_file_name(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1675 proxmox_backup
::tools
::runtime
::main(async { complete_server_file_name_do(param).await }
)
/// Async worker for [`complete_server_file_name`]: needs both a repository
/// and an already-completed "snapshot" parameter to list the files of.
async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        // NOTE(review): the Some/None match arms are truncated in this copy.

    // File completion only works once a full snapshot path was given.
    let snapshot: BackupDir = match param.get("snapshot") {
        match path.parse() {
            // NOTE(review): the inner Ok/Err and outer Some/None arms are
            // truncated in this copy of the source.

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
        // NOTE(review): the closing of the json! object / query handling is
        // truncated in this copy.

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    // Best-effort fetch; errors yield Value::Null and thus an empty list.
    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        // NOTE(review): the `for item in list` header is truncated in this copy.
        if let Some(filename) = item["filename"].as_str() {
            result.push(filename.to_owned());
/// Shell completion: archive names - the server file names with their
/// server-side extension (.didx/.fidx/.blob) stripped.
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        // NOTE(review): an .iter() adaptor line appears truncated in this copy.
        .map(|v| tools::format::strip_server_file_extension(&v))
        // NOTE(review): the trailing .collect() is truncated in this copy.
/// Shell completion: pxar archive names - only ".pxar.didx" server files,
/// with the server-side extension stripped.
pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        // NOTE(review): an .iter() adaptor line appears truncated in this copy.
        .filter_map(|name| {
            if name.ends_with(".pxar.didx") {
                Some(tools::format::strip_server_file_extension(name))
            // NOTE(review): the `} else { None }` arm and the trailing
            // .collect() are truncated in this copy of the source.
/// Shell completion: image archive names - only ".img.fidx" server files,
/// with the server-side extension stripped.
pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        // NOTE(review): an .iter() adaptor line appears truncated in this copy.
        .filter_map(|name| {
            if name.ends_with(".img.fidx") {
                Some(tools::format::strip_server_file_extension(name))
            // NOTE(review): the `} else { None }` arm and the trailing
            // .collect() are truncated in this copy of the source.
/// Shell completion for the --chunk-size option.
///
/// Suggests the power-of-two sizes (in KB) from 64 up to 4096, matching
/// CHUNK_SIZE_SCHEMA ("Chunk size in KB. Must be a power of 2.").
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    // 2^6 = 64 ... 2^12 = 4096
    (6u32..=12).map(|exp| (1u64 << exp).to_string()).collect()
}
1765 fn complete_auth_id(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1766 proxmox_backup
::tools
::runtime
::main(async { complete_auth_id_do(param).await }
)
/// Async worker for [`complete_auth_id`]: fetches the user list (with
/// tokens) and offers both plain userids and token ids.
async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        // NOTE(review): the Some/None match arms are truncated in this copy.

    // Best-effort fetch; errors yield Value::Null which fails to parse below.
    let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;

    if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
        for user in parsed {
            result.push(user.userid.to_string());
            for token in user.tokens {
                result.push(token.tokenid.to_string());
use proxmox_backup::client::RemoteChunkReader;

// NOTE(review): the doc comment below appears truncated mid-sentence in
// this copy of the source.
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    // The ReadAt trait only hands out Pin<&Self>, so interior mutability is
    // required; the Mutex also makes the seek+read pair in start_read_at
    // atomic with respect to concurrent callers.
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
1803 impl BufferedDynamicReadAt
{
1804 fn new(inner
: BufferedDynamicReader
<RemoteChunkReader
>) -> Self {
1806 inner
: Mutex
::new(inner
),
1811 impl ReadAt
for BufferedDynamicReadAt
{
1812 fn start_read_at
<'a
>(
1813 self: Pin
<&'a
Self>,
1817 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
1818 MaybeReady
::Ready(tokio
::task
::block_in_place(move || {
1819 let mut reader
= self.inner
.lock().unwrap();
1820 reader
.seek(SeekFrom
::Start(offset
))?
;
1821 Ok(reader
.read(buf
)?
)
1825 fn poll_complete
<'a
>(
1826 self: Pin
<&'a
Self>,
1827 _op
: ReadAtOperation
<'a
>,
1828 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
1829 panic
!("LocalDynamicReadAt::start_read_at returned Pending");
1835 let backup_cmd_def
= CliCommand
::new(&API_METHOD_CREATE_BACKUP
)
1836 .arg_param(&["backupspec"])
1837 .completion_cb("repository", complete_repository
)
1838 .completion_cb("backupspec", complete_backup_source
)
1839 .completion_cb("keyfile", tools
::complete_file_name
)
1840 .completion_cb("chunk-size", complete_chunk_size
);
1842 let benchmark_cmd_def
= CliCommand
::new(&API_METHOD_BENCHMARK
)
1843 .completion_cb("repository", complete_repository
)
1844 .completion_cb("keyfile", tools
::complete_file_name
);
1846 let list_cmd_def
= CliCommand
::new(&API_METHOD_LIST_BACKUP_GROUPS
)
1847 .completion_cb("repository", complete_repository
);
1849 let garbage_collect_cmd_def
= CliCommand
::new(&API_METHOD_START_GARBAGE_COLLECTION
)
1850 .completion_cb("repository", complete_repository
);
1852 let restore_cmd_def
= CliCommand
::new(&API_METHOD_RESTORE
)
1853 .arg_param(&["snapshot", "archive-name", "target"])
1854 .completion_cb("repository", complete_repository
)
1855 .completion_cb("snapshot", complete_group_or_snapshot
)
1856 .completion_cb("archive-name", complete_archive_name
)
1857 .completion_cb("target", tools
::complete_file_name
);
1859 let prune_cmd_def
= CliCommand
::new(&API_METHOD_PRUNE
)
1860 .arg_param(&["group"])
1861 .completion_cb("group", complete_backup_group
)
1862 .completion_cb("repository", complete_repository
);
1864 let status_cmd_def
= CliCommand
::new(&API_METHOD_STATUS
)
1865 .completion_cb("repository", complete_repository
);
1867 let login_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGIN
)
1868 .completion_cb("repository", complete_repository
);
1870 let logout_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGOUT
)
1871 .completion_cb("repository", complete_repository
);
1873 let version_cmd_def
= CliCommand
::new(&API_METHOD_API_VERSION
)
1874 .completion_cb("repository", complete_repository
);
1876 let change_owner_cmd_def
= CliCommand
::new(&API_METHOD_CHANGE_BACKUP_OWNER
)
1877 .arg_param(&["group", "new-owner"])
1878 .completion_cb("group", complete_backup_group
)
1879 .completion_cb("new-owner", complete_auth_id
)
1880 .completion_cb("repository", complete_repository
);
1882 let cmd_def
= CliCommandMap
::new()
1883 .insert("backup", backup_cmd_def
)
1884 .insert("garbage-collect", garbage_collect_cmd_def
)
1885 .insert("list", list_cmd_def
)
1886 .insert("login", login_cmd_def
)
1887 .insert("logout", logout_cmd_def
)
1888 .insert("prune", prune_cmd_def
)
1889 .insert("restore", restore_cmd_def
)
1890 .insert("snapshot", snapshot_mgtm_cli())
1891 .insert("status", status_cmd_def
)
1892 .insert("key", key
::cli())
1893 .insert("mount", mount_cmd_def())
1894 .insert("map", map_cmd_def())
1895 .insert("unmap", unmap_cmd_def())
1896 .insert("catalog", catalog_mgmt_cli())
1897 .insert("task", task_mgmt_cli())
1898 .insert("version", version_cmd_def
)
1899 .insert("benchmark", benchmark_cmd_def
)
1900 .insert("change-owner", change_owner_cmd_def
)
1902 .alias(&["files"], &["snapshot", "files"])
1903 .alias(&["forget"], &["snapshot", "forget"])
1904 .alias(&["upload-log"], &["snapshot", "upload-log"])
1905 .alias(&["snapshots"], &["snapshot", "list"])
1908 let rpcenv
= CliEnvironment
::new();
1909 run_cli_command(cmd_def
, rpcenv
, Some(|future
| {
1910 proxmox_backup
::tools
::runtime
::main(future
)