use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    BufferedDynamicReader,

mod proxmox_backup_client;
use proxmox_backup_client::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)

pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")

pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
    "Pass an encryption key via an already opened file descriptor.")

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
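
/// Determine the repository from the "repository" parameter, falling back to the
/// PBS_REPOSITORY environment variable (see get_default_repository()), and parse it
/// into a BackupRepository.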
pub fn extract_repository_from_value(
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {
    param.get("repository")
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
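
/// Record a recently used repository in the "repo-list" cache file, incrementing a
/// per-repository use counter and keeping only the ten most used entries.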
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    let mut max_used = 0;
    let mut max_repo = None;
    for (repo, count) in data.as_object().unwrap() {
        if map.contains_key(repo) { continue; }
        if let Some(count) = count.as_i64() {
            if count > max_used {
                max_repo = Some(repo);
    if let Some(repo) = max_repo {
        map.insert(repo.to_owned(), json!(max_used));
    if map.len() > 10 { // store max. 10 repos

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
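
/// Shell completion helper: return all repository names found in the "repo-list"
/// cache file written by record_repository().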
pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
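
/// Build an HttpClient for the given server and user. The TLS fingerprint and the
/// password are taken from the PBS_FINGERPRINT and PBS_PASSWORD environment
/// variables when set; fingerprint caching is enabled.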
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
        Err(NotPresent) => None,

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .fingerprint(fingerprint)
        .fingerprint_cache(true)

    HttpClient::new(server, userid, options)
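
/// Display the result of an API call that spawned a server-side task: in "text"
/// output mode the task log for the returned UPID is followed via
/// display_task_log(), otherwise the raw result is printed in the requested format.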
async fn view_task_result(
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        format_and_print_result(&data, &output_format);
async fn api_datastore_list_snapshots(
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
pub async fn api_datastore_latest_snapshot(
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

        bail!("backup group {:?} does not contain any snapshots.", group.group_path());

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
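
/// Back up a directory as a dynamic-index (".didx") pxar archive. The PxarBackupStream
/// is cut into chunks by a ChunkStream; the chunker runs in its own tokio task and
/// feeds the uploader through a bounded mpsc channel, while catalog entries go to the
/// shared CatalogWriter.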
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    skip_lost_and_found: bool,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(

    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow buffering of 10 chunks

        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;

        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
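
/// Back up a file or block device as a fixed-index (".fidx") archive. The image is
/// read through a FramedRead/BytesCodec stream and split into fixed-size chunks
/// (4 MiB unless an explicit chunk size is given) before upload.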
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    chunk_size: Option<usize>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,

/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);
            schema: REPO_URL_SCHEMA,
            description: "Backup group.",
            schema: OUTPUT_FORMAT,

/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        Ok(tools::format::render_backup_file_list(&filenames[..]))

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files))

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);
            schema: REPO_URL_SCHEMA,
            description: "Snapshot path.",

/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),

    record_repository(&repo);
            schema: REPO_URL_SCHEMA,

/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);
            schema: REPO_URL_SCHEMA,

/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;
            schema: REPO_URL_SCHEMA,
            description: "Snapshot path.",
            schema: OUTPUT_FORMAT,

/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,

/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;
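
/// Set up the catalog upload: data written to the returned CatalogWriter flows over a
/// std sync_channel into a ChunkStream (512 KiB chunks) and is uploaded as a dynamic
/// index named CATALOG_NAME by a background tokio task. The oneshot receiver yields
/// the upload result once the catalog stream is closed.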
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
        Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
        tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>

    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);

        let _ = catalog_result_tx.send(catalog_upload_result);

    Ok((catalog, catalog_result_rx))
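
/// Resolve the --keyfile/--keyfd/--crypt-mode parameters into optional key data and
/// the effective CryptMode: --keyfile and --keyfd are mutually exclusive; without
/// either, the default encryption key is used if present; a key combined with
/// --crypt-mode=none is rejected.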
fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        Some(_) => bail!("bad --keyfd parameter type"),

    let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),

    let keydata = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                format_err!("error reading encryption key from fd {}: {}", fd, err)

    Ok(match (keydata, crypt_mode) {
        (None, None) => match key::read_optional_default_encryption_key()? {
            Some(key) => (Some(key), CryptMode::Encrypt),
            None => (None, CryptMode::None),

        // just --crypt-mode=none
        (None, Some(CryptMode::None)) => (None, CryptMode::None),

        // just --crypt-mode other than none
        (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
            None => bail!("--crypt-mode without --keyfile and no default key file available"),
            Some(key) => (Some(key), crypt_mode),

        (Some(key), None) => (Some(key), CryptMode::Encrypt),

        // --keyfile and --crypt-mode=none
        (Some(_), Some(CryptMode::None)) => {
            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");

        // --keyfile and --crypt-mode other than none
        (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
            description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                schema: BACKUP_SOURCE_SCHEMA,
            schema: REPO_URL_SCHEMA,
            description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                description: "Path to file.",
            schema: KEYFILE_SCHEMA,
            schema: KEYFD_SCHEMA,
        "skip-lost-and-found": {
            description: "Skip lost+found directory.",
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,
            schema: CHUNK_SIZE_SCHEMA,
            description: "List of paths or patterns for matching files to exclude.",
                description: "Path or match pattern.",
            description: "Max number of entries to hold in memory.",
            default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
            description: "Verbose output.",
/// Create (host) backup.
async fn create_backup(
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;

    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keydata {
        None => (None, None),
            let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            match key::find_master_pubkey()? {
                Some(ref path) if path.exists() => {
                    let pem_data = file_get_contents(path)?;
                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                    let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                    (Some(Arc::new(crypt_config)), Some(enc_key))
                _ => (Some(Arc::new(crypt_config)), None),

    let client = BackupWriter::start(
        crypt_config.clone(),

    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
        Some(Arc::new(previous_manifest))

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_tx = None;

    for (backup_type, filename, target, size) in upload_list {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
                    catalog = Some(cat);
                    catalog_result_tx = Some(res);
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    previous_manifest.clone(),
                    skip_lost_and_found,
                    pattern_list.clone(),
                    entries_max as usize,
                    crypt_mode == CryptMode::Encrypt,
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
                catalog.lock().unwrap().end_directory()?;
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    previous_manifest.clone(),
                    crypt_mode == CryptMode::Encrypt,
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_tx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            .upload_blob_from_data(rsa_encrypted_key, target, false, false)
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();

    // manifests are never encrypted, but include a signature
    // fixme: sign manifest

        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, false)

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));

    let files = tools::complete_file_name(data[1], param);

        result.push(format!("{}:{}", data[0], file));
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader

    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();

        let next_per = ((pos+1)*100)/index.index_count();
        if per != next_per {
            eprintln!("progress {}% (read {} bytes, duration {} sec)",
                next_per, bytes, start_time.elapsed().as_secs());

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
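
/// Map a user supplied archive name to the server-side file name and archive type:
/// names ending in ".didx", ".fidx" or ".blob" are used as-is, ".pxar" becomes a
/// dynamic index and ".img" a fixed index, anything else a blob. For example,
/// "root.pxar" yields ("root.pxar.didx", ArchiveType::DynamicIndex) and "disk.img"
/// yields ("disk.img.fidx", ArchiveType::FixedIndex).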
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
            schema: REPO_URL_SCHEMA,
            description: "Group/Snapshot path.",
            description: "Backup archive name.",
            description: r###"Target directory path. Use '-' to write to standard output.
We do not extract '.pxar' archives when writing to standard output.
        "allow-existing-dirs": {
            description: "Do not fail if directories already exist.",
            schema: KEYFILE_SCHEMA,
            schema: KEYFD_SCHEMA,
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
            let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))

    let client = BackupReader::start(
        crypt_config.clone(),

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                        println!("{:?}", path);
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
            let mut writer = std::fs::OpenOptions::new()
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;

    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
            std::fs::OpenOptions::new()
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
            schema: REPO_URL_SCHEMA,
            description: "Group/Snapshot path.",
            description: "The path to the log file you want to upload.",
            schema: KEYFILE_SCHEMA,
            schema: KEYFD_SCHEMA,
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
            let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))

    let data = file_get_contents(logfile)?;

    // fixme: howto sign log?
    let blob = match crypt_mode {
        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
            ("group", false, &StringSchema::new("Backup group.").schema()),
            ("output-format", true, &OUTPUT_FORMAT),
                &BooleanSchema::new("Minimal output - only show removals.")
            ("repository", true, &REPO_URL_SCHEMA),

    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
        prune_async(param).await
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
        item["keep"].as_bool() == Some(false)
    }).map(|v| v.clone()).collect();

    format_and_print_result_full(&mut data, info, &output_format, &options);
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,

/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({}%)", per);
        Ok(format!("{} {:>8}", v, info))

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        _ => return Value::Null,

    let mut resp = match client.get(url, None).await {
        _ => return Value::Null,

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
                result.push(format!("{}/{}", backup_type, backup_id));

pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));

    complete_backup_snapshot_do(param).await

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {

    let snapshot: BackupDir = match param.get("snapshot") {
            match path.parse() {

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .map(|v| tools::format::strip_server_file_expenstion(&v))

pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

        result.push(size.to_string());
        if size > 4096 { break; }
use proxmox_backup::client::RemoteChunkReader;
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
            inner: Mutex::new(inner),

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("LocalDynamicReadAt::start_read_at returned Pending");
    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", tools::complete_file_name);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("benchmark", benchmark_cmd_def);

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)