1 use std
::collections
::{HashSet, HashMap}
;
2 use std
::convert
::TryFrom
;
3 use std
::io
::{self, Read, Write, Seek, SeekFrom}
;
4 use std
::os
::unix
::io
::{FromRawFd, RawFd}
;
5 use std
::path
::{Path, PathBuf}
;
7 use std
::sync
::{Arc, Mutex}
;
8 use std
::task
::Context
;
10 use anyhow
::{bail, format_err, Error}
;
11 use chrono
::{Local, LocalResult, DateTime, Utc, TimeZone}
;
12 use futures
::future
::FutureExt
;
13 use futures
::stream
::{StreamExt, TryStreamExt}
;
14 use serde_json
::{json, Value}
;
15 use tokio
::sync
::mpsc
;
16 use xdg
::BaseDirectories
;
18 use pathpatterns
::{MatchEntry, MatchType, PatternFlag}
;
19 use proxmox
::tools
::fs
::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size}
;
20 use proxmox
::api
::{ApiHandler, ApiMethod, RpcEnvironment}
;
21 use proxmox
::api
::schema
::*;
22 use proxmox
::api
::cli
::*;
23 use proxmox
::api
::api
;
24 use pxar
::accessor
::{MaybeReady, ReadAt, ReadAtOperation}
;
26 use proxmox_backup
::tools
;
27 use proxmox_backup
::api2
::types
::*;
28 use proxmox_backup
::api2
::version
;
29 use proxmox_backup
::client
::*;
30 use proxmox_backup
::pxar
::catalog
::*;
31 use proxmox_backup
::backup
::{
40 BufferedDynamicReader
,
56 mod proxmox_backup_client
;
57 use proxmox_backup_client
::*;
59 const ENV_VAR_PBS_FINGERPRINT
: &str = "PBS_FINGERPRINT";
60 const ENV_VAR_PBS_PASSWORD
: &str = "PBS_PASSWORD";
63 pub const REPO_URL_SCHEMA
: Schema
= StringSchema
::new("Repository URL.")
64 .format(&BACKUP_REPO_URL
)
68 pub const KEYFILE_SCHEMA
: Schema
= StringSchema
::new(
69 "Path to encryption key. All data will be encrypted using this key.")
72 pub const KEYFD_SCHEMA
: Schema
= IntegerSchema
::new(
73 "Pass an encryption key via an already opened file descriptor.")
77 const CHUNK_SIZE_SCHEMA
: Schema
= IntegerSchema
::new(
78 "Chunk size in KB. Must be a power of 2.")
/// Look up the fallback repository from the `PBS_REPOSITORY` environment
/// variable.
///
/// Returns `None` when the variable is unset (or not valid unicode), so
/// callers can chain this behind an explicit `--repository` parameter.
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
88 pub fn extract_repository_from_value(
90 ) -> Result
<BackupRepository
, Error
> {
92 let repo_url
= param
["repository"]
95 .or_else(get_default_repository
)
96 .ok_or_else(|| format_err
!("unable to get (default) repository"))?
;
98 let repo
: BackupRepository
= repo_url
.parse()?
;
103 fn extract_repository_from_map(
104 param
: &HashMap
<String
, String
>,
105 ) -> Option
<BackupRepository
> {
107 param
.get("repository")
109 .or_else(get_default_repository
)
110 .and_then(|repo_url
| repo_url
.parse
::<BackupRepository
>().ok())
113 fn record_repository(repo
: &BackupRepository
) {
115 let base
= match BaseDirectories
::with_prefix("proxmox-backup") {
120 // usually $HOME/.cache/proxmox-backup/repo-list
121 let path
= match base
.place_cache_file("repo-list") {
126 let mut data
= file_get_json(&path
, None
).unwrap_or_else(|_
| json
!({}
));
128 let repo
= repo
.to_string();
130 data
[&repo
] = json
!{ data[&repo].as_i64().unwrap_or(0) + 1 }
;
132 let mut map
= serde_json
::map
::Map
::new();
135 let mut max_used
= 0;
136 let mut max_repo
= None
;
137 for (repo
, count
) in data
.as_object().unwrap() {
138 if map
.contains_key(repo
) { continue; }
139 if let Some(count
) = count
.as_i64() {
140 if count
> max_used
{
142 max_repo
= Some(repo
);
146 if let Some(repo
) = max_repo
{
147 map
.insert(repo
.to_owned(), json
!(max_used
));
151 if map
.len() > 10 { // store max. 10 repos
156 let new_data
= json
!(map
);
158 let _
= replace_file(path
, new_data
.to_string().as_bytes(), CreateOptions
::new());
161 pub fn complete_repository(_arg
: &str, _param
: &HashMap
<String
, String
>) -> Vec
<String
> {
163 let mut result
= vec
![];
165 let base
= match BaseDirectories
::with_prefix("proxmox-backup") {
170 // usually $HOME/.cache/proxmox-backup/repo-list
171 let path
= match base
.place_cache_file("repo-list") {
176 let data
= file_get_json(&path
, None
).unwrap_or_else(|_
| json
!({}
));
178 if let Some(map
) = data
.as_object() {
179 for (repo
, _count
) in map
{
180 result
.push(repo
.to_owned());
187 fn connect(server
: &str, userid
: &Userid
) -> Result
<HttpClient
, Error
> {
189 let fingerprint
= std
::env
::var(ENV_VAR_PBS_FINGERPRINT
).ok();
191 use std
::env
::VarError
::*;
192 let password
= match std
::env
::var(ENV_VAR_PBS_PASSWORD
) {
194 Err(NotUnicode(_
)) => bail
!(format
!("{} contains bad characters", ENV_VAR_PBS_PASSWORD
)),
195 Err(NotPresent
) => None
,
198 let options
= HttpClientOptions
::new()
199 .prefix(Some("proxmox-backup".to_string()))
202 .fingerprint(fingerprint
)
203 .fingerprint_cache(true)
206 HttpClient
::new(server
, userid
, options
)
209 async
fn view_task_result(
213 ) -> Result
<(), Error
> {
214 let data
= &result
["data"];
215 if output_format
== "text" {
216 if let Some(upid
) = data
.as_str() {
217 display_task_log(client
, upid
, true).await?
;
220 format_and_print_result(&data
, &output_format
);
226 async
fn api_datastore_list_snapshots(
229 group
: Option
<BackupGroup
>,
230 ) -> Result
<Value
, Error
> {
232 let path
= format
!("api2/json/admin/datastore/{}/snapshots", store
);
234 let mut args
= json
!({}
);
235 if let Some(group
) = group
{
236 args
["backup-type"] = group
.backup_type().into();
237 args
["backup-id"] = group
.backup_id().into();
240 let mut result
= client
.get(&path
, Some(args
)).await?
;
242 Ok(result
["data"].take())
245 pub async
fn api_datastore_latest_snapshot(
249 ) -> Result
<(String
, String
, DateTime
<Utc
>), Error
> {
251 let list
= api_datastore_list_snapshots(client
, store
, Some(group
.clone())).await?
;
252 let mut list
: Vec
<SnapshotListItem
> = serde_json
::from_value(list
)?
;
255 bail
!("backup group {:?} does not contain any snapshots.", group
.group_path());
258 list
.sort_unstable_by(|a
, b
| b
.backup_time
.cmp(&a
.backup_time
));
260 let backup_time
= match Utc
.timestamp_opt(list
[0].backup_time
, 0) {
261 LocalResult
::Single(time
) => time
,
262 _
=> bail
!("last snapshot of backup group {:?} has invalid timestmap {}.",
263 group
.group_path(), list
[0].backup_time
),
266 Ok((group
.backup_type().to_owned(), group
.backup_id().to_owned(), backup_time
))
269 async
fn backup_directory
<P
: AsRef
<Path
>>(
270 client
: &BackupWriter
,
271 previous_manifest
: Option
<Arc
<BackupManifest
>>,
274 chunk_size
: Option
<usize>,
275 device_set
: Option
<HashSet
<u64>>,
277 skip_lost_and_found
: bool
,
278 catalog
: Arc
<Mutex
<CatalogWriter
<crate::tools
::StdChannelWriter
>>>,
279 exclude_pattern
: Vec
<MatchEntry
>,
283 ) -> Result
<BackupStats
, Error
> {
285 let pxar_stream
= PxarBackupStream
::open(
294 let mut chunk_stream
= ChunkStream
::new(pxar_stream
, chunk_size
);
296 let (mut tx
, rx
) = mpsc
::channel(10); // allow to buffer 10 chunks
299 .map_err(Error
::from
);
301 // spawn chunker inside a separate task so that it can run parallel
302 tokio
::spawn(async
move {
303 while let Some(v
) = chunk_stream
.next().await
{
304 let _
= tx
.send(v
).await
;
309 .upload_stream(previous_manifest
, archive_name
, stream
, "dynamic", None
, compress
, encrypt
)
315 async
fn backup_image
<P
: AsRef
<Path
>>(
316 client
: &BackupWriter
,
317 previous_manifest
: Option
<Arc
<BackupManifest
>>,
321 chunk_size
: Option
<usize>,
325 ) -> Result
<BackupStats
, Error
> {
327 let path
= image_path
.as_ref().to_owned();
329 let file
= tokio
::fs
::File
::open(path
).await?
;
331 let stream
= tokio_util
::codec
::FramedRead
::new(file
, tokio_util
::codec
::BytesCodec
::new())
332 .map_err(Error
::from
);
334 let stream
= FixedChunkStream
::new(stream
, chunk_size
.unwrap_or(4*1024*1024));
337 .upload_stream(previous_manifest
, archive_name
, stream
, "fixed", Some(image_size
), compress
, encrypt
)
347 schema
: REPO_URL_SCHEMA
,
351 schema
: OUTPUT_FORMAT
,
357 /// List backup groups.
358 async
fn list_backup_groups(param
: Value
) -> Result
<Value
, Error
> {
360 let output_format
= get_output_format(¶m
);
362 let repo
= extract_repository_from_value(¶m
)?
;
364 let client
= connect(repo
.host(), repo
.user())?
;
366 let path
= format
!("api2/json/admin/datastore/{}/groups", repo
.store());
368 let mut result
= client
.get(&path
, None
).await?
;
370 record_repository(&repo
);
372 let render_group_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
373 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
374 let group
= BackupGroup
::new(item
.backup_type
, item
.backup_id
);
375 Ok(group
.group_path().to_str().unwrap().to_owned())
378 let render_last_backup
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
379 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
380 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.last_backup
)?
;
381 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
384 let render_files
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
385 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
386 Ok(tools
::format
::render_backup_file_list(&item
.files
))
389 let options
= default_table_format_options()
390 .sortby("backup-type", false)
391 .sortby("backup-id", false)
392 .column(ColumnConfig
::new("backup-id").renderer(render_group_path
).header("group"))
394 ColumnConfig
::new("last-backup")
395 .renderer(render_last_backup
)
396 .header("last snapshot")
399 .column(ColumnConfig
::new("backup-count"))
400 .column(ColumnConfig
::new("files").renderer(render_files
));
402 let mut data
: Value
= result
["data"].take();
404 let info
= &proxmox_backup
::api2
::admin
::datastore
::API_RETURN_SCHEMA_LIST_GROUPS
;
406 format_and_print_result_full(&mut data
, info
, &output_format
, &options
);
415 schema
: REPO_URL_SCHEMA
,
420 description
: "Backup group.",
424 schema
: OUTPUT_FORMAT
,
430 /// List backup snapshots.
431 async
fn list_snapshots(param
: Value
) -> Result
<Value
, Error
> {
433 let repo
= extract_repository_from_value(¶m
)?
;
435 let output_format
= get_output_format(¶m
);
437 let client
= connect(repo
.host(), repo
.user())?
;
439 let group
: Option
<BackupGroup
> = if let Some(path
) = param
["group"].as_str() {
445 let mut data
= api_datastore_list_snapshots(&client
, repo
.store(), group
).await?
;
447 record_repository(&repo
);
449 let render_snapshot_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
450 let item
: SnapshotListItem
= serde_json
::from_value(record
.to_owned())?
;
451 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.backup_time
)?
;
452 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
455 let render_files
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
456 let item
: SnapshotListItem
= serde_json
::from_value(record
.to_owned())?
;
457 let mut filenames
= Vec
::new();
458 for file
in &item
.files
{
459 filenames
.push(file
.filename
.to_string());
461 Ok(tools
::format
::render_backup_file_list(&filenames
[..]))
464 let options
= default_table_format_options()
465 .sortby("backup-type", false)
466 .sortby("backup-id", false)
467 .sortby("backup-time", false)
468 .column(ColumnConfig
::new("backup-id").renderer(render_snapshot_path
).header("snapshot"))
469 .column(ColumnConfig
::new("size"))
470 .column(ColumnConfig
::new("files").renderer(render_files
))
473 let info
= &proxmox_backup
::api2
::admin
::datastore
::API_RETURN_SCHEMA_LIST_SNAPSHOTS
;
475 format_and_print_result_full(&mut data
, info
, &output_format
, &options
);
484 schema
: REPO_URL_SCHEMA
,
489 description
: "Snapshot path.",
494 /// Forget (remove) backup snapshots.
495 async
fn forget_snapshots(param
: Value
) -> Result
<Value
, Error
> {
497 let repo
= extract_repository_from_value(¶m
)?
;
499 let path
= tools
::required_string_param(¶m
, "snapshot")?
;
500 let snapshot
: BackupDir
= path
.parse()?
;
502 let mut client
= connect(repo
.host(), repo
.user())?
;
504 let path
= format
!("api2/json/admin/datastore/{}/snapshots", repo
.store());
506 let result
= client
.delete(&path
, Some(json
!({
507 "backup-type": snapshot
.group().backup_type(),
508 "backup-id": snapshot
.group().backup_id(),
509 "backup-time": snapshot
.backup_time().timestamp(),
512 record_repository(&repo
);
521 schema
: REPO_URL_SCHEMA
,
527 /// Try to login. If successful, store ticket.
528 async
fn api_login(param
: Value
) -> Result
<Value
, Error
> {
530 let repo
= extract_repository_from_value(¶m
)?
;
532 let client
= connect(repo
.host(), repo
.user())?
;
533 client
.login().await?
;
535 record_repository(&repo
);
544 schema
: REPO_URL_SCHEMA
,
550 /// Logout (delete stored ticket).
551 fn api_logout(param
: Value
) -> Result
<Value
, Error
> {
553 let repo
= extract_repository_from_value(¶m
)?
;
555 delete_ticket_info("proxmox-backup", repo
.host(), repo
.user())?
;
564 schema
: REPO_URL_SCHEMA
,
568 schema
: OUTPUT_FORMAT
,
574 /// Show client and optional server version
575 async
fn api_version(param
: Value
) -> Result
<(), Error
> {
577 let output_format
= get_output_format(¶m
);
579 let mut version_info
= json
!({
581 "version": version
::PROXMOX_PKG_VERSION
,
582 "release": version
::PROXMOX_PKG_RELEASE
,
583 "repoid": version
::PROXMOX_PKG_REPOID
,
587 let repo
= extract_repository_from_value(¶m
);
588 if let Ok(repo
) = repo
{
589 let client
= connect(repo
.host(), repo
.user())?
;
591 match client
.get("api2/json/version", None
).await
{
592 Ok(mut result
) => version_info
["server"] = result
["data"].take(),
593 Err(e
) => eprintln
!("could not connect to server - {}", e
),
596 if output_format
== "text" {
597 println
!("client version: {}.{}", version
::PROXMOX_PKG_VERSION
, version
::PROXMOX_PKG_RELEASE
);
598 if let Some(server
) = version_info
["server"].as_object() {
599 let server_version
= server
["version"].as_str().unwrap();
600 let server_release
= server
["release"].as_str().unwrap();
601 println
!("server version: {}.{}", server_version
, server_release
);
604 format_and_print_result(&version_info
, &output_format
);
615 schema
: REPO_URL_SCHEMA
,
620 description
: "Snapshot path.",
623 schema
: OUTPUT_FORMAT
,
629 /// List snapshot files.
630 async
fn list_snapshot_files(param
: Value
) -> Result
<Value
, Error
> {
632 let repo
= extract_repository_from_value(¶m
)?
;
634 let path
= tools
::required_string_param(¶m
, "snapshot")?
;
635 let snapshot
: BackupDir
= path
.parse()?
;
637 let output_format
= get_output_format(¶m
);
639 let client
= connect(repo
.host(), repo
.user())?
;
641 let path
= format
!("api2/json/admin/datastore/{}/files", repo
.store());
643 let mut result
= client
.get(&path
, Some(json
!({
644 "backup-type": snapshot
.group().backup_type(),
645 "backup-id": snapshot
.group().backup_id(),
646 "backup-time": snapshot
.backup_time().timestamp(),
649 record_repository(&repo
);
651 let info
= &proxmox_backup
::api2
::admin
::datastore
::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES
;
653 let mut data
: Value
= result
["data"].take();
655 let options
= default_table_format_options();
657 format_and_print_result_full(&mut data
, info
, &output_format
, &options
);
666 schema
: REPO_URL_SCHEMA
,
670 schema
: OUTPUT_FORMAT
,
676 /// Start garbage collection for a specific repository.
677 async
fn start_garbage_collection(param
: Value
) -> Result
<Value
, Error
> {
679 let repo
= extract_repository_from_value(¶m
)?
;
681 let output_format
= get_output_format(¶m
);
683 let mut client
= connect(repo
.host(), repo
.user())?
;
685 let path
= format
!("api2/json/admin/datastore/{}/gc", repo
.store());
687 let result
= client
.post(&path
, None
).await?
;
689 record_repository(&repo
);
691 view_task_result(client
, result
, &output_format
).await?
;
696 fn spawn_catalog_upload(
697 client
: Arc
<BackupWriter
>,
701 Arc
<Mutex
<CatalogWriter
<crate::tools
::StdChannelWriter
>>>,
702 tokio
::sync
::oneshot
::Receiver
<Result
<BackupStats
, Error
>>
705 let (catalog_tx
, catalog_rx
) = std
::sync
::mpsc
::sync_channel(10); // allow to buffer 10 writes
706 let catalog_stream
= crate::tools
::StdChannelStream(catalog_rx
);
707 let catalog_chunk_size
= 512*1024;
708 let catalog_chunk_stream
= ChunkStream
::new(catalog_stream
, Some(catalog_chunk_size
));
710 let catalog
= Arc
::new(Mutex
::new(CatalogWriter
::new(crate::tools
::StdChannelWriter
::new(catalog_tx
))?
));
712 let (catalog_result_tx
, catalog_result_rx
) = tokio
::sync
::oneshot
::channel();
714 tokio
::spawn(async
move {
715 let catalog_upload_result
= client
716 .upload_stream(None
, CATALOG_NAME
, catalog_chunk_stream
, "dynamic", None
, true, encrypt
)
719 if let Err(ref err
) = catalog_upload_result
{
720 eprintln
!("catalog upload error - {}", err
);
724 let _
= catalog_result_tx
.send(catalog_upload_result
);
727 Ok((catalog
, catalog_result_rx
))
730 fn keyfile_parameters(param
: &Value
) -> Result
<(Option
<Vec
<u8>>, CryptMode
), Error
> {
731 let keyfile
= match param
.get("keyfile") {
732 Some(Value
::String(keyfile
)) => Some(keyfile
),
733 Some(_
) => bail
!("bad --keyfile parameter type"),
737 let key_fd
= match param
.get("keyfd") {
738 Some(Value
::Number(key_fd
)) => Some(
739 RawFd
::try_from(key_fd
741 .ok_or_else(|| format_err
!("bad key fd: {:?}", key_fd
))?
743 .map_err(|err
| format_err
!("bad key fd: {:?}: {}", key_fd
, err
))?
745 Some(_
) => bail
!("bad --keyfd parameter type"),
749 let crypt_mode
: Option
<CryptMode
> = match param
.get("crypt-mode") {
750 Some(mode
) => Some(serde_json
::from_value(mode
.clone())?
),
754 let keydata
= match (keyfile
, key_fd
) {
755 (None
, None
) => None
,
756 (Some(_
), Some(_
)) => bail
!("--keyfile and --keyfd are mutually exclusive"),
757 (Some(keyfile
), None
) => Some(file_get_contents(keyfile
)?
),
758 (None
, Some(fd
)) => {
759 let input
= unsafe { std::fs::File::from_raw_fd(fd) }
;
760 let mut data
= Vec
::new();
761 let _len
: usize = { input }
.read_to_end(&mut data
)
763 format_err
!("error reading encryption key from fd {}: {}", fd
, err
)
769 Ok(match (keydata
, crypt_mode
) {
771 (None
, None
) => match key
::read_optional_default_encryption_key()?
{
772 Some(key
) => (Some(key
), CryptMode
::Encrypt
),
773 None
=> (None
, CryptMode
::None
),
776 // just --crypt-mode=none
777 (None
, Some(CryptMode
::None
)) => (None
, CryptMode
::None
),
779 // just --crypt-mode other than none
780 (None
, Some(crypt_mode
)) => match key
::read_optional_default_encryption_key()?
{
781 None
=> bail
!("--crypt-mode without --keyfile and no default key file available"),
782 Some(key
) => (Some(key
), crypt_mode
),
786 (Some(key
), None
) => (Some(key
), CryptMode
::Encrypt
),
788 // --keyfile and --crypt-mode=none
789 (Some(_
), Some(CryptMode
::None
)) => {
790 bail
!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
793 // --keyfile and --crypt-mode other than none
794 (Some(key
), Some(crypt_mode
)) => (Some(key
), crypt_mode
),
803 description
: "List of backup source specifications ([<label.ext>:<path>] ...)",
805 schema
: BACKUP_SOURCE_SCHEMA
,
809 schema
: REPO_URL_SCHEMA
,
813 description
: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
817 description
: "Path to file.",
821 schema
: KEYFILE_SCHEMA
,
825 schema
: KEYFD_SCHEMA
,
832 "skip-lost-and-found": {
834 description
: "Skip lost+found directory.",
838 schema
: BACKUP_TYPE_SCHEMA
,
842 schema
: BACKUP_ID_SCHEMA
,
846 schema
: BACKUP_TIME_SCHEMA
,
850 schema
: CHUNK_SIZE_SCHEMA
,
855 description
: "List of paths or patterns for matching files to exclude.",
859 description
: "Path or match pattern.",
864 description
: "Max number of entries to hold in memory.",
866 default: proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as isize,
870 description
: "Verbose output.",
876 /// Create (host) backup.
877 async
fn create_backup(
880 _rpcenv
: &mut dyn RpcEnvironment
,
881 ) -> Result
<Value
, Error
> {
883 let repo
= extract_repository_from_value(¶m
)?
;
885 let backupspec_list
= tools
::required_array_param(¶m
, "backupspec")?
;
887 let all_file_systems
= param
["all-file-systems"].as_bool().unwrap_or(false);
889 let skip_lost_and_found
= param
["skip-lost-and-found"].as_bool().unwrap_or(false);
891 let verbose
= param
["verbose"].as_bool().unwrap_or(false);
893 let backup_time_opt
= param
["backup-time"].as_i64();
895 let chunk_size_opt
= param
["chunk-size"].as_u64().map(|v
| (v
*1024) as usize);
897 if let Some(size
) = chunk_size_opt
{
898 verify_chunk_size(size
)?
;
901 let (keydata
, crypt_mode
) = keyfile_parameters(¶m
)?
;
903 let backup_id
= param
["backup-id"].as_str().unwrap_or(&proxmox
::tools
::nodename());
905 let backup_type
= param
["backup-type"].as_str().unwrap_or("host");
907 let include_dev
= param
["include-dev"].as_array();
909 let entries_max
= param
["entries-max"].as_u64()
910 .unwrap_or(proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as u64);
912 let empty
= Vec
::new();
913 let exclude_args
= param
["exclude"].as_array().unwrap_or(&empty
);
915 let mut pattern_list
= Vec
::with_capacity(exclude_args
.len());
916 for entry
in exclude_args
{
917 let entry
= entry
.as_str().ok_or_else(|| format_err
!("Invalid pattern string slice"))?
;
919 MatchEntry
::parse_pattern(entry
, PatternFlag
::PATH_NAME
, MatchType
::Exclude
)
920 .map_err(|err
| format_err
!("invalid exclude pattern entry: {}", err
))?
924 let mut devices
= if all_file_systems { None }
else { Some(HashSet::new()) }
;
926 if let Some(include_dev
) = include_dev
{
927 if all_file_systems
{
928 bail
!("option 'all-file-systems' conflicts with option 'include-dev'");
931 let mut set
= HashSet
::new();
932 for path
in include_dev
{
933 let path
= path
.as_str().unwrap();
934 let stat
= nix
::sys
::stat
::stat(path
)
935 .map_err(|err
| format_err
!("fstat {:?} failed - {}", path
, err
))?
;
936 set
.insert(stat
.st_dev
);
941 let mut upload_list
= vec
![];
942 let mut target_set
= HashSet
::new();
944 for backupspec
in backupspec_list
{
945 let spec
= parse_backup_specification(backupspec
.as_str().unwrap())?
;
946 let filename
= &spec
.config_string
;
947 let target
= &spec
.archive_name
;
949 if target_set
.contains(target
) {
950 bail
!("got target twice: '{}'", target
);
952 target_set
.insert(target
.to_string());
954 use std
::os
::unix
::fs
::FileTypeExt
;
956 let metadata
= std
::fs
::metadata(filename
)
957 .map_err(|err
| format_err
!("unable to access '{}' - {}", filename
, err
))?
;
958 let file_type
= metadata
.file_type();
960 match spec
.spec_type
{
961 BackupSpecificationType
::PXAR
=> {
962 if !file_type
.is_dir() {
963 bail
!("got unexpected file type (expected directory)");
965 upload_list
.push((BackupSpecificationType
::PXAR
, filename
.to_owned(), format
!("{}.didx", target
), 0));
967 BackupSpecificationType
::IMAGE
=> {
968 if !(file_type
.is_file() || file_type
.is_block_device()) {
969 bail
!("got unexpected file type (expected file or block device)");
972 let size
= image_size(&PathBuf
::from(filename
))?
;
974 if size
== 0 { bail!("got zero-sized file '{}'
", filename); }
976 upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}
.fidx
", target), size));
978 BackupSpecificationType::CONFIG => {
979 if !file_type.is_file() {
980 bail!("got unexpected file
type (expected regular file
)");
982 upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
984 BackupSpecificationType::LOGFILE => {
985 if !file_type.is_file() {
986 bail!("got unexpected file
type (expected regular file
)");
988 upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
993 let backup_time = match backup_time_opt {
995 match Utc.timestamp_opt(timestamp, 0) {
996 LocalResult::Single(time) => time,
997 _ => bail!("Invalid backup
-time parameter
: {}
", timestamp),
1003 let client = connect(repo.host(), repo.user())?;
1004 record_repository(&repo);
1006 println!("Starting backup
: {}
/{}
/{}
", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
1008 println!("Client name
: {}
", proxmox::tools::nodename());
1010 let start_time = Local::now();
1012 println!("Starting protocol
: {}
", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
1014 let (crypt_config, rsa_encrypted_key) = match keydata {
1015 None => (None, None),
1017 let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;
1019 let crypt_config = CryptConfig::new(key)?;
1021 match key::find_master_pubkey()? {
1022 Some(ref path) if path.exists() => {
1023 let pem_data = file_get_contents(path)?;
1024 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
1025 let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
1026 (Some(Arc::new(crypt_config)), Some(enc_key))
1028 _ => (Some(Arc::new(crypt_config)), None),
1033 let client = BackupWriter::start(
1035 crypt_config.clone(),
1044 let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
1045 Some(Arc::new(previous_manifest))
1050 let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
1051 let mut manifest = BackupManifest::new(snapshot);
1053 let mut catalog = None;
1054 let mut catalog_result_tx = None;
1056 for (backup_type, filename, target, size) in upload_list {
1058 BackupSpecificationType::CONFIG => {
1059 println!("Upload config file '{}' to '{}'
as {}
", filename, repo, target);
1061 .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
1063 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1065 BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
1066 println!("Upload log file '{}' to '{}'
as {}
", filename, repo, target);
1068 .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
1070 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1072 BackupSpecificationType::PXAR => {
1073 // start catalog upload on first use
1074 if catalog.is_none() {
1075 let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
1076 catalog = Some(cat);
1077 catalog_result_tx = Some(res);
1079 let catalog = catalog.as_ref().unwrap();
1081 println!("Upload directory '{}' to '{}'
as {}
", filename, repo, target);
1082 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
1083 let stats = backup_directory(
1085 previous_manifest.clone(),
1091 skip_lost_and_found,
1093 pattern_list.clone(),
1094 entries_max as usize,
1096 crypt_mode == CryptMode::Encrypt,
1098 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1099 catalog.lock().unwrap().end_directory()?;
1101 BackupSpecificationType::IMAGE => {
1102 println!("Upload image '{}' to '{:?}'
as {}
", filename, repo, target);
1103 let stats = backup_image(
1105 previous_manifest.clone(),
1111 crypt_mode == CryptMode::Encrypt,
1114 manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
1119 // finalize and upload catalog
1120 if let Some(catalog) = catalog {
1121 let mutex = Arc::try_unwrap(catalog)
1122 .map_err(|_| format_err!("unable to get
catalog (still used
)"))?;
1123 let mut catalog = mutex.into_inner().unwrap();
1127 drop(catalog); // close upload stream
1129 if let Some(catalog_result_rx) = catalog_result_tx {
1130 let stats = catalog_result_rx.await??;
1131 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
1135 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
1136 let target = "rsa
-encrypted
.key
.blob
";
1137 println!("Upload RSA encoded key to '{:?}'
as {}
", repo, target);
1139 .upload_blob_from_data(rsa_encrypted_key, target, false, false)
1141 manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;
1143 // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
1145 let mut buffer2 = vec![0u8; rsa.size() as usize];
1146 let pem_data = file_get_contents("master
-private
.pem
")?;
1147 let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
1148 let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
1149 println!("TEST {} {:?}
", len, buffer2);
1152 // create manifest (index.json)
1153 // manifests are never encrypted, but include a signature
1154 let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
1155 .map_err(|err| format_err!("unable to format manifest
- {}
", err))?;
1158 if verbose { println!("Upload index.json to '{}'", repo
) };
1160 .upload_blob_from_data(manifest
.into_bytes(), MANIFEST_BLOB_NAME
, true, false)
1163 client
.finish().await?
;
1165 let end_time
= Local
::now();
1166 let elapsed
= end_time
.signed_duration_since(start_time
);
1167 println
!("Duration: {}", elapsed
);
1169 println
!("End Time: {}", end_time
.to_rfc3339_opts(chrono
::SecondsFormat
::Secs
, false));
1174 fn complete_backup_source(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1176 let mut result
= vec
![];
1178 let data
: Vec
<&str> = arg
.splitn(2, '
:'
).collect();
1180 if data
.len() != 2 {
1181 result
.push(String
::from("root.pxar:/"));
1182 result
.push(String
::from("etc.pxar:/etc"));
1186 let files
= tools
::complete_file_name(data
[1], param
);
1189 result
.push(format
!("{}:{}", data
[0], file
));
1195 async
fn dump_image
<W
: Write
>(
1196 client
: Arc
<BackupReader
>,
1197 crypt_config
: Option
<Arc
<CryptConfig
>>,
1198 crypt_mode
: CryptMode
,
1199 index
: FixedIndexReader
,
1202 ) -> Result
<(), Error
> {
1204 let most_used
= index
.find_most_used_chunks(8);
1206 let chunk_reader
= RemoteChunkReader
::new(client
.clone(), crypt_config
, crypt_mode
, most_used
);
1208 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1209 // and thus slows down reading. Instead, directly use RemoteChunkReader
1212 let start_time
= std
::time
::Instant
::now();
1214 for pos
in 0..index
.index_count() {
1215 let digest
= index
.index_digest(pos
).unwrap();
1216 let raw_data
= chunk_reader
.read_chunk(&digest
).await?
;
1217 writer
.write_all(&raw_data
)?
;
1218 bytes
+= raw_data
.len();
1220 let next_per
= ((pos
+1)*100)/index
.index_count();
1221 if per
!= next_per
{
1222 eprintln
!("progress {}% (read {} bytes, duration {} sec)",
1223 next_per
, bytes
, start_time
.elapsed().as_secs());
1229 let end_time
= std
::time
::Instant
::now();
1230 let elapsed
= end_time
.duration_since(start_time
);
1231 eprintln
!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1233 elapsed
.as_secs_f64(),
1234 bytes
as f64/(1024.0*1024.0*elapsed
.as_secs_f64())
1241 fn parse_archive_type(name
: &str) -> (String
, ArchiveType
) {
1242 if name
.ends_with(".didx") || name
.ends_with(".fidx") || name
.ends_with(".blob") {
1243 (name
.into(), archive_type(name
).unwrap())
1244 } else if name
.ends_with(".pxar") {
1245 (format
!("{}.didx", name
), ArchiveType
::DynamicIndex
)
1246 } else if name
.ends_with(".img") {
1247 (format
!("{}.fidx", name
), ArchiveType
::FixedIndex
)
1249 (format
!("{}.blob", name
), ArchiveType
::Blob
)
// --- #[api] parameter schema fragments for `restore` (extraction elided lines) ---
1257 schema
: REPO_URL_SCHEMA
,
1262 description
: "Group/Snapshot path.",
1265 description
: "Backup archive name.",
// NOTE(review): "extraxt" inside the raw string below is a typo for "extract"
// in user-visible help text; it is a runtime string, so it is only flagged
// here, not changed. (No comments may be placed inside the raw string.)
1270 description
: r
###"Target directory path. Use '-' to write to standard output.
1272 We do not extraxt '.pxar' archives when writing to standard output.
1276 "allow-existing-dirs": {
1278 description
: "Do not fail if directories already exists.",
1282 schema
: KEYFILE_SCHEMA
,
1286 schema
: KEYFD_SCHEMA
,
1296 /// Restore backup repository.
1297 async
fn restore(param
: Value
) -> Result
<Value
, Error
> {
// CLI parameters: repository, verbosity, overwrite policy, archive name.
1298 let repo
= extract_repository_from_value(¶m
)?
;
1300 let verbose
= param
["verbose"].as_bool().unwrap_or(false);
1302 let allow_existing_dirs
= param
["allow-existing-dirs"].as_bool().unwrap_or(false);
1304 let archive_name
= tools
::required_string_param(¶m
, "archive-name")?
;
1306 let client
= connect(repo
.host(), repo
.user())?
;
1308 record_repository(&repo
);
// "snapshot" may name a group (exactly one '/'); resolve that to the group's
// latest snapshot, otherwise parse a full snapshot path.
1310 let path
= tools
::required_string_param(¶m
, "snapshot")?
;
1312 let (backup_type
, backup_id
, backup_time
) = if path
.matches('
/'
).count() == 1 {
1313 let group
: BackupGroup
= path
.parse()?
;
1314 api_datastore_latest_snapshot(&client
, repo
.store(), group
).await?
1316 let snapshot
: BackupDir
= path
.parse()?
;
1317 (snapshot
.group().backup_type().to_owned(), snapshot
.group().backup_id().to_owned(), snapshot
.backup_time())
// A target of "-" selects standard output (represented as None below).
1320 let target
= tools
::required_string_param(¶m
, "target")?
;
1321 let target
= if target
== "-" { None }
else { Some(target) }
;
// Optional encryption key material -> CryptConfig (match arms partly elided).
1323 let (keydata
, _crypt_mode
) = keyfile_parameters(¶m
)?
;
1325 let crypt_config
= match keydata
{
1328 let (key
, _
) = decrypt_key(&key
, &key
::get_encryption_key_password
)?
;
1329 Some(Arc
::new(CryptConfig
::new(key
)?
))
// Open a reader session on the datastore (argument list partly elided).
1333 let client
= BackupReader
::start(
1335 crypt_config
.clone(),
// Download and verify the snapshot manifest.
1343 let (manifest
, backup_index_data
) = client
.download_manifest().await?
;
1345 let (archive_name
, archive_type
) = parse_archive_type(archive_name
);
// Special case: restoring the manifest itself just writes the raw index
// data to the target file, or pipes it to stdout.
1347 if archive_name
== MANIFEST_BLOB_NAME
{
1348 if let Some(target
) = target
{
1349 replace_file(target
, &backup_index_data
, CreateOptions
::new())?
;
1351 let stdout
= std
::io
::stdout();
1352 let mut writer
= stdout
.lock();
1353 writer
.write_all(&backup_index_data
)
1354 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
1357 return Ok(Value
::Null
);
// The per-archive crypt mode comes from the manifest entry.
1360 let file_info
= manifest
.lookup_file_info(&archive_name
)?
;
// Blob archives: download and copy the decoded bytes verbatim.
1362 if archive_type
== ArchiveType
::Blob
{
1364 let mut reader
= client
.download_blob(&manifest
, &archive_name
).await?
;
1366 if let Some(target
) = target
{
1367 let mut writer
= std
::fs
::OpenOptions
::new()
1372 .map_err(|err
| format_err
!("unable to create target file {:?} - {}", target
, err
))?
;
1373 std
::io
::copy(&mut reader
, &mut writer
)?
;
1375 let stdout
= std
::io
::stdout();
1376 let mut writer
= stdout
.lock();
1377 std
::io
::copy(&mut reader
, &mut writer
)
1378 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
// Dynamic index (.didx / pxar): extract into the target directory, or
// stream the raw archive bytes to stdout when no target was given.
1381 } else if archive_type
== ArchiveType
::DynamicIndex
{
1383 let index
= client
.download_dynamic_index(&manifest
, &archive_name
).await?
;
// Keep the 8 most referenced chunks cached to avoid repeat downloads.
1385 let most_used
= index
.find_most_used_chunks(8);
1387 let chunk_reader
= RemoteChunkReader
::new(client
.clone(), crypt_config
, file_info
.chunk_crypt_mode(), most_used
);
1389 let mut reader
= BufferedDynamicReader
::new(index
, chunk_reader
);
1391 if let Some(target
) = target
{
1392 proxmox_backup
::pxar
::extract_archive(
1393 pxar
::decoder
::Decoder
::from_std(reader
)?
,
1397 proxmox_backup
::pxar
::Flags
::DEFAULT
,
1398 allow_existing_dirs
,
// Extraction callback: prints every extracted path (body partly elided).
1401 println
!("{:?}", path
);
1406 .map_err(|err
| format_err
!("error extracting archive - {}", err
))?
;
1408 let mut writer
= std
::fs
::OpenOptions
::new()
1410 .open("/dev/stdout")
1411 .map_err(|err
| format_err
!("unable to open /dev/stdout - {}", err
))?
;
1413 std
::io
::copy(&mut reader
, &mut writer
)
1414 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
// Fixed index (.fidx / image): dump the image to a file or to stdout.
1416 } else if archive_type
== ArchiveType
::FixedIndex
{
1418 let index
= client
.download_fixed_index(&manifest
, &archive_name
).await?
;
1420 let mut writer
= if let Some(target
) = target
{
1421 std
::fs
::OpenOptions
::new()
1426 .map_err(|err
| format_err
!("unable to create target file {:?} - {}", target
, err
))?
1428 std
::fs
::OpenOptions
::new()
1430 .open("/dev/stdout")
1431 .map_err(|err
| format_err
!("unable to open /dev/stdout - {}", err
))?
1434 dump_image(client
.clone(), crypt_config
.clone(), file_info
.chunk_crypt_mode(), index
, &mut writer
, verbose
).await?
;
// --- #[api] parameter schema fragments for `upload_log` (lines elided) ---
1444 schema
: REPO_URL_SCHEMA
,
1449 description
: "Group/Snapshot path.",
1453 description
: "The path to the log file you want to upload.",
1456 schema
: KEYFILE_SCHEMA
,
1460 schema
: KEYFD_SCHEMA
,
1470 /// Upload backup log file.
1471 async
fn upload_log(param
: Value
) -> Result
<Value
, Error
> {
// CLI parameters: local log file path, repository, and the target snapshot.
1473 let logfile
= tools
::required_string_param(¶m
, "logfile")?
;
1474 let repo
= extract_repository_from_value(¶m
)?
;
1476 let snapshot
= tools
::required_string_param(¶m
, "snapshot")?
;
1477 let snapshot
: BackupDir
= snapshot
.parse()?
;
1479 let mut client
= connect(repo
.host(), repo
.user())?
;
// Optional encryption key -> CryptConfig (match arms partly elided).
1481 let (keydata
, crypt_mode
) = keyfile_parameters(¶m
)?
;
1483 let crypt_config
= match keydata
{
1486 let (key
, _created
) = decrypt_key(&key
, &key
::get_encryption_key_password
)?
;
1487 let crypt_config
= CryptConfig
::new(key
)?
;
1488 Some(Arc
::new(crypt_config
))
1492 let data
= file_get_contents(logfile
)?
;
// Encode the log as a DataBlob: plain (compressed) when not encrypting,
// encrypted with the crypt config otherwise.
1494 // fixme: howto sign log?
1495 let blob
= match crypt_mode
{
1496 CryptMode
::None
| CryptMode
::SignOnly
=> DataBlob
::encode(&data
, None
, true)?
,
1497 CryptMode
::Encrypt
=> DataBlob
::encode(&data
, crypt_config
.as_ref().map(Arc
::as_ref
), true)?
,
1500 let raw_data
= blob
.into_inner();
// Upload the raw blob; the query args identify the snapshot the log belongs
// to (json! wrapper around the args partly elided).
1502 let path
= format
!("api2/json/admin/datastore/{}/upload-backup-log", repo
.store());
1505 "backup-type": snapshot
.group().backup_type(),
1506 "backup-id": snapshot
.group().backup_id(),
1507 "backup-time": snapshot
.backup_time().timestamp(),
1510 let body
= hyper
::Body
::from(raw_data
);
1512 client
.upload("application/octet-stream", body
, &path
, Some(args
)).await
// Hand-written ApiMethod for `prune`: reuses the common prune parameters
// shared with the server via the add_common_prune_prameters! macro.
// NOTE(review): "prameters" is the macro's actual (misspelled) name as
// defined elsewhere in the project, so it must not be "fixed" here alone.
1515 const API_METHOD_PRUNE
: ApiMethod
= ApiMethod
::new(
1516 &ApiHandler
::Async(&prune
),
1518 "Prune a backup repository.",
1519 &proxmox_backup
::add_common_prune_prameters
!([
1520 ("dry-run", true, &BooleanSchema
::new(
1521 "Just show what prune would do, but do not delete anything.")
1523 ("group", false, &StringSchema
::new("Backup group.").schema()),
1525 ("output-format", true, &OUTPUT_FORMAT
),
1529 &BooleanSchema
::new("Minimal output - only show removals.")
1532 ("repository", true, &REPO_URL_SCHEMA
),
// `prune` is the ApiHandler entry point; it only forwards to the async
// implementation below (fn header lines partly elided by extraction).
1540 _rpcenv
: &'a
mut dyn RpcEnvironment
,
1541 ) -> proxmox
::api
::ApiFuture
<'a
> {
1543 prune_async(param
).await
// Prune a backup group on the server and render the server's decision list.
1547 async
fn prune_async(mut param
: Value
) -> Result
<Value
, Error
> {
1548 let repo
= extract_repository_from_value(¶m
)?
;
1550 let mut client
= connect(repo
.host(), repo
.user())?
;
1552 let path
= format
!("api2/json/admin/datastore/{}/prune", repo
.store());
1554 let group
= tools
::required_string_param(¶m
, "group")?
;
1555 let group
: BackupGroup
= group
.parse()?
;
1557 let output_format
= get_output_format(¶m
);
1559 let quiet
= param
["quiet"].as_bool().unwrap_or(false);
// Strip client-only options before forwarding `param` as the request body,
// then address the group via backup-type/backup-id.
1561 param
.as_object_mut().unwrap().remove("repository");
1562 param
.as_object_mut().unwrap().remove("group");
1563 param
.as_object_mut().unwrap().remove("output-format");
1564 param
.as_object_mut().unwrap().remove("quiet");
1566 param
["backup-type"] = group
.backup_type().into();
1567 param
["backup-id"] = group
.backup_id().into();
1569 let mut result
= client
.post(&path
, Some(param
)).await?
;
1571 record_repository(&repo
);
// Table renderer: turn a prune-list record into its snapshot path.
1573 let render_snapshot_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
1574 let item
: PruneListItem
= serde_json
::from_value(record
.to_owned())?
;
1575 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.backup_time
)?
;
1576 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
// Table renderer: map the boolean "keep" flag to a human-readable action.
1579 let render_prune_action
= |v
: &Value
, _record
: &Value
| -> Result
<String
, Error
> {
1580 Ok(match v
.as_bool() {
1581 Some(true) => "keep",
1582 Some(false) => "remove",
1587 let options
= default_table_format_options()
1588 .sortby("backup-type", false)
1589 .sortby("backup-id", false)
1590 .sortby("backup-time", false)
1591 .column(ColumnConfig
::new("backup-id").renderer(render_snapshot_path
).header("snapshot"))
1592 .column(ColumnConfig
::new("backup-time").renderer(tools
::format
::render_epoch
).header("date"))
1593 .column(ColumnConfig
::new("keep").renderer(render_prune_action
).header("action"))
1596 let info
= &proxmox_backup
::api2
::admin
::datastore
::API_RETURN_SCHEMA_PRUNE
;
1598 let mut data
= result
["data"].take();
// quiet mode: keep only the entries that will actually be removed
// (surrounding conditional partly elided by extraction).
1601 let list
: Vec
<Value
> = data
.as_array().unwrap().iter().filter(|item
| {
1602 item
["keep"].as_bool() == Some(false)
1603 }).map(|v
| v
.clone()).collect();
1607 format_and_print_result_full(&mut data
, info
, &output_format
, &options
);
// --- #[api] parameter schema fragments for `status` (lines elided) ---
1616 schema
: REPO_URL_SCHEMA
,
1620 schema
: OUTPUT_FORMAT
,
1626 /// Get repository status.
1627 async
fn status(param
: Value
) -> Result
<Value
, Error
> {
1629 let repo
= extract_repository_from_value(¶m
)?
;
1631 let output_format
= get_output_format(¶m
);
1633 let client
= connect(repo
.host(), repo
.user())?
;
1635 let path
= format
!("api2/json/admin/datastore/{}/status", repo
.store());
1637 let mut result
= client
.get(&path
, None
).await?
;
1638 let mut data
= result
["data"].take();
1640 record_repository(&repo
);
// Renderer: show a byte count followed by its percentage of the "total"
// column; `roundup` (total/200 = 0.5%) makes the integer division round to
// the nearest percent instead of truncating.
1642 let render_total_percentage
= |v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
1643 let v
= v
.as_u64().unwrap();
1644 let total
= record
["total"].as_u64().unwrap();
1645 let roundup
= total
/200;
1646 let per
= ((v
+roundup
)*100)/total
;
1647 let info
= format
!(" ({} %)", per
);
1648 Ok(format
!("{} {:>8}", v
, info
))
1651 let options
= default_table_format_options()
1653 .column(ColumnConfig
::new("total").renderer(render_total_percentage
))
1654 .column(ColumnConfig
::new("used").renderer(render_total_percentage
))
1655 .column(ColumnConfig
::new("avail").renderer(render_total_percentage
));
1657 let schema
= &proxmox_backup
::api2
::admin
::datastore
::API_RETURN_SCHEMA_STATUS
;
1659 format_and_print_result_full(&mut data
, schema
, &output_format
, &options
);
// Best-effort GET used by the shell-completion helpers: any connection or
// request error yields Value::Null instead of failing the completion.
1664 // like get, but simply ignore errors and return Null instead
1665 async
fn try_get(repo
: &BackupRepository
, url
: &str) -> Value
{
// Credentials come from the environment only; interactive prompts would
// break shell completion.
1667 let fingerprint
= std
::env
::var(ENV_VAR_PBS_FINGERPRINT
).ok();
1668 let password
= std
::env
::var(ENV_VAR_PBS_PASSWORD
).ok();
1670 let options
= HttpClientOptions
::new()
1671 .prefix(Some("proxmox-backup".to_string()))
1674 .fingerprint(fingerprint
)
1675 .fingerprint_cache(true)
1676 .ticket_cache(true);
// Any client-construction error -> Null (success arm elided by extraction).
1678 let client
= match HttpClient
::new(repo
.host(), repo
.user(), options
) {
1680 _
=> return Value
::Null
,
// Any request error -> Null as well.
1683 let mut resp
= match client
.get(url
, None
).await
{
1685 _
=> return Value
::Null
,
// Unwrap the API envelope: return only the "data" member when present.
1688 if let Some(map
) = resp
.as_object_mut() {
1689 if let Some(data
) = map
.remove("data") {
1696 fn complete_backup_group(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1697 proxmox_backup
::tools
::runtime
::main(async { complete_backup_group_do(param).await }
)
// Async worker for complete_backup_group: fetches the group list from the
// datastore and formats each entry as "<backup-type>/<backup-id>".
1700 async
fn complete_backup_group_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1702 let mut result
= vec
![];
// No usable repository in the completion context -> empty result
// (match arms partly elided by extraction).
1704 let repo
= match extract_repository_from_map(param
) {
1709 let path
= format
!("api2/json/admin/datastore/{}/groups", repo
.store());
// try_get never fails; errors surface as Value::Null (ignored below).
1711 let data
= try_get(&repo
, &path
).await
;
1713 if let Some(list
) = data
.as_array() {
// Iteration over `list` items partly elided by extraction.
1715 if let (Some(backup_id
), Some(backup_type
)) =
1716 (item
["backup-id"].as_str(), item
["backup-type"].as_str())
1718 result
.push(format
!("{}/{}", backup_type
, backup_id
));
1726 pub fn complete_group_or_snapshot(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1727 proxmox_backup
::tools
::runtime
::main(async { complete_group_or_snapshot_do(arg, param).await }
)
// Async worker for complete_group_or_snapshot: with fewer than two '/' in the
// argument we are still completing the group part, so offer every group both
// as a final value and as a "group/" prefix; otherwise fall through to full
// snapshot completion.
1730 async
fn complete_group_or_snapshot_do(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1732 if arg
.matches('
/'
).count() < 2 {
1733 let groups
= complete_backup_group_do(param
).await
;
1734 let mut result
= vec
![];
1735 for group
in groups
{
1736 result
.push(group
.to_string());
1737 result
.push(format
!("{}/", group
));
// (early return of `result` elided by extraction)
1742 complete_backup_snapshot_do(param
).await
1745 fn complete_backup_snapshot(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1746 proxmox_backup
::tools
::runtime
::main(async { complete_backup_snapshot_do(param).await }
)
// Async worker for complete_backup_snapshot: fetches the snapshot list and
// renders each entry as its relative path ("type/id/timestamp").
1749 async
fn complete_backup_snapshot_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1751 let mut result
= vec
![];
// No usable repository -> empty result (match arms elided by extraction).
1753 let repo
= match extract_repository_from_map(param
) {
1758 let path
= format
!("api2/json/admin/datastore/{}/snapshots", repo
.store());
1760 let data
= try_get(&repo
, &path
).await
;
1762 if let Some(list
) = data
.as_array() {
// Iteration over `list` items partly elided by extraction.
1764 if let (Some(backup_id
), Some(backup_type
), Some(backup_time
)) =
1765 (item
["backup-id"].as_str(), item
["backup-type"].as_str(), item
["backup-time"].as_i64())
// Entries that do not form a valid BackupDir are silently skipped.
1767 if let Ok(snapshot
) = BackupDir
::new(backup_type
, backup_id
, backup_time
) {
1768 result
.push(snapshot
.relative_path().to_str().unwrap().to_owned());
1777 fn complete_server_file_name(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1778 proxmox_backup
::tools
::runtime
::main(async { complete_server_file_name_do(param).await }
)
// Async worker for complete_server_file_name: needs an already-completed
// "snapshot" parameter to query that snapshot's file list.
1781 async
fn complete_server_file_name_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1783 let mut result
= vec
![];
// No usable repository -> empty result (match arms elided by extraction).
1785 let repo
= match extract_repository_from_map(param
) {
// The "snapshot" completion parameter must be present and parse as a
// BackupDir; otherwise return the empty list (arms elided by extraction).
1790 let snapshot
: BackupDir
= match param
.get("snapshot") {
1792 match path
.parse() {
// Identify the snapshot via query parameters.
1800 let query
= tools
::json_object_to_query(json
!({
1801 "backup-type": snapshot
.group().backup_type(),
1802 "backup-id": snapshot
.group().backup_id(),
1803 "backup-time": snapshot
.backup_time().timestamp(),
1806 let path
= format
!("api2/json/admin/datastore/{}/files?{}", repo
.store(), query
);
1808 let data
= try_get(&repo
, &path
).await
;
1810 if let Some(list
) = data
.as_array() {
// Iteration over `list` items partly elided by extraction.
1812 if let Some(filename
) = item
["filename"].as_str() {
1813 result
.push(filename
.to_owned());
1821 fn complete_archive_name(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1822 complete_server_file_name(arg
, param
)
1824 .map(|v
| tools
::format
::strip_server_file_expenstion(&v
))
// Shell completion for pxar archive names: like complete_archive_name, but
// keeps only entries whose stripped name ends in ".pxar".
1828 pub fn complete_pxar_archive_name(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
// Filtering combinator around the closure below elided by extraction.
// NOTE(review): `strip_server_file_expenstion` is the helper's actual
// (misspelled) upstream name; do not "correct" it here alone.
1829 complete_server_file_name(arg
, param
)
1832 let name
= tools
::format
::strip_server_file_expenstion(&v
);
1833 if name
.ends_with(".pxar") {
/// Shell completion for "chunk-size": the valid chunk sizes in KB, i.e. the
/// powers of two from 64 up to and including 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    (0u32..)
        .map(|exp| 64u64 << exp)
        .take_while(|&size| size <= 4096)
        .map(|size| size.to_string())
        .collect()
}
1856 use proxmox_backup
::client
::RemoteChunkReader
;
1857 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
1860 /// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
1861 /// so that we can properly access it from multiple threads simultaneously while not issuing
1862 /// duplicate simultaneous reads over http.
1863 pub struct BufferedDynamicReadAt
{
// The Mutex serializes the seek+read pair performed through `&self` in the
// ReadAt implementation below.
1864 inner
: Mutex
<BufferedDynamicReader
<RemoteChunkReader
>>,
1867 impl BufferedDynamicReadAt
{
1868 fn new(inner
: BufferedDynamicReader
<RemoteChunkReader
>) -> Self {
1870 inner
: Mutex
::new(inner
),
// pxar `ReadAt` adapter: the underlying reader is synchronous, so every read
// completes immediately (MaybeReady::Ready) and poll_complete is unreachable.
1875 impl ReadAt
for BufferedDynamicReadAt
{
// Positioned read: seek to `offset`, then read into `buf`
// (the `buf`/`offset`/context parameters were elided by extraction).
1876 fn start_read_at
<'a
>(
1877 self: Pin
<&'a
Self>,
1881 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
// block_in_place: the seek/read may block on chunk downloads, so tell the
// tokio runtime this thread is about to block.
1882 MaybeReady
::Ready(tokio
::task
::block_in_place(move || {
1883 let mut reader
= self.inner
.lock().unwrap();
1884 reader
.seek(SeekFrom
::Start(offset
))?
;
1885 Ok(reader
.read(buf
)?
)
// Never called: start_read_at never returns Pending.
// NOTE(review): the panic message says "LocalDynamicReadAt" although this
// type is BufferedDynamicReadAt - looks copy-pasted; runtime string left
// unchanged here.
1889 fn poll_complete
<'a
>(
1890 self: Pin
<&'a
Self>,
1891 _op
: ReadAtOperation
<'a
>,
1892 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
1893 panic
!("LocalDynamicReadAt::start_read_at returned Pending");
1899 let backup_cmd_def
= CliCommand
::new(&API_METHOD_CREATE_BACKUP
)
1900 .arg_param(&["backupspec"])
1901 .completion_cb("repository", complete_repository
)
1902 .completion_cb("backupspec", complete_backup_source
)
1903 .completion_cb("keyfile", tools
::complete_file_name
)
1904 .completion_cb("chunk-size", complete_chunk_size
);
1906 let benchmark_cmd_def
= CliCommand
::new(&API_METHOD_BENCHMARK
)
1907 .completion_cb("repository", complete_repository
)
1908 .completion_cb("keyfile", tools
::complete_file_name
);
1910 let upload_log_cmd_def
= CliCommand
::new(&API_METHOD_UPLOAD_LOG
)
1911 .arg_param(&["snapshot", "logfile"])
1912 .completion_cb("snapshot", complete_backup_snapshot
)
1913 .completion_cb("logfile", tools
::complete_file_name
)
1914 .completion_cb("keyfile", tools
::complete_file_name
)
1915 .completion_cb("repository", complete_repository
);
1917 let list_cmd_def
= CliCommand
::new(&API_METHOD_LIST_BACKUP_GROUPS
)
1918 .completion_cb("repository", complete_repository
);
1920 let snapshots_cmd_def
= CliCommand
::new(&API_METHOD_LIST_SNAPSHOTS
)
1921 .arg_param(&["group"])
1922 .completion_cb("group", complete_backup_group
)
1923 .completion_cb("repository", complete_repository
);
1925 let forget_cmd_def
= CliCommand
::new(&API_METHOD_FORGET_SNAPSHOTS
)
1926 .arg_param(&["snapshot"])
1927 .completion_cb("repository", complete_repository
)
1928 .completion_cb("snapshot", complete_backup_snapshot
);
1930 let garbage_collect_cmd_def
= CliCommand
::new(&API_METHOD_START_GARBAGE_COLLECTION
)
1931 .completion_cb("repository", complete_repository
);
1933 let restore_cmd_def
= CliCommand
::new(&API_METHOD_RESTORE
)
1934 .arg_param(&["snapshot", "archive-name", "target"])
1935 .completion_cb("repository", complete_repository
)
1936 .completion_cb("snapshot", complete_group_or_snapshot
)
1937 .completion_cb("archive-name", complete_archive_name
)
1938 .completion_cb("target", tools
::complete_file_name
);
1940 let files_cmd_def
= CliCommand
::new(&API_METHOD_LIST_SNAPSHOT_FILES
)
1941 .arg_param(&["snapshot"])
1942 .completion_cb("repository", complete_repository
)
1943 .completion_cb("snapshot", complete_backup_snapshot
);
1945 let prune_cmd_def
= CliCommand
::new(&API_METHOD_PRUNE
)
1946 .arg_param(&["group"])
1947 .completion_cb("group", complete_backup_group
)
1948 .completion_cb("repository", complete_repository
);
1950 let status_cmd_def
= CliCommand
::new(&API_METHOD_STATUS
)
1951 .completion_cb("repository", complete_repository
);
1953 let login_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGIN
)
1954 .completion_cb("repository", complete_repository
);
1956 let logout_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGOUT
)
1957 .completion_cb("repository", complete_repository
);
1959 let version_cmd_def
= CliCommand
::new(&API_METHOD_API_VERSION
)
1960 .completion_cb("repository", complete_repository
);
1962 let cmd_def
= CliCommandMap
::new()
1963 .insert("backup", backup_cmd_def
)
1964 .insert("upload-log", upload_log_cmd_def
)
1965 .insert("forget", forget_cmd_def
)
1966 .insert("garbage-collect", garbage_collect_cmd_def
)
1967 .insert("list", list_cmd_def
)
1968 .insert("login", login_cmd_def
)
1969 .insert("logout", logout_cmd_def
)
1970 .insert("prune", prune_cmd_def
)
1971 .insert("restore", restore_cmd_def
)
1972 .insert("snapshots", snapshots_cmd_def
)
1973 .insert("files", files_cmd_def
)
1974 .insert("status", status_cmd_def
)
1975 .insert("key", key
::cli())
1976 .insert("mount", mount_cmd_def())
1977 .insert("catalog", catalog_mgmt_cli())
1978 .insert("task", task_mgmt_cli())
1979 .insert("version", version_cmd_def
)
1980 .insert("benchmark", benchmark_cmd_def
);
1982 let rpcenv
= CliEnvironment
::new();
1983 run_cli_command(cmd_def
, rpcenv
, Some(|future
| {
1984 proxmox_backup
::tools
::runtime
::main(future
)