use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin; // used by the ReadAt impl below
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    // …
    BufferedDynamicReader,
    // …
};

mod proxmox_backup_client;
use proxmox_backup_client::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
    "Pass an encryption key via an already opened file descriptor.")
    .minimum(0)
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
pub fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
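
// Persist a usage count per repository in the XDG cache file (repo-list JSON)
// so shell completion can suggest recently used repositories; only the ten
// most used entries are kept.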
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
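
// Build an authenticated HttpClient. Fingerprint and password come from the
// PBS_FINGERPRINT and PBS_PASSWORD environment variables when set, so
// non-interactive callers can avoid the password prompt.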
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(true)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    HttpClient::new(server, userid, options)
}
async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
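
// Stream a directory as a pxar archive: the pxar encoder output is cut into
// content-defined chunks, and a separate task forwards the chunks into the
// upload stream so that chunking and uploading run in parallel.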
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,
    compress: bool,
    encrypt: bool,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        exclude_pattern,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
        .await?;

    Ok(stats)
}
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    compress: bool,
    encrypt: bool,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
        .await?;

    Ok(stats)
}
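
// Note: directories use a dynamic (content-defined) chunk index, while block
// images use a fixed chunk size (4 MiB unless --chunk-size is given), which
// is why backup_directory uploads "dynamic" and backup_image uploads "fixed".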
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           group: {
               type: String,
               description: "Backup group.",
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
        Some(path.parse()?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        }
        Ok(tools::format::render_backup_file_list(&filenames[..]))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           snapshot: {
               type: String,
               description: "Snapshot path.",
           },
       }
   }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
       }
   }
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
       }
   }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// Show client and optional server version
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": version::PROXMOX_PKG_VERSION,
            "release": version::PROXMOX_PKG_RELEASE,
            "repoid": version::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(repo.host(), repo.user())?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           snapshot: {
               type: String,
               description: "Snapshot path.",
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
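
// The catalog is written through a std channel into a chunked upload running
// on its own task; the returned oneshot receiver yields the upload stats once
// the catalog stream is closed.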
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
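
// Resolve the --keyfile/--keyfd/--crypt-mode parameters to the effective key
// data and crypt mode. A key given without --crypt-mode implies encryption,
// and --crypt-mode without a key falls back to the default key file.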
fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),
        None => None,
    };

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --keyfd parameter type"),
        None => None,
    };

    let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),
        None => None,
    };

    let keydata = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading encryption key from fd {}: {}", fd, err)
                })?;
            Some(data)
        }
    };

    Ok(match (keydata, crypt_mode) {
        // no parameters:
        (None, None) => match key::read_optional_default_encryption_key()? {
            Some(key) => (Some(key), CryptMode::Encrypt),
            None => (None, CryptMode::None),
        },

        // just --crypt-mode=none
        (None, Some(CryptMode::None)) => (None, CryptMode::None),

        // just --crypt-mode other than none
        (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
            None => bail!("--crypt-mode without --keyfile and no default key file available"),
            Some(key) => (Some(key), crypt_mode),
        },

        // just --keyfile
        (Some(key), None) => (Some(key), CryptMode::Encrypt),

        // --keyfile and --crypt-mode=none
        (Some(_), Some(CryptMode::None)) => {
            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
        }

        // --keyfile and --crypt-mode other than none
        (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
    })
}
#[api(
   input: {
       properties: {
           backupspec: {
               type: Array,
               description: "List of backup source specifications ([<label.ext>:<path>] ...)",
               items: {
                   schema: BACKUP_SOURCE_SCHEMA,
               }
           },
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "include-dev": {
               description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
               optional: true,
               items: {
                   type: String,
                   description: "Path to file.",
               }
           },
           keyfile: {
               schema: KEYFILE_SCHEMA,
               optional: true,
           },
           "keyfd": {
               schema: KEYFD_SCHEMA,
               optional: true,
           },
           "crypt-mode": {
               type: CryptMode,
               optional: true,
           },
           "skip-lost-and-found": {
               type: Boolean,
               description: "Skip lost+found directory.",
               optional: true,
           },
           "backup-type": {
               schema: BACKUP_TYPE_SCHEMA,
               optional: true,
           },
           "backup-id": {
               schema: BACKUP_ID_SCHEMA,
               optional: true,
           },
           "backup-time": {
               schema: BACKUP_TIME_SCHEMA,
               optional: true,
           },
           "chunk-size": {
               schema: CHUNK_SIZE_SCHEMA,
               optional: true,
           },
           "exclude": {
               type: Array,
               description: "List of paths or patterns for matching files to exclude.",
               optional: true,
               items: {
                   type: String,
                   description: "Path or match pattern.",
               }
           },
           "entries-max": {
               type: Integer,
               description: "Max number of entries to hold in memory.",
               optional: true,
               default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
           },
           "verbose": {
               type: Boolean,
               description: "Verbose output.",
               optional: true,
           },
       }
   }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }
    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keydata {
        None => (None, None),
        Some(key) => {
            let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            match key::find_master_pubkey()? {
                Some(ref path) if path.exists() => {
                    let pem_data = file_get_contents(path)?;
                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                    let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
        Some(Arc::new(previous_manifest))
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_tx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
                    catalog = Some(cat);
                    catalog_result_tx = Some(res);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                    true,
                    crypt_mode == CryptMode::Encrypt,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    true,
                    crypt_mode == CryptMode::Encrypt,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_tx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    println!("Upload index.json to '{:?}'", repo);
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
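
/// Map a user supplied archive name to the server side file name and type,
/// e.g. "root.pxar" -> ("root.pxar.didx", DynamicIndex) and
/// "disk.img" -> ("disk.img.fidx", FixedIndex); anything else becomes a blob.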
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           snapshot: {
               type: String,
               description: "Group/Snapshot path.",
           },
           "archive-name": {
               type: String,
               description: "Backup archive name.",
           },
           target: {
               type: String,
               description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
           },
           "allow-existing-dirs": {
               type: Boolean,
               description: "Do not fail if directories already exist.",
               optional: true,
           },
           keyfile: {
               schema: KEYFILE_SCHEMA,
               optional: true,
           },
           "keyfd": {
               schema: KEYFD_SCHEMA,
               optional: true,
           },
           "crypt-mode": {
               type: CryptMode,
               optional: true,
           },
       }
   }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
        None => None,
        Some(key) => {
            let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (manifest, backup_index_data) = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                &[],
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           snapshot: {
               type: String,
               description: "Group/Snapshot path.",
           },
           logfile: {
               type: String,
               description: "The path to the log file you want to upload.",
           },
           keyfile: {
               schema: KEYFILE_SCHEMA,
               optional: true,
           },
           "keyfd": {
               schema: KEYFD_SCHEMA,
               optional: true,
           },
           "crypt-mode": {
               type: CryptMode,
               optional: true,
           },
       }
   }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
        None => None,
        Some(key) => {
            let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    // fixme: howto sign log?
    let blob = match crypt_mode {
        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
    };

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"));

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).map(|v| v.clone()).collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({}%)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(false)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }

    Value::Null
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_expenstion(&v))
        .collect()
}

pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
use proxmox_backup::client::RemoteChunkReader;
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}
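
// ReadAt is implemented by locking the inner reader and performing a blocking
// seek + read inside tokio::task::block_in_place, so the executor thread is
// not stalled; start_read_at never returns Pending, which is why
// poll_complete is unreachable.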
impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("LocalDynamicReadAt::start_read_at returned Pending");
    }
}
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", tools::complete_file_name);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def);

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}