1 use std
::collections
::{HashSet, HashMap}
;
2 use std
::convert
::TryFrom
;
3 use std
::io
::{self, Read, Write, Seek, SeekFrom}
;
4 use std
::os
::unix
::io
::{FromRawFd, RawFd}
;
5 use std
::path
::{Path, PathBuf}
;
7 use std
::sync
::{Arc, Mutex}
;
8 use std
::task
::Context
;
10 use anyhow
::{bail, format_err, Error}
;
11 use futures
::future
::FutureExt
;
12 use futures
::stream
::{StreamExt, TryStreamExt}
;
13 use serde_json
::{json, Value}
;
14 use tokio
::sync
::mpsc
;
15 use tokio_stream
::wrappers
::ReceiverStream
;
16 use xdg
::BaseDirectories
;
18 use pathpatterns
::{MatchEntry, MatchType, PatternFlag}
;
21 time
::{strftime_local, epoch_i64}
,
22 fs
::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size}
,
33 use pxar
::accessor
::{MaybeReady, ReadAt, ReadAtOperation}
;
35 use proxmox_backup
::tools
;
36 use proxmox_backup
::api2
::access
::user
::UserWithTokens
;
37 use proxmox_backup
::api2
::types
::*;
38 use proxmox_backup
::api2
::version
;
39 use proxmox_backup
::client
::*;
40 use proxmox_backup
::pxar
::catalog
::*;
41 use proxmox_backup
::backup
::{
44 rsa_encrypt_key_config
,
51 BufferedDynamicReader
,
59 ENCRYPTED_KEY_BLOB_NAME
,
68 mod proxmox_backup_client
;
69 use proxmox_backup_client
::*;
/// Environment variable holding the expected server certificate fingerprint.
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
/// Environment variable holding the password used for non-interactive login.
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
75 pub const REPO_URL_SCHEMA
: Schema
= StringSchema
::new("Repository URL.")
76 .format(&BACKUP_REPO_URL
)
80 pub const KEYFILE_SCHEMA
: Schema
=
81 StringSchema
::new("Path to encryption key. All data will be encrypted using this key.")
84 pub const KEYFD_SCHEMA
: Schema
=
85 IntegerSchema
::new("Pass an encryption key via an already opened file descriptor.")
89 pub const MASTER_PUBKEY_FILE_SCHEMA
: Schema
= StringSchema
::new(
90 "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
93 pub const MASTER_PUBKEY_FD_SCHEMA
: Schema
=
94 IntegerSchema
::new("Pass a master public key via an already opened file descriptor.")
98 const CHUNK_SIZE_SCHEMA
: Schema
= IntegerSchema
::new(
99 "Chunk size in KB. Must be a power of 2.")
/// Return the default repository from the `PBS_REPOSITORY` environment
/// variable, or `None` when it is unset or not valid unicode.
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
109 pub fn extract_repository_from_value(
111 ) -> Result
<BackupRepository
, Error
> {
113 let repo_url
= param
["repository"]
116 .or_else(get_default_repository
)
117 .ok_or_else(|| format_err
!("unable to get (default) repository"))?
;
119 let repo
: BackupRepository
= repo_url
.parse()?
;
124 fn extract_repository_from_map(
125 param
: &HashMap
<String
, String
>,
126 ) -> Option
<BackupRepository
> {
128 param
.get("repository")
130 .or_else(get_default_repository
)
131 .and_then(|repo_url
| repo_url
.parse
::<BackupRepository
>().ok())
134 fn record_repository(repo
: &BackupRepository
) {
136 let base
= match BaseDirectories
::with_prefix("proxmox-backup") {
141 // usually $HOME/.cache/proxmox-backup/repo-list
142 let path
= match base
.place_cache_file("repo-list") {
147 let mut data
= file_get_json(&path
, None
).unwrap_or_else(|_
| json
!({}
));
149 let repo
= repo
.to_string();
151 data
[&repo
] = json
!{ data[&repo].as_i64().unwrap_or(0) + 1 }
;
153 let mut map
= serde_json
::map
::Map
::new();
156 let mut max_used
= 0;
157 let mut max_repo
= None
;
158 for (repo
, count
) in data
.as_object().unwrap() {
159 if map
.contains_key(repo
) { continue; }
160 if let Some(count
) = count
.as_i64() {
161 if count
> max_used
{
163 max_repo
= Some(repo
);
167 if let Some(repo
) = max_repo
{
168 map
.insert(repo
.to_owned(), json
!(max_used
));
172 if map
.len() > 10 { // store max. 10 repos
177 let new_data
= json
!(map
);
179 let _
= replace_file(path
, new_data
.to_string().as_bytes(), CreateOptions
::new());
182 pub fn complete_repository(_arg
: &str, _param
: &HashMap
<String
, String
>) -> Vec
<String
> {
184 let mut result
= vec
![];
186 let base
= match BaseDirectories
::with_prefix("proxmox-backup") {
191 // usually $HOME/.cache/proxmox-backup/repo-list
192 let path
= match base
.place_cache_file("repo-list") {
197 let data
= file_get_json(&path
, None
).unwrap_or_else(|_
| json
!({}
));
199 if let Some(map
) = data
.as_object() {
200 for (repo
, _count
) in map
{
201 result
.push(repo
.to_owned());
208 fn connect(repo
: &BackupRepository
) -> Result
<HttpClient
, Error
> {
209 connect_do(repo
.host(), repo
.port(), repo
.auth_id())
210 .map_err(|err
| format_err
!("error building client for repository {} - {}", repo
, err
))
213 fn connect_do(server
: &str, port
: u16, auth_id
: &Authid
) -> Result
<HttpClient
, Error
> {
214 let fingerprint
= std
::env
::var(ENV_VAR_PBS_FINGERPRINT
).ok();
216 use std
::env
::VarError
::*;
217 let password
= match std
::env
::var(ENV_VAR_PBS_PASSWORD
) {
219 Err(NotUnicode(_
)) => bail
!(format
!("{} contains bad characters", ENV_VAR_PBS_PASSWORD
)),
220 Err(NotPresent
) => None
,
223 let options
= HttpClientOptions
::new_interactive(password
, fingerprint
);
225 HttpClient
::new(server
, port
, auth_id
, options
)
228 async
fn api_datastore_list_snapshots(
231 group
: Option
<BackupGroup
>,
232 ) -> Result
<Value
, Error
> {
234 let path
= format
!("api2/json/admin/datastore/{}/snapshots", store
);
236 let mut args
= json
!({}
);
237 if let Some(group
) = group
{
238 args
["backup-type"] = group
.backup_type().into();
239 args
["backup-id"] = group
.backup_id().into();
242 let mut result
= client
.get(&path
, Some(args
)).await?
;
244 Ok(result
["data"].take())
247 pub async
fn api_datastore_latest_snapshot(
251 ) -> Result
<(String
, String
, i64), Error
> {
253 let list
= api_datastore_list_snapshots(client
, store
, Some(group
.clone())).await?
;
254 let mut list
: Vec
<SnapshotListItem
> = serde_json
::from_value(list
)?
;
257 bail
!("backup group {:?} does not contain any snapshots.", group
.group_path());
260 list
.sort_unstable_by(|a
, b
| b
.backup_time
.cmp(&a
.backup_time
));
262 let backup_time
= list
[0].backup_time
;
264 Ok((group
.backup_type().to_owned(), group
.backup_id().to_owned(), backup_time
))
267 async
fn backup_directory
<P
: AsRef
<Path
>>(
268 client
: &BackupWriter
,
271 chunk_size
: Option
<usize>,
272 catalog
: Arc
<Mutex
<CatalogWriter
<crate::tools
::StdChannelWriter
>>>,
273 pxar_create_options
: proxmox_backup
::pxar
::PxarCreateOptions
,
274 upload_options
: UploadOptions
,
275 ) -> Result
<BackupStats
, Error
> {
277 let pxar_stream
= PxarBackupStream
::open(
282 let mut chunk_stream
= ChunkStream
::new(pxar_stream
, chunk_size
);
284 let (tx
, rx
) = mpsc
::channel(10); // allow to buffer 10 chunks
286 let stream
= ReceiverStream
::new(rx
)
287 .map_err(Error
::from
);
289 // spawn chunker inside a separate task so that it can run parallel
290 tokio
::spawn(async
move {
291 while let Some(v
) = chunk_stream
.next().await
{
292 let _
= tx
.send(v
).await
;
296 if upload_options
.fixed_size
.is_some() {
297 bail
!("cannot backup directory with fixed chunk size!");
301 .upload_stream(archive_name
, stream
, upload_options
)
307 async
fn backup_image
<P
: AsRef
<Path
>>(
308 client
: &BackupWriter
,
311 chunk_size
: Option
<usize>,
312 upload_options
: UploadOptions
,
313 ) -> Result
<BackupStats
, Error
> {
315 let path
= image_path
.as_ref().to_owned();
317 let file
= tokio
::fs
::File
::open(path
).await?
;
319 let stream
= tokio_util
::codec
::FramedRead
::new(file
, tokio_util
::codec
::BytesCodec
::new())
320 .map_err(Error
::from
);
322 let stream
= FixedChunkStream
::new(stream
, chunk_size
.unwrap_or(4*1024*1024));
324 if upload_options
.fixed_size
.is_none() {
325 bail
!("cannot backup image with dynamic chunk size!");
329 .upload_stream(archive_name
, stream
, upload_options
)
339 schema
: REPO_URL_SCHEMA
,
343 schema
: OUTPUT_FORMAT
,
349 /// List backup groups.
350 async
fn list_backup_groups(param
: Value
) -> Result
<Value
, Error
> {
352 let output_format
= get_output_format(¶m
);
354 let repo
= extract_repository_from_value(¶m
)?
;
356 let client
= connect(&repo
)?
;
358 let path
= format
!("api2/json/admin/datastore/{}/groups", repo
.store());
360 let mut result
= client
.get(&path
, None
).await?
;
362 record_repository(&repo
);
364 let render_group_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
365 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
366 let group
= BackupGroup
::new(item
.backup_type
, item
.backup_id
);
367 Ok(group
.group_path().to_str().unwrap().to_owned())
370 let render_last_backup
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
371 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
372 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.last_backup
)?
;
373 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
376 let render_files
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
377 let item
: GroupListItem
= serde_json
::from_value(record
.to_owned())?
;
378 Ok(tools
::format
::render_backup_file_list(&item
.files
))
381 let options
= default_table_format_options()
382 .sortby("backup-type", false)
383 .sortby("backup-id", false)
384 .column(ColumnConfig
::new("backup-id").renderer(render_group_path
).header("group"))
386 ColumnConfig
::new("last-backup")
387 .renderer(render_last_backup
)
388 .header("last snapshot")
391 .column(ColumnConfig
::new("backup-count"))
392 .column(ColumnConfig
::new("files").renderer(render_files
));
394 let mut data
: Value
= result
["data"].take();
396 let return_type
= &proxmox_backup
::api2
::admin
::datastore
::API_METHOD_LIST_GROUPS
.returns
;
398 format_and_print_result_full(&mut data
, return_type
, &output_format
, &options
);
407 schema
: REPO_URL_SCHEMA
,
412 description
: "Backup group.",
420 /// Change owner of a backup group
421 async
fn change_backup_owner(group
: String
, mut param
: Value
) -> Result
<(), Error
> {
423 let repo
= extract_repository_from_value(¶m
)?
;
425 let mut client
= connect(&repo
)?
;
427 param
.as_object_mut().unwrap().remove("repository");
429 let group
: BackupGroup
= group
.parse()?
;
431 param
["backup-type"] = group
.backup_type().into();
432 param
["backup-id"] = group
.backup_id().into();
434 let path
= format
!("api2/json/admin/datastore/{}/change-owner", repo
.store());
435 client
.post(&path
, Some(param
)).await?
;
437 record_repository(&repo
);
446 schema
: REPO_URL_SCHEMA
,
452 /// Try to login. If successful, store ticket.
453 async
fn api_login(param
: Value
) -> Result
<Value
, Error
> {
455 let repo
= extract_repository_from_value(¶m
)?
;
457 let client
= connect(&repo
)?
;
458 client
.login().await?
;
460 record_repository(&repo
);
469 schema
: REPO_URL_SCHEMA
,
475 /// Logout (delete stored ticket).
476 fn api_logout(param
: Value
) -> Result
<Value
, Error
> {
478 let repo
= extract_repository_from_value(¶m
)?
;
480 delete_ticket_info("proxmox-backup", repo
.host(), repo
.user())?
;
489 schema
: REPO_URL_SCHEMA
,
493 schema
: OUTPUT_FORMAT
,
499 /// Show client and optional server version
500 async
fn api_version(param
: Value
) -> Result
<(), Error
> {
502 let output_format
= get_output_format(¶m
);
504 let mut version_info
= json
!({
506 "version": version
::PROXMOX_PKG_VERSION
,
507 "release": version
::PROXMOX_PKG_RELEASE
,
508 "repoid": version
::PROXMOX_PKG_REPOID
,
512 let repo
= extract_repository_from_value(¶m
);
513 if let Ok(repo
) = repo
{
514 let client
= connect(&repo
)?
;
516 match client
.get("api2/json/version", None
).await
{
517 Ok(mut result
) => version_info
["server"] = result
["data"].take(),
518 Err(e
) => eprintln
!("could not connect to server - {}", e
),
521 if output_format
== "text" {
522 println
!("client version: {}.{}", version
::PROXMOX_PKG_VERSION
, version
::PROXMOX_PKG_RELEASE
);
523 if let Some(server
) = version_info
["server"].as_object() {
524 let server_version
= server
["version"].as_str().unwrap();
525 let server_release
= server
["release"].as_str().unwrap();
526 println
!("server version: {}.{}", server_version
, server_release
);
529 format_and_print_result(&version_info
, &output_format
);
539 schema
: REPO_URL_SCHEMA
,
543 schema
: OUTPUT_FORMAT
,
549 /// Start garbage collection for a specific repository.
550 async
fn start_garbage_collection(param
: Value
) -> Result
<Value
, Error
> {
552 let repo
= extract_repository_from_value(¶m
)?
;
554 let output_format
= get_output_format(¶m
);
556 let mut client
= connect(&repo
)?
;
558 let path
= format
!("api2/json/admin/datastore/{}/gc", repo
.store());
560 let result
= client
.post(&path
, None
).await?
;
562 record_repository(&repo
);
564 view_task_result(&mut client
, result
, &output_format
).await?
;
569 struct CatalogUploadResult
{
570 catalog_writer
: Arc
<Mutex
<CatalogWriter
<crate::tools
::StdChannelWriter
>>>,
571 result
: tokio
::sync
::oneshot
::Receiver
<Result
<BackupStats
, Error
>>,
574 fn spawn_catalog_upload(
575 client
: Arc
<BackupWriter
>,
577 ) -> Result
<CatalogUploadResult
, Error
> {
578 let (catalog_tx
, catalog_rx
) = std
::sync
::mpsc
::sync_channel(10); // allow to buffer 10 writes
579 let catalog_stream
= crate::tools
::StdChannelStream(catalog_rx
);
580 let catalog_chunk_size
= 512*1024;
581 let catalog_chunk_stream
= ChunkStream
::new(catalog_stream
, Some(catalog_chunk_size
));
583 let catalog_writer
= Arc
::new(Mutex
::new(CatalogWriter
::new(crate::tools
::StdChannelWriter
::new(catalog_tx
))?
));
585 let (catalog_result_tx
, catalog_result_rx
) = tokio
::sync
::oneshot
::channel();
587 let upload_options
= UploadOptions
{
590 ..UploadOptions
::default()
593 tokio
::spawn(async
move {
594 let catalog_upload_result
= client
595 .upload_stream(CATALOG_NAME
, catalog_chunk_stream
, upload_options
)
598 if let Err(ref err
) = catalog_upload_result
{
599 eprintln
!("catalog upload error - {}", err
);
603 let _
= catalog_result_tx
.send(catalog_upload_result
);
606 Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx }
)
/// Where an encryption/master key came from, for user-facing messages.
#[derive(Clone, Debug, Eq, PartialEq)]
enum KeySource {
    /// The per-user default key file.
    DefaultKey,
    /// Passed in via an already-open file descriptor.
    Fd,
    /// Read from an explicit file path.
    Path(String),
}

/// Render a human-readable one-liner describing which `key_type` key
/// ("encryption", "master", ...) is used and where it came from.
fn format_key_source(source: &KeySource, key_type: &str) -> String {
    match source {
        KeySource::DefaultKey => format!("Using default {} key..", key_type),
        KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
        KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
    }
}
624 #[derive(Clone, Debug, Eq, PartialEq)]
625 struct KeyWithSource
{
626 pub source
: KeySource
,
631 pub fn from_fd(key
: Vec
<u8>) -> Self {
633 source
: KeySource
::Fd
,
638 pub fn from_default(key
: Vec
<u8>) -> Self {
640 source
: KeySource
::DefaultKey
,
645 pub fn from_path(path
: String
, key
: Vec
<u8>) -> Self {
647 source
: KeySource
::Path(path
),
653 #[derive(Debug, Eq, PartialEq)]
654 struct CryptoParams
{
656 enc_key
: Option
<KeyWithSource
>,
657 // FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
658 master_pubkey
: Option
<KeyWithSource
>,
661 fn crypto_parameters(param
: &Value
) -> Result
<CryptoParams
, Error
> {
662 let keyfile
= match param
.get("keyfile") {
663 Some(Value
::String(keyfile
)) => Some(keyfile
),
664 Some(_
) => bail
!("bad --keyfile parameter type"),
668 let key_fd
= match param
.get("keyfd") {
669 Some(Value
::Number(key_fd
)) => Some(
670 RawFd
::try_from(key_fd
672 .ok_or_else(|| format_err
!("bad key fd: {:?}", key_fd
))?
674 .map_err(|err
| format_err
!("bad key fd: {:?}: {}", key_fd
, err
))?
676 Some(_
) => bail
!("bad --keyfd parameter type"),
680 let master_pubkey_file
= match param
.get("master-pubkey-file") {
681 Some(Value
::String(keyfile
)) => Some(keyfile
),
682 Some(_
) => bail
!("bad --master-pubkey-file parameter type"),
686 let master_pubkey_fd
= match param
.get("master-pubkey-fd") {
687 Some(Value
::Number(key_fd
)) => Some(
688 RawFd
::try_from(key_fd
690 .ok_or_else(|| format_err
!("bad master public key fd: {:?}", key_fd
))?
692 .map_err(|err
| format_err
!("bad public master key fd: {:?}: {}", key_fd
, err
))?
694 Some(_
) => bail
!("bad --master-pubkey-fd parameter type"),
698 let mode
: Option
<CryptMode
> = match param
.get("crypt-mode") {
699 Some(mode
) => Some(serde_json
::from_value(mode
.clone())?
),
703 let key
= match (keyfile
, key_fd
) {
704 (None
, None
) => None
,
705 (Some(_
), Some(_
)) => bail
!("--keyfile and --keyfd are mutually exclusive"),
706 (Some(keyfile
), None
) => Some(KeyWithSource
::from_path(
708 file_get_contents(keyfile
)?
,
710 (None
, Some(fd
)) => {
711 let input
= unsafe { std::fs::File::from_raw_fd(fd) }
;
712 let mut data
= Vec
::new();
713 let _len
: usize = { input }
.read_to_end(&mut data
).map_err(|err
| {
714 format_err
!("error reading encryption key from fd {}: {}", fd
, err
)
716 Some(KeyWithSource
::from_fd(data
))
720 let master_pubkey
= match (master_pubkey_file
, master_pubkey_fd
) {
721 (None
, None
) => None
,
722 (Some(_
), Some(_
)) => bail
!("--keyfile and --keyfd are mutually exclusive"),
723 (Some(keyfile
), None
) => Some(KeyWithSource
::from_path(
725 file_get_contents(keyfile
)?
,
727 (None
, Some(fd
)) => {
728 let input
= unsafe { std::fs::File::from_raw_fd(fd) }
;
729 let mut data
= Vec
::new();
730 let _len
: usize = { input }
731 .read_to_end(&mut data
)
732 .map_err(|err
| format_err
!("error reading master key from fd {}: {}", fd
, err
))?
;
733 Some(KeyWithSource
::from_fd(data
))
737 let res
= match mode
{
738 // no crypt mode, enable encryption if keys are available
739 None
=> match (key
, master_pubkey
) {
740 // only default keys if available
741 (None
, None
) => match key
::read_optional_default_encryption_key()?
{
742 None
=> CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None }
,
744 let master_pubkey
= key
::read_optional_default_master_pubkey()?
;
746 mode
: CryptMode
::Encrypt
,
753 // explicit master key, default enc key needed
754 (None
, master_pubkey
) => match key
::read_optional_default_encryption_key()?
{
755 None
=> bail
!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
758 mode
: CryptMode
::Encrypt
,
765 // explicit keyfile, maybe default master key
766 (enc_key
, None
) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: key::read_optional_default_master_pubkey()? }
,
768 // explicit keyfile and master key
769 (enc_key
, master_pubkey
) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey }
,
772 // explicitly disabled encryption
773 Some(CryptMode
::None
) => match (key
, master_pubkey
) {
774 // no keys => OK, no encryption
775 (None
, None
) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None }
,
777 // --keyfile and --crypt-mode=none
778 (Some(_
), _
) => bail
!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
780 // --master-pubkey-file and --crypt-mode=none
781 (_
, Some(_
)) => bail
!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
784 // explicitly enabled encryption
785 Some(mode
) => match (key
, master_pubkey
) {
786 // no key, maybe master key
787 (None
, master_pubkey
) => match key
::read_optional_default_encryption_key()?
{
788 None
=> bail
!("--crypt-mode without --keyfile and no default key file available"),
790 eprintln
!("Encrypting with default encryption key!");
791 let master_pubkey
= match master_pubkey
{
792 None
=> key
::read_optional_default_master_pubkey()?
,
793 master_pubkey
=> master_pubkey
,
804 // --keyfile and --crypt-mode other than none
805 (enc_key
, master_pubkey
) => {
806 let master_pubkey
= match master_pubkey
{
807 None
=> key
::read_optional_default_master_pubkey()?
,
808 master_pubkey
=> master_pubkey
,
811 CryptoParams { mode, enc_key, master_pubkey }
820 // WARNING: there must only be one test for crypto_parameters as the default key handling is not
821 // safe w.r.t. concurrency
822 fn test_crypto_parameters_handling() -> Result
<(), Error
> {
823 let some_key
= vec
![1;1];
824 let default_key
= vec
![2;1];
826 let some_master_key
= vec
![3;1];
827 let default_master_key
= vec
![4;1];
829 let keypath
= "./target/testout/keyfile.test";
830 let master_keypath
= "./target/testout/masterkeyfile.test";
831 let invalid_keypath
= "./target/testout/invalid_keyfile.test";
833 let no_key_res
= CryptoParams
{
836 mode
: CryptMode
::None
,
838 let some_key_res
= CryptoParams
{
839 enc_key
: Some(KeyWithSource
::from_path(
844 mode
: CryptMode
::Encrypt
,
846 let some_key_some_master_res
= CryptoParams
{
847 enc_key
: Some(KeyWithSource
::from_path(
851 master_pubkey
: Some(KeyWithSource
::from_path(
852 master_keypath
.to_string(),
853 some_master_key
.clone(),
855 mode
: CryptMode
::Encrypt
,
857 let some_key_default_master_res
= CryptoParams
{
858 enc_key
: Some(KeyWithSource
::from_path(
862 master_pubkey
: Some(KeyWithSource
::from_default(default_master_key
.clone())),
863 mode
: CryptMode
::Encrypt
,
866 let some_key_sign_res
= CryptoParams
{
867 enc_key
: Some(KeyWithSource
::from_path(
872 mode
: CryptMode
::SignOnly
,
874 let default_key_res
= CryptoParams
{
875 enc_key
: Some(KeyWithSource
::from_default(default_key
.clone())),
877 mode
: CryptMode
::Encrypt
,
879 let default_key_sign_res
= CryptoParams
{
880 enc_key
: Some(KeyWithSource
::from_default(default_key
.clone())),
882 mode
: CryptMode
::SignOnly
,
885 replace_file(&keypath
, &some_key
, CreateOptions
::default())?
;
886 replace_file(&master_keypath
, &some_master_key
, CreateOptions
::default())?
;
888 // no params, no default key == no key
889 let res
= crypto_parameters(&json
!({}
));
890 assert_eq
!(res
.unwrap(), no_key_res
);
892 // keyfile param == key from keyfile
893 let res
= crypto_parameters(&json
!({"keyfile": keypath}
));
894 assert_eq
!(res
.unwrap(), some_key_res
);
896 // crypt mode none == no key
897 let res
= crypto_parameters(&json
!({"crypt-mode": "none"}
));
898 assert_eq
!(res
.unwrap(), no_key_res
);
900 // crypt mode encrypt/sign-only, no keyfile, no default key == Error
901 assert
!(crypto_parameters(&json
!({"crypt-mode": "sign-only"}
)).is_err());
902 assert
!(crypto_parameters(&json
!({"crypt-mode": "encrypt"}
)).is_err());
904 // crypt mode none with explicit key == Error
905 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "keyfile": keypath}
)).is_err());
907 // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
908 let res
= crypto_parameters(&json
!({"crypt-mode": "sign-only", "keyfile": keypath}
));
909 assert_eq
!(res
.unwrap(), some_key_sign_res
);
910 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt", "keyfile": keypath}
));
911 assert_eq
!(res
.unwrap(), some_key_res
);
913 // invalid keyfile parameter always errors
914 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath}
)).is_err());
915 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "none"}
)).is_err());
916 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"}
)).is_err());
917 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"}
)).is_err());
919 // now set a default key
920 unsafe { key::set_test_encryption_key(Ok(Some(default_key.clone()))); }
924 // no params but default key == default key
925 let res
= crypto_parameters(&json
!({}
));
926 assert_eq
!(res
.unwrap(), default_key_res
);
928 // keyfile param == key from keyfile
929 let res
= crypto_parameters(&json
!({"keyfile": keypath}
));
930 assert_eq
!(res
.unwrap(), some_key_res
);
932 // crypt mode none == no key
933 let res
= crypto_parameters(&json
!({"crypt-mode": "none"}
));
934 assert_eq
!(res
.unwrap(), no_key_res
);
936 // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
937 let res
= crypto_parameters(&json
!({"crypt-mode": "sign-only"}
));
938 assert_eq
!(res
.unwrap(), default_key_sign_res
);
939 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt"}
));
940 assert_eq
!(res
.unwrap(), default_key_res
);
942 // crypt mode none with explicit key == Error
943 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "keyfile": keypath}
)).is_err());
945 // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
946 let res
= crypto_parameters(&json
!({"crypt-mode": "sign-only", "keyfile": keypath}
));
947 assert_eq
!(res
.unwrap(), some_key_sign_res
);
948 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt", "keyfile": keypath}
));
949 assert_eq
!(res
.unwrap(), some_key_res
);
951 // invalid keyfile parameter always errors
952 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath}
)).is_err());
953 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "none"}
)).is_err());
954 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"}
)).is_err());
955 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"}
)).is_err());
957 // now make default key retrieval error
958 unsafe { key::set_test_encryption_key(Err(format_err!("test error"))); }
962 // no params, default key retrieval errors == Error
963 assert
!(crypto_parameters(&json
!({}
)).is_err());
965 // keyfile param == key from keyfile
966 let res
= crypto_parameters(&json
!({"keyfile": keypath}
));
967 assert_eq
!(res
.unwrap(), some_key_res
);
969 // crypt mode none == no key
970 let res
= crypto_parameters(&json
!({"crypt-mode": "none"}
));
971 assert_eq
!(res
.unwrap(), no_key_res
);
973 // crypt mode encrypt/sign-only, no keyfile, default key error == Error
974 assert
!(crypto_parameters(&json
!({"crypt-mode": "sign-only"}
)).is_err());
975 assert
!(crypto_parameters(&json
!({"crypt-mode": "encrypt"}
)).is_err());
977 // crypt mode none with explicit key == Error
978 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "keyfile": keypath}
)).is_err());
980 // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
981 let res
= crypto_parameters(&json
!({"crypt-mode": "sign-only", "keyfile": keypath}
));
982 assert_eq
!(res
.unwrap(), some_key_sign_res
);
983 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt", "keyfile": keypath}
));
984 assert_eq
!(res
.unwrap(), some_key_res
);
986 // invalid keyfile parameter always errors
987 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath}
)).is_err());
988 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "none"}
)).is_err());
989 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"}
)).is_err());
990 assert
!(crypto_parameters(&json
!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"}
)).is_err());
992 // now remove default key again
993 unsafe { key::set_test_encryption_key(Ok(None)); }
994 // set a default master key
995 unsafe { key::set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
997 // and use an explicit master key
998 assert
!(crypto_parameters(&json
!({"master-pubkey-file": master_keypath}
)).is_err());
999 // just a default == no key
1000 let res
= crypto_parameters(&json
!({}
));
1001 assert_eq
!(res
.unwrap(), no_key_res
);
1003 // keyfile param == key from keyfile
1004 let res
= crypto_parameters(&json
!({"keyfile": keypath, "master-pubkey-file": master_keypath}
));
1005 assert_eq
!(res
.unwrap(), some_key_some_master_res
);
1006 // same with fallback to default master key
1007 let res
= crypto_parameters(&json
!({"keyfile": keypath}
));
1008 assert_eq
!(res
.unwrap(), some_key_default_master_res
);
1010 // crypt mode none == error
1011 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "master-pubkey-file": master_keypath}
)).is_err());
1012 // with just default master key == no key
1013 let res
= crypto_parameters(&json
!({"crypt-mode": "none"}
));
1014 assert_eq
!(res
.unwrap(), no_key_res
);
1016 // crypt mode encrypt without enc key == error
1017 assert
!(crypto_parameters(&json
!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath}
)).is_err());
1018 assert
!(crypto_parameters(&json
!({"crypt-mode": "encrypt"}
)).is_err());
1020 // crypt mode none with explicit key == Error
1021 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath}
)).is_err());
1022 assert
!(crypto_parameters(&json
!({"crypt-mode": "none", "keyfile": keypath}
)).is_err());
1024 // crypt mode encrypt with keyfile == key from keyfile with correct mode
1025 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}
));
1026 assert_eq
!(res
.unwrap(), some_key_some_master_res
);
1027 let res
= crypto_parameters(&json
!({"crypt-mode": "encrypt", "keyfile": keypath}
));
1028 assert_eq
!(res
.unwrap(), some_key_default_master_res
);
1030 // invalid master keyfile parameter always errors when a key is passed, even with a valid
1031 // default master key
1032 assert
!(crypto_parameters(&json
!({"keyfile": keypath, "master-pubkey-file": invalid_keypath}
)).is_err());
1033 assert
!(crypto_parameters(&json
!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"}
)).is_err());
1034 assert
!(crypto_parameters(&json
!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"}
)).is_err());
1035 assert
!(crypto_parameters(&json
!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"}
)).is_err());
1045 description
: "List of backup source specifications ([<label.ext>:<path>] ...)",
1047 schema
: BACKUP_SOURCE_SCHEMA
,
1051 schema
: REPO_URL_SCHEMA
,
1055 description
: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
1059 description
: "Path to file.",
1062 "all-file-systems": {
1064 description
: "Include all mounted subdirectories.",
1068 schema
: KEYFILE_SCHEMA
,
1072 schema
: KEYFD_SCHEMA
,
1075 "master-pubkey-file": {
1076 schema
: MASTER_PUBKEY_FILE_SCHEMA
,
1079 "master-pubkey-fd": {
1080 schema
: MASTER_PUBKEY_FD_SCHEMA
,
1087 "skip-lost-and-found": {
1089 description
: "Skip lost+found directory.",
1093 schema
: BACKUP_TYPE_SCHEMA
,
1097 schema
: BACKUP_ID_SCHEMA
,
1101 schema
: BACKUP_TIME_SCHEMA
,
1105 schema
: CHUNK_SIZE_SCHEMA
,
1110 description
: "List of paths or patterns for matching files to exclude.",
1114 description
: "Path or match pattern.",
1119 description
: "Max number of entries to hold in memory.",
1121 default: proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as isize,
1125 description
: "Verbose output.",
1131 /// Create (host) backup.
1132 async
fn create_backup(
1135 _rpcenv
: &mut dyn RpcEnvironment
,
1136 ) -> Result
<Value
, Error
> {
1138 let repo
= extract_repository_from_value(¶m
)?
;
1140 let backupspec_list
= tools
::required_array_param(¶m
, "backupspec")?
;
1142 let all_file_systems
= param
["all-file-systems"].as_bool().unwrap_or(false);
1144 let skip_lost_and_found
= param
["skip-lost-and-found"].as_bool().unwrap_or(false);
1146 let verbose
= param
["verbose"].as_bool().unwrap_or(false);
1148 let backup_time_opt
= param
["backup-time"].as_i64();
1150 let chunk_size_opt
= param
["chunk-size"].as_u64().map(|v
| (v
*1024) as usize);
1152 if let Some(size
) = chunk_size_opt
{
1153 verify_chunk_size(size
)?
;
1156 let crypto
= crypto_parameters(¶m
)?
;
1158 let backup_id
= param
["backup-id"].as_str().unwrap_or(&proxmox
::tools
::nodename());
1160 let backup_type
= param
["backup-type"].as_str().unwrap_or("host");
1162 let include_dev
= param
["include-dev"].as_array();
1164 let entries_max
= param
["entries-max"].as_u64()
1165 .unwrap_or(proxmox_backup
::pxar
::ENCODER_MAX_ENTRIES
as u64);
1167 let empty
= Vec
::new();
1168 let exclude_args
= param
["exclude"].as_array().unwrap_or(&empty
);
1170 let mut pattern_list
= Vec
::with_capacity(exclude_args
.len());
1171 for entry
in exclude_args
{
1172 let entry
= entry
.as_str().ok_or_else(|| format_err
!("Invalid pattern string slice"))?
;
1174 MatchEntry
::parse_pattern(entry
, PatternFlag
::PATH_NAME
, MatchType
::Exclude
)
1175 .map_err(|err
| format_err
!("invalid exclude pattern entry: {}", err
))?
1179 let mut devices
= if all_file_systems { None }
else { Some(HashSet::new()) }
;
1181 if let Some(include_dev
) = include_dev
{
1182 if all_file_systems
{
1183 bail
!("option 'all-file-systems' conflicts with option 'include-dev'");
1186 let mut set
= HashSet
::new();
1187 for path
in include_dev
{
1188 let path
= path
.as_str().unwrap();
1189 let stat
= nix
::sys
::stat
::stat(path
)
1190 .map_err(|err
| format_err
!("fstat {:?} failed - {}", path
, err
))?
;
1191 set
.insert(stat
.st_dev
);
1193 devices
= Some(set
);
1196 let mut upload_list
= vec
![];
1197 let mut target_set
= HashSet
::new();
1199 for backupspec
in backupspec_list
{
1200 let spec
= parse_backup_specification(backupspec
.as_str().unwrap())?
;
1201 let filename
= &spec
.config_string
;
1202 let target
= &spec
.archive_name
;
1204 if target_set
.contains(target
) {
1205 bail
!("got target twice: '{}'", target
);
1207 target_set
.insert(target
.to_string());
1209 use std
::os
::unix
::fs
::FileTypeExt
;
1211 let metadata
= std
::fs
::metadata(filename
)
1212 .map_err(|err
| format_err
!("unable to access '{}' - {}", filename
, err
))?
;
1213 let file_type
= metadata
.file_type();
1215 match spec
.spec_type
{
1216 BackupSpecificationType
::PXAR
=> {
1217 if !file_type
.is_dir() {
1218 bail
!("got unexpected file type (expected directory)");
1220 upload_list
.push((BackupSpecificationType
::PXAR
, filename
.to_owned(), format
!("{}.didx", target
), 0));
1222 BackupSpecificationType
::IMAGE
=> {
1223 if !(file_type
.is_file() || file_type
.is_block_device()) {
1224 bail
!("got unexpected file type (expected file or block device)");
1227 let size
= image_size(&PathBuf
::from(filename
))?
;
1229 if size
== 0 { bail!("got zero-sized file '{}'
", filename); }
1231 upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}
.fidx
", target), size));
1233 BackupSpecificationType::CONFIG => {
1234 if !file_type.is_file() {
1235 bail!("got unexpected file
type (expected regular file
)");
1237 upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
1239 BackupSpecificationType::LOGFILE => {
1240 if !file_type.is_file() {
1241 bail!("got unexpected file
type (expected regular file
)");
1243 upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}
.blob
", target), metadata.len()));
1248 let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);
1250 let client = connect(&repo)?;
1251 record_repository(&repo);
1253 println!("Starting backup
: {}
/{}
/{}
", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
1255 println!("Client name
: {}
", proxmox::tools::nodename());
1257 let start_time = std::time::Instant::now();
1259 println!("Starting backup protocol
: {}
", strftime_local("%c
", epoch_i64())?);
1261 let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
1262 None => (None, None),
1263 Some(key_with_source) => {
1266 format_key_source(&key_with_source.source, "encryption
")
1269 let (key, created, fingerprint) =
1270 decrypt_key(&key_with_source.key, &key::get_encryption_key_password)?;
1271 println!("Encryption key fingerprint
: {}
", fingerprint);
1273 let crypt_config = CryptConfig::new(key)?;
1275 match crypto.master_pubkey {
1276 Some(pem_with_source) => {
1277 println!("{}
", format_key_source(&pem_with_source.source, "master
"));
1279 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;
1281 let mut key_config = KeyConfig::without_password(key)?;
1282 key_config.created = created; // keep original value
1284 let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;
1286 (Some(Arc::new(crypt_config)), Some(enc_key))
1288 _ => (Some(Arc::new(crypt_config)), None),
1293 let client = BackupWriter::start(
1295 crypt_config.clone(),
1304 let download_previous_manifest = match client.previous_backup_time().await {
1305 Ok(Some(backup_time)) => {
1307 "Downloading previous
manifest ({}
)",
1308 strftime_local("%c
", backup_time)?
1313 println!("No previous manifest available
.");
1317 // Fallback for outdated server, TODO remove/bubble up with 2.0
1322 let previous_manifest = if download_previous_manifest {
1323 match client.download_previous_manifest().await {
1324 Ok(previous_manifest) => {
1325 match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
1326 Ok(()) => Some(Arc::new(previous_manifest)),
1328 println!("Couldn't re
-use previous manifest
- {}
", err);
1334 println!("Couldn't download previous manifest
- {}
", err);
1342 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
1343 let mut manifest = BackupManifest::new(snapshot);
1345 let mut catalog = None;
1346 let mut catalog_result_rx = None;
1348 for (backup_type, filename, target, size) in upload_list {
1350 BackupSpecificationType::CONFIG => {
1351 let upload_options = UploadOptions {
1353 encrypt: crypto.mode == CryptMode::Encrypt,
1354 ..UploadOptions::default()
1357 println!("Upload config file '{}' to '{}'
as {}
", filename, repo, target);
1359 .upload_blob_from_file(&filename, &target, upload_options)
1361 manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
1363 BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
1364 let upload_options = UploadOptions {
1366 encrypt: crypto.mode == CryptMode::Encrypt,
1367 ..UploadOptions::default()
1370 println!("Upload log file '{}' to '{}'
as {}
", filename, repo, target);
1372 .upload_blob_from_file(&filename, &target, upload_options)
1374 manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
1376 BackupSpecificationType::PXAR => {
1377 // start catalog upload on first use
1378 if catalog.is_none() {
1379 let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
1380 catalog = Some(catalog_upload_res.catalog_writer);
1381 catalog_result_rx = Some(catalog_upload_res.result);
1383 let catalog = catalog.as_ref().unwrap();
1385 println!("Upload directory '{}' to '{}'
as {}
", filename, repo, target);
1386 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
1388 let pxar_options = proxmox_backup::pxar::PxarCreateOptions {
1389 device_set: devices.clone(),
1390 patterns: pattern_list.clone(),
1391 entries_max: entries_max as usize,
1392 skip_lost_and_found,
1396 let upload_options = UploadOptions {
1397 previous_manifest: previous_manifest.clone(),
1399 encrypt: crypto.mode == CryptMode::Encrypt,
1400 ..UploadOptions::default()
1403 let stats = backup_directory(
1412 manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
1413 catalog.lock().unwrap().end_directory()?;
1415 BackupSpecificationType::IMAGE => {
1416 println!("Upload image '{}' to '{:?}'
as {}
", filename, repo, target);
1418 let upload_options = UploadOptions {
1419 previous_manifest: previous_manifest.clone(),
1420 fixed_size: Some(size),
1422 encrypt: crypto.mode == CryptMode::Encrypt,
1425 let stats = backup_image(
1432 manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
1437 // finalize and upload catalog
1438 if let Some(catalog) = catalog {
1439 let mutex = Arc::try_unwrap(catalog)
1440 .map_err(|_| format_err!("unable to get
catalog (still used
)"))?;
1441 let mut catalog = mutex.into_inner().unwrap();
1445 drop(catalog); // close upload stream
1447 if let Some(catalog_result_rx) = catalog_result_rx {
1448 let stats = catalog_result_rx.await??;
1449 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
1453 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
1454 let target = ENCRYPTED_KEY_BLOB_NAME;
1455 println!("Upload RSA encoded key to '{:?}'
as {}
", repo, target);
1456 let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
1458 .upload_blob_from_data(rsa_encrypted_key, target, options)
1460 manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
1463 // create manifest (index.json)
1464 // manifests are never encrypted, but include a signature
1465 let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
1466 .map_err(|err| format_err!("unable to format manifest
- {}
", err))?;
1469 if verbose { println!("Upload index.json to '{}'", repo
) };
1470 let options
= UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() }
;
1472 .upload_blob_from_data(manifest
.into_bytes(), MANIFEST_BLOB_NAME
, options
)
1475 client
.finish().await?
;
1477 let end_time
= std
::time
::Instant
::now();
1478 let elapsed
= end_time
.duration_since(start_time
);
1479 println
!("Duration: {:.2}s", elapsed
.as_secs_f64());
1481 println
!("End Time: {}", strftime_local("%c", epoch_i64())?
);
1486 fn complete_backup_source(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1488 let mut result
= vec
![];
1490 let data
: Vec
<&str> = arg
.splitn(2, '
:'
).collect();
1492 if data
.len() != 2 {
1493 result
.push(String
::from("root.pxar:/"));
1494 result
.push(String
::from("etc.pxar:/etc"));
1498 let files
= tools
::complete_file_name(data
[1], param
);
1501 result
.push(format
!("{}:{}", data
[0], file
));
1507 async
fn dump_image
<W
: Write
>(
1508 client
: Arc
<BackupReader
>,
1509 crypt_config
: Option
<Arc
<CryptConfig
>>,
1510 crypt_mode
: CryptMode
,
1511 index
: FixedIndexReader
,
1514 ) -> Result
<(), Error
> {
1516 let most_used
= index
.find_most_used_chunks(8);
1518 let chunk_reader
= RemoteChunkReader
::new(client
.clone(), crypt_config
, crypt_mode
, most_used
);
1520 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1521 // and thus slows down reading. Instead, directly use RemoteChunkReader
1524 let start_time
= std
::time
::Instant
::now();
1526 for pos
in 0..index
.index_count() {
1527 let digest
= index
.index_digest(pos
).unwrap();
1528 let raw_data
= chunk_reader
.read_chunk(&digest
).await?
;
1529 writer
.write_all(&raw_data
)?
;
1530 bytes
+= raw_data
.len();
1532 let next_per
= ((pos
+1)*100)/index
.index_count();
1533 if per
!= next_per
{
1534 eprintln
!("progress {}% (read {} bytes, duration {} sec)",
1535 next_per
, bytes
, start_time
.elapsed().as_secs());
1541 let end_time
= std
::time
::Instant
::now();
1542 let elapsed
= end_time
.duration_since(start_time
);
1543 eprintln
!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1545 elapsed
.as_secs_f64(),
1546 bytes
as f64/(1024.0*1024.0*elapsed
.as_secs_f64())
1553 fn parse_archive_type(name
: &str) -> (String
, ArchiveType
) {
1554 if name
.ends_with(".didx") || name
.ends_with(".fidx") || name
.ends_with(".blob") {
1555 (name
.into(), archive_type(name
).unwrap())
1556 } else if name
.ends_with(".pxar") {
1557 (format
!("{}.didx", name
), ArchiveType
::DynamicIndex
)
1558 } else if name
.ends_with(".img") {
1559 (format
!("{}.fidx", name
), ArchiveType
::FixedIndex
)
1561 (format
!("{}.blob", name
), ArchiveType
::Blob
)
1569 schema
: REPO_URL_SCHEMA
,
1574 description
: "Group/Snapshot path.",
1577 description
: "Backup archive name.",
1582 description
: r
###"Target directory path. Use '-' to write to standard output.
1584 We do not extraxt '.pxar' archives when writing to standard output.
1588 "allow-existing-dirs": {
1590 description
: "Do not fail if directories already exists.",
1594 schema
: KEYFILE_SCHEMA
,
1598 schema
: KEYFD_SCHEMA
,
1608 /// Restore backup repository.
1609 async
fn restore(param
: Value
) -> Result
<Value
, Error
> {
1610 let repo
= extract_repository_from_value(¶m
)?
;
1612 let verbose
= param
["verbose"].as_bool().unwrap_or(false);
1614 let allow_existing_dirs
= param
["allow-existing-dirs"].as_bool().unwrap_or(false);
1616 let archive_name
= tools
::required_string_param(¶m
, "archive-name")?
;
1618 let client
= connect(&repo
)?
;
1620 record_repository(&repo
);
1622 let path
= tools
::required_string_param(¶m
, "snapshot")?
;
1624 let (backup_type
, backup_id
, backup_time
) = if path
.matches('
/'
).count() == 1 {
1625 let group
: BackupGroup
= path
.parse()?
;
1626 api_datastore_latest_snapshot(&client
, repo
.store(), group
).await?
1628 let snapshot
: BackupDir
= path
.parse()?
;
1629 (snapshot
.group().backup_type().to_owned(), snapshot
.group().backup_id().to_owned(), snapshot
.backup_time())
1632 let target
= tools
::required_string_param(¶m
, "target")?
;
1633 let target
= if target
== "-" { None }
else { Some(target) }
;
1635 let crypto
= crypto_parameters(¶m
)?
;
1637 let crypt_config
= match crypto
.enc_key
{
1641 decrypt_key(&key
.key
, &key
::get_encryption_key_password
).map_err(|err
| {
1642 eprintln
!("{}", format_key_source(&key
.source
, "encryption"));
1645 Some(Arc
::new(CryptConfig
::new(key
)?
))
1649 let client
= BackupReader
::start(
1651 crypt_config
.clone(),
1659 let (archive_name
, archive_type
) = parse_archive_type(archive_name
);
1661 let (manifest
, backup_index_data
) = client
.download_manifest().await?
;
1663 if archive_name
== ENCRYPTED_KEY_BLOB_NAME
&& crypt_config
.is_none() {
1664 eprintln
!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
1666 if manifest
.signature
.is_some() {
1667 if let Some(key
) = &crypto
.enc_key
{
1668 eprintln
!("{}", format_key_source(&key
.source
, "encryption"));
1670 if let Some(config
) = &crypt_config
{
1671 eprintln
!("Fingerprint: {}", config
.fingerprint());
1674 manifest
.check_fingerprint(crypt_config
.as_ref().map(Arc
::as_ref
))?
;
1677 if archive_name
== MANIFEST_BLOB_NAME
{
1678 if let Some(target
) = target
{
1679 replace_file(target
, &backup_index_data
, CreateOptions
::new())?
;
1681 let stdout
= std
::io
::stdout();
1682 let mut writer
= stdout
.lock();
1683 writer
.write_all(&backup_index_data
)
1684 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
1687 return Ok(Value
::Null
);
1690 let file_info
= manifest
.lookup_file_info(&archive_name
)?
;
1692 if archive_type
== ArchiveType
::Blob
{
1694 let mut reader
= client
.download_blob(&manifest
, &archive_name
).await?
;
1696 if let Some(target
) = target
{
1697 let mut writer
= std
::fs
::OpenOptions
::new()
1702 .map_err(|err
| format_err
!("unable to create target file {:?} - {}", target
, err
))?
;
1703 std
::io
::copy(&mut reader
, &mut writer
)?
;
1705 let stdout
= std
::io
::stdout();
1706 let mut writer
= stdout
.lock();
1707 std
::io
::copy(&mut reader
, &mut writer
)
1708 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
1711 } else if archive_type
== ArchiveType
::DynamicIndex
{
1713 let index
= client
.download_dynamic_index(&manifest
, &archive_name
).await?
;
1715 let most_used
= index
.find_most_used_chunks(8);
1717 let chunk_reader
= RemoteChunkReader
::new(client
.clone(), crypt_config
, file_info
.chunk_crypt_mode(), most_used
);
1719 let mut reader
= BufferedDynamicReader
::new(index
, chunk_reader
);
1721 let options
= proxmox_backup
::pxar
::PxarExtractOptions
{
1723 extract_match_default
: true,
1724 allow_existing_dirs
,
1728 if let Some(target
) = target
{
1729 proxmox_backup
::pxar
::extract_archive(
1730 pxar
::decoder
::Decoder
::from_std(reader
)?
,
1732 proxmox_backup
::pxar
::Flags
::DEFAULT
,
1735 println
!("{:?}", path
);
1740 .map_err(|err
| format_err
!("error extracting archive - {}", err
))?
;
1742 let mut writer
= std
::fs
::OpenOptions
::new()
1744 .open("/dev/stdout")
1745 .map_err(|err
| format_err
!("unable to open /dev/stdout - {}", err
))?
;
1747 std
::io
::copy(&mut reader
, &mut writer
)
1748 .map_err(|err
| format_err
!("unable to pipe data - {}", err
))?
;
1750 } else if archive_type
== ArchiveType
::FixedIndex
{
1752 let index
= client
.download_fixed_index(&manifest
, &archive_name
).await?
;
1754 let mut writer
= if let Some(target
) = target
{
1755 std
::fs
::OpenOptions
::new()
1760 .map_err(|err
| format_err
!("unable to create target file {:?} - {}", target
, err
))?
1762 std
::fs
::OpenOptions
::new()
1764 .open("/dev/stdout")
1765 .map_err(|err
| format_err
!("unable to open /dev/stdout - {}", err
))?
1768 dump_image(client
.clone(), crypt_config
.clone(), file_info
.chunk_crypt_mode(), index
, &mut writer
, verbose
).await?
;
1774 const API_METHOD_PRUNE
: ApiMethod
= ApiMethod
::new(
1775 &ApiHandler
::Async(&prune
),
1777 "Prune a backup repository.",
1778 &proxmox_backup
::add_common_prune_prameters
!([
1779 ("dry-run", true, &BooleanSchema
::new(
1780 "Just show what prune would do, but do not delete anything.")
1782 ("group", false, &StringSchema
::new("Backup group.").schema()),
1784 ("output-format", true, &OUTPUT_FORMAT
),
1788 &BooleanSchema
::new("Minimal output - only show removals.")
1791 ("repository", true, &REPO_URL_SCHEMA
),
1799 _rpcenv
: &'a
mut dyn RpcEnvironment
,
1800 ) -> proxmox
::api
::ApiFuture
<'a
> {
1802 prune_async(param
).await
1806 async
fn prune_async(mut param
: Value
) -> Result
<Value
, Error
> {
1807 let repo
= extract_repository_from_value(¶m
)?
;
1809 let mut client
= connect(&repo
)?
;
1811 let path
= format
!("api2/json/admin/datastore/{}/prune", repo
.store());
1813 let group
= tools
::required_string_param(¶m
, "group")?
;
1814 let group
: BackupGroup
= group
.parse()?
;
1816 let output_format
= get_output_format(¶m
);
1818 let quiet
= param
["quiet"].as_bool().unwrap_or(false);
1820 param
.as_object_mut().unwrap().remove("repository");
1821 param
.as_object_mut().unwrap().remove("group");
1822 param
.as_object_mut().unwrap().remove("output-format");
1823 param
.as_object_mut().unwrap().remove("quiet");
1825 param
["backup-type"] = group
.backup_type().into();
1826 param
["backup-id"] = group
.backup_id().into();
1828 let mut result
= client
.post(&path
, Some(param
)).await?
;
1830 record_repository(&repo
);
1832 let render_snapshot_path
= |_v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
1833 let item
: PruneListItem
= serde_json
::from_value(record
.to_owned())?
;
1834 let snapshot
= BackupDir
::new(item
.backup_type
, item
.backup_id
, item
.backup_time
)?
;
1835 Ok(snapshot
.relative_path().to_str().unwrap().to_owned())
1838 let render_prune_action
= |v
: &Value
, _record
: &Value
| -> Result
<String
, Error
> {
1839 Ok(match v
.as_bool() {
1840 Some(true) => "keep",
1841 Some(false) => "remove",
1846 let options
= default_table_format_options()
1847 .sortby("backup-type", false)
1848 .sortby("backup-id", false)
1849 .sortby("backup-time", false)
1850 .column(ColumnConfig
::new("backup-id").renderer(render_snapshot_path
).header("snapshot"))
1851 .column(ColumnConfig
::new("backup-time").renderer(tools
::format
::render_epoch
).header("date"))
1852 .column(ColumnConfig
::new("keep").renderer(render_prune_action
).header("action"))
1855 let return_type
= &proxmox_backup
::api2
::admin
::datastore
::API_METHOD_PRUNE
.returns
;
1857 let mut data
= result
["data"].take();
1860 let list
: Vec
<Value
> = data
.as_array().unwrap().iter().filter(|item
| {
1861 item
["keep"].as_bool() == Some(false)
1862 }).cloned().collect();
1866 format_and_print_result_full(&mut data
, return_type
, &output_format
, &options
);
1875 schema
: REPO_URL_SCHEMA
,
1879 schema
: OUTPUT_FORMAT
,
1885 type: StorageStatus
,
1888 /// Get repository status.
1889 async
fn status(param
: Value
) -> Result
<Value
, Error
> {
1891 let repo
= extract_repository_from_value(¶m
)?
;
1893 let output_format
= get_output_format(¶m
);
1895 let client
= connect(&repo
)?
;
1897 let path
= format
!("api2/json/admin/datastore/{}/status", repo
.store());
1899 let mut result
= client
.get(&path
, None
).await?
;
1900 let mut data
= result
["data"].take();
1902 record_repository(&repo
);
1904 let render_total_percentage
= |v
: &Value
, record
: &Value
| -> Result
<String
, Error
> {
1905 let v
= v
.as_u64().unwrap();
1906 let total
= record
["total"].as_u64().unwrap();
1907 let roundup
= total
/200;
1908 let per
= ((v
+roundup
)*100)/total
;
1909 let info
= format
!(" ({} %)", per
);
1910 Ok(format
!("{} {:>8}", v
, info
))
1913 let options
= default_table_format_options()
1915 .column(ColumnConfig
::new("total").renderer(render_total_percentage
))
1916 .column(ColumnConfig
::new("used").renderer(render_total_percentage
))
1917 .column(ColumnConfig
::new("avail").renderer(render_total_percentage
));
1919 let return_type
= &API_METHOD_STATUS
.returns
;
1921 format_and_print_result_full(&mut data
, return_type
, &output_format
, &options
);
1926 // like get, but simply ignore errors and return Null instead
1927 async
fn try_get(repo
: &BackupRepository
, url
: &str) -> Value
{
1929 let fingerprint
= std
::env
::var(ENV_VAR_PBS_FINGERPRINT
).ok();
1930 let password
= std
::env
::var(ENV_VAR_PBS_PASSWORD
).ok();
1932 // ticket cache, but no questions asked
1933 let options
= HttpClientOptions
::new_interactive(password
, fingerprint
)
1934 .interactive(false);
1936 let client
= match HttpClient
::new(repo
.host(), repo
.port(), repo
.auth_id(), options
) {
1938 _
=> return Value
::Null
,
1941 let mut resp
= match client
.get(url
, None
).await
{
1943 _
=> return Value
::Null
,
1946 if let Some(map
) = resp
.as_object_mut() {
1947 if let Some(data
) = map
.remove("data") {
1954 fn complete_backup_group(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1955 proxmox_backup
::tools
::runtime
::main(async { complete_backup_group_do(param).await }
)
1958 async
fn complete_backup_group_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1960 let mut result
= vec
![];
1962 let repo
= match extract_repository_from_map(param
) {
1967 let path
= format
!("api2/json/admin/datastore/{}/groups", repo
.store());
1969 let data
= try_get(&repo
, &path
).await
;
1971 if let Some(list
) = data
.as_array() {
1973 if let (Some(backup_id
), Some(backup_type
)) =
1974 (item
["backup-id"].as_str(), item
["backup-type"].as_str())
1976 result
.push(format
!("{}/{}", backup_type
, backup_id
));
1984 pub fn complete_group_or_snapshot(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1985 proxmox_backup
::tools
::runtime
::main(async { complete_group_or_snapshot_do(arg, param).await }
)
1988 async
fn complete_group_or_snapshot_do(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
1990 if arg
.matches('
/'
).count() < 2 {
1991 let groups
= complete_backup_group_do(param
).await
;
1992 let mut result
= vec
![];
1993 for group
in groups
{
1994 result
.push(group
.to_string());
1995 result
.push(format
!("{}/", group
));
2000 complete_backup_snapshot_do(param
).await
2003 fn complete_backup_snapshot(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2004 proxmox_backup
::tools
::runtime
::main(async { complete_backup_snapshot_do(param).await }
)
2007 async
fn complete_backup_snapshot_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2009 let mut result
= vec
![];
2011 let repo
= match extract_repository_from_map(param
) {
2016 let path
= format
!("api2/json/admin/datastore/{}/snapshots", repo
.store());
2018 let data
= try_get(&repo
, &path
).await
;
2020 if let Some(list
) = data
.as_array() {
2022 if let (Some(backup_id
), Some(backup_type
), Some(backup_time
)) =
2023 (item
["backup-id"].as_str(), item
["backup-type"].as_str(), item
["backup-time"].as_i64())
2025 if let Ok(snapshot
) = BackupDir
::new(backup_type
, backup_id
, backup_time
) {
2026 result
.push(snapshot
.relative_path().to_str().unwrap().to_owned());
2035 fn complete_server_file_name(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2036 proxmox_backup
::tools
::runtime
::main(async { complete_server_file_name_do(param).await }
)
2039 async
fn complete_server_file_name_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2041 let mut result
= vec
![];
2043 let repo
= match extract_repository_from_map(param
) {
2048 let snapshot
: BackupDir
= match param
.get("snapshot") {
2050 match path
.parse() {
2058 let query
= tools
::json_object_to_query(json
!({
2059 "backup-type": snapshot
.group().backup_type(),
2060 "backup-id": snapshot
.group().backup_id(),
2061 "backup-time": snapshot
.backup_time(),
2064 let path
= format
!("api2/json/admin/datastore/{}/files?{}", repo
.store(), query
);
2066 let data
= try_get(&repo
, &path
).await
;
2068 if let Some(list
) = data
.as_array() {
2070 if let Some(filename
) = item
["filename"].as_str() {
2071 result
.push(filename
.to_owned());
2079 fn complete_archive_name(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2080 complete_server_file_name(arg
, param
)
2082 .map(|v
| tools
::format
::strip_server_file_extension(&v
))
2086 pub fn complete_pxar_archive_name(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2087 complete_server_file_name(arg
, param
)
2089 .filter_map(|name
| {
2090 if name
.ends_with(".pxar.didx") {
2091 Some(tools
::format
::strip_server_file_extension(name
))
2099 pub fn complete_img_archive_name(arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2100 complete_server_file_name(arg
, param
)
2102 .filter_map(|name
| {
2103 if name
.ends_with(".img.fidx") {
2104 Some(tools
::format
::strip_server_file_extension(name
))
/// Shell completion helper: suggest power-of-two chunk sizes (in KiB),
/// from 64 up to and including 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
2126 fn complete_auth_id(_arg
: &str, param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2127 proxmox_backup
::tools
::runtime
::main(async { complete_auth_id_do(param).await }
)
2130 async
fn complete_auth_id_do(param
: &HashMap
<String
, String
>) -> Vec
<String
> {
2132 let mut result
= vec
![];
2134 let repo
= match extract_repository_from_map(param
) {
2139 let data
= try_get(&repo
, "api2/json/access/users?include_tokens=true").await
;
2141 if let Ok(parsed
) = serde_json
::from_value
::<Vec
<UserWithTokens
>>(data
) {
2142 for user
in parsed
{
2143 result
.push(user
.userid
.to_string());
2144 for token
in user
.tokens
{
2145 result
.push(token
.tokenid
.to_string());
2153 use proxmox_backup
::client
::RemoteChunkReader
;
2154 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
2157 /// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
2158 /// so that we can properly access it from multiple threads simultaneously while not issuing
2159 /// duplicate simultaneous reads over http.
2160 pub struct BufferedDynamicReadAt
{
2161 inner
: Mutex
<BufferedDynamicReader
<RemoteChunkReader
>>,
2164 impl BufferedDynamicReadAt
{
2165 fn new(inner
: BufferedDynamicReader
<RemoteChunkReader
>) -> Self {
2167 inner
: Mutex
::new(inner
),
2172 impl ReadAt
for BufferedDynamicReadAt
{
2173 fn start_read_at
<'a
>(
2174 self: Pin
<&'a
Self>,
2178 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
2179 MaybeReady
::Ready(tokio
::task
::block_in_place(move || {
2180 let mut reader
= self.inner
.lock().unwrap();
2181 reader
.seek(SeekFrom
::Start(offset
))?
;
2182 Ok(reader
.read(buf
)?
)
2186 fn poll_complete
<'a
>(
2187 self: Pin
<&'a
Self>,
2188 _op
: ReadAtOperation
<'a
>,
2189 ) -> MaybeReady
<io
::Result
<usize>, ReadAtOperation
<'a
>> {
2190 panic
!("LocalDynamicReadAt::start_read_at returned Pending");
2196 let backup_cmd_def
= CliCommand
::new(&API_METHOD_CREATE_BACKUP
)
2197 .arg_param(&["backupspec"])
2198 .completion_cb("repository", complete_repository
)
2199 .completion_cb("backupspec", complete_backup_source
)
2200 .completion_cb("keyfile", tools
::complete_file_name
)
2201 .completion_cb("master-pubkey-file", tools
::complete_file_name
)
2202 .completion_cb("chunk-size", complete_chunk_size
);
2204 let benchmark_cmd_def
= CliCommand
::new(&API_METHOD_BENCHMARK
)
2205 .completion_cb("repository", complete_repository
)
2206 .completion_cb("keyfile", tools
::complete_file_name
);
2208 let list_cmd_def
= CliCommand
::new(&API_METHOD_LIST_BACKUP_GROUPS
)
2209 .completion_cb("repository", complete_repository
);
2211 let garbage_collect_cmd_def
= CliCommand
::new(&API_METHOD_START_GARBAGE_COLLECTION
)
2212 .completion_cb("repository", complete_repository
);
2214 let restore_cmd_def
= CliCommand
::new(&API_METHOD_RESTORE
)
2215 .arg_param(&["snapshot", "archive-name", "target"])
2216 .completion_cb("repository", complete_repository
)
2217 .completion_cb("snapshot", complete_group_or_snapshot
)
2218 .completion_cb("archive-name", complete_archive_name
)
2219 .completion_cb("target", tools
::complete_file_name
);
2221 let prune_cmd_def
= CliCommand
::new(&API_METHOD_PRUNE
)
2222 .arg_param(&["group"])
2223 .completion_cb("group", complete_backup_group
)
2224 .completion_cb("repository", complete_repository
);
2226 let status_cmd_def
= CliCommand
::new(&API_METHOD_STATUS
)
2227 .completion_cb("repository", complete_repository
);
2229 let login_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGIN
)
2230 .completion_cb("repository", complete_repository
);
2232 let logout_cmd_def
= CliCommand
::new(&API_METHOD_API_LOGOUT
)
2233 .completion_cb("repository", complete_repository
);
2235 let version_cmd_def
= CliCommand
::new(&API_METHOD_API_VERSION
)
2236 .completion_cb("repository", complete_repository
);
2238 let change_owner_cmd_def
= CliCommand
::new(&API_METHOD_CHANGE_BACKUP_OWNER
)
2239 .arg_param(&["group", "new-owner"])
2240 .completion_cb("group", complete_backup_group
)
2241 .completion_cb("new-owner", complete_auth_id
)
2242 .completion_cb("repository", complete_repository
);
2244 let cmd_def
= CliCommandMap
::new()
2245 .insert("backup", backup_cmd_def
)
2246 .insert("garbage-collect", garbage_collect_cmd_def
)
2247 .insert("list", list_cmd_def
)
2248 .insert("login", login_cmd_def
)
2249 .insert("logout", logout_cmd_def
)
2250 .insert("prune", prune_cmd_def
)
2251 .insert("restore", restore_cmd_def
)
2252 .insert("snapshot", snapshot_mgtm_cli())
2253 .insert("status", status_cmd_def
)
2254 .insert("key", key
::cli())
2255 .insert("mount", mount_cmd_def())
2256 .insert("map", map_cmd_def())
2257 .insert("unmap", unmap_cmd_def())
2258 .insert("catalog", catalog_mgmt_cli())
2259 .insert("task", task_mgmt_cli())
2260 .insert("version", version_cmd_def
)
2261 .insert("benchmark", benchmark_cmd_def
)
2262 .insert("change-owner", change_owner_cmd_def
)
2264 .alias(&["files"], &["snapshot", "files"])
2265 .alias(&["forget"], &["snapshot", "forget"])
2266 .alias(&["upload-log"], &["snapshot", "upload-log"])
2267 .alias(&["snapshots"], &["snapshot", "list"])
2270 let rpcenv
= CliEnvironment
::new();
2271 run_cli_command(cmd_def
, rpcenv
, Some(|future
| {
2272 proxmox_backup
::tools
::runtime
::main(future
)