use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::tools::{
    time::{strftime_local, epoch_i64},
    fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    rsa_encrypt_key_config,
    BufferedDynamicReader,
    ENCRYPTED_KEY_BLOB_NAME,
    // ...
};

mod proxmox_backup_client;
use proxmox_backup_client::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .schema();

pub const KEYFILE_SCHEMA: Schema =
    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
        .schema();

pub const KEYFD_SCHEMA: Schema =
    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
        .schema();

pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
    .schema();

pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
        .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .schema();
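
// Repository selection: an explicit "repository" parameter always wins; otherwise the
// PBS_REPOSITORY environment variable is consulted (see get_default_repository() below).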
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
pub fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {
    param
        .get("repository")
        .map(String::to_owned)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
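
// Remember which repositories were used so shell completion can suggest them later.
// The usage counters live in a small JSON file (usually $HOME/.cache/proxmox-backup/repo-list)
// and only the 10 most frequently used entries are kept.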
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
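
// Client construction: the server fingerprint and password are taken from the
// PBS_FINGERPRINT / PBS_PASSWORD environment variables when present; otherwise the
// HttpClient falls back to interactive prompting (new_interactive options).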
fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
    connect_do(repo.host(), repo.port(), repo.auth_id())
        .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}

fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new_interactive(password, fingerprint);

    HttpClient::new(server, port, auth_id, options)
}
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, i64), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = list[0].backup_time;

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
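
// Upload a directory as a dynamically chunked .didx archive: the pxar stream is cut into
// content-defined chunks (ChunkStream), buffered through an mpsc channel, and streamed to
// the server, while catalog entries are written through the shared CatalogWriter.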
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        catalog,
        pxar_create_options,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = ReceiverStream::new(rx)
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
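
// Upload a file or block device as a fixed-index .fidx archive: the input is read with a
// FramedRead/BytesCodec stream and split into fixed-size chunks (default 4 MiB) before upload.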
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           "output-format": { schema: OUTPUT_FORMAT, optional: true },
       }
   }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &proxmox_backup::api2::admin::datastore::API_METHOD_LIST_GROUPS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           group: { type: String, description: "Backup group." },
           // ...
       }
   }
)]
/// Change owner of a backup group
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
       }
   }
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
       }
   }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           "output-format": { schema: OUTPUT_FORMAT, optional: true },
       }
   }
)]
/// Show client and optional server version
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": version::PROXMOX_PKG_VERSION,
            "release": version::PROXMOX_PKG_RELEASE,
            "repoid": version::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           "output-format": { schema: OUTPUT_FORMAT, optional: true },
       }
   }
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&mut client, result, &output_format).await?;

    Ok(Value::Null)
}
struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}
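
// The catalog is written through a std sync_channel into a ChunkStream and uploaded from a
// separate tokio task; the final BackupStats arrive back through a oneshot channel once the
// upload stream finishes.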
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}
#[derive(Debug, Eq, PartialEq)]
struct CryptoParams {
    mode: CryptMode,
    enc_key: Option<Vec<u8>>,
    // FIXME switch to openssl::rsa::Rsa<openssl::pkey::Public> once that is Eq?
    master_pubkey: Option<Vec<u8>>,
}
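
// Resolve the effective crypto settings from --keyfile/--keyfd, --master-pubkey-file/-fd and
// --crypt-mode: explicit keys take precedence over the default key files, and conflicting
// combinations (e.g. a key together with --crypt-mode=none) are rejected with an error.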
fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),
        None => None,
    };

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --keyfd parameter type"),
        None => None,
    };

    let master_pubkey_file = match param.get("master-pubkey-file") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --master-pubkey-file parameter type"),
        None => None,
    };

    let master_pubkey_fd = match param.get("master-pubkey-fd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --master-pubkey-fd parameter type"),
        None => None,
    };

    let mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),
        None => None,
    };

    let keydata = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => {
            eprintln!("Using encryption key file: {}", keyfile);
            Some(file_get_contents(keyfile)?)
        },
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading encryption key from fd {}: {}", fd, err)
                })?;
            eprintln!("Using encryption key from file descriptor");
            Some(data)
        }
    };

    let master_pubkey_data = match (master_pubkey_file, master_pubkey_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--master-pubkey-file and --master-pubkey-fd are mutually exclusive"),
        (Some(keyfile), None) => {
            eprintln!("Using master key from file: {}", keyfile);
            Some(file_get_contents(keyfile)?)
        },
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading master key from fd {}: {}", fd, err)
                })?;
            eprintln!("Using master key from file descriptor");
            Some(data)
        }
    };

    let res = match mode {
        // no crypt mode, enable encryption if keys are available
        None => match (keydata, master_pubkey_data) {
            // only default keys if available
            (None, None) => match key::read_optional_default_encryption_key()? {
                None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
                enc_key => {
                    eprintln!("Encrypting with default encryption key!");
                    let master_pubkey = key::read_optional_default_master_pubkey()?;
                    CryptoParams {
                        mode: CryptMode::Encrypt,
                        enc_key,
                        master_pubkey,
                    }
                },
            },

            // explicit master key, default enc key needed
            (None, master_pubkey) => match key::read_optional_default_encryption_key()? {
                None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
                enc_key => {
                    eprintln!("Encrypting with default encryption key!");
                    CryptoParams {
                        mode: CryptMode::Encrypt,
                        enc_key,
                        master_pubkey,
                    }
                },
            },

            // explicit keyfile, maybe default master key
            (enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: key::read_optional_default_master_pubkey()? },

            // explicit keyfile and master key
            (enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
        },

        // explicitly disabled encryption
        Some(CryptMode::None) => match (keydata, master_pubkey_data) {
            // no keys => OK, no encryption
            (None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },

            // --keyfile and --crypt-mode=none
            (Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),

            // --master-pubkey-file and --crypt-mode=none
            (_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
        },

        // explicitly enabled encryption
        Some(mode) => match (keydata, master_pubkey_data) {
            // no key, maybe master key
            (None, master_pubkey) => match key::read_optional_default_encryption_key()? {
                None => bail!("--crypt-mode without --keyfile and no default key file available"),
                enc_key => {
                    eprintln!("Encrypting with default encryption key!");
                    let master_pubkey = match master_pubkey {
                        None => key::read_optional_default_master_pubkey()?,
                        master_pubkey => master_pubkey,
                    };
                    CryptoParams { mode, enc_key, master_pubkey }
                },
            },

            // --keyfile and --crypt-mode other than none
            (enc_key, master_pubkey) => {
                let master_pubkey = match master_pubkey {
                    None => key::read_optional_default_master_pubkey()?,
                    master_pubkey => master_pubkey,
                };

                CryptoParams { mode, enc_key, master_pubkey }
            },
        },
    };

    Ok(res)
}
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
#[test]
fn test_crypto_parameters_handling() -> Result<(), Error> {
    let some_key = Some(vec![1;1]);
    let default_key = Some(vec![2;1]);

    let some_master_key = Some(vec![3;1]);
    let default_master_key = Some(vec![4;1]);

    let no_key_res = CryptoParams {
        enc_key: None,
        master_pubkey: None,
        mode: CryptMode::None,
    };
    let some_key_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let some_key_some_master_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: some_master_key.clone(),
        mode: CryptMode::Encrypt,
    };
    let some_key_default_master_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: default_master_key.clone(),
        mode: CryptMode::Encrypt,
    };

    let some_key_sign_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };
    let default_key_res = CryptoParams {
        enc_key: default_key.clone(),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let default_key_sign_res = CryptoParams {
        enc_key: default_key.clone(),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };

    let keypath = "./tests/keyfile.test";
    replace_file(&keypath, some_key.as_ref().unwrap(), CreateOptions::default())?;
    let master_keypath = "./tests/masterkeyfile.test";
    replace_file(&master_keypath, some_master_key.as_ref().unwrap(), CreateOptions::default())?;
    let invalid_keypath = "./tests/invalid_keyfile.test";

    // no params, no default key == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now set a default key
    unsafe { key::set_test_encryption_key(Ok(default_key.clone())); }

    // no params but default key == default key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), default_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
    assert_eq!(res.unwrap(), default_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
    assert_eq!(res.unwrap(), default_key_res);

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now make default key retrieval error
    unsafe { key::set_test_encryption_key(Err(format_err!("test error"))); }

    // no params, default key retrieval errors == Error
    assert!(crypto_parameters(&json!({})).is_err());

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now remove default key again
    unsafe { key::set_test_encryption_key(Ok(None)); }
    // set a default master key
    unsafe { key::set_test_default_master_pubkey(Ok(default_master_key.clone())); }

    // and use an explicit master key
    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
    // just a default == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    // same with fallback to default master key
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // crypt mode none == error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
    // with just default master key == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt without enc key == error
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // invalid master keyfile parameter always errors when a key is passed, even with a valid
    // default master key
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());

    Ok(())
}
#[api(
   input: {
       properties: {
           backupspec: {
               type: Array,
               description: "List of backup source specifications ([<label.ext>:<path>] ...)",
               items: { schema: BACKUP_SOURCE_SCHEMA },
           },
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           "include-dev": {
               description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
               optional: true,
               items: { type: String, description: "Path to file." },
           },
           "all-file-systems": { type: Boolean, description: "Include all mounted subdirectories.", optional: true },
           keyfile: { schema: KEYFILE_SCHEMA, optional: true },
           keyfd: { schema: KEYFD_SCHEMA, optional: true },
           "master-pubkey-file": { schema: MASTER_PUBKEY_FILE_SCHEMA, optional: true },
           "master-pubkey-fd": { schema: MASTER_PUBKEY_FD_SCHEMA, optional: true },
           "skip-lost-and-found": { type: Boolean, description: "Skip lost+found directory.", optional: true },
           "backup-type": { schema: BACKUP_TYPE_SCHEMA, optional: true },
           "backup-id": { schema: BACKUP_ID_SCHEMA, optional: true },
           "backup-time": { schema: BACKUP_TIME_SCHEMA, optional: true },
           "chunk-size": { schema: CHUNK_SIZE_SCHEMA, optional: true },
           exclude: {
               type: Array,
               description: "List of paths or patterns for matching files to exclude.",
               optional: true,
               items: { type: String, description: "Path or match pattern." },
           },
           "entries-max": {
               type: Integer,
               description: "Max number of entries to hold in memory.",
               optional: true,
               default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
           },
           verbose: { type: Boolean, description: "Verbose output.", optional: true },
       }
   }
)]
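
// Overall flow: parse the backup specifications, resolve crypto parameters, start a
// BackupWriter session, optionally re-use the previous manifest for incremental uploads,
// upload each source (config/log blobs, pxar directories, fixed-size images) plus the
// catalog, and finally sign and upload the manifest (index.json).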
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }
    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let client = connect(&repo)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = std::time::Instant::now();

    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key) => {
            let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
            println!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_data) => {
                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
        false,
    ).await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            println!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            println!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        println!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                println!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = proxmox_backup::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    verbose,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    if verbose { println!("Upload index.json to '{}'", repo) };
    let options = UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    println!("Duration: {:.2}s", elapsed.as_secs_f64());

    println!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
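
// Restore helper for fixed-index images: reads every chunk in order through a
// RemoteChunkReader and writes the raw data to the given writer, printing progress to stderr.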
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                    next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
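
// Map a user supplied archive name to the on-server file name and its archive type,
// e.g. "root.pxar" -> ("root.pxar.didx", DynamicIndex), "disk.img" -> ("disk.img.fidx", FixedIndex);
// anything else is treated as a blob.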
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           snapshot: { type: String, description: "Group/Snapshot path." },
           "archive-name": { type: String, description: "Backup archive name." },
           target: {
               type: String,
               description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
           },
           "allow-existing-dirs": { type: Boolean, description: "Do not fail if directories already exist.", optional: true },
           keyfile: { schema: KEYFILE_SCHEMA, optional: true },
           keyfd: { schema: KEYFD_SCHEMA, optional: true },
           // ...
       }
   }
)]
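
// Restore flow: resolve the snapshot (a group path selects its latest snapshot), download and
// verify the manifest, then dispatch on the archive type - blobs are copied verbatim, dynamic
// indexes are extracted as pxar archives (or piped raw to stdout), fixed indexes are dumped as
// raw images.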
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(&repo)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
            let (key, _, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
            eprintln!("Encryption key fingerprint: '{}'", fingerprint);
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let options = proxmox_backup::pxar::PxarExtractOptions {
            extract_match_default: true,
            allow_existing_dirs,
            // ...
        };

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                proxmox_backup::pxar::Flags::DEFAULT,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
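
// prune is declared via a hand written ApiMethod above (instead of the #[api] macro) so the
// common prune retention parameters can be pulled in via add_common_prune_prameters!; the
// actual work happens in prune_async().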
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"));

    let return_type = &proxmox_backup::api2::admin::datastore::API_METHOD_PRUNE.returns;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}
#[api(
   input: {
       properties: {
           repository: { schema: REPO_URL_SCHEMA, optional: true },
           "output-format": { schema: OUTPUT_FORMAT, optional: true },
       }
   },
   returns: {
       type: StorageStatus,
   },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}
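
// Completion helpers below query the server with try_get(), which ignores all errors and
// returns Value::Null so that shell completion never fails or asks for credentials interactively.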
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    // ticket cache, but no questions asked
    let options = HttpClientOptions::new_interactive(password, fingerprint)
        .interactive(false);

    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
                }
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_extension(&v))
        .collect()
}

pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|name| {
            if name.ends_with(".pxar.didx") {
                Some(tools::format::strip_server_file_extension(name))
            } else {
                None
            }
        })
        .collect()
}

pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|name| {
            if name.ends_with(".img.fidx") {
                Some(tools::format::strip_server_file_extension(name))
            } else {
                None
            }
        })
        .collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;

    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_auth_id_do(param).await })
}

async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;

    if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
        for user in parsed {
            result.push(user.userid.to_string());
            for token in user.tokens {
                result.push(token.tokenid.to_string());
            }
        }
    }

    result
}
use proxmox_backup::client::RemoteChunkReader;

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("LocalDynamicReadAt::start_read_at returned Pending");
    }
}
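
// CLI entry point: every subcommand is wired up with its completion callbacks here, and the
// returned future is driven by the proxmox_backup tokio runtime helper.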
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("master-pubkey-file", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", tools::complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)

        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"])
        ;

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}