use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, DateTime, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::sys::linux::tty;
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};

use std::sync::{Arc, Mutex};

use xdg::BaseDirectories;

use tokio::sync::mpsc;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
proxmox::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}

const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .schema();

const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<label>:<path>]).")
    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
    .schema();

const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .schema();

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
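
// Keep a small per-repository usage counter in the XDG cache
// (usually $HOME/.cache/proxmox-backup/repo-list) so that shell
// completion can offer the most frequently used repositories.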
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
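
// Build an HTTP client for the given server and user. The PBS_FINGERPRINT
// and PBS_PASSWORD environment variables allow non-interactive use.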
fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    HttpClient::new(server, userid, options)
}

async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}

async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
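
// Stream a directory as a pxar archive: the pxar encoder output is cut into
// content-defined chunks and uploaded as a dynamic index (.didx). The chunker
// runs in its own task so archive creation and upload proceed in parallel.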
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<pxar::MatchPattern>,
    entries_max: usize,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        exclude_pattern,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}
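
// Stream a raw file or block device: the data is cut into fixed-size chunks
// (4 MiB unless a chunk size is given) and uploaded as a fixed index (.fidx).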
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_backup_timestamp = |v: &Value, _record: &Value| -> Result<String, Error> {
        let epoch = v.as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        Ok(BackupDir::backup_time_to_string(last_backup))
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(ColumnConfig::new("last-backup").renderer(render_backup_timestamp))
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            group: { type: String, description: "Backup group.", optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let group = if let Some(path) = param["group"].as_str() {
        Some(BackupGroup::parse(path)?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: { type: String, description: "Snapshot path." },
        }
    }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
        }
    }
)]
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: { type: String, description: "Snapshot path." },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: { type: String, description: "Snapshot path." },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }

    bail!("unable to parse directory specification '{}'", value);
}
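
// The catalog is written through a synchronous channel that feeds a chunk
// stream uploaded in a background task; the returned oneshot receiver yields
// the final BackupStats once the catalog upload completes.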
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: { schema: BACKUP_SOURCE_SCHEMA },
            },
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "include-dev": {
                type: Array,
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: { type: String, description: "Path to file." },
            },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": { schema: BACKUP_TYPE_SCHEMA, optional: true },
            "backup-id": { schema: BACKUP_ID_SCHEMA, optional: true },
            "backup-time": { schema: BACKUP_TIME_SCHEMA, optional: true },
            "chunk-size": { schema: CHUNK_SIZE_SCHEMA, optional: true },
            exclude: {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: { type: String, description: "Path or match pattern." },
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pxar::ENCODER_MAX_ENTRIES as isize,
            },
            verbose: {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let arg_pattern = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(arg_pattern.len());
    for s in arg_pattern {
        let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        let p = pxar::MatchPattern::from_line(l.as_bytes())?
            .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
        pattern_list.push(p);
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }
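
    // The target extension decides how each source is uploaded: .pxar
    // directories become dynamic indexes, .img files or block devices become
    // fixed indexes, and .conf/.log files are stored as blobs.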
    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE }

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let client = BackupWriter::start(
        client,
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
            BackupType::LOGFILE => { // fixme: remove - not needed anymore?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    crypt_config.clone(),
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    crypt_config.clone(),
                ).await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
        }
    }

    // finalize and upload catalog
    if upload_catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        let stats = catalog_result_rx.await??;

        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: { type: String, description: "Group/Snapshot path." },
            "archive-name": { type: String, description: "Backup archive name." },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
        }
    }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
1236 } else if server_archive_name.ends_with(".blob
") {
1238 let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
1240 if let Some(target) = target {
1241 let mut writer = std::fs::OpenOptions::new()
1246 .map_err(|err| format_err!("unable to create target file {:?}
- {}
", target, err))?;
1247 std::io::copy(&mut reader, &mut writer)?;
1249 let stdout = std::io::stdout();
1250 let mut writer = stdout.lock();
1251 std::io::copy(&mut reader, &mut writer)
1252 .map_err(|err| format_err!("unable to pipe data
- {}
", err))?;
1255 } else if server_archive_name.ends_with(".didx
") {
1257 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1259 let most_used = index.find_most_used_chunks(8);
1261 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1263 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
1265 if let Some(target) = target {
1267 let feature_flags = pxar::flags::DEFAULT;
1268 let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
1269 decoder.set_callback(move |path| {
1271 eprintln!("{:?}
", path);
1275 decoder.set_allow_existing_dirs(allow_existing_dirs);
1277 decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
1287 } else if server_archive_name.ends_with(".fidx
") {
1289 let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
1291 let mut writer = if let Some(target) = target {
1292 std::fs::OpenOptions::new()
1297 .map_err(|err| format_err!("unable to create target file {:?}
- {}
", target, err))?
1299 std::fs::OpenOptions::new()
1301 .open("/dev
/stdout
")
1302 .map_err(|err| format_err!("unable to open
/dev
/stdout
- {}
", err))?
1305 dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
1308 bail!("unknown archive file
extension (expected
.pxar of
.img
)");

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            snapshot: { type: String, description: "Group/Snapshot path." },
            logfile: { type: String, description: "The path to the log file you want to upload." },
            keyfile: { schema: KEYFILE_SCHEMA, optional: true },
        }
    }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}

async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;

    let output_format = get_output_format(&param);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
        }
    }
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({}%)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }

    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_expenstion(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = tools::format::strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
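
// The encryption key passphrase is taken from PBS_ENCRYPTION_PASSWORD when
// set; otherwise the user is prompted on the TTY.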
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        bail!("unknown key derivation function '{}'", kdf);
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        bail!("unknown key derivation function '{}'", kdf);
    }
}

fn key_mgmt_cli() -> CliCommandMap {

    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
            .default("scrypt")
            .schema();

    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;
1988 if server_archive_name.ends_with(".didx
") {
1989 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1990 let most_used = index.find_most_used_chunks(8);
1991 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1992 let reader = BufferedDynamicReader::new(index, chunk_reader);
1993 let decoder = pxar::Decoder::new(reader)?;
1994 let options = OsStr::new("ro
,default_permissions
");
1995 let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
1996 .map_err(|err| format_err!("pxar mount failed
: {}
", err))?;
1998 // Mount the session but not call fuse deamonize as this will cause
1999 // issues with the runtime after the fork
2000 let deamonize = false;
2001 session.mount(&Path::new(target), deamonize)?;
2003 if let Some(pipe) = pipe {
2004 nix::unistd::chdir(Path::new("/")).unwrap();
2005 // Finish creation of deamon by redirecting filedescriptors.
2006 let nullfd = nix::fcntl::open(
2008 nix::fcntl::OFlag::O_RDWR,
2009 nix::sys::stat::Mode::empty(),
2011 nix::unistd::dup2(nullfd, 0).unwrap();
2012 nix::unistd::dup2(nullfd, 1).unwrap();
2013 nix::unistd::dup2(nullfd, 2).unwrap();
2015 nix::unistd::close(nullfd).unwrap();
2017 // Signal the parent process that we are done with the setup and it can
2019 nix::unistd::write(pipe, &[0u8])?;
2020 nix::unistd::close(pipe).unwrap();
2023 let multithreaded = true;
2024 session.run_loop(multithreaded)?;
2026 bail!("unknown archive file
extension (expected
.pxar
)");
2037 description: "Group
/Snapshot path
.",
2041 description: "Backup archive name
.",
2045 schema: REPO_URL_SCHEMA,
2050 description: "Path to encryption key
.",
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut decoder = pxar::Decoder::new(reader)?;
    decoder.set_callback(|path| {
        println!("{:?}", path);
        Ok(())
    });

    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    )?;

    println!("Starting interactive shell");
    state.shell()?;

    record_repository(&repo);

    Ok(())
}

fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            limit: {
                type: Integer,
                description: "The maximal number of tasks to list.",
                optional: true,
            },
            "output-format": { schema: OUTPUT_FORMAT, optional: true },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repo user.
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            upid: { schema: UPID_SCHEMA },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = connect(repo.host(), repo.user())?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: { schema: REPO_URL_SCHEMA, optional: true },
            upid: { schema: UPID_SCHEMA },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}

fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli());

    run_cli_command(cmd_def, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}