use failure::*;
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, DateTime, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};

use std::sync::{Arc, Mutex};

use xdg::BaseDirectories;

use futures::*;
use tokio::sync::mpsc;

proxmox::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}
const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .schema();

const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<label>:<path>]).")
    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
    .schema();

const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .schema();
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
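
// The repo-list cache file is a flat JSON object mapping repository strings
// to a usage count, e.g. {"user@pbs.example:store1": 3} (illustrative value);
// the loop above keeps at most the 10 most used entries, which
// complete_repository() below offers as shell completions.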
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Vec<SnapshotListItem>, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    let list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;

    Ok(list)
}
async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, DateTime<Utc>), Error> {

    let mut list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = Utc.timestamp(list[0].backup_time, 0);

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
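
// Note: the sort above is descending (b compared against a), so list[0] is
// the most recent snapshot of the group.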
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
    entries_max: usize,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}
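
// Design note on the pipeline above: the pxar encoder produces a byte stream,
// ChunkStream cuts it into (dynamically sized) chunks, and the bounded mpsc
// channel hands those chunks to upload_stream() so chunking and uploading can
// overlap. The "dynamic" argument selects a dynamic (.didx) index on the
// server side.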
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}
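
// Images use FixedChunkStream instead: fixed-size chunks (4 MiB unless a
// chunk size was requested), matching the "fixed" (.fidx) index type passed
// to upload_stream() above.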
fn strip_server_file_extension(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}
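
// Example: "root.pxar.didx" -> "root.pxar". All three server-side extensions
// (".didx", ".fidx", ".blob") are exactly five bytes long, which is why the
// slice above can simply drop the last five characters.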
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files: Vec<String> = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let group = if let Some(path) = param["group"].as_str() {
        Some(BackupGroup::parse(path)?)
    } else {
        None
    };

    let mut list = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));

    record_repository(&repo);

    if output_format != "text" {
        format_and_print_result(&serde_json::to_value(list)?, &output_format);
        return Ok(Value::Null);
    }

    for item in list {

        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files: Vec<String> = item.files.iter()
            .map(|v| strip_server_file_extension(&v))
            .collect();

        let size_str = if let Some(size) = item.size {
            size.to_string()
        } else {
            String::from("-")
        };

        println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
    }

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            description: "Snapshot path.",
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}
            schema: REPO_URL_SCHEMA,
/// Try to login. If successful, store ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            description: "Snapshot path.",
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            description: "Snapshot path.",
            schema: OUTPUT_FORMAT,
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_extension(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }

    bail!("unable to parse directory specification '{}'", value);
}
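
// Examples (per BACKUPSPEC_REGEX above): parse_backupspec("etc.pxar:/etc")
// yields ("etc.pxar", "/etc"), and "disk.img:/dev/sda" yields
// ("disk.img", "/dev/sda"); a spec without a '<label>.<ext>:' prefix fails
// with the error above.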
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<SenderWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
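
// Design note: CatalogWriter writes through SenderWriter into the mpsc sender
// created above, so catalog data is chunked and uploaded by the spawned task
// while the main backup loop keeps appending entries. The returned oneshot
// receiver delivers the final BackupStats of the catalog upload.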
            description: "List of backup source specifications ([<label.ext>:<path>] ...)",
            schema: BACKUP_SOURCE_SCHEMA,
            schema: REPO_URL_SCHEMA,
            description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
            description: "Path to file.",
            schema: KEYFILE_SCHEMA,
            "skip-lost-and-found": {
                description: "Skip lost+found directory.",
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,
            schema: CHUNK_SIZE_SCHEMA,
            description: "Max number of entries to hold in memory.",
            default: pxar::ENCODER_MAX_ENTRIES as isize,
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("stat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };
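
    // If a master public key is configured, the backup encryption key is
    // additionally RSA-encrypted and uploaded as 'rsa-encrypted.key' below,
    // so it can be recovered with the master private key (see the openssl
    // comment further down).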
    let client = BackupWriter::start(
        client,
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
            BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    crypt_config.clone(),
                    catalog.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                ).await?;
                manifest.add_file(target, stats.size, stats.csum)?;
            }
        }
    }

    // finalize and upload catalog
    if upload_catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        let stats = catalog_result_rx.await??;

        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
            schema: REPO_URL_SCHEMA,
            description: "Group/Snapshot path.",
            description: "Backup archive name.",
            description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            "allow-existing-dirs": {
                description: "Do not fail if directories already exist.",
            schema: KEYFILE_SCHEMA,
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".blob") {

        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {

        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
            decoder.set_callback(move |path| {
                if verbose {
                    eprintln!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {

        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;

    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            description: "Group/Snapshot path.",
            description: "The path to the log file you want to upload.",
            schema: KEYFILE_SCHEMA,
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}
            schema: REPO_URL_SCHEMA,
            description: "Backup group.",
            schema: OUTPUT_FORMAT,
            description: "Just show what prune would do, but do not delete anything.",
/// Prune a backup repository.
async fn prune(mut param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            schema: OUTPUT_FORMAT,
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = client.get(&path, None).await?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
        let roundup = total/200;
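
        // roundup is 0.5% of total: adding it before the integer division
        // below rounds the percentage to the nearest percent instead of
        // always truncating.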
        println!(
            "total: {} used: {} ({}%) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user(), None) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }

    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_extension(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = strip_server_file_extension(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
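
// With the starting value of 64 assumed above, this offers the completions
// "64", "128", "256", "512", "1024", "2048" and "4096" (KB, powers of two,
// matching CHUNK_SIZE_SCHEMA).

// For get_encryption_key_password() below, non-interactive callers can supply
// the key password via the environment instead of a TTY prompt, e.g.
// (illustrative invocation):
//
//   PBS_ENCRYPTION_PASSWORD=secret proxmox-backup-client backup root.pxar:/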
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(crate::tools::tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = crate::tools::tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
fn key_mgmt_cli() -> CliCommandMap {

    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
            .default("scrypt")
            .schema();

    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder = pxar::Decoder::new(reader)?;
        let options = OsStr::new("ro,default_permissions");
        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session but do not call fuse daemonize as this would cause
        // issues with the runtime after the fork
        let daemonize = false;
        session.mount(&Path::new(target), daemonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
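
// Daemonization handshake, for reference: mount() forks before any async
// runtime exists; the parent then blocks on read(pipe.0) and only exits once
// mount_do() above has completed the FUSE mount, redirected its std fds to
// /dev/null and written a single byte to pipe.1. This guarantees the mount
// point is usable by the time the foreground command returns.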
            description: "Group/Snapshot path.",
            description: "Backup archive name.",
            schema: REPO_URL_SCHEMA,
            description: "Path to encryption key.",
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut decoder = pxar::Decoder::new(reader)?;
    decoder.set_callback(|path| {
        println!("{:?}", path);
        Ok(())
    });

    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    )?;

    println!("Starting interactive shell");
    state.shell()?;

    record_repository(&repo);

    Ok(())
}
fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}
            schema: REPO_URL_SCHEMA,
            description: "The maximal number of tasks to list.",
            schema: OUTPUT_FORMAT,
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
    let repo = extract_repository_from_value(&param)?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;

    let args = json!({
        "running": true,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });
    let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;

    let data = &result["data"];

    if output_format == "text" {
        for item in data.as_array().unwrap() {
            println!(
                "{} {}",
                item["upid"].as_str().unwrap(),
                item["status"].as_str().unwrap_or("running"),
            );
        }
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            schema: UPID_SCHEMA,
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}
            schema: REPO_URL_SCHEMA,
            schema: UPID_SCHEMA,
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}
fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli());

    proxmox_backup::tools::runtime::main(run_cli_command(cmd_def));
}