use failure::*;     // Error, bail!, format_err! (assumed: error-handling crate used by the rest of the file)
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use std::ffi::OsStr;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use futures::*;     // stream/future combinators used below (assumed import)

use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox::api::{ApiFuture, ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};

use std::sync::{Arc, Mutex};

use xdg::BaseDirectories;

use tokio::sync::mpsc;
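// A backup source specification pairs an archive name (ending in .pxar, .img,
// .conf or .log) with a local source path, e.g. "root.pxar:/" or "etc.pxar:/etc".
// BACKUPSPEC_REGEX below captures the two parts.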
proxmox::api::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}
const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .schema();
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {
    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
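/// Remember a repository in the per-user cache file ("repo-list") and bump its
/// usage counter so it can be offered by shell completion later.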
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
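/// Shell completion helper: return the repositories recorded in the cache file.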
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
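/// Print a task result: in "text" mode follow the task log of the returned
/// UPID, otherwise print the raw result in the requested output format.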
async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}
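/// Create a dynamically chunked .didx archive from a directory: the pxar
/// stream is chunked in a separate task and uploaded through the BackupWriter.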
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}
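/// Upload a file or block device as a fixed-size chunked .fidx archive.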
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}
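/// Strip the server-side extension (.didx/.fidx/.blob) from an archive file name.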
fn strip_server_file_expenstion(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}
fn list_backup_groups<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        list_backup_groups_async(param).await
    }.boxed()
}
async fn list_backup_groups_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
fn list_snapshots<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        list_snapshots_async(param).await
    }.boxed()
}
async fn list_snapshots_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = client.get(&path, Some(args)).await?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
fn forget_snapshots<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        forget_snapshots_async(param).await
    }.boxed()
}
async fn forget_snapshots_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}
fn api_login<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        api_login_async(param).await
    }.boxed()
}
async fn api_login_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}
fn api_logout(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}
fn dump_catalog<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        dump_catalog_async(param).await
    }.boxed()
}
async fn dump_catalog_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}
fn list_snapshot_files<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        list_snapshot_files_async(param).await
    }.boxed()
}
async fn list_snapshot_files_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_expenstion(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}
fn start_garbage_collection<'a>(
    param: Value,
    info: &'static ApiMethod,
    rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        start_garbage_collection_async(param, info, rpcenv).await
    }.boxed()
}
async fn start_garbage_collection_async(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
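// Split a backup spec like "root.pxar:/" into its (target, filename) parts
// using BACKUPSPEC_REGEX.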
fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }

    bail!("unable to parse directory specification '{}'", value);
}
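/// Start the catalog upload in a background task. Returns the shared
/// CatalogWriter plus a oneshot receiver for the final upload statistics.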
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<SenderWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
fn create_backup<'a>(
    param: Value,
    info: &'static ApiMethod,
    rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        create_backup_async(param, info, rpcenv).await
    }.boxed()
}
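// Main backup implementation: parse the backup specs, optionally set up
// encryption, upload each archive, then finalize the catalog and manifest.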
async fn create_backup_async(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let client = BackupWriter::start(
        client,
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum);
            }
            BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum);
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    crypt_config.clone(),
                    catalog.clone(),
                ).await?;
                manifest.add_file(target, stats.size, stats.csum);
                catalog.lock().unwrap().end_directory()?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                ).await?;
                manifest.add_file(target, stats.size, stats.csum);
            }
        }
    }

    // finalize and upload catalog
    if upload_catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        let stats = catalog_result_rx.await??;

        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
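/// Write all chunks of a fixed index to `writer`, reporting progress when
/// verbose is set.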
fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            eprintln!("progress {}% (read {} bytes, duration {} sec)",
                      next_per, bytes, start_time.elapsed().as_secs());
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
fn restore<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        restore_do(param).await
    }.boxed()
}
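// Restore implementation: resolve the snapshot, pick the server archive name
// from the requested extension, then download a blob, pxar (.didx) or image
// (.fidx) archive to the target (or to standard output when target is "-").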
async fn restore_do(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            file_set_contents(target, backup_index_data.as_bytes(), None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".blob") {

        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {

        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
            decoder.set_callback(move |path| {
                if verbose {
                    eprintln!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".fidx") {

        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;

    } else {
        bail!("unknown archive file extension (expected .pxar of .img)");
    }

    Ok(Value::Null)
}
fn upload_log<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        upload_log_async(param).await
    }.boxed()
}
async fn upload_log_async(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}
fn prune<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
fn status<'a>(
    param: Value,
    _info: &'static ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> ApiFuture<'a> {
    async move {
        status_async(param).await
    }.boxed()
}
async fn status_async(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = client.get(&path, None).await?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
        let roundup = total/200;

        println!(
            "total: {} used: {} ({}%) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user(), None) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }

    Value::Null
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_expenstion(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = strip_server_file_expenstion(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
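// Read the encryption key password from $PBS_ENCRYPTION_PASSWORD, falling
// back to an interactive prompt when running on a TTY.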
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification fail!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification fail!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
fn key_mgmt_cli() -> CliCommandMap {

    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
            .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
            .default("scrypt")
            .schema();

    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return async_main(mount_do(param, None));
    }

    // Process should be deamonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            async_main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder = pxar::Decoder::new(reader)?;
        let options = OsStr::new("ro,default_permissions");
        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session but not call fuse deamonize as this will cause
        // issues with the runtime after the fork
        let deamonize = false;
        session.mount(&Path::new(target), deamonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of deamon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            repository: {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            keyfile: {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut decoder = pxar::Decoder::new(reader)?;
    decoder.set_callback(|path| {
        println!("{:?}", path);
        Ok(())
    });

    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, computed them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    )?;

    println!("Starting interactive shell");
    state.shell()?;

    record_repository(&repo);

    Ok(())
}
fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    #[sortable]
    const API_METHOD_DUMP_CATALOG: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&dump_catalog),
        &ObjectSchema::new(
            "Dump catalog.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
            ]),
        )
    );

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}
#[api(
    input: {
        properties: {
            repository: {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            limit: {
                optional: true,
                type: Integer,
                description: "The maximal number of tasks to list.",
            },
            "output-format": {
                optional: true,
                schema: OUTPUT_FORMAT,
            },
        },
    },
)]
/// List running server tasks for this repo user
fn task_list(param: Value) -> Result<Value, Error> {

    async_main(async {
        let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
        let repo = extract_repository_from_value(&param)?;
        let client = HttpClient::new(repo.host(), repo.user(), None)?;

        let limit = param["limit"].as_u64().unwrap_or(50) as usize;

        let args = json!({
            "limit": limit,
            "userfilter": repo.user(),
            "store": repo.store(),
        });
        let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;

        let data = &result["data"];

        if output_format == "text" {
            for item in data.as_array().unwrap() {
                println!(
                    "{} {}",
                    item["upid"].as_str().unwrap(),
                    item["status"].as_str().unwrap_or("running"),
                );
            }
        } else {
            format_and_print_result(data, &output_format);
        }

        Ok(Value::Null)
    })
}
#[api(
    input: {
        properties: {
            repository: {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        },
    },
)]
/// Display the task log.
fn task_log(param: Value) -> Result<Value, Error> {

    async_main(async {
        let repo = extract_repository_from_value(&param)?;
        let upid = tools::required_string_param(&param, "upid")?;

        let client = HttpClient::new(repo.host(), repo.user(), None)?;

        display_task_log(client, upid, true).await?;

        Ok(Value::Null)
    })
}
#[api(
    input: {
        properties: {
            repository: {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        },
    },
)]
/// Try to stop a specific task.
fn task_stop(param: Value) -> Result<Value, Error> {

    async_main(async {
        let repo = extract_repository_from_value(&param)?;
        let upid_str = tools::required_string_param(&param, "upid")?;

        let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

        let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
        let _ = client.delete(&path, None).await?;

        Ok(Value::Null)
    })
}
fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
fn main() {

    const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new("Backup source specification ([<label>:<path>]).")
        .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
        .schema();
    #[sortable]
    const API_METHOD_CREATE_BACKUP: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&create_backup),
        &ObjectSchema::new(
            "Create (host) backup.",
            &sorted!([
                ("backupspec", false, &ArraySchema::new(
                    "List of backup source specifications ([<label.ext>:<path>] ...)",
                    &BACKUP_SOURCE_SCHEMA,
                ).min_length(1).schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("include-dev", true, &ArraySchema::new(
                    "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                    &StringSchema::new("Path to file.").schema()
                ).schema()),
                ("keyfile", true, &StringSchema::new(
                    "Path to encryption key. All data will be encrypted using this key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.")
                    .default(false)
                    .schema()),
                ("skip-lost-and-found", true, &BooleanSchema::new("Skip lost+found directory")
                    .default(false)
                    .schema()),
                ("backup-type", true, &BACKUP_TYPE_SCHEMA),
                ("chunk-size", true, &IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                    .schema()),
            ]),
        )
    );

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);
    #[sortable]
    const API_METHOD_UPLOAD_LOG: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&upload_log),
        &ObjectSchema::new(
            "Upload backup log file.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
                ("logfile", false, &StringSchema::new("The path to the log file you want to upload.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new(
                    "Path to encryption key. All data will be encrypted using this key.").schema()),
            ]),
        )
    );

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_LIST_BACKUP_GROUPS: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&list_backup_groups),
        &ObjectSchema::new(
            "List backup groups.",
            &sorted!([
                ("repository", true, &REPO_URL_SCHEMA),
                ("output-format", true, &OUTPUT_FORMAT),
            ]),
        )
    );

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_LIST_SNAPSHOTS: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&list_snapshots),
        &ObjectSchema::new(
            "List backup snapshots.",
            &sorted!([
                ("group", true, &StringSchema::new("Backup group.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("output-format", true, &OUTPUT_FORMAT),
            ]),
        )
    );

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_FORGET_SNAPSHOTS: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&forget_snapshots),
        &ObjectSchema::new(
            "Forget (remove) backup snapshots.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
            ]),
        )
    );

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);
    #[sortable]
    const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&start_garbage_collection),
        &ObjectSchema::new(
            "Start garbage collection for a specific repository.",
            &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
        )
    );

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_RESTORE: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&restore),
        &ObjectSchema::new(
            "Restore backup repository.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new(
                    r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
                ).schema()),
                ("allow-existing-dirs", true, &BooleanSchema::new(
                    "Do not fail if directories already exists.")
                    .default(false)
                    .schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.")
                    .default(false)
                    .schema()),
            ]),
        )
    );

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);
    #[sortable]
    const API_METHOD_LIST_SNAPSHOT_FILES: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&list_snapshot_files),
        &ObjectSchema::new(
            "List snapshot files.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("output-format", true, &OUTPUT_FORMAT),
            ]),
        )
    );

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);
    const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&prune),
        &ObjectSchema::new(
            "Prune backup repository.",
            &proxmox_backup::add_common_prune_prameters!([
                ("dry-run", true, &BooleanSchema::new(
                    "Just show what prune would do, but do not delete anything.")
                    .schema()),
                ("group", false, &StringSchema::new("Backup group.").schema()),
            ], [
                ("output-format", true, &OUTPUT_FORMAT),
                ("repository", true, &REPO_URL_SCHEMA),
            ])
        )
    );

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&status),
        &ObjectSchema::new(
            "Get repository status.",
            &sorted!([
                ("repository", true, &REPO_URL_SCHEMA),
                ("output-format", true, &OUTPUT_FORMAT),
            ]),
        )
    );

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_API_LOGIN: ApiMethod = ApiMethod::new(
        &ApiHandler::Async(&api_login),
        &ObjectSchema::new(
            "Try to login. If successful, store ticket.",
            &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
        )
    );

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    #[sortable]
    const API_METHOD_API_LOGOUT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&api_logout),
        &ObjectSchema::new(
            "Logout (delete stored ticket).",
            &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
        )
    );

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);
    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);
    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli());

    run_cli_command(cmd_def);
}
fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
    let mut rt = tokio::runtime::Runtime::new().unwrap();
    let ret = rt.block_on(fut);
    // This does not exist anymore. We need to actually stop our runaways instead...
    // rt.shutdown_now();
    ret
}