use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
use std::sync::{Arc, Mutex};
use xdg::BaseDirectories;
use tokio::sync::mpsc;
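
// NOTE: the Error type and the bail!/format_err! macros used throughout this file come
// from the `failure` crate, and the stream/future combinators (map_err, send_all, ...)
// from `futures`. Their `use` lines are not part of this excerpt; the two imports below
// are an assumption added so the remaining code reads coherently.
use failure::*;
use futures::*;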
proxmox::api::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}

const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;
fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param
        .get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
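
// Remember which repositories were used: bump a per-repository use counter in the XDG
// cache file (usually $HOME/.cache/proxmox-backup/repo-list) and keep at most the ten
// most frequently used entries. complete_repository() reads this list for shell completion.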
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
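
// Stream a directory as a pxar archive: PxarBackupStream produces the pxar byte stream,
// ChunkStream cuts it into content-defined (dynamic) chunks, and the chunks are uploaded
// as a dynamic index ("dynamic" / .didx archive) while the shared catalog writer records
// the directory contents for later lookup.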
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        let _ = tx.send_all(&mut chunk_stream).await;
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}
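
// Stream a file or block device as an image archive: the file is read via a
// FramedRead/BytesCodec stream, cut into fixed-size chunks (4 MiB unless a chunk size is
// given) and uploaded as a fixed index ("fixed" / .fidx archive).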
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}
fn strip_server_file_expenstion(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}
fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = async_main(async move {
        client.get(&path, None).await
    })?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = async_main(async move {
        client.get(&path, Some(args)).await
    })?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = async_main(async move {
        client.delete(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    Ok(result)
}
fn api_login(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    async_main(async move { client.login().await })?;

    record_repository(&repo);

    Ok(Value::Null)
}
fn api_logout(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}
fn dump_catalog(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    async_main(async move {
        let client = BackupReader::start(
            client,
            crypt_config.clone(),
            repo.store(),
            &snapshot.group().backup_type(),
            &snapshot.group().backup_id(),
            snapshot.backup_time(),
            true,
        ).await?;

        let manifest = client.download_manifest().await?;

        let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalogfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        std::io::copy(&mut reader, &mut catalogfile)
            .map_err(|err| format_err!("unable to download catalog - {}", err))?;

        catalogfile.seek(SeekFrom::Start(0))?;

        let mut catalog_reader = CatalogReader::new(catalogfile);

        catalog_reader.dump()?;

        record_repository(&repo);

        Ok(Value::Null)
    })
}
fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = async_main(async move {
        client.get(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_expenstion(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}
fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = async_main(async move { client.post(&path, None).await })?;

    record_repository(&repo);

    Ok(result)
}
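
// Split a backup source specification of the form "<archive-name>.<ext>:<path>" (see
// BACKUPSPEC_REGEX above) into its (target, filename) parts. Illustrative examples only,
// not taken from the original source:
//   parse_backupspec("root.pxar:/")        -> Ok(("root.pxar", "/"))
//   parse_backupspec("disk.img:/dev/sda")  -> Ok(("disk.img", "/dev/sda"))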
fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }

    bail!("unable to parse directory specification '{}'", value);
}
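
// Set up the catalog upload pipeline: an mpsc channel feeds a CatalogWriter (via
// SenderWriter) whose output is chunked and uploaded in a background task as CATALOG_NAME.
// Returns the shared catalog writer plus a oneshot receiver that yields the upload result
// (BackupStats) once the catalog stream is closed.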
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
    (
        Arc<Mutex<CatalogWriter<SenderWriter>>>,
        tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
    ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }

        devices = Some(set);
    }
    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }
    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    async_main(async move {
        let client = BackupWriter::start(

        let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
        let mut manifest = BackupManifest::new(snapshot);

        let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

        for (backup_type, filename, target, size) in upload_list {
            match backup_type {
                BackupType::CONFIG => {
                    println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                    println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::PXAR => {
                    println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                    catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                    let stats = backup_directory(
                        &client,
                        &filename,
                        &target,
                        chunk_size_opt,
                        devices.clone(),
                        verbose,
                        skip_lost_and_found,
                        crypt_config.clone(),
                        catalog.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                    catalog.lock().unwrap().end_directory()?;
                }
                BackupType::IMAGE => {
                    println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = backup_image(
                        &client,
                        &filename,
                        &target,
                        size,
                        chunk_size_opt,
                        crypt_config.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
            }
        }

        // finalize and upload catalog
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        drop(catalog); // close upload stream

        let stats = catalog_result_rx.await??;

        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);

        if let Some(rsa_encrypted_key) = rsa_encrypted_key {
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            let stats = client
                .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
                .await?;
            manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
        }

        // create manifest (index.json)
        let manifest = manifest.into_json();

        println!("Upload index.json to '{:?}'", repo);
        let manifest = serde_json::to_string_pretty(&manifest)?.into();
        client
            .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
            .await?;

        client.finish().await?;

        let end_time = Local::now();
        let elapsed = end_time.signed_duration_since(start_time);
        println!("Duration: {}", elapsed);

        println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

        Ok(Value::Null)
    })
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
872 _rpcenv: &mut dyn RpcEnvironment,
873 ) -> Result<Value, Error> {
874 async_main(restore_do(param))
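
// Write out a fixed-index (.fidx) image: iterate over all chunks of the index, fetch each
// one through the RemoteChunkReader and write the raw data to `writer`, printing progress
// on stderr. BufferedFixedReader is deliberately avoided (see the comment inside) to skip
// an extra buffer/copy.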
877 fn dump_image<W: Write>(
878 client: Arc<BackupReader>,
879 crypt_config: Option<Arc<CryptConfig>>,
880 index: FixedIndexReader,
883 ) -> Result<(), Error> {
885 let most_used = index.find_most_used_chunks(8);
887 let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
889 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
890 // and thus slows down reading. Instead, directly use RemoteChunkReader
893 let start_time = std::time::Instant::now();
895 for pos in 0..index.index_count() {
896 let digest = index.index_digest(pos).unwrap();
897 let raw_data = chunk_reader.read_chunk(&digest)?;
898 writer.write_all(&raw_data)?;
899 bytes += raw_data.len();
901 let next_per = ((pos+1)*100)/index.index_count();
903 eprintln!("progress {}
% (read {} bytes
, duration {} sec
)",
904 next_per, bytes, start_time.elapsed().as_secs());
910 let end_time = std::time::Instant::now();
911 let elapsed = end_time.duration_since(start_time);
912 eprintln!("restore image
complete (bytes
={}
, duration
={:.2}s
, speed
={:.2}MB
/s
)",
914 elapsed.as_secs_f64(),
915 bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
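
// Restore entry point: resolve the snapshot (a "type/id" group is resolved to its most
// recent snapshot), map the archive name to the server-side file (.pxar -> .didx,
// .img -> .fidx, everything else -> .blob), then either extract the pxar archive to the
// target directory, dump the image/blob to a target file, or pipe to stdout when the
// target is '-'.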
922 async fn restore_do(param: Value) -> Result<Value, Error> {
923 let repo = extract_repository_from_value(¶m)?;
925 let verbose = param["verbose
"].as_bool().unwrap_or(false);
927 let allow_existing_dirs = param["allow
-existing
-dirs
"].as_bool().unwrap_or(false);
929 let archive_name = tools::required_string_param(¶m, "archive
-name
")?;
931 let client = HttpClient::new(repo.host(), repo.user(), None)?;
933 record_repository(&repo);
935 let path = tools::required_string_param(¶m, "snapshot
")?;
937 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
938 let group = BackupGroup::parse(path)?;
940 let path = format!("api2
/json
/admin
/datastore
/{}
/snapshots
", repo.store());
941 let result = client.get(&path, Some(json!({
942 "backup
-type": group.backup_type(),
943 "backup
-id
": group.backup_id(),
946 let list = result["data
"].as_array().unwrap();
948 bail!("backup group '{}' does not contain any snapshots
:", path);
951 let epoch = list[0]["backup
-time
"].as_i64().unwrap();
952 let backup_time = Utc.timestamp(epoch, 0);
953 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
955 let snapshot = BackupDir::parse(path)?;
956 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
959 let target = tools::required_string_param(¶m, "target
")?;
960 let target = if target == "-" { None } else { Some(target) };
962 let keyfile = param["keyfile
"].as_str().map(PathBuf::from);
964 let crypt_config = match keyfile {
967 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
968 Some(Arc::new(CryptConfig::new(key)?))
972 let server_archive_name = if archive_name.ends_with(".pxar
") {
973 format!("{}
.didx
", archive_name)
974 } else if archive_name.ends_with(".img
") {
975 format!("{}
.fidx
", archive_name)
977 format!("{}
.blob
", archive_name)
980 let client = BackupReader::start(
982 crypt_config.clone(),
990 let manifest = client.download_manifest().await?;
992 if server_archive_name == MANIFEST_BLOB_NAME {
993 let backup_index_data = manifest.into_json().to_string();
994 if let Some(target) = target {
995 file_set_contents(target, backup_index_data.as_bytes(), None)?;
997 let stdout = std::io::stdout();
998 let mut writer = stdout.lock();
999 writer.write_all(backup_index_data.as_bytes())
1000 .map_err(|err| format_err!("unable to pipe data
- {}
", err))?;
1003 } else if server_archive_name.ends_with(".blob
") {
1005 let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
1007 if let Some(target) = target {
1008 let mut writer = std::fs::OpenOptions::new()
1013 .map_err(|err| format_err!("unable to create target file {:?}
- {}
", target, err))?;
1014 std::io::copy(&mut reader, &mut writer)?;
1016 let stdout = std::io::stdout();
1017 let mut writer = stdout.lock();
1018 std::io::copy(&mut reader, &mut writer)
1019 .map_err(|err| format_err!("unable to pipe data
- {}
", err))?;
1022 } else if server_archive_name.ends_with(".didx
") {
1024 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1026 let most_used = index.find_most_used_chunks(8);
1028 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1030 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
1032 if let Some(target) = target {
1034 let feature_flags = pxar::flags::DEFAULT;
1035 let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
1036 decoder.set_callback(move |path| {
1038 eprintln!("{:?}
", path);
1042 decoder.set_allow_existing_dirs(allow_existing_dirs);
1044 decoder.restore(Path::new(target), &Vec::new())?;
1046 let mut writer = std::fs::OpenOptions::new()
1048 .open("/dev
/stdout
")
1049 .map_err(|err| format_err!("unable to open
/dev
/stdout
- {}
", err))?;
1051 std::io::copy(&mut reader, &mut writer)
1052 .map_err(|err| format_err!("unable to pipe data
- {}
", err))?;
1054 } else if server_archive_name.ends_with(".fidx
") {
1056 let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
1058 let mut writer = if let Some(target) = target {
1059 std::fs::OpenOptions::new()
1064 .map_err(|err| format_err!("unable to create target file {:?}
- {}
", target, err))?
1066 std::fs::OpenOptions::new()
1068 .open("/dev
/stdout
")
1069 .map_err(|err| format_err!("unable to open
/dev
/stdout
- {}
", err))?
1072 dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
1075 bail!("unknown archive file
extension (expected
.pxar of
.img
)");
1084 _rpcenv: &mut dyn RpcEnvironment,
1085 ) -> Result<Value, Error> {
1087 let logfile = tools::required_string_param(¶m, "logfile
")?;
1088 let repo = extract_repository_from_value(¶m)?;
1090 let snapshot = tools::required_string_param(¶m, "snapshot
")?;
1091 let snapshot = BackupDir::parse(snapshot)?;
1093 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
1095 let keyfile = param["keyfile
"].as_str().map(PathBuf::from);
1097 let crypt_config = match keyfile {
1100 let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
1101 let crypt_config = CryptConfig::new(key)?;
1102 Some(Arc::new(crypt_config))
1106 let data = file_get_contents(logfile)?;
1108 let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
1110 let raw_data = blob.into_inner();
1112 let path = format!("api2
/json
/admin
/datastore
/{}
/upload
-backup
-log
", repo.store());
1115 "backup
-type": snapshot.group().backup_type(),
1116 "backup
-id
": snapshot.group().backup_id(),
1117 "backup
-time
": snapshot.backup_time().timestamp(),
1120 let body = hyper::Body::from(raw_data);
1122 async_main(async move {
1123 client.upload("application
/octet
-stream
", body, &path, Some(args)).await
fn display_task_log(
    client: HttpClient,
    upid_str: &str,
    strip_date: bool,
) -> Result<(), Error> {

    let path = format!("api2/json/nodes/localhost/tasks/{}/log", upid_str);

    let mut start = 1;
    let limit = 500;

    loop {
        let param = json!({ "start": start, "limit": limit, "test-status": true });
        let result = async_main(async { client.get(&path, Some(param)).await })?;

        let active = result["active"].as_bool().unwrap();
        let total = result["total"].as_u64().unwrap();
        let data = result["data"].as_array().unwrap();

        let lines = data.len();

        for item in data {
            let n = item["n"].as_u64().unwrap();
            let t = item["t"].as_str().unwrap();
            if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
            let b = t.as_bytes();
            if strip_date && b.len() > 27 && b[25] == b':' && b[26] == b' ' {
                let line = &t[27..];
                println!("{}", line);
            } else {
                println!("{}", t);
            }
            start += 1;
        }

        if start > total {
            if active {
                std::thread::sleep(std::time::Duration::from_millis(1000));
            } else {
                break;
            }
        } else {
            if lines != limit { bail!("got wrong number of lines from server ({} != {})", lines, limit); }
        }
    }

    Ok(())
}
1179 _rpcenv: &mut dyn RpcEnvironment,
1180 ) -> Result<Value, Error> {
1182 let repo = extract_repository_from_value(¶m)?;
1184 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
1186 let path = format!("api2
/json
/admin
/datastore
/{}
/prune
", repo.store());
1188 let group = tools::required_string_param(¶m, "group
")?;
1189 let group = BackupGroup::parse(group)?;
1190 let output_format = param["output
-format
"].as_str().unwrap_or("text
").to_owned();
1192 let dry_run = param["dry
-run
"].as_bool().unwrap_or(false);
1194 param.as_object_mut().unwrap().remove("repository
");
1195 param.as_object_mut().unwrap().remove("group
");
1196 param.as_object_mut().unwrap().remove("dry
-run
");
1197 param.as_object_mut().unwrap().remove("output
-format
");
1199 param["backup
-type"] = group.backup_type().into();
1200 param["backup
-id
"] = group.backup_id().into();
1203 let result = async_main(async { client.get(&path, Some(param)).await })?;
1204 let data = &result["data
"];
1206 if output_format == "text
" {
1207 for item in data.as_array().unwrap() {
1208 let timestamp = item["backup
-time
"].as_i64().unwrap();
1209 let timestamp = BackupDir::backup_time_to_string(Utc.timestamp(timestamp, 0));
1210 let keep = item["keep
"].as_bool().unwrap();
1211 println!("{}
/{}
/{} {}
",
1212 group.backup_type(),
1215 if keep { "keep" } else { "remove" },
1219 format_and_print_result(&data, &output_format);
1222 let result = async_main(async { client.post(&path, Some(param)).await })?;
1223 let data = &result["data
"];
1224 if output_format == "text
" {
1225 if let Some(upid) = data.as_str() {
1226 display_task_log(client, upid, true)?;
1229 format_and_print_result(&data, &output_format);
1232 record_repository(&repo);
1240 _rpcenv: &mut dyn RpcEnvironment,
1241 ) -> Result<Value, Error> {
1243 let repo = extract_repository_from_value(¶m)?;
1245 let output_format = param["output
-format
"].as_str().unwrap_or("text
").to_owned();
1247 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1249 let path = format!("api2
/json
/admin
/datastore
/{}
/status
", repo.store());
1251 let result = async_main(async move { client.get(&path, None).await })?;
1252 let data = &result["data
"];
1254 record_repository(&repo);
1256 if output_format == "text
" {
1257 let total = data["total
"].as_u64().unwrap();
1258 let used = data["used
"].as_u64().unwrap();
1259 let avail = data["avail
"].as_u64().unwrap();
1260 let roundup = total/200;
1263 "total
: {} used
: {}
({}
%) available
: {}
",
1266 ((used+roundup)*100)/total,
1270 format_and_print_result(data, &output_format);
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user(), None) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }

    Value::Null
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_expenstion(&v))
        .collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;

    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
1442 fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
1444 // fixme: implement other input methods
1446 use std::env::VarError::*;
1447 match std::env::var("PBS_ENCRYPTION_PASSWORD
") {
1448 Ok(p) => return Ok(p.as_bytes().to_vec()),
1449 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters
"),
1450 Err(NotPresent) => {
1451 // Try another method
1455 // If we're on a TTY, query the user for a password
1456 if crate::tools::tty::stdin_isatty() {
1457 return Ok(crate::tools::tty::read_password("Encryption Key Password
: ")?);
1460 bail!("no password input mechanism available
");
1466 _rpcenv: &mut dyn RpcEnvironment,
1467 ) -> Result<Value, Error> {
1469 let path = tools::required_string_param(¶m, "path
")?;
1470 let path = PathBuf::from(path);
1472 let kdf = param["kdf
"].as_str().unwrap_or("scrypt
");
1474 let key = proxmox::sys::linux::random_data(32)?;
1476 if kdf == "scrypt
" {
1477 // always read passphrase from tty
1478 if !crate::tools::tty::stdin_isatty() {
1479 bail!("unable to read passphrase
- no tty
");
1482 let password = crate::tools::tty::read_password("Encryption Key Password
: ")?;
1484 let key_config = encrypt_key_with_passphrase(&key, &password)?;
1486 store_key_config(&path, false, key_config)?;
1489 } else if kdf == "none
" {
1490 let created = Local.timestamp(Local::now().timestamp(), 0);
1492 store_key_config(&path, false, KeyConfig {
1505 fn master_pubkey_path() -> Result<PathBuf, Error> {
1506 let base = BaseDirectories::with_prefix("proxmox
-backup
")?;
1508 // usually $HOME/.config/proxmox-backup/master-public.pem
1509 let path = base.place_config_file("master
-public
.pem
")?;
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification failed!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification failed!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            created, // keep original value
1631 fn key_mgmt_cli() -> CliCommandMap {
1633 const KDF_SCHEMA: Schema =
1634 StringSchema::new("Key derivation function
. Choose 'none' to store the key unecrypted
.")
1635 .format(&ApiStringFormat::Enum(&["scrypt
", "none
"]))
1640 const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
1641 &ApiHandler::Sync(&key_create),
1643 "Create a new encryption key
.",
1645 ("path
", false, &StringSchema::new("File system path
.").schema()),
1646 ("kdf
", true, &KDF_SCHEMA),
1651 let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
1652 .arg_param(&["path
"])
1653 .completion_cb("path
", tools::complete_file_name);
1656 const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
1657 &ApiHandler::Sync(&key_change_passphrase),
1659 "Change the passphrase required to decrypt the key
.",
1661 ("path
", false, &StringSchema::new("File system path
.").schema()),
1662 ("kdf
", true, &KDF_SCHEMA),
1667 let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
1668 .arg_param(&["path
"])
1669 .completion_cb("path
", tools::complete_file_name);
1671 const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
1672 &ApiHandler::Sync(&key_create_master_key),
1673 &ObjectSchema::new("Create a new
4096 bit RSA master
pub/priv key pair
.", &[])
1676 let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
1679 const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
1680 &ApiHandler::Sync(&key_import_master_pubkey),
1682 "Import a new RSA public key and
use it
as master key
. The key is expected to be
in '
.pem' format
.",
1683 &sorted!([ ("path
", false, &StringSchema::new("File system path
.").schema()) ]),
1687 let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
1688 .arg_param(&["path
"])
1689 .completion_cb("path
", tools::complete_file_name);
1691 CliCommandMap::new()
1692 .insert("create
".to_owned(), key_create_cmd_def.into())
1693 .insert("create
-master
-key
".to_owned(), key_create_master_key_cmd_def.into())
1694 .insert("import
-master
-pubkey
".to_owned(), key_import_master_pubkey_cmd_def.into())
1695 .insert("change
-passphrase
".to_owned(), key_change_passphrase_cmd_def.into())
1701 _rpcenv: &mut dyn RpcEnvironment,
1702 ) -> Result<Value, Error> {
1703 let verbose = param["verbose
"].as_bool().unwrap_or(false);
1705 // This will stay in foreground with debug output enabled as None is
1706 // passed for the RawFd.
1707 return async_main(mount_do(param, None));
1710 // Process should be deamonized.
1711 // Make sure to fork before the async runtime is instantiated to avoid troubles.
1714 Ok(ForkResult::Parent { .. }) => {
1715 nix::unistd::close(pipe.1).unwrap();
1716 // Blocks the parent process until we are ready to go in the child
1717 let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
1720 Ok(ForkResult::Child) => {
1721 nix::unistd::close(pipe.0).unwrap();
1722 nix::unistd::setsid().unwrap();
1723 async_main(mount_do(param, Some(pipe.1)))
1725 Err(_) => bail!("failed to daemonize process
"),
1729 async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
1730 let repo = extract_repository_from_value(¶m)?;
1731 let archive_name = tools::required_string_param(¶m, "archive
-name
")?;
1732 let target = tools::required_string_param(¶m, "target
")?;
1733 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1735 record_repository(&repo);
1737 let path = tools::required_string_param(¶m, "snapshot
")?;
1738 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
1739 let group = BackupGroup::parse(path)?;
1741 let path = format!("api2
/json
/admin
/datastore
/{}
/snapshots
", repo.store());
1742 let result = client.get(&path, Some(json!({
1743 "backup
-type": group.backup_type(),
1744 "backup
-id
": group.backup_id(),
1747 let list = result["data
"].as_array().unwrap();
1748 if list.is_empty() {
1749 bail!("backup group '{}' does not contain any snapshots
:", path);
1752 let epoch = list[0]["backup
-time
"].as_i64().unwrap();
1753 let backup_time = Utc.timestamp(epoch, 0);
1754 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
1756 let snapshot = BackupDir::parse(path)?;
1757 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
1760 let keyfile = param["keyfile
"].as_str().map(PathBuf::from);
1761 let crypt_config = match keyfile {
1764 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
1765 Some(Arc::new(CryptConfig::new(key)?))
1769 let server_archive_name = if archive_name.ends_with(".pxar
") {
1770 format!("{}
.didx
", archive_name)
1772 bail!("Can only mount pxar archives
.");
1775 let client = BackupReader::start(
1777 crypt_config.clone(),
1785 let manifest = client.download_manifest().await?;
1787 if server_archive_name.ends_with(".didx
") {
1788 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1789 let most_used = index.find_most_used_chunks(8);
1790 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1791 let reader = BufferedDynamicReader::new(index, chunk_reader);
1792 let decoder = pxar::Decoder::new(reader)?;
1793 let options = OsStr::new("ro
,default_permissions
");
1794 let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
1795 .map_err(|err| format_err!("pxar mount failed
: {}
", err))?;
1797 // Mount the session but not call fuse deamonize as this will cause
1798 // issues with the runtime after the fork
1799 let deamonize = false;
1800 session.mount(&Path::new(target), deamonize)?;
1802 if let Some(pipe) = pipe {
1803 nix::unistd::chdir(Path::new("/")).unwrap();
1804 // Finish creation of deamon by redirecting filedescriptors.
1805 let nullfd = nix::fcntl::open(
1807 nix::fcntl::OFlag::O_RDWR,
1808 nix::sys::stat::Mode::empty(),
1810 nix::unistd::dup2(nullfd, 0).unwrap();
1811 nix::unistd::dup2(nullfd, 1).unwrap();
1812 nix::unistd::dup2(nullfd, 2).unwrap();
1814 nix::unistd::close(nullfd).unwrap();
1816 // Signal the parent process that we are done with the setup and it can
1818 nix::unistd::write(pipe, &[0u8])?;
1819 nix::unistd::close(pipe).unwrap();
1822 let multithreaded = true;
1823 session.run_loop(multithreaded)?;
1825 bail!("unknown archive file
extension (expected
.pxar
)");
1834 _rpcenv: &mut dyn RpcEnvironment,
1835 ) -> Result<Value, Error> {
1836 async_main(catalog_shell(param))
1839 async fn catalog_shell(param: Value) -> Result<Value, Error> {
1840 let repo = extract_repository_from_value(¶m)?;
1841 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1842 let path = tools::required_string_param(¶m, "snapshot
")?;
1843 let archive_name = tools::required_string_param(¶m, "archive
-name
")?;
1845 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
1846 let group = BackupGroup::parse(path)?;
1848 let path = format!("api2
/json
/admin
/datastore
/{}
/snapshots
", repo.store());
1849 let result = client.get(&path, Some(json!({
1850 "backup
-type": group.backup_type(),
1851 "backup
-id
": group.backup_id(),
1854 let list = result["data
"].as_array().unwrap();
1855 if list.is_empty() {
1856 bail!("backup group '{}' does not contain any snapshots
:", path);
1859 let epoch = list[0]["backup
-time
"].as_i64().unwrap();
1860 let backup_time = Utc.timestamp(epoch, 0);
1861 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
1863 let snapshot = BackupDir::parse(path)?;
1864 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
1867 let keyfile = param["keyfile
"].as_str().map(|p| PathBuf::from(p));
1868 let crypt_config = match keyfile {
1871 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
1872 Some(Arc::new(CryptConfig::new(key)?))
1876 let server_archive_name = if archive_name.ends_with(".pxar
") {
1877 format!("{}
.didx
", archive_name)
1879 bail!("Can only mount pxar archives
.");
1882 let client = BackupReader::start(
1884 crypt_config.clone(),
1892 let tmpfile = std::fs::OpenOptions::new()
1895 .custom_flags(libc::O_TMPFILE)
1898 let manifest = client.download_manifest().await?;
1900 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1901 let most_used = index.find_most_used_chunks(8);
1902 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
1903 let reader = BufferedDynamicReader::new(index, chunk_reader);
1904 let mut decoder = pxar::Decoder::new(reader)?;
1905 decoder.set_callback(|path| {
1906 println!("{:?}
", path);
1910 let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
1911 let index = DynamicIndexReader::new(tmpfile)
1912 .map_err(|err| format_err!("unable to read catalog index
- {}
", err))?;
1914 // Note: do not use values stored in index (not trusted) - instead, computed them again
1915 let (csum, size) = index.compute_csum();
1916 manifest.verify_file(CATALOG_NAME, &csum, size)?;
1918 let most_used = index.find_most_used_chunks(8);
1919 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1920 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
1921 let mut catalogfile = std::fs::OpenOptions::new()
1924 .custom_flags(libc::O_TMPFILE)
1927 std::io::copy(&mut reader, &mut catalogfile)
1928 .map_err(|err| format_err!("unable to download catalog
- {}
", err))?;
1930 catalogfile.seek(SeekFrom::Start(0))?;
1931 let catalog_reader = CatalogReader::new(catalogfile);
1932 let state = Shell::new(
1934 &server_archive_name,
1938 println!("Starting interactive shell
");
1941 record_repository(&repo);
1948 const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new("Backup source
specification ([<label
>:<path
>]).")
1949 .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
fn main() {

    const API_METHOD_CREATE_BACKUP: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&create_backup),
            "Create (host) backup.",
                "List of backup source specifications ([<label.ext>:<path>] ...)",
                &BACKUP_SOURCE_SCHEMA,
            ).min_length(1).schema()
                "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                &StringSchema::new("Path to file.").schema()
                &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()
                &BooleanSchema::new("Verbose output.")
            "skip-lost-and-found",
                &BooleanSchema::new("Skip lost+found directory")
            &BACKUP_TYPE_SCHEMA,
                &IntegerSchema::new("Chunk size in KB. Must be a power of 2.")

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);
    const API_METHOD_UPLOAD_LOG: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&upload_log),
            "Upload backup log file.",
                &StringSchema::new("Snapshot path.").schema()
                &StringSchema::new("The path to the log file you want to upload.").schema()
                &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);
    const API_METHOD_LIST_BACKUP_GROUPS: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&list_backup_groups),
            "List backup groups.",
            ("repository", true, &REPO_URL_SCHEMA),
            ("output-format", true, &OUTPUT_FORMAT),

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);
    const API_METHOD_LIST_SNAPSHOTS: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&list_snapshots),
            "List backup snapshots.",
            ("group", true, &StringSchema::new("Backup group.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("output-format", true, &OUTPUT_FORMAT),

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);
    const API_METHOD_FORGET_SNAPSHOTS: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&forget_snapshots),
            "Forget (remove) backup snapshots.",
            ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);
    const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&start_garbage_collection),
            "Start garbage collection for a specific repository.",
        &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);
    const API_METHOD_RESTORE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&restore),
            "Restore backup repository.",
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

            "allow-existing-dirs",
                &BooleanSchema::new("Do not fail if directories already exist.")
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                &BooleanSchema::new("Verbose output.")

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);
    const API_METHOD_LIST_SNAPSHOT_FILES: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&list_snapshot_files),
            "List snapshot files.",
            ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("output-format", true, &OUTPUT_FORMAT),

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);
    const API_METHOD_DUMP_CATALOG: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&dump_catalog),
            ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),

    let catalog_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);
    const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&prune),
            "Prune backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
            ("group", false, &StringSchema::new("Backup group.").schema()),
            ("output-format", true, &OUTPUT_FORMAT),
            ("repository", true, &REPO_URL_SCHEMA),

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);
    const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&status),
            "Get repository status.",
            ("repository", true, &REPO_URL_SCHEMA),
            ("output-format", true, &OUTPUT_FORMAT),

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);
    const API_METHOD_API_LOGIN: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&api_login),
            "Try to login. If successful, store ticket.",
        &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    const API_METHOD_API_LOGOUT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&api_logout),
            "Logout (delete stored ticket).",
        &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
            "Mount pxar archive.",
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("target", false, &StringSchema::new("Target directory path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);
    const API_METHOD_SHELL: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&shell),
            "Shell to interactively inspect and restore snapshots.",
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),

    let shell_cmd_def = CliCommand::new(&API_METHOD_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);
    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("upload-log".to_owned(), upload_log_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("catalog".to_owned(), catalog_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("login".to_owned(), login_cmd_def.into())
        .insert("logout".to_owned(), logout_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into())
        .insert("files".to_owned(), files_cmd_def.into())
        .insert("status".to_owned(), status_cmd_def.into())
        .insert("key".to_owned(), key_mgmt_cli().into())
        .insert("mount".to_owned(), mount_cmd_def.into())
        .insert("shell".to_owned(), shell_cmd_def.into());

    run_cli_command(cmd_def.into());
}
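
// Helper used by the synchronous CLI handlers above: build a Tokio runtime and block on
// the given future, returning its output.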
2337 fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
2338 let rt = tokio::runtime::Runtime::new().unwrap();
2339 let ret = rt.block_on(fut);