extern crate proxmox_backup;

use failure::*;
use futures::*;
use regex::Regex;

//use std::os::unix::io::AsRawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::{BufReader, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};

use proxmox_backup::tools;
use proxmox_backup::cli::*;
use proxmox_backup::api2::types::*;
use proxmox_backup::api_schema::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
use std::sync::{Arc, Mutex};
use xdg::BaseDirectories;

use lazy_static::lazy_static;
use tokio::sync::mpsc;
lazy_static! {
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$").unwrap();

    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .into()
    );
}

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(|repo_url| repo_url.to_owned())
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}

fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or(json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
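
// The repo-list cache file written above holds a flat JSON object that maps each
// repository string to a use count, e.g. {"user@host:store": 3}; record_repository()
// bumps the count and the rewrite keeps at most the 10 most used entries.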

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or(json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

async fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogBlobWriter<std::fs::File>>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        let _ = tx.send_all(&mut chunk_stream).await;
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}

async fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}

fn strip_server_file_expenstion(name: &str) -> String {

    if name.ends_with(".didx") {
        return name[..name.len()-5].to_owned();
    } else if name.ends_with(".fidx") {
        return name[..name.len()-5].to_owned();
    } else if name.ends_with(".blob") {
        return name[..name.len()-5].to_owned();
    } else {
        return name.to_owned(); // should not happen
    }
}
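
// For example, strip_server_file_expenstion("root.pxar.didx") yields "root.pxar" and
// strip_server_file_expenstion("index.json.blob") yields "index.json" - the server-side
// .didx/.fidx/.blob suffix is removed to recover the client-side archive name.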

fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = async_main(async move {
        client.get(&path, None).await
    })?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = async_main(async move {
        client.get(&path, Some(args)).await
    })?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = async_main(async move {
        client.delete(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    Ok(result)
}

fn api_login(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user())?;
    async_main(async move { client.login().await })?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn api_logout(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}

fn dump_catalog(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user())?;

    async_main(async move {
        let client = client.start_backup_reader(
            repo.store(),
            &snapshot.group().backup_type(),
            &snapshot.group().backup_id(),
            snapshot.backup_time(), true).await?;

        let blob_file = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let mut blob_file = client.download("catalog.blob", blob_file).await?;

        blob_file.seek(SeekFrom::Start(0))?;

        let reader = BufReader::new(blob_file);
        let mut catalog_reader = CatalogBlobReader::new(reader, crypt_config)?;

        catalog_reader.dump()?;

        record_repository(&repo);

        Ok(Value::Null)
    })
}

fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = async_main(async move {
        client.get(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_expenstion(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}

fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = async_main(async move { client.post(&path, None).await })?;

    record_repository(&repo);

    Ok(result)
}

fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }

    bail!("unable to parse directory specification '{}'", value);
}

fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }
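
    // Note: chunk-size is given in KiB and converted to bytes above, so e.g.
    // --chunk-size 4096 requests 4 MiB chunks; verify_chunk_size() then rejects
    // values the backend does not accept (the schema requires a power of two).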

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
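
    // Each backupspec parsed below is classified by the target extension:
    // .pxar directories become dynamic (.didx) archives, .img files/block devices
    // become fixed (.fidx) archives, and .conf/.log files are uploaded as blobs.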

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };

        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or(format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            "img" => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    async_main(async move {
        let client = client
            .start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose)
            .await?;

        let mut file_list = vec![];

        // fixme: encrypt/sign catalog?
        let catalog_file = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let catalog = Arc::new(Mutex::new(CatalogBlobWriter::new_compressed(catalog_file)?));
        let mut upload_catalog = false;

        for (backup_type, filename, target, size) in upload_list {
            match backup_type {
                BackupType::CONFIG => {
                    println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    file_list.push((target, stats));
                }
                BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                    println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    file_list.push((target, stats));
                }
                BackupType::PXAR => {
                    upload_catalog = true;
                    println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                    catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                    let stats = backup_directory(
                        &client,
                        &filename,
                        &target,
                        chunk_size_opt,
                        devices.clone(),
                        verbose,
                        skip_lost_and_found,
                        crypt_config.clone(),
                        catalog.clone(),
                    ).await?;
                    file_list.push((target, stats));
                    catalog.lock().unwrap().end_directory()?;
                }
                BackupType::IMAGE => {
                    println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = backup_image(
                        &client,
                        &filename,
                        &target,
                        size,
                        chunk_size_opt,
                        verbose,
                        crypt_config.clone(),
                    ).await?;
                    file_list.push((target, stats));
                }
            }
        }

        // finalize and upload catalog
        if upload_catalog {
            let mutex = Arc::try_unwrap(catalog)
                .map_err(|_| format_err!("unable to get catalog (still used)"))?;
            let mut catalog_file = mutex.into_inner().unwrap().finish()?;

            let target = "catalog.blob";

            catalog_file.seek(SeekFrom::Start(0))?;

            let stats = client.upload_blob(catalog_file, target).await?;
            file_list.push((target.to_owned(), stats));
        }

        if let Some(rsa_encrypted_key) = rsa_encrypted_key {
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            let stats = client
                .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
                .await?;
            file_list.push((format!("{}.blob", target), stats));

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            /*
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
            */
        }

        let file_list = file_list.iter()
            .fold(vec![], |mut acc, (filename, stats)| {
                acc.push(json!({
                    "filename": filename,
                    "size": stats.size,
                    "csum": proxmox::tools::digest_to_hex(&stats.csum),
                }));
                acc
            });

        let index = json!({
            "backup-type": backup_type,
            "backup-id": backup_id,
            "backup-time": backup_time.timestamp(),
            "files": file_list,
        });

        println!("Upload index.json to '{:?}'", repo);
        let index_data = serde_json::to_string_pretty(&index)?.into();
        client
            .upload_blob_from_data(index_data, "index.json.blob", crypt_config.clone(), true, true)
            .await?;

        client.finish().await?;

        let end_time = Local::now();
        let elapsed = end_time.signed_duration_since(start_time);
        println!("Duration: {}", elapsed);

        println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

        Ok(Value::Null)
    })
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    async_main(restore_do(param))
}

async fn restore_do(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };
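
    // A target of "-" selects no output file: the restored data is piped to standard
    // output instead (matching the "target" parameter description in the restore
    // command definition in main()).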
893 let keyfile = param["keyfile
"].as_str().map(|p| PathBuf::from(p));
895 let crypt_config = match keyfile {
898 let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
899 Some(Arc::new(CryptConfig::new(key)?))
903 let server_archive_name = if archive_name.ends_with(".pxar
") {
904 format!("{}
.didx
", archive_name)
905 } else if archive_name.ends_with(".img
") {
906 format!("{}
.fidx
", archive_name)
908 format!("{}
.blob
", archive_name)

    let client = client
        .start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true)
        .await?;

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    const INDEX_BLOB_NAME: &str = "index.json.blob";

    let index_data = client.download(INDEX_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
    let blob = DataBlob::from_raw(index_data)?;

    let backup_index_data = blob.decode(crypt_config.clone())?;
    let backup_index: Value = serde_json::from_slice(&backup_index_data[..])?;

    if server_archive_name == INDEX_BLOB_NAME {
        if let Some(target) = target {
            file_set_contents(target, &backup_index_data, None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".blob") {
        let mut tmpfile = client.download(&server_archive_name, tmpfile).await?;
        tmpfile.seek(SeekFrom::Start(0))?;
        let mut reader = DataBlobReader::new(tmpfile, crypt_config)?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".didx") {
        let tmpfile = client.download(&server_archive_name, tmpfile).await?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
                if verbose {
                    println!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {
        let tmpfile = client.download(&server_archive_name, tmpfile).await?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedFixedReader::new(index, chunk_reader);

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to store data - {}", err))?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}

fn upload_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config, true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    async_main(async move {
        client.upload("application/octet-stream", body, &path, Some(args)).await
    })
}

fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let _result = async_main(async move { client.post(&path, Some(param)).await })?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = async_main(async move { client.get(&path, None).await })?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
        let roundup = total/200;

        println!(
            "total: {} used: {} ({} %) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user()) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_expenstion(&v))
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size = size * 2;
        if size > 4096 { break; }
    }

    result
}

fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification failed!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification failed!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn key_mgmt_cli() -> CliCommandMap {

    let kdf_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
            .default("scrypt")
            .into()
    );

    let key_create_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create,
            ObjectSchema::new("Create a new encryption key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_change_passphrase,
            ObjectSchema::new("Change the passphrase required to decrypt the key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create_master_key,
            ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
        ));

    let key_import_master_pubkey_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_import_master_pubkey,
            ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
                .required("path", StringSchema::new("File system path."))
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("create".to_owned(), key_create_cmd_def.into())
        .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
        .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
        .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());

    cmd_def
}

fn main() {

    let backup_source_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Backup source specification ([<label>:<path>]).")
            .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
            .into()
    );

    let backup_cmd_def = CliCommand::new(
        ApiMethod::new(
            create_backup,
            ObjectSchema::new("Create (host) backup.")
                .required(
                    "backupspec",
                    ArraySchema::new(
                        "List of backup source specifications ([<label.ext>:<path>] ...)",
                        backup_source_schema,
                    )
                )
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "include-dev",
                    ArraySchema::new(
                        "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                        StringSchema::new("Path to file.").into()
                    )
                )
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false))
                .optional(
                    "skip-lost-and-found",
                    BooleanSchema::new("Skip lost+found directory").default(false))
                .optional(
                    "backup-type",
                    BACKUP_TYPE_SCHEMA.clone()
                )
                .optional(
                    "backup-id",
                    BACKUP_ID_SCHEMA.clone()
                )
                .optional(
                    "backup-time",
                    BACKUP_TIME_SCHEMA.clone()
                )
                .optional(
                    "chunk-size",
                    IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                )
        ))
        .arg_param(vec!["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(
        ApiMethod::new(
            upload_log,
            ObjectSchema::new("Upload backup log file.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .required("logfile", StringSchema::new("The path to the log file you want to upload."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
        ))
        .arg_param(vec!["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_backup_groups,
            ObjectSchema::new("List backup groups.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshots,
            ObjectSchema::new("List backup snapshots.")
                .optional("group", StringSchema::new("Backup group."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(
        ApiMethod::new(
            forget_snapshots,
            ObjectSchema::new("Forget (remove) backup snapshots.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(
        ApiMethod::new(
            start_garbage_collection,
            ObjectSchema::new("Start garbage collection for a specific repository.")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(
        ApiMethod::new(
            restore,
            ObjectSchema::new("Restore backup repository.")
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
                ))
                .optional(
                    "allow-existing-dirs",
                    BooleanSchema::new("Do not fail if directories already exist.").default(false))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("keyfile", StringSchema::new("Path to encryption key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false)
                )
        ))
        .arg_param(vec!["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshot_files,
            ObjectSchema::new("List snapshot files.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let catalog_cmd_def = CliCommand::new(
        ApiMethod::new(
            dump_catalog,
            ObjectSchema::new("Dump catalog.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(
        ApiMethod::new(
            prune,
            proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
                ObjectSchema::new("Prune backup repository.")
                    .required("group", StringSchema::new("Backup group."))
                    .optional("repository", REPO_URL_SCHEMA.clone())
            )
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(
        ApiMethod::new(
            status,
            ObjectSchema::new("Get repository status.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(
        ApiMethod::new(
            api_login,
            ObjectSchema::new("Try to login. If successful, store ticket.")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(
        ApiMethod::new(
            api_logout,
            ObjectSchema::new("Logout (delete stored ticket).")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("upload-log".to_owned(), upload_log_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("catalog".to_owned(), catalog_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("login".to_owned(), login_cmd_def.into())
        .insert("logout".to_owned(), logout_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into())
        .insert("files".to_owned(), files_cmd_def.into())
        .insert("status".to_owned(), status_cmd_def.into())
        .insert("key".to_owned(), key_mgmt_cli().into());

    run_cli_command(cmd_def.into());
}

fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let ret = rt.block_on(fut);
    ret
}