// src/bin/proxmox-backup-client.rs
// (proxmox-backup.git: add simple task management cli)
use failure::*;
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::{sortable, identity};
use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;
use proxmox_backup::api2::types::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{self, catalog::*};

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
//use hyper::Body;
use std::sync::{Arc, Mutex};
//use regex::Regex;
use xdg::BaseDirectories;

use futures::*;
use tokio::sync::mpsc;

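// A backup source specification ("backupspec") pairs an archive name with a
// local source path, e.g. "root.pxar:/" or "etc.pxar:/etc"; the regex below
// also fixes the set of allowed archive extensions.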
proxmox::api::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}

const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}

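// Keep a small, usage-ranked cache of recently used repositories
// (~/.cache/proxmox-backup/repo-list); complete_repository() below feeds
// shell completion from it. Failures are deliberately ignored.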
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() >= 10 { // store at most 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

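// For "text" output the returned value is expected to be a UPID, so stream
// the corresponding task log; any other output format just prints the result.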
async fn view_task_result(
    client: HttpClient,
    result: Value,
    output_format: &str,
) -> Result<(), Error> {
    let data = &result["data"];
    if output_format == "text" {
        if let Some(upid) = data.as_str() {
            display_task_log(client, upid, true).await?;
        }
    } else {
        format_and_print_result(&data, &output_format);
    }

    Ok(())
}

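// Pipeline: a PxarBackupStream serializes the directory, a ChunkStream cuts
// it into dynamically sized chunks, and the chunker runs in its own task
// while the chunks are uploaded as a dynamic index archive.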
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = rx.map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        let _ = tx.send_all(&mut chunk_stream).await;
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}

async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}

fn strip_server_file_extension(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}

fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = async_main(async move {
        client.get(&path, None).await
    })?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = async_main(async move {
        client.get(&path, Some(args)).await
    })?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = async_main(async move {
        client.delete(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    Ok(result)
}

fn api_login(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    async_main(async move { client.login().await })?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn api_logout(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}

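// Download the snapshot's catalog into an anonymous temp file (O_TMPFILE
// under /tmp), then decode it and print the directory listing to stdout.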
fn dump_catalog(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    async_main(async move {
        let client = BackupReader::start(
            client,
            crypt_config.clone(),
            repo.store(),
            &snapshot.group().backup_type(),
            &snapshot.group().backup_id(),
            snapshot.backup_time(),
            true,
        ).await?;

        let manifest = client.download_manifest().await?;

        let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalogfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        std::io::copy(&mut reader, &mut catalogfile)
            .map_err(|err| format_err!("unable to download catalog - {}", err))?;

        catalogfile.seek(SeekFrom::Start(0))?;

        let mut catalog_reader = CatalogReader::new(catalogfile);

        catalog_reader.dump()?;

        record_repository(&repo);

        Ok::<(), Error>(())
    })?;

    Ok(Value::Null)
}

fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = async_main(async move {
        client.get(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_extension(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}

fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    async_main(async {
        let result = client.post(&path, None).await?;

        record_repository(&repo);

        view_task_result(client, result, &output_format).await
    })?;

    Ok(Value::Null)
}

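// Splits a backupspec into (archive name, source path); for example
// parse_backupspec("root.pxar:/") yields ("root.pxar", "/").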
fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }
    bail!("unable to parse directory specification '{}'", value);
}

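// Catalog writes are pushed through an mpsc channel, chunked, and uploaded as
// a dynamic archive in a background task; the final BackupStats (or error)
// comes back through a oneshot channel once the catalog stream is closed.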
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<SenderWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow buffering of up to 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}

fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("stat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE }

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    async_main(async move {
        let client = BackupWriter::start(
            client,
            repo.store(),
            backup_type,
            &backup_id,
            backup_time,
            verbose,
        ).await?;

        let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
        let mut manifest = BackupManifest::new(snapshot);

        let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

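        // Upload each source in turn: config and log files as single blobs,
        // directories as dynamic (pxar) archives plus catalog entries, and
        // images as fixed-size chunk archives.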
        for (backup_type, filename, target, size) in upload_list {
            match backup_type {
                BackupType::CONFIG => {
                    println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                    println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::PXAR => {
                    println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                    catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                    let stats = backup_directory(
                        &client,
                        &filename,
                        &target,
                        chunk_size_opt,
                        devices.clone(),
                        verbose,
                        skip_lost_and_found,
                        crypt_config.clone(),
                        catalog.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                    catalog.lock().unwrap().end_directory()?;
                }
                BackupType::IMAGE => {
                    println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = backup_image(
                        &client,
                        &filename,
                        &target,
                        size,
                        chunk_size_opt,
                        verbose,
                        crypt_config.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
            }
        }

        // finalize and upload catalog
        if upload_catalog {
            let mutex = Arc::try_unwrap(catalog)
                .map_err(|_| format_err!("unable to get catalog (still used)"))?;
            let mut catalog = mutex.into_inner().unwrap();

            catalog.finish()?;

            drop(catalog); // close upload stream

            let stats = catalog_result_rx.await??;

            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
        }

        if let Some(rsa_encrypted_key) = rsa_encrypted_key {
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            let stats = client
                .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
                .await?;
            manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            /*
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
            */
        }

        // create manifest (index.json)
        let manifest = manifest.into_json();

        println!("Upload index.json to '{:?}'", repo);
        let manifest = serde_json::to_string_pretty(&manifest)?.into();
        client
            .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
            .await?;

        client.finish().await?;

        let end_time = Local::now();
        let elapsed = end_time.signed_duration_since(start_time);
        println!("Duration: {}", elapsed);

        println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

        Ok(Value::Null)
    })
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    async_main(restore_do(param))
}

fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional
    // buffer/copy and thus slows down reading. Instead, use RemoteChunkReader directly.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

async fn restore_do(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

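    // A path with a single '/' names a group rather than a snapshot; in that
    // case use the first snapshot the server returns for that group
    // (presumably the most recent one).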
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            file_set_contents(target, backup_index_data.as_bytes(), None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".blob") {

        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {

        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
            decoder.set_callback(move |path| {
                if verbose {
                    eprintln!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {

        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;

    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}

fn upload_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    async_main(async move {
        client.upload("application/octet-stream", body, &path, Some(args)).await
    })
}

fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    async_main(async {
        let result = client.post(&path, Some(param)).await?;

        record_repository(&repo);

        view_task_result(client, result, &output_format).await
    })?;

    Ok(Value::Null)
}

fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = async_main(async move { client.get(&path, None).await })?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
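        // half a percent of the total, so the integer division below rounds
        // the usage to the nearest percent instead of truncating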
        let roundup = total/200;

        println!(
            "total: {} used: {} ({} %) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user(), None) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_extension(&v))
        .collect()
}

fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|v| {
            let name = strip_server_file_extension(&v);
            if name.ends_with(".pxar") {
                Some(name)
            } else {
                None
            }
        })
        .collect()
}

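// Suggest power-of-two chunk sizes from 64 to 4096; the value is interpreted
// as KiB by create_backup (the "chunk-size" parameter is multiplied by 1024).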
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}

fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

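// The optional RSA master key pair lets create_backup() store an
// RSA-encrypted copy of the encryption key ("rsa-encrypted.key") with every
// backup, so the key can be recovered later with the private master key.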
fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification failed!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification failed!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn key_mgmt_cli() -> CliCommandMap {

    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
        .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
        .default("scrypt")
        .schema();

    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}

fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in the foreground with debug output enabled, as None
        // is passed for the RawFd.
        return async_main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            async_main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder = pxar::Decoder::new(reader)?;
        let options = OsStr::new("ro,default_permissions");
        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session, but do not let fuse daemonize itself, as that
        // would cause issues with the runtime after the fork
        let daemonize = false;
        session.mount(&Path::new(target), daemonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of the daemon by redirecting the file descriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it
            // can terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
1791
1792 fn catalog_shell(
1793 param: Value,
1794 _info: &ApiMethod,
1795 _rpcenv: &mut dyn RpcEnvironment,
1796 ) -> Result<Value, Error> {
1797 async_main(catalog_shell_async(param))
1798 }
1799
1800 async fn catalog_shell_async(param: Value) -> Result<Value, Error> {
1801 let repo = extract_repository_from_value(&param)?;
1802 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1803 let path = tools::required_string_param(&param, "snapshot")?;
1804 let archive_name = tools::required_string_param(&param, "archive-name")?;
1805
1806 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
1807 let group = BackupGroup::parse(path)?;
1808
1809 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
1810 let result = client.get(&path, Some(json!({
1811 "backup-type": group.backup_type(),
1812 "backup-id": group.backup_id(),
1813 }))).await?;
1814
1815 let list = result["data"].as_array().unwrap();
1816 if list.is_empty() {
1817 bail!("backup group '{}' does not contain any snapshots:", path);
1818 }
1819
1820 let epoch = list[0]["backup-time"].as_i64().unwrap();
1821 let backup_time = Utc.timestamp(epoch, 0);
1822 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
1823 } else {
1824 let snapshot = BackupDir::parse(path)?;
1825 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
1826 };
1827
1828 let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
1829 let crypt_config = match keyfile {
1830 None => None,
1831 Some(path) => {
1832 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
1833 Some(Arc::new(CryptConfig::new(key)?))
1834 }
1835 };
1836
1837 let server_archive_name = if archive_name.ends_with(".pxar") {
1838 format!("{}.didx", archive_name)
1839 } else {
1840 bail!("Can only mount pxar archives.");
1841 };
1842
1843 let client = BackupReader::start(
1844 client,
1845 crypt_config.clone(),
1846 repo.store(),
1847 &backup_type,
1848 &backup_id,
1849 backup_time,
1850 true,
1851 ).await?;
1852
1853 let tmpfile = std::fs::OpenOptions::new()
1854 .write(true)
1855 .read(true)
1856 .custom_flags(libc::O_TMPFILE)
1857 .open("/tmp")?;
1858
1859 let manifest = client.download_manifest().await?;
1860
1861 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
1862 let most_used = index.find_most_used_chunks(8);
1863 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
1864 let reader = BufferedDynamicReader::new(index, chunk_reader);
1865 let mut decoder = pxar::Decoder::new(reader)?;
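    // Print each path the decoder visits while extracting from the shell.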
1866 decoder.set_callback(|path| {
1867 println!("{:?}", path);
1868 Ok(())
1869 });
1870
1871 let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
1872 let index = DynamicIndexReader::new(tmpfile)
1873 .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
1874
1875 // Note: do not use the values stored in the index (they are not trusted) - instead, compute them again
1876 let (csum, size) = index.compute_csum();
1877 manifest.verify_file(CATALOG_NAME, &csum, size)?;
1878
1879 let most_used = index.find_most_used_chunks(8);
1880 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1881 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
1882 let mut catalogfile = std::fs::OpenOptions::new()
1883 .write(true)
1884 .read(true)
1885 .custom_flags(libc::O_TMPFILE)
1886 .open("/tmp")?;
1887
1888 std::io::copy(&mut reader, &mut catalogfile)
1889 .map_err(|err| format_err!("unable to download catalog - {}", err))?;
1890
1891 catalogfile.seek(SeekFrom::Start(0))?;
1892 let catalog_reader = CatalogReader::new(catalogfile);
1893 let state = Shell::new(
1894 catalog_reader,
1895 &server_archive_name,
1896 decoder,
1897 )?;
1898
1899 println!("Starting interactive shell");
1900 state.shell()?;
1901
1902 record_repository(&repo);
1903
1904 Ok(Value::Null)
1905 }
1906
1907 fn catalog_mgmt_cli() -> CliCommandMap {
1908
1909 #[sortable]
1910 const API_METHOD_SHELL: ApiMethod = ApiMethod::new(
1911 &ApiHandler::Sync(&catalog_shell),
1912 &ObjectSchema::new(
1913 "Shell to interactively inspect and restore snapshots.",
1914 &sorted!([
1915 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
1916 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
1917 ("repository", true, &REPO_URL_SCHEMA),
1918 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
1919 ]),
1920 )
1921 );
1922
1923 let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_SHELL)
1924 .arg_param(&["snapshot", "archive-name"])
1925 .completion_cb("repository", complete_repository)
1926 .completion_cb("archive-name", complete_pxar_archive_name)
1927 .completion_cb("snapshot", complete_group_or_snapshot);
1928
1929 #[sortable]
1930 const API_METHOD_DUMP_CATALOG: ApiMethod = ApiMethod::new(
1931 &ApiHandler::Sync(&dump_catalog),
1932 &ObjectSchema::new(
1933 "Dump catalog.",
1934 &sorted!([
1935 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
1936 ("repository", true, &REPO_URL_SCHEMA),
1937 ]),
1938 )
1939 );
1940
1941 let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
1942 .arg_param(&["snapshot"])
1943 .completion_cb("repository", complete_repository)
1944 .completion_cb("snapshot", complete_backup_snapshot);
1945
1946 CliCommandMap::new()
1947 .insert("dump", catalog_dump_cmd_def)
1948 .insert("shell", catalog_shell_cmd_def)
1949 }
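// Example invocations (snapshot paths and archive names are illustrative):
//   proxmox-backup-client catalog dump host/myhost/2019-09-02T10:40:51Z
//   proxmox-backup-client catalog shell host/myhost/2019-09-02T10:40:51Z root.pxar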
1950
1951 #[api(
1952 input: {
1953 properties: {
1954 repository: {
1955 schema: REPO_URL_SCHEMA,
1956 optional: true,
1957 },
1958 limit: {
1959 description: "The maximum number of tasks to list.",
1960 type: Integer,
1961 optional: true,
1962 minimum: 1,
1963 maximum: 1000,
1964 default: 50,
1965 },
1966 "output-format": {
1967 schema: OUTPUT_FORMAT,
1968 optional: true,
1969 },
1970 }
1971 }
1972 )]
1973 /// List server tasks for this repository's user.
1974 fn task_list(param: Value) -> Result<Value, Error> {
1975
1976 async_main(async {
1977 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
1978 let repo = extract_repository_from_value(&param)?;
1979 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1980
1981 let limit = param["limit"].as_u64().unwrap_or(50) as usize;
1982
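        // Only tasks started by the repository user are of interest here, so
        // pass that user as a filter to the task list API.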
1983 let args = json!({ "start": 0, "limit": limit, "userfilter": repo.user()});
1984 let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
1985
1986 let data = &result["data"];
1987
1988 if output_format == "text" {
1989 for item in data.as_array().unwrap() {
1990 println!(
1991 "{} {}",
1992 item["upid"].as_str().unwrap(),
1993 item["status"].as_str().unwrap_or("running"),
1994 );
1995 }
1996 } else {
1997 format_and_print_result(data, &output_format);
1998 }
1999
2000 Ok::<_, Error>(())
2001 })?;
2002
2003 Ok(Value::Null)
2004 }
2005
2006 #[api(
2007 input: {
2008 properties: {
2009 repository: {
2010 schema: REPO_URL_SCHEMA,
2011 optional: true,
2012 },
2013 upid: {
2014 schema: UPID_SCHEMA,
2015 },
2016 }
2017 }
2018 )]
2019 /// Display the task log.
2020 fn task_log(param: Value) -> Result<Value, Error> {
2021
2022 async_main(async {
2023 let repo = extract_repository_from_value(&param)?;
2024 let upid = tools::required_string_param(&param, "upid")?;
2025
2026 let client = HttpClient::new(repo.host(), repo.user(), None)?;
2027
2028 display_task_log(client, upid, true).await?;
2029
2030 Ok::<_, Error>(())
2031 })?;
2032
2033 Ok(Value::Null)
2034 }
2035
2036 fn task_mgmt_cli() -> CliCommandMap {
2037
2038 let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
2039 .completion_cb("repository", complete_repository);
2040
2041 let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
2042 .arg_param(&["upid"]);
2043
2044 CliCommandMap::new()
2045 .insert("log", task_log_cmd_def)
2046 .insert("list", task_list_cmd_def)
2047
2048 }
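// Example invocations (the UPID placeholder stands for a real task ID):
//   proxmox-backup-client task list --limit 10
//   proxmox-backup-client task log <upid>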
2049
2050 fn main() {
2051
2052 const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new("Backup source specification ([<label.ext>:<path>]).")
2053 .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
2054 .schema();
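    // A backupspec pairs an archive name with a source path, for example
    // "root.pxar:/" for a directory tree or "disk.img:/dev/sda" for an image.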
2055
2056 #[sortable]
2057 const API_METHOD_CREATE_BACKUP: ApiMethod = ApiMethod::new(
2058 &ApiHandler::Sync(&create_backup),
2059 &ObjectSchema::new(
2060 "Create (host) backup.",
2061 &sorted!([
2062 (
2063 "backupspec",
2064 false,
2065 &ArraySchema::new(
2066 "List of backup source specifications ([<label.ext>:<path>] ...)",
2067 &BACKUP_SOURCE_SCHEMA,
2068 ).min_length(1).schema()
2069 ),
2070 (
2071 "repository",
2072 true,
2073 &REPO_URL_SCHEMA
2074 ),
2075 (
2076 "include-dev",
2077 true,
2078 &ArraySchema::new(
2079 "Include mountpoints with the same st_dev number (see ``man fstat``) as the specified files.",
2080 &StringSchema::new("Path to file.").schema()
2081 ).schema()
2082 ),
2083 (
2084 "keyfile",
2085 true,
2086 &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()
2087 ),
2088 (
2089 "verbose",
2090 true,
2091 &BooleanSchema::new("Verbose output.")
2092 .default(false)
2093 .schema()
2094 ),
2095 (
2096 "skip-lost-and-found",
2097 true,
2098 &BooleanSchema::new("Skip the lost+found directory.")
2099 .default(false)
2100 .schema()
2101 ),
2102 (
2103 "backup-type",
2104 true,
2105 &BACKUP_TYPE_SCHEMA,
2106 ),
2107 (
2108 "backup-id",
2109 true,
2110 &BACKUP_ID_SCHEMA
2111 ),
2112 (
2113 "backup-time",
2114 true,
2115 &BACKUP_TIME_SCHEMA
2116 ),
2117 (
2118 "chunk-size",
2119 true,
2120 &IntegerSchema::new("Chunk size in KiB. Must be a power of 2.")
2121 .minimum(64)
2122 .maximum(4096)
2123 .default(4096)
2124 .schema()
2125 ),
2126 ]),
2127 )
2128 );
2129
2130 let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
2131 .arg_param(&["backupspec"])
2132 .completion_cb("repository", complete_repository)
2133 .completion_cb("backupspec", complete_backup_source)
2134 .completion_cb("keyfile", tools::complete_file_name)
2135 .completion_cb("chunk-size", complete_chunk_size);
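    // Example (repository and paths are illustrative):
    //   proxmox-backup-client backup root.pxar:/ --repository user@pbs@server:store1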
2136
2137 #[sortable]
2138 const API_METHOD_UPLOAD_LOG: ApiMethod = ApiMethod::new(
2139 &ApiHandler::Sync(&upload_log),
2140 &ObjectSchema::new(
2141 "Upload backup log file.",
2142 &sorted!([
2143 (
2144 "snapshot",
2145 false,
2146 &StringSchema::new("Snapshot path.").schema()
2147 ),
2148 (
2149 "logfile",
2150 false,
2151 &StringSchema::new("The path to the log file you want to upload.").schema()
2152 ),
2153 (
2154 "repository",
2155 true,
2156 &REPO_URL_SCHEMA
2157 ),
2158 (
2159 "keyfile",
2160 true,
2161 &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()
2162 ),
2163 ]),
2164 )
2165 );
2166
2167 let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
2168 .arg_param(&["snapshot", "logfile"])
2169 .completion_cb("snapshot", complete_backup_snapshot)
2170 .completion_cb("logfile", tools::complete_file_name)
2171 .completion_cb("keyfile", tools::complete_file_name)
2172 .completion_cb("repository", complete_repository);
2173
2174 #[sortable]
2175 const API_METHOD_LIST_BACKUP_GROUPS: ApiMethod = ApiMethod::new(
2176 &ApiHandler::Sync(&list_backup_groups),
2177 &ObjectSchema::new(
2178 "List backup groups.",
2179 &sorted!([
2180 ("repository", true, &REPO_URL_SCHEMA),
2181 ("output-format", true, &OUTPUT_FORMAT),
2182 ]),
2183 )
2184 );
2185
2186 let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
2187 .completion_cb("repository", complete_repository);
2188
2189 #[sortable]
2190 const API_METHOD_LIST_SNAPSHOTS: ApiMethod = ApiMethod::new(
2191 &ApiHandler::Sync(&list_snapshots),
2192 &ObjectSchema::new(
2193 "List backup snapshots.",
2194 &sorted!([
2195 ("group", true, &StringSchema::new("Backup group.").schema()),
2196 ("repository", true, &REPO_URL_SCHEMA),
2197 ("output-format", true, &OUTPUT_FORMAT),
2198 ]),
2199 )
2200 );
2201
2202 let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
2203 .arg_param(&["group"])
2204 .completion_cb("group", complete_backup_group)
2205 .completion_cb("repository", complete_repository);
2206
2207 #[sortable]
2208 const API_METHOD_FORGET_SNAPSHOTS: ApiMethod = ApiMethod::new(
2209 &ApiHandler::Sync(&forget_snapshots),
2210 &ObjectSchema::new(
2211 "Forget (remove) backup snapshots.",
2212 &sorted!([
2213 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
2214 ("repository", true, &REPO_URL_SCHEMA),
2215 ]),
2216 )
2217 );
2218
2219 let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
2220 .arg_param(&["snapshot"])
2221 .completion_cb("repository", complete_repository)
2222 .completion_cb("snapshot", complete_backup_snapshot);
2223
2224 #[sortable]
2225 const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
2226 &ApiHandler::Sync(&start_garbage_collection),
2227 &ObjectSchema::new(
2228 "Start garbage collection for a specific repository.",
2229 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
2230 )
2231 );
2232
2233 let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
2234 .completion_cb("repository", complete_repository);
2235
2236 #[sortable]
2237 const API_METHOD_RESTORE: ApiMethod = ApiMethod::new(
2238 &ApiHandler::Sync(&restore),
2239 &ObjectSchema::new(
2240 "Restore backup repository.",
2241 &sorted!([
2242 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2243 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2244 (
2245 "target",
2246 false,
2247 &StringSchema::new(
2248 r###"Target directory path. Use '-' to write to standard output.
2249
2250 We do not extract '.pxar' archives when writing to standard output.
2251
2252 "###
2253 ).schema()
2254 ),
2255 (
2256 "allow-existing-dirs",
2257 true,
2258 &BooleanSchema::new("Do not fail if directories already exist.")
2259 .default(false)
2260 .schema()
2261 ),
2262 ("repository", true, &REPO_URL_SCHEMA),
2263 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2264 (
2265 "verbose",
2266 true,
2267 &BooleanSchema::new("Verbose output.")
2268 .default(false)
2269 .schema()
2270 ),
2271 ]),
2272 )
2273 );
2274
2275 let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
2276 .arg_param(&["snapshot", "archive-name", "target"])
2277 .completion_cb("repository", complete_repository)
2278 .completion_cb("snapshot", complete_group_or_snapshot)
2279 .completion_cb("archive-name", complete_archive_name)
2280 .completion_cb("target", tools::complete_file_name);
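    // Example (snapshot path is illustrative); pass '-' as target to stream to stdout:
    //   proxmox-backup-client restore host/myhost/2019-09-02T10:40:51Z root.pxar /tmp/restore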
2281
2282 #[sortable]
2283 const API_METHOD_LIST_SNAPSHOT_FILES: ApiMethod = ApiMethod::new(
2284 &ApiHandler::Sync(&list_snapshot_files),
2285 &ObjectSchema::new(
2286 "List snapshot files.",
2287 &sorted!([
2288 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
2289 ("repository", true, &REPO_URL_SCHEMA),
2290 ("output-format", true, &OUTPUT_FORMAT),
2291 ]),
2292 )
2293 );
2294
2295 let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
2296 .arg_param(&["snapshot"])
2297 .completion_cb("repository", complete_repository)
2298 .completion_cb("snapshot", complete_backup_snapshot);
2299
2300 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
2301 &ApiHandler::Sync(&prune),
2302 &ObjectSchema::new(
2303 "Prune backup repository.",
2304 &proxmox_backup::add_common_prune_prameters!([
2305 ("dry-run", true, &BooleanSchema::new(
2306 "Just show what prune would do, but do not delete anything.")
2307 .schema()),
2308 ("group", false, &StringSchema::new("Backup group.").schema()),
2309 ], [
2310 ("output-format", true, &OUTPUT_FORMAT),
2311 ("repository", true, &REPO_URL_SCHEMA),
2312 ])
2313 )
2314 );
2315
2316 let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
2317 .arg_param(&["group"])
2318 .completion_cb("group", complete_backup_group)
2319 .completion_cb("repository", complete_repository);
2320
2321 #[sortable]
2322 const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
2323 &ApiHandler::Sync(&status),
2324 &ObjectSchema::new(
2325 "Get repository status.",
2326 &sorted!([
2327 ("repository", true, &REPO_URL_SCHEMA),
2328 ("output-format", true, &OUTPUT_FORMAT),
2329 ]),
2330 )
2331 );
2332
2333 let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
2334 .completion_cb("repository", complete_repository);
2335
2336 #[sortable]
2337 const API_METHOD_API_LOGIN: ApiMethod = ApiMethod::new(
2338 &ApiHandler::Sync(&api_login),
2339 &ObjectSchema::new(
2340 "Try to login. If successful, store ticket.",
2341 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
2342 )
2343 );
2344
2345 let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
2346 .completion_cb("repository", complete_repository);
2347
2348 #[sortable]
2349 const API_METHOD_API_LOGOUT: ApiMethod = ApiMethod::new(
2350 &ApiHandler::Sync(&api_logout),
2351 &ObjectSchema::new(
2352 "Logout (delete stored ticket).",
2353 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
2354 )
2355 );
2356
2357 let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
2358 .completion_cb("repository", complete_repository);
2359
2360 #[sortable]
2361 const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
2362 &ApiHandler::Sync(&mount),
2363 &ObjectSchema::new(
2364 "Mount pxar archive.",
2365 &sorted!([
2366 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2367 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2368 ("target", false, &StringSchema::new("Target directory path.").schema()),
2369 ("repository", true, &REPO_URL_SCHEMA),
2370 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2371 ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
2372 ]),
2373 )
2374 );
2375
2376 let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
2377 .arg_param(&["snapshot", "archive-name", "target"])
2378 .completion_cb("repository", complete_repository)
2379 .completion_cb("snapshot", complete_group_or_snapshot)
2380 .completion_cb("archive-name", complete_pxar_archive_name)
2381 .completion_cb("target", tools::complete_file_name);
2382
2383
2384 let cmd_def = CliCommandMap::new()
2385 .insert("backup", backup_cmd_def)
2386 .insert("upload-log", upload_log_cmd_def)
2387 .insert("forget", forget_cmd_def)
2388 .insert("garbage-collect", garbage_collect_cmd_def)
2389 .insert("list", list_cmd_def)
2390 .insert("login", login_cmd_def)
2391 .insert("logout", logout_cmd_def)
2392 .insert("prune", prune_cmd_def)
2393 .insert("restore", restore_cmd_def)
2394 .insert("snapshots", snapshots_cmd_def)
2395 .insert("files", files_cmd_def)
2396 .insert("status", status_cmd_def)
2397 .insert("key", key_mgmt_cli())
2398 .insert("mount", mount_cmd_def)
2399 .insert("catalog", catalog_mgmt_cli())
2400 .insert("task", task_mgmt_cli());
2401
2402 run_cli_command(cmd_def);
2403 }
2404
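// Drive a future to completion on a fresh, single-use Tokio runtime, tearing
// the runtime down afterwards so every CLI invocation starts with a clean reactor.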
2405 fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
2406 let rt = tokio::runtime::Runtime::new().unwrap();
2407 let ret = rt.block_on(fut);
2408 rt.shutdown_now();
2409 ret
2410 }