// src/bin/proxmox-backup-client.rs
//#[macro_use]
extern crate proxmox_backup;

use failure::*;
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::io::{Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;

use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};

use proxmox_backup::tools;
use proxmox_backup::cli::*;
use proxmox_backup::api2::types::*;
use proxmox_backup::api_schema::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
//use hyper::Body;
use std::sync::{Arc, Mutex};
use regex::Regex;
use xdg::BaseDirectories;

use lazy_static::lazy_static;
use futures::*;
use tokio::sync::mpsc;
lazy_static! {
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$").unwrap();

    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
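
// The two capture groups of BACKUPSPEC_REGEX split a spec such as
// "root.pxar:/" into an archive label ("root.pxar") and a local path ("/");
// the label must end in one of the known archive extensions
// (.pxar, .img, .conf, .log). See parse_backupspec() below.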

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
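
// Example (assumed shell usage): instead of passing --repository on every
// invocation, the repository can be pinned for a session via the environment:
//
//   export PBS_REPOSITORY=backup-user@pbs.example.com:store1
//
// The accepted URL grammar is defined by the BACKUP_REPO_URL format above.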

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}

fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

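    // What follows is a small selection sort over the usage counters: each
    // pass picks the not-yet-copied repository with the highest count, so
    // `map` ends up holding the most frequently used repositories.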
    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() >= 10 { // store at most 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
    catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        let _ = tx.send_all(&mut chunk_stream).await;
    });

    let stats = client
        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
        .await?;

    Ok(stats)
}
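
// Design note: the pxar encoder, the chunker, and the upload form a small
// pipeline. The bounded channel above provides backpressure, so a slow upload
// throttles the encoder instead of buffering an unbounded amount of data.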

async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client
        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
        .await?;

    Ok(stats)
}

fn strip_server_file_extension(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}
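
// A minimal test sketch for the helper above (hypothetical file names). All
// three server-side suffixes (".didx", ".fidx", ".blob") are exactly five
// bytes long, which is what the `name.len()-5` arithmetic relies on.
#[cfg(test)]
mod strip_extension_tests {
    use super::strip_server_file_extension;

    #[test]
    fn strips_known_server_suffixes() {
        assert_eq!(strip_server_file_extension("root.pxar.didx"), "root.pxar");
        assert_eq!(strip_server_file_extension("disk.img.fidx"), "disk.img");
        assert_eq!(strip_server_file_extension("qemu.conf.blob"), "qemu.conf");
        // unknown suffixes pass through unchanged
        assert_eq!(strip_server_file_extension("other.txt"), "other.txt");
    }
}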

fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = async_main(async move {
        client.get(&path, None).await
    })?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = async_main(async move {
        client.get(&path, Some(args)).await
    })?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = async_main(async move {
        client.delete(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    Ok(result)
}

fn api_login(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    async_main(async move { client.login().await })?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn api_logout(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info(repo.host(), repo.user())?;

    Ok(Value::Null)
}

fn dump_catalog(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    async_main(async move {
        let client = BackupReader::start(
            client,
            crypt_config.clone(),
            repo.store(),
            &snapshot.group().backup_type(),
            &snapshot.group().backup_id(),
            snapshot.backup_time(),
            true,
        ).await?;

        let manifest = client.download_manifest().await?;

        let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalogfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        std::io::copy(&mut reader, &mut catalogfile)
            .map_err(|err| format_err!("unable to download catalog - {}", err))?;

        catalogfile.seek(SeekFrom::Start(0))?;

        let mut catalog_reader = CatalogReader::new(catalogfile);

        catalog_reader.dump()?;

        record_repository(&repo);

        Ok::<(), Error>(())
    })?;

    Ok(Value::Null)
}

fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = async_main(async move {
        client.get(&path, Some(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
        }))).await
    })?;

    record_repository(&repo);

    let list: Value = result["data"].take();

    if output_format == "text" {
        for item in list.as_array().unwrap().iter() {
            println!(
                "{} {}",
                strip_server_file_extension(item["filename"].as_str().unwrap()),
                item["size"].as_u64().unwrap_or(0),
            );
        }
    } else {
        format_and_print_result(&list, &output_format);
    }

    Ok(Value::Null)
}

fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = async_main(async move { client.post(&path, None).await })?;

    record_repository(&repo);

    Ok(result)
}

fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }
    bail!("unable to parse directory specification '{}'", value);
}
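
// A small test sketch for parse_backupspec() (hypothetical inputs):
#[cfg(test)]
mod backupspec_tests {
    use super::parse_backupspec;

    #[test]
    fn parses_label_and_path() {
        assert_eq!(parse_backupspec("root.pxar:/").unwrap(), ("root.pxar", "/"));
        assert_eq!(parse_backupspec("disk.img:/dev/sda").unwrap(), ("disk.img", "/dev/sda"));
        // a label without a recognized archive extension is rejected
        assert!(parse_backupspec("root:/").is_err());
    }
}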

fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<SenderWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow buffering of up to 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}

fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("stat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                upload_catalog = true;
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    async_main(async move {
        let client = BackupWriter::start(
            client,
            repo.store(),
            backup_type,
            &backup_id,
            backup_time,
            verbose,
        ).await?;

        let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
        let mut manifest = BackupManifest::new(snapshot);

        let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

        for (backup_type, filename, target, size) in upload_list {
            match backup_type {
                BackupType::CONFIG => {
                    println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                    println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::PXAR => {
                    println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                    catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                    let stats = backup_directory(
                        &client,
                        &filename,
                        &target,
                        chunk_size_opt,
                        devices.clone(),
                        verbose,
                        skip_lost_and_found,
                        crypt_config.clone(),
                        catalog.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                    catalog.lock().unwrap().end_directory()?;
                }
                BackupType::IMAGE => {
                    println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = backup_image(
                        &client,
                        &filename,
                        &target,
                        size,
                        chunk_size_opt,
                        verbose,
                        crypt_config.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
            }
        }

        // finalize and upload catalog
        if upload_catalog {
            let mutex = Arc::try_unwrap(catalog)
                .map_err(|_| format_err!("unable to get catalog (still used)"))?;
            let mut catalog = mutex.into_inner().unwrap();

            catalog.finish()?;

            drop(catalog); // close upload stream

            let stats = catalog_result_rx.await??;

            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
        }

        if let Some(rsa_encrypted_key) = rsa_encrypted_key {
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            let stats = client
                .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
                .await?;
            manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            /*
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
            */
        }

        // create manifest (index.json)
        let manifest = manifest.into_json();

        println!("Upload index.json to '{:?}'", repo);
        let manifest = serde_json::to_string_pretty(&manifest)?.into();
        client
            .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
            .await?;

        client.finish().await?;

        let end_time = Local::now();
        let elapsed = end_time.signed_duration_since(start_time);
        println!("Duration: {}", elapsed);

        println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

        Ok(Value::Null)
    })
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    async_main(restore_do(param))
}

fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional
    // buffer/copy and thus slows down reading. Instead, we directly use the
    // RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

async fn restore_do(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };
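
    // The branches below dispatch on this server-side name: the manifest is
    // written out verbatim, .blob files are copied through, .didx archives are
    // restored via the pxar decoder (or streamed raw when writing to stdout),
    // and .fidx images are dumped chunk by chunk via dump_image().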

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name == MANIFEST_BLOB_NAME {
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            file_set_contents(target, backup_index_data.as_bytes(), None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".blob") {

        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {

        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
                if verbose {
                    eprintln!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {

        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;

    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}

fn upload_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    async_main(async move {
        client.upload("application/octet-stream", body, &path, Some(args)).await
    })
}

fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let _result = async_main(async move { client.post(&path, Some(param)).await })?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = async_main(async move { client.get(&path, None).await })?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
        let roundup = total/200;

        println!(
            "total: {} used: {} ({} %) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user(), None) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}

fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    async_main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| strip_server_file_extension(&v))
        .collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
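
// The loop above yields the powers of two from 64 through 4096, matching the
// chunk-size schema limits declared in main() below.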

fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
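
// Example (assumed shell usage) for non-interactive runs, e.g. from cron
// (hypothetical key path):
//
//   PBS_ENCRYPTION_PASSWORD='my secret' \
//       proxmox-backup-client backup root.pxar:/ --keyfile ./my-backup.key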

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification failed!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification failed!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}

fn key_mgmt_cli() -> CliCommandMap {

    let kdf_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
            .default("scrypt")
            .into()
    );

    let key_create_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create,
            ObjectSchema::new("Create a new encryption key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_change_passphrase,
            ObjectSchema::new("Change the passphrase required to decrypt the key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create_master_key,
            ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
        ));

    let key_import_master_pubkey_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_import_master_pubkey,
            ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
                .required("path", StringSchema::new("File system path."))
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create".to_owned(), key_create_cmd_def.into())
        .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
        .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
        .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into())
}

fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This stays in the foreground with debug output enabled, as None is
        // passed for the RawFd.
        return async_main(mount_do(param, None));
    }

    // The process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Block the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            async_main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder =
            pxar::Decoder::<Box<dyn pxar::fuse::ReadSeek>, fn(&Path) -> Result<(), Error>>::new(
                Box::new(reader),
                |_| Ok(()),
            )?;
        let options = OsStr::new("ro,default_permissions");
        let mut session = pxar::fuse::Session::from_decoder(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session, but do not call the fuse daemonize helper, as that
        // would cause issues with the runtime after the fork
        let daemonize = false;
        session.mount(&Path::new(target), daemonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish daemon creation by redirecting the standard file descriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it
            // can terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}

fn main() {

    let backup_source_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Backup source specification ([<label>:<path>]).")
            .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
            .into()
    );

    let backup_cmd_def = CliCommand::new(
        ApiMethod::new(
            create_backup,
            ObjectSchema::new("Create (host) backup.")
                .required(
                    "backupspec",
                    ArraySchema::new(
                        "List of backup source specifications ([<label.ext>:<path>] ...)",
                        backup_source_schema,
                    ).min_length(1)
                )
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "include-dev",
                    ArraySchema::new(
                        "Include mountpoints with the same st_dev number (see ``man fstat``) as specified files.",
                        StringSchema::new("Path to file.").into()
                    )
                )
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false))
                .optional(
                    "skip-lost-and-found",
                    BooleanSchema::new("Skip the lost+found directory.").default(false))
                .optional(
                    "backup-type",
                    BACKUP_TYPE_SCHEMA.clone()
                )
                .optional(
                    "backup-id",
                    BACKUP_ID_SCHEMA.clone()
                )
                .optional(
                    "backup-time",
                    BACKUP_TIME_SCHEMA.clone()
                )
                .optional(
                    "chunk-size",
                    IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                        .minimum(64)
                        .maximum(4096)
                        .default(4096)
                )
        ))
        .arg_param(vec!["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(
        ApiMethod::new(
            upload_log,
            ObjectSchema::new("Upload backup log file.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .required("logfile", StringSchema::new("The path to the log file you want to upload."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
        ))
        .arg_param(vec!["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_backup_groups,
            ObjectSchema::new("List backup groups.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshots,
            ObjectSchema::new("List backup snapshots.")
                .optional("group", StringSchema::new("Backup group."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(
        ApiMethod::new(
            forget_snapshots,
            ObjectSchema::new("Forget (remove) backup snapshots.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(
        ApiMethod::new(
            start_garbage_collection,
            ObjectSchema::new("Start garbage collection for a specific repository.")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(
        ApiMethod::new(
            restore,
            ObjectSchema::new("Restore backup repository.")
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
                ))
                .optional(
                    "allow-existing-dirs",
                    BooleanSchema::new("Do not fail if directories already exist.").default(false))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("keyfile", StringSchema::new("Path to encryption key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false)
                )
        ))
        .arg_param(vec!["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshot_files,
            ObjectSchema::new("List snapshot files.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let catalog_cmd_def = CliCommand::new(
        ApiMethod::new(
            dump_catalog,
            ObjectSchema::new("Dump catalog.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(
        ApiMethod::new(
            prune,
            proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
                ObjectSchema::new("Prune backup repository.")
                    .required("group", StringSchema::new("Backup group."))
                    .optional("repository", REPO_URL_SCHEMA.clone())
            )
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(
        ApiMethod::new(
            status,
            ObjectSchema::new("Get repository status.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(
        ApiMethod::new(
            api_login,
            ObjectSchema::new("Try to log in. If successful, store the ticket.")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(
        ApiMethod::new(
            api_logout,
            ObjectSchema::new("Logout (delete stored ticket).")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let mount_cmd_def = CliCommand::new(
        ApiMethod::new(
            mount,
            ObjectSchema::new("Mount pxar archive.")
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new("Target directory path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("keyfile", StringSchema::new("Path to encryption key."))
                .optional("verbose", BooleanSchema::new("Verbose output.").default(false))
        ))
        .arg_param(vec!["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("upload-log".to_owned(), upload_log_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("catalog".to_owned(), catalog_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("login".to_owned(), login_cmd_def.into())
        .insert("logout".to_owned(), logout_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into())
        .insert("files".to_owned(), files_cmd_def.into())
        .insert("status".to_owned(), status_cmd_def.into())
        .insert("key".to_owned(), key_mgmt_cli().into())
        .insert("mount".to_owned(), mount_cmd_def.into());

    run_cli_command(cmd_def.into());
}
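
// Helper to run a future to completion on a fresh Tokio runtime. Creating a
// new runtime per call is acceptable here, since each CLI invocation executes
// a single top-level task and exits afterwards.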
fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let ret = rt.block_on(fut);
    rt.shutdown_now();
    ret
}