]> git.proxmox.com Git - proxmox-backup.git/blob - src/bin/proxmox-backup-client.rs
Revert previous, commit, use UTC RFC3339 without timezone (Z)
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
1 //#[macro_use]
2 extern crate proxmox_backup;
3
4 use failure::*;
5 //use std::os::unix::io::AsRawFd;
6 use chrono::{Local, Utc, TimeZone};
7 use std::path::{Path, PathBuf};
8 use std::collections::HashMap;
9 use std::io::Write;
10
11 use proxmox_backup::tools;
12 use proxmox_backup::cli::*;
13 use proxmox_backup::api_schema::*;
14 use proxmox_backup::api_schema::router::*;
15 use proxmox_backup::client::*;
16 use proxmox_backup::backup::*;
17 use proxmox_backup::pxar;
18
19 //use proxmox_backup::backup::image_index::*;
20 //use proxmox_backup::config::datastore;
21 //use proxmox_backup::pxar::encoder::*;
22 //use proxmox_backup::backup::datastore::*;
23
24 use serde_json::{json, Value};
25 //use hyper::Body;
26 use std::sync::Arc;
27 use regex::Regex;
28 use xdg::BaseDirectories;
29
30 use lazy_static::lazy_static;
31 use futures::*;
32 use tokio::sync::mpsc;
33
lazy_static! {
    // Matches a backup specification of the form
    // "<archive-name>.<pxar|img|conf>:<source-path>"; capture 1 is the
    // target archive name, capture 2 the local source path.
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();

    // Shared schema for the --repository CLI parameter (a repository URL,
    // validated by the BACKUP_REPO_URL format, limited to 256 chars).
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
44
45
/// Return the default repository taken from the `PBS_REPOSITORY`
/// environment variable, or `None` when it is unset or not valid unicode.
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
49
50 fn extract_repository_from_value(
51 param: &Value,
52 ) -> Result<BackupRepository, Error> {
53
54 let repo_url = param["repository"]
55 .as_str()
56 .map(String::from)
57 .or_else(get_default_repository)
58 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
59
60 let repo: BackupRepository = repo_url.parse()?;
61
62 Ok(repo)
63 }
64
65 fn extract_repository_from_map(
66 param: &HashMap<String, String>,
67 ) -> Option<BackupRepository> {
68
69 param.get("repository")
70 .map(String::from)
71 .or_else(get_default_repository)
72 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
73 }
74
75 fn record_repository(repo: &BackupRepository) {
76
77 let base = match BaseDirectories::with_prefix("proxmox-backup") {
78 Ok(v) => v,
79 _ => return,
80 };
81
82 // usually $HOME/.cache/proxmox-backup/repo-list
83 let path = match base.place_cache_file("repo-list") {
84 Ok(v) => v,
85 _ => return,
86 };
87
88 let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
89
90 let repo = repo.to_string();
91
92 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
93
94 let mut map = serde_json::map::Map::new();
95
96 loop {
97 let mut max_used = 0;
98 let mut max_repo = None;
99 for (repo, count) in data.as_object().unwrap() {
100 if map.contains_key(repo) { continue; }
101 if let Some(count) = count.as_i64() {
102 if count > max_used {
103 max_used = count;
104 max_repo = Some(repo);
105 }
106 }
107 }
108 if let Some(repo) = max_repo {
109 map.insert(repo.to_owned(), json!(max_used));
110 } else {
111 break;
112 }
113 if map.len() > 10 { // store max. 10 repos
114 break;
115 }
116 }
117
118 let new_data = json!(map);
119
120 let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
121 }
122
123 fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
124
125 let mut result = vec![];
126
127 let base = match BaseDirectories::with_prefix("proxmox-backup") {
128 Ok(v) => v,
129 _ => return result,
130 };
131
132 // usually $HOME/.cache/proxmox-backup/repo-list
133 let path = match base.place_cache_file("repo-list") {
134 Ok(v) => v,
135 _ => return result,
136 };
137
138 let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
139
140 if let Some(map) = data.as_object() {
141 for (repo, _count) in map {
142 result.push(repo.to_owned());
143 }
144 }
145
146 result
147 }
148
/// Serialize a directory as a pxar archive and upload it as a
/// dynamically-chunked ("dynamic") archive.
///
/// The pxar serialization and chunking run in a separate tokio task so
/// they can proceed in parallel with the upload; the mpsc channel in
/// between buffers up to 10 chunks. `chunk_size` of `None` lets
/// `ChunkStream` pick its own default (defined elsewhere).
fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    all_file_systems: bool,
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    // the channel carries Result items; flatten Result<Result<..>> into one level
    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    // blocks until the whole archive has been uploaded
    client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(())
}
178
/// Upload a regular file or block device as a fixed-chunk ("fixed")
/// image archive of `image_size` bytes.
///
/// The file is read as a raw byte stream and regrouped into fixed-size
/// chunks; `chunk_size` defaults to 4 MiB when not given.
fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).wait()?;

    // raw byte frames from the file, converted to our error type
    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    // regroup into fixed-size chunks (default 4 MiB)
    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;

    Ok(())
}
202
/// Strip the server-side index/blob extensions (".didx", ".fidx", ".blob")
/// from a list of archive file names, yielding the client-visible names.
///
/// Names without one of these extensions are passed through unchanged
/// (which should not happen for well-formed server responses).
/// NOTE(review): the typo in the function name ("expenstions") is kept to
/// preserve the interface used by other functions in this file.
fn strip_server_file_expenstions(list: Vec<String>) -> Vec<String> {

    const SERVER_EXTENSIONS: [&str; 3] = [".didx", ".fidx", ".blob"];

    list.into_iter()
        .map(|file| {
            // all three extensions are 5 bytes long, so one slice suffices
            if SERVER_EXTENSIONS.iter().any(|ext| file.ends_with(ext)) {
                file[..file.len()-5].to_owned()
            } else {
                file // should not happen
            }
        })
        .collect()
}
221
/// CLI command: list the backup groups of a datastore
/// (`GET api2/json/admin/datastore/<store>/groups`).
///
/// With --output-format "text" (the default) prints a table of
/// group path | last backup time | backup count | files; otherwise the
/// collected records are handed to the generic output formatter.
fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).wait()?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    // sort by (backup-type, backup-id)
    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        // file names come with server-side index extensions; strip them
        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_server_file_expenstions(files);

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
297
/// CLI command: list snapshots of a datastore
/// (`GET api2/json/admin/datastore/<store>/snapshots`).
///
/// An optional "group" parameter ("<type>/<id>") restricts the listing to
/// one backup group. Output follows the same text/formatter split as
/// `list_backup_groups`.
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    // optional group filter, passed as query arguments
    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = client.get(&path, Some(args)).wait()?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        // strip server-side index extensions for display
        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_server_file_expenstions(files);

        if output_format == "text" {
            println!("{} | {}", path, tools::join(&files, ' '));
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
356
357 fn forget_snapshots(
358 param: Value,
359 _info: &ApiMethod,
360 _rpcenv: &mut dyn RpcEnvironment,
361 ) -> Result<Value, Error> {
362
363 let repo = extract_repository_from_value(&param)?;
364
365 let path = tools::required_string_param(&param, "snapshot")?;
366 let snapshot = BackupDir::parse(path)?;
367
368 let mut client = HttpClient::new(repo.host(), repo.user())?;
369
370 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
371
372 let result = client.delete(&path, Some(json!({
373 "backup-type": snapshot.group().backup_type(),
374 "backup-id": snapshot.group().backup_id(),
375 "backup-time": snapshot.backup_time().timestamp(),
376 }))).wait()?;
377
378 record_repository(&repo);
379
380 Ok(result)
381 }
382
383 fn start_garbage_collection(
384 param: Value,
385 _info: &ApiMethod,
386 _rpcenv: &mut dyn RpcEnvironment,
387 ) -> Result<Value, Error> {
388
389 let repo = extract_repository_from_value(&param)?;
390
391 let mut client = HttpClient::new(repo.host(), repo.user())?;
392
393 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
394
395 let result = client.post(&path, None).wait()?;
396
397 record_repository(&repo);
398
399 Ok(result)
400 }
401
402 fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
403
404 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
405 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
406 }
407 bail!("unable to parse directory specification '{}'", value);
408 }
409
/// CLI command: create a new backup from one or more backup specifications
/// ("<name>.pxar:<dir>", "<name>.img:<file|blockdev>", "<name>.conf:<file>").
///
/// Validates every spec up-front (so we fail before any upload starts),
/// optionally loads an encryption key, then uploads each item through
/// `backup_directory` / `backup_image` / `upload_blob_from_file`. If a
/// master public key is installed, the backup key is additionally uploaded
/// RSA-encrypted so it can be recovered with the master private key.
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    // chunk-size parameter is given in KiB
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    // backup id defaults to the local node name
    let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG };

    // validation pass: check every source before starting any upload
    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };
        let file_type = metadata.file_type();

        // the regex guarantees target has an extension, hence the unwrap
        let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = tools::image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    // truncate to whole seconds (backup times use second resolution)
    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup");
    println!("Client name: {}", tools::nodename());
    println!("Start Time: {}", backup_time.to_rfc3339());

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            // with a master public key installed, also produce the backup
            // key RSA-encrypted for later recovery via the master key
            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;

    // upload pass
    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    all_file_systems,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
        }
    }

    // uploaded unencrypted (it is already RSA-encrypted with the master key)
    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        client.upload_blob_from_data(rsa_encrypted_key, target, None, false).wait()?;

        // manual recovery:
        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    client.finish().wait()?;

    let end_time = Utc.timestamp(Utc::now().timestamp(), 0);
    let elapsed = end_time.signed_duration_since(backup_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339());

    Ok(Value::Null)
}
570
571 fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
572
573 let mut result = vec![];
574
575 let data: Vec<&str> = arg.splitn(2, ':').collect();
576
577 if data.len() != 2 {
578 result.push(String::from("root.pxar:/"));
579 result.push(String::from("etc.pxar:/etc"));
580 return result;
581 }
582
583 let files = tools::complete_file_name(data[1], param);
584
585 for file in files {
586 result.push(format!("{}:{}", data[0], file));
587 }
588
589 result
590 }
591
/// CLI command: restore an archive from a snapshot.
///
/// The "snapshot" parameter may name a group ("<type>/<id>"), in which
/// case the most recent snapshot of that group is used, or a full
/// snapshot path. The archive type is derived from the client-side name:
/// ".pxar" -> dynamic index, ".img" -> fixed index, anything else -> blob.
/// A "target" of "-" writes the data to stdout instead of the file system.
fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // exactly one '/' means a group path; pick its latest snapshot
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.len() == 0 {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        // NOTE(review): assumes the server returns the newest snapshot
        // first — confirm against the snapshots API ordering
        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) }; // None => stdout

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // map the client-side archive name to the server-side file name
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;

    use std::os::unix::fs::OpenOptionsExt;

    // anonymous temp file (O_TMPFILE) to hold the downloaded index
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    if server_archive_name.ends_with(".blob") {

        // blobs are small; download into memory, verify, then decode
        let writer = Vec::with_capacity(1024*1024);
        let blob_data = client.download(&server_archive_name, writer).wait()?;
        let blob = DataBlob::from_raw(blob_data)?;
        blob.verify_crc()?;

        let raw_data = match crypt_config {
            Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
            None => blob.decode(None)?,
        };

        if let Some(target) = target {
            crate::tools::file_set_contents(target, &raw_data, None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&raw_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {
        // dynamic index (pxar archive): download index, then fetch chunks
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;

        // cache the most frequently referenced chunks
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            // decode the pxar stream into the target directory
            let feature_flags = pxar::CA_FORMAT_DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
                if verbose {
                    println!("{:?}", path);
                }
                Ok(())
            });

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            // write the raw pxar stream to stdout
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {
        // fixed index (image archive)
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedFixedReader::new(index, chunk_reader);

        if let Some(target) = target {
            // refuse to overwrite an existing file (create_new)
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to store data - {}", err))?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else {
        bail!("unknown archive file extension (expected .pxar of .img)");
    }

    Ok(Value::Null)
}
749
750 fn prune(
751 mut param: Value,
752 _info: &ApiMethod,
753 _rpcenv: &mut dyn RpcEnvironment,
754 ) -> Result<Value, Error> {
755
756 let repo = extract_repository_from_value(&param)?;
757
758 let mut client = HttpClient::new(repo.host(), repo.user())?;
759
760 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
761
762 param.as_object_mut().unwrap().remove("repository");
763
764 let result = client.post(&path, Some(param)).wait()?;
765
766 record_repository(&repo);
767
768 Ok(result)
769 }
770
771 fn status(
772 param: Value,
773 _info: &ApiMethod,
774 _rpcenv: &mut dyn RpcEnvironment,
775 ) -> Result<Value, Error> {
776
777 let repo = extract_repository_from_value(&param)?;
778
779 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
780
781 let client = HttpClient::new(repo.host(), repo.user())?;
782
783 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
784
785 let result = client.get(&path, None).wait()?;
786 let data = &result["data"];
787
788 record_repository(&repo);
789
790 if output_format == "text" {
791 let total = data["total"].as_u64().unwrap();
792 let used = data["used"].as_u64().unwrap();
793 let avail = data["avail"].as_u64().unwrap();
794 let roundup = total/200;
795
796 println!(
797 "total: {} used: {} ({} %) available: {}",
798 total,
799 used,
800 ((used+roundup)*100)/total,
801 avail,
802 );
803 } else {
804 format_and_print_result(data, &output_format);
805 }
806
807 Ok(Value::Null)
808 }
809
810 // like get, but simply ignore errors and return Null instead
811 fn try_get(repo: &BackupRepository, url: &str) -> Value {
812
813 let client = match HttpClient::new(repo.host(), repo.user()) {
814 Ok(v) => v,
815 _ => return Value::Null,
816 };
817
818 let mut resp = match client.get(url, None).wait() {
819 Ok(v) => v,
820 _ => return Value::Null,
821 };
822
823 if let Some(map) = resp.as_object_mut() {
824 if let Some(data) = map.remove("data") {
825 return data;
826 }
827 }
828 Value::Null
829 }
830
831 fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
832
833 let mut result = vec![];
834
835 let repo = match extract_repository_from_map(param) {
836 Some(v) => v,
837 _ => return result,
838 };
839
840 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
841
842 let data = try_get(&repo, &path);
843
844 if let Some(list) = data.as_array() {
845 for item in list {
846 if let (Some(backup_id), Some(backup_type)) =
847 (item["backup-id"].as_str(), item["backup-type"].as_str())
848 {
849 result.push(format!("{}/{}", backup_type, backup_id));
850 }
851 }
852 }
853
854 result
855 }
856
857 fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
858
859 let mut result = vec![];
860
861 let repo = match extract_repository_from_map(param) {
862 Some(v) => v,
863 _ => return result,
864 };
865
866 if arg.matches('/').count() < 2 {
867 let groups = complete_backup_group(arg, param);
868 for group in groups {
869 result.push(group.to_string());
870 result.push(format!("{}/", group));
871 }
872 return result;
873 }
874
875 let mut parts = arg.split('/');
876 let query = tools::json_object_to_query(json!({
877 "backup-type": parts.next().unwrap(),
878 "backup-id": parts.next().unwrap(),
879 })).unwrap();
880
881 let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
882
883 let data = try_get(&repo, &path);
884
885 if let Some(list) = data.as_array() {
886 for item in list {
887 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
888 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
889 {
890 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
891 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
892 }
893 }
894 }
895
896 result
897 }
898
899 fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
900
901 let mut result = vec![];
902
903 let repo = match extract_repository_from_map(param) {
904 Some(v) => v,
905 _ => return result,
906 };
907
908 let snapshot = match param.get("snapshot") {
909 Some(path) => {
910 match BackupDir::parse(path) {
911 Ok(v) => v,
912 _ => return result,
913 }
914 }
915 _ => return result,
916 };
917
918 let query = tools::json_object_to_query(json!({
919 "backup-type": snapshot.group().backup_type(),
920 "backup-id": snapshot.group().backup_id(),
921 "backup-time": snapshot.backup_time().timestamp(),
922 })).unwrap();
923
924 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
925
926 let data = try_get(&repo, &path);
927
928 if let Some(list) = data.as_array() {
929 for item in list {
930 if let Some(filename) = item.as_str() {
931 result.push(filename.to_owned());
932 }
933 }
934 }
935
936 result
937 }
938
939 fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
940
941 let result = complete_server_file_name(arg, param);
942
943 strip_server_file_expenstions(result)
944 }
945
/// Shell completion for --chunk-size: powers of two from 64 up to and
/// including 4096 (KiB).
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = Vec::new();

    let mut size = 64;
    while size <= 4096 {
        result.push(size.to_string());
        size *= 2;
    }

    result
}
959
960 fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
961
962 // fixme: implement other input methods
963
964 use std::env::VarError::*;
965 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
966 Ok(p) => return Ok(p.as_bytes().to_vec()),
967 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
968 Err(NotPresent) => {
969 // Try another method
970 }
971 }
972
973 // If we're on a TTY, query the user for a password
974 if crate::tools::tty::stdin_isatty() {
975 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
976 }
977
978 bail!("no password input mechanism available");
979 }
980
/// CLI command: create a new 32-byte random encryption key at `path`.
///
/// With kdf "scrypt" (the default) the key is stored protected by an
/// interactively queried passphrase; with kdf "none" it is stored
/// unencrypted. Other kdf values are rejected by the CLI schema, which is
/// why the final branch is `unreachable!`.
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // 32 bytes of kernel-provided randomness
    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // truncate to whole seconds for the stored timestamps
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!(); // kdf values are restricted by the CLI schema
    }
}
1022
1023 fn master_pubkey_path() -> Result<PathBuf, Error> {
1024 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1025
1026 // usually $HOME/.config/proxmox-backup/master-public.pem
1027 let path = base.place_config_file("master-public.pem")?;
1028
1029 Ok(path)
1030 }
1031
1032 fn key_import_master_pubkey(
1033 param: Value,
1034 _info: &ApiMethod,
1035 _rpcenv: &mut dyn RpcEnvironment,
1036 ) -> Result<Value, Error> {
1037
1038 let path = tools::required_string_param(&param, "path")?;
1039 let path = PathBuf::from(path);
1040
1041 let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
1042
1043 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1044 bail!("Unable to decode PEM data - {}", err);
1045 }
1046
1047 let target_path = master_pubkey_path()?;
1048
1049 proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
1050
1051 println!("Imported public master key to {:?}", target_path);
1052
1053 Ok(Value::Null)
1054 }
1055
/// CLI command: generate a new 4096-bit RSA master key pair.
///
/// Writes `master-public.pem` (plain) and `master-private.pem`
/// (AES-256-CBC encrypted with an interactively queried passphrase) into
/// the current working directory. Requires a TTY for the password prompt.
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    // query the passphrase twice and compare
    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification fail!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    proxmox_backup::tools::file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    // private key is stored passphrase-protected (PKCS#8, AES-256-CBC)
    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    proxmox_backup::tools::file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}
1095
/// CLI command: change the passphrase of an existing encryption key file.
///
/// Loads and decrypts the key (old passphrase via
/// `get_encryption_key_password`), then re-stores it either protected by a
/// new interactively queried passphrase (kdf "scrypt") or unencrypted
/// (kdf "none"). The original creation timestamp is preserved. Other kdf
/// values are rejected by the CLI schema, hence the `unreachable!`.
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

    if kdf == "scrypt" {

        // query the new passphrase twice and compare
        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification fail!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // truncate to whole seconds for the stored timestamp
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!(); // kdf values are restricted by the CLI schema
    }
}
1148
1149 fn key_mgmt_cli() -> CliCommandMap {
1150
1151 let kdf_schema: Arc<Schema> = Arc::new(
1152 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
1153 .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
1154 .default("scrypt")
1155 .into()
1156 );
1157
1158 let key_create_cmd_def = CliCommand::new(
1159 ApiMethod::new(
1160 key_create,
1161 ObjectSchema::new("Create a new encryption key.")
1162 .required("path", StringSchema::new("File system path."))
1163 .optional("kdf", kdf_schema.clone())
1164 ))
1165 .arg_param(vec!["path"])
1166 .completion_cb("path", tools::complete_file_name);
1167
1168 let key_change_passphrase_cmd_def = CliCommand::new(
1169 ApiMethod::new(
1170 key_change_passphrase,
1171 ObjectSchema::new("Change the passphrase required to decrypt the key.")
1172 .required("path", StringSchema::new("File system path."))
1173 .optional("kdf", kdf_schema.clone())
1174 ))
1175 .arg_param(vec!["path"])
1176 .completion_cb("path", tools::complete_file_name);
1177
1178 let key_create_master_key_cmd_def = CliCommand::new(
1179 ApiMethod::new(
1180 key_create_master_key,
1181 ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
1182 ));
1183
1184 let key_import_master_pubkey_cmd_def = CliCommand::new(
1185 ApiMethod::new(
1186 key_import_master_pubkey,
1187 ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
1188 .required("path", StringSchema::new("File system path."))
1189 ))
1190 .arg_param(vec!["path"])
1191 .completion_cb("path", tools::complete_file_name);
1192
1193 let cmd_def = CliCommandMap::new()
1194 .insert("create".to_owned(), key_create_cmd_def.into())
1195 .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
1196 .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
1197 .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());
1198
1199 cmd_def
1200 }
1201
1202 fn main() {
1203
1204 let backup_source_schema: Arc<Schema> = Arc::new(
1205 StringSchema::new("Backup source specification ([<label>:<path>]).")
1206 .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
1207 .into()
1208 );
1209
1210 let backup_cmd_def = CliCommand::new(
1211 ApiMethod::new(
1212 create_backup,
1213 ObjectSchema::new("Create (host) backup.")
1214 .required(
1215 "backupspec",
1216 ArraySchema::new(
1217 "List of backup source specifications ([<label.ext>:<path>] ...)",
1218 backup_source_schema,
1219 ).min_length(1)
1220 )
1221 .optional("repository", REPO_URL_SCHEMA.clone())
1222 .optional(
1223 "keyfile",
1224 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
1225 .optional(
1226 "verbose",
1227 BooleanSchema::new("Verbose output.").default(false))
1228 .optional(
1229 "host-id",
1230 StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
1231 .optional(
1232 "chunk-size",
1233 IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
1234 .minimum(64)
1235 .maximum(4096)
1236 .default(4096)
1237 )
1238 ))
1239 .arg_param(vec!["backupspec"])
1240 .completion_cb("repository", complete_repository)
1241 .completion_cb("backupspec", complete_backup_source)
1242 .completion_cb("keyfile", tools::complete_file_name)
1243 .completion_cb("chunk-size", complete_chunk_size);
1244
1245 let list_cmd_def = CliCommand::new(
1246 ApiMethod::new(
1247 list_backup_groups,
1248 ObjectSchema::new("List backup groups.")
1249 .optional("repository", REPO_URL_SCHEMA.clone())
1250 .optional("output-format", OUTPUT_FORMAT.clone())
1251 ))
1252 .completion_cb("repository", complete_repository);
1253
1254 let snapshots_cmd_def = CliCommand::new(
1255 ApiMethod::new(
1256 list_snapshots,
1257 ObjectSchema::new("List backup snapshots.")
1258 .optional("group", StringSchema::new("Backup group."))
1259 .optional("repository", REPO_URL_SCHEMA.clone())
1260 .optional("output-format", OUTPUT_FORMAT.clone())
1261 ))
1262 .arg_param(vec!["group"])
1263 .completion_cb("group", complete_backup_group)
1264 .completion_cb("repository", complete_repository);
1265
1266 let forget_cmd_def = CliCommand::new(
1267 ApiMethod::new(
1268 forget_snapshots,
1269 ObjectSchema::new("Forget (remove) backup snapshots.")
1270 .required("snapshot", StringSchema::new("Snapshot path."))
1271 .optional("repository", REPO_URL_SCHEMA.clone())
1272 ))
1273 .arg_param(vec!["snapshot"])
1274 .completion_cb("repository", complete_repository)
1275 .completion_cb("snapshot", complete_group_or_snapshot);
1276
1277 let garbage_collect_cmd_def = CliCommand::new(
1278 ApiMethod::new(
1279 start_garbage_collection,
1280 ObjectSchema::new("Start garbage collection for a specific repository.")
1281 .optional("repository", REPO_URL_SCHEMA.clone())
1282 ))
1283 .completion_cb("repository", complete_repository);
1284
1285 let restore_cmd_def = CliCommand::new(
1286 ApiMethod::new(
1287 restore,
1288 ObjectSchema::new("Restore backup repository.")
1289 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1290 .required("archive-name", StringSchema::new("Backup archive name."))
1291 .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to stdandard output.
1292
1293 We do not extraxt '.pxar' archives when writing to stdandard output.
1294
1295 "###
1296 ))
1297 .optional("repository", REPO_URL_SCHEMA.clone())
1298 .optional("keyfile", StringSchema::new("Path to encryption key."))
1299 .optional(
1300 "verbose",
1301 BooleanSchema::new("Verbose output.").default(false)
1302 )
1303 ))
1304 .arg_param(vec!["snapshot", "archive-name", "target"])
1305 .completion_cb("repository", complete_repository)
1306 .completion_cb("snapshot", complete_group_or_snapshot)
1307 .completion_cb("archive-name", complete_archive_name)
1308 .completion_cb("target", tools::complete_file_name);
1309
1310 let prune_cmd_def = CliCommand::new(
1311 ApiMethod::new(
1312 prune,
1313 proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
1314 ObjectSchema::new("Prune backup repository.")
1315 .optional("repository", REPO_URL_SCHEMA.clone())
1316 )
1317 ))
1318 .completion_cb("repository", complete_repository);
1319
1320 let status_cmd_def = CliCommand::new(
1321 ApiMethod::new(
1322 status,
1323 ObjectSchema::new("Get repository status.")
1324 .optional("repository", REPO_URL_SCHEMA.clone())
1325 .optional("output-format", OUTPUT_FORMAT.clone())
1326 ))
1327 .completion_cb("repository", complete_repository);
1328
1329 let cmd_def = CliCommandMap::new()
1330 .insert("backup".to_owned(), backup_cmd_def.into())
1331 .insert("forget".to_owned(), forget_cmd_def.into())
1332 .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
1333 .insert("list".to_owned(), list_cmd_def.into())
1334 .insert("prune".to_owned(), prune_cmd_def.into())
1335 .insert("restore".to_owned(), restore_cmd_def.into())
1336 .insert("snapshots".to_owned(), snapshots_cmd_def.into())
1337 .insert("status".to_owned(), status_cmd_def.into())
1338 .insert("key".to_owned(), key_mgmt_cli().into());
1339
1340 hyper::rt::run(futures::future::lazy(move || {
1341 run_cli_command(cmd_def.into());
1342 Ok(())
1343 }));
1344
1345 }