]> git.proxmox.com Git - proxmox-backup.git/blob - src/bin/proxmox-backup-client.rs
src/bin/proxmox-backup-client.rs: implement download command
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
1 //#[macro_use]
2 extern crate proxmox_backup;
3
4 use failure::*;
5 //use std::os::unix::io::AsRawFd;
6 use chrono::{Local, TimeZone};
7 use std::path::{Path, PathBuf};
8 use std::collections::HashMap;
9
10 use proxmox_backup::tools;
11 use proxmox_backup::cli::*;
12 use proxmox_backup::api_schema::*;
13 use proxmox_backup::api_schema::router::*;
14 use proxmox_backup::client::*;
15 use proxmox_backup::backup::*;
16 //use proxmox_backup::backup::image_index::*;
17 //use proxmox_backup::config::datastore;
18 //use proxmox_backup::pxar::encoder::*;
19 //use proxmox_backup::backup::datastore::*;
20
21 use serde_json::{json, Value};
22 //use hyper::Body;
23 use std::sync::Arc;
24 use regex::Regex;
25 use xdg::BaseDirectories;
26
27 use lazy_static::lazy_static;
28 use futures::*;
29 use tokio::sync::mpsc;
30
lazy_static! {
    // Matches a backup specification "<archive-name>.<pxar|img|conf>:<path>",
    // capturing the archive name (group 1) and the source path (group 2).
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();

    // Schema for the "repository" CLI parameter, validated against the
    // BACKUP_REPO_URL format and limited to 256 characters.
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
41
42
43 fn record_repository(repo: &BackupRepository) {
44
45 let base = match BaseDirectories::with_prefix("proxmox-backup") {
46 Ok(v) => v,
47 _ => return,
48 };
49
50 // usually $HOME/.cache/proxmox-backup/repo-list
51 let path = match base.place_cache_file("repo-list") {
52 Ok(v) => v,
53 _ => return,
54 };
55
56 let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
57
58 let repo = repo.to_string();
59
60 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
61
62 let mut map = serde_json::map::Map::new();
63
64 loop {
65 let mut max_used = 0;
66 let mut max_repo = None;
67 for (repo, count) in data.as_object().unwrap() {
68 if map.contains_key(repo) { continue; }
69 if let Some(count) = count.as_i64() {
70 if count > max_used {
71 max_used = count;
72 max_repo = Some(repo);
73 }
74 }
75 }
76 if let Some(repo) = max_repo {
77 map.insert(repo.to_owned(), json!(max_used));
78 } else {
79 break;
80 }
81 if map.len() > 10 { // store max. 10 repos
82 break;
83 }
84 }
85
86 let new_data = json!(map);
87
88 let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
89 }
90
91 fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
92
93 let mut result = vec![];
94
95 let base = match BaseDirectories::with_prefix("proxmox-backup") {
96 Ok(v) => v,
97 _ => return result,
98 };
99
100 // usually $HOME/.cache/proxmox-backup/repo-list
101 let path = match base.place_cache_file("repo-list") {
102 Ok(v) => v,
103 _ => return result,
104 };
105
106 let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
107
108 if let Some(map) = data.as_object() {
109 for (repo, _count) in map {
110 result.push(repo.to_owned());
111 }
112 }
113
114 result
115 }
116
/// Create a dynamic-chunk ("pxar") archive from a local directory and upload it.
///
/// The directory is encoded as a pxar stream, cut into chunks and uploaded as
/// a "dynamic" index under `archive_name`. The chunker runs in a separately
/// spawned tokio task connected through a bounded channel, so encoding and
/// uploading proceed in parallel. Blocks until the upload has finished.
fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,  // chunker size hint; None uses the default
    all_file_systems: bool,     // if false, stay on one file system
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>, // encrypt uploaded data when set
) -> Result<(), Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    // receiver side: unwrap the inner Result carried through the channel
    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(())
}
146
147 fn backup_image<P: AsRef<Path>>(
148 client: &BackupClient,
149 image_path: P,
150 archive_name: &str,
151 image_size: u64,
152 chunk_size: Option<usize>,
153 _verbose: bool,
154 crypt_config: Option<Arc<CryptConfig>>,
155 ) -> Result<(), Error> {
156
157 let path = image_path.as_ref().to_owned();
158
159 let file = tokio::fs::File::open(path).wait()?;
160
161 let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
162 .map_err(Error::from);
163
164 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
165
166 client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;
167
168 Ok(())
169 }
170
/// Strip the server-side archive extensions (".didx", ".fidx", ".blob") from
/// a list of file names, returning the client-visible archive names.
/// Names with an unknown extension are passed through unchanged (should not
/// happen).
fn strip_server_file_expenstions(list: Vec<String>) -> Vec<String> {

    // all three server extensions are exactly 5 bytes long
    const SERVER_EXTENSIONS: [&str; 3] = [".didx", ".fidx", ".blob"];

    let mut result = vec![];

    for file in list.into_iter() {
        if SERVER_EXTENSIONS.iter().any(|ext| file.ends_with(ext)) {
            result.push(file[..file.len()-5].to_owned());
        } else {
            result.push(file); // should not happen
        }
    }

    result
}
189
190 /* not used:
191 fn list_backups(
192 param: Value,
193 _info: &ApiMethod,
194 _rpcenv: &mut dyn RpcEnvironment,
195 ) -> Result<Value, Error> {
196
197 let repo_url = tools::required_string_param(&param, "repository")?;
198 let repo: BackupRepository = repo_url.parse()?;
199
200 let mut client = HttpClient::new(repo.host(), repo.user())?;
201
202 let path = format!("api2/json/admin/datastore/{}/backups", repo.store());
203
204 let result = client.get(&path, None)?;
205
206 record_repository(&repo);
207
208 // fixme: implement and use output formatter instead ..
209 let list = result["data"].as_array().unwrap();
210
211 for item in list {
212
213 let id = item["backup-id"].as_str().unwrap();
214 let btype = item["backup-type"].as_str().unwrap();
215 let epoch = item["backup-time"].as_i64().unwrap();
216
217 let backup_dir = BackupDir::new(btype, id, epoch);
218
219 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
220 let files = strip_server_file_expenstions(files);
221
222 for filename in files {
223 let path = backup_dir.relative_path().to_str().unwrap().to_owned();
224 println!("{} | {}/{}", backup_dir.backup_time().format("%c"), path, filename);
225 }
226 }
227
228 //Ok(result)
229 Ok(Value::Null)
230 }
231 */
232
233 fn list_backup_groups(
234 param: Value,
235 _info: &ApiMethod,
236 _rpcenv: &mut dyn RpcEnvironment,
237 ) -> Result<Value, Error> {
238
239 let repo_url = tools::required_string_param(&param, "repository")?;
240 let repo: BackupRepository = repo_url.parse()?;
241
242 let client = HttpClient::new(repo.host(), repo.user())?;
243
244 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
245
246 let mut result = client.get(&path, None).wait()?;
247
248 record_repository(&repo);
249
250 // fixme: implement and use output formatter instead ..
251 let list = result["data"].as_array_mut().unwrap();
252
253 list.sort_unstable_by(|a, b| {
254 let a_id = a["backup-id"].as_str().unwrap();
255 let a_backup_type = a["backup-type"].as_str().unwrap();
256 let b_id = b["backup-id"].as_str().unwrap();
257 let b_backup_type = b["backup-type"].as_str().unwrap();
258
259 let type_order = a_backup_type.cmp(b_backup_type);
260 if type_order == std::cmp::Ordering::Equal {
261 a_id.cmp(b_id)
262 } else {
263 type_order
264 }
265 });
266
267 for item in list {
268
269 let id = item["backup-id"].as_str().unwrap();
270 let btype = item["backup-type"].as_str().unwrap();
271 let epoch = item["last-backup"].as_i64().unwrap();
272 let last_backup = Local.timestamp(epoch, 0);
273 let backup_count = item["backup-count"].as_u64().unwrap();
274
275 let group = BackupGroup::new(btype, id);
276
277 let path = group.group_path().to_str().unwrap().to_owned();
278
279 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
280 let files = strip_server_file_expenstions(files);
281
282 println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
283 backup_count, tools::join(&files, ' '));
284 }
285
286 //Ok(result)
287 Ok(Value::Null)
288 }
289
290 fn list_snapshots(
291 param: Value,
292 _info: &ApiMethod,
293 _rpcenv: &mut dyn RpcEnvironment,
294 ) -> Result<Value, Error> {
295
296 let repo_url = tools::required_string_param(&param, "repository")?;
297 let repo: BackupRepository = repo_url.parse()?;
298
299 let path = tools::required_string_param(&param, "group")?;
300 let group = BackupGroup::parse(path)?;
301
302 let client = HttpClient::new(repo.host(), repo.user())?;
303
304 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
305
306 let result = client.get(&path, Some(json!({
307 "backup-type": group.backup_type(),
308 "backup-id": group.backup_id(),
309 }))).wait()?;
310
311 record_repository(&repo);
312
313 // fixme: implement and use output formatter instead ..
314 let list = result["data"].as_array().unwrap();
315
316 for item in list {
317
318 let id = item["backup-id"].as_str().unwrap();
319 let btype = item["backup-type"].as_str().unwrap();
320 let epoch = item["backup-time"].as_i64().unwrap();
321
322 let snapshot = BackupDir::new(btype, id, epoch);
323
324 let path = snapshot.relative_path().to_str().unwrap().to_owned();
325
326 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
327 let files = strip_server_file_expenstions(files);
328
329 println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
330 }
331
332 Ok(Value::Null)
333 }
334
335 fn forget_snapshots(
336 param: Value,
337 _info: &ApiMethod,
338 _rpcenv: &mut dyn RpcEnvironment,
339 ) -> Result<Value, Error> {
340
341 let repo_url = tools::required_string_param(&param, "repository")?;
342 let repo: BackupRepository = repo_url.parse()?;
343
344 let path = tools::required_string_param(&param, "snapshot")?;
345 let snapshot = BackupDir::parse(path)?;
346
347 let mut client = HttpClient::new(repo.host(), repo.user())?;
348
349 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
350
351 let result = client.delete(&path, Some(json!({
352 "backup-type": snapshot.group().backup_type(),
353 "backup-id": snapshot.group().backup_id(),
354 "backup-time": snapshot.backup_time().timestamp(),
355 }))).wait()?;
356
357 record_repository(&repo);
358
359 Ok(result)
360 }
361
362 fn start_garbage_collection(
363 param: Value,
364 _info: &ApiMethod,
365 _rpcenv: &mut dyn RpcEnvironment,
366 ) -> Result<Value, Error> {
367
368 let repo_url = tools::required_string_param(&param, "repository")?;
369 let repo: BackupRepository = repo_url.parse()?;
370
371 let mut client = HttpClient::new(repo.host(), repo.user())?;
372
373 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
374
375 let result = client.post(&path, None).wait()?;
376
377 record_repository(&repo);
378
379 Ok(result)
380 }
381
382 fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
383
384 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
385 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
386 }
387 bail!("unable to parse directory specification '{}'", value);
388 }
389
/// CLI command: run a (host type) backup for all given backup specifications.
///
/// Each "backupspec" has the form "<archive-name>.<pxar|img|conf>:<path>":
/// directories become pxar (dynamic chunk) archives, files/block devices
/// become fixed chunk image archives, and config files are uploaded as single
/// blobs. When a keyfile is given all data is encrypted; if a master public
/// key is installed, the RSA-encrypted encryption key is uploaded as well.
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let repo: BackupRepository = repo_url.parse()?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    // "chunk-size" is given in KiB on the command line
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    // backup id defaults to this host's name
    let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG };

    // Validate every backup specification (and stat its source) up front so
    // we fail before opening the backup session on the server.
    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };
        let file_type = metadata.file_type();

        // target extension determines the archive type
        let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = tools::image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    // second-resolution timestamp for the new snapshot
    let backup_time = Local.timestamp(Local::now().timestamp(), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup");
    println!("Client name: {}", tools::nodename());
    println!("Start Time: {}", backup_time.to_rfc3339());

    // load and decrypt the encryption key, if one was requested
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // open the backup session on the server (shadows the plain http client)
    let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    all_file_systems,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
        }
    }

    // If a master public key is installed, also upload the backup encryption
    // key encrypted with it, so the key can be recovered via the master key.
    if let Some(crypt_config) = crypt_config {
        let path = master_pubkey_path()?;
        if path.exists() {
            let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
            let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
            let enc_key = crypt_config.generate_rsa_encoded_key(rsa)?;
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            client.upload_blob_from_data(enc_key, target, None, false).wait()?;

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            /*
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
            */
        }
    }

    client.finish().wait()?;

    let end_time = Local.timestamp(Local::now().timestamp(), 0);
    let elapsed = end_time.signed_duration_since(backup_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339());

    Ok(Value::Null)
}
547
548 fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
549
550 let mut result = vec![];
551
552 let data: Vec<&str> = arg.splitn(2, ':').collect();
553
554 if data.len() != 2 {
555 result.push(String::from("root.pxar:/"));
556 result.push(String::from("etc.pxar:/etc"));
557 return result;
558 }
559
560 let files = tools::complete_file_name(data[1], param);
561
562 for file in files {
563 result.push(format!("{}:{}", data[0], file));
564 }
565
566 result
567 }
568
569 fn restore(
570 param: Value,
571 _info: &ApiMethod,
572 _rpcenv: &mut dyn RpcEnvironment,
573 ) -> Result<Value, Error> {
574
575 let repo_url = tools::required_string_param(&param, "repository")?;
576 let repo: BackupRepository = repo_url.parse()?;
577
578 let archive_name = tools::required_string_param(&param, "archive-name")?;
579
580 let mut client = HttpClient::new(repo.host(), repo.user())?;
581
582 record_repository(&repo);
583
584 let path = tools::required_string_param(&param, "snapshot")?;
585
586 let query;
587
588 if path.matches('/').count() == 1 {
589 let group = BackupGroup::parse(path)?;
590
591 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
592 let result = client.get(&path, Some(json!({
593 "backup-type": group.backup_type(),
594 "backup-id": group.backup_id(),
595 }))).wait()?;
596
597 let list = result["data"].as_array().unwrap();
598 if list.len() == 0 {
599 bail!("backup group '{}' does not contain any snapshots:", path);
600 }
601
602 query = tools::json_object_to_query(json!({
603 "backup-type": group.backup_type(),
604 "backup-id": group.backup_id(),
605 "backup-time": list[0]["backup-time"].as_i64().unwrap(),
606 "archive-name": archive_name,
607 }))?;
608 } else {
609 let snapshot = BackupDir::parse(path)?;
610
611 query = tools::json_object_to_query(json!({
612 "backup-type": snapshot.group().backup_type(),
613 "backup-id": snapshot.group().backup_id(),
614 "backup-time": snapshot.backup_time().timestamp(),
615 "archive-name": archive_name,
616 }))?;
617 }
618
619 let target = tools::required_string_param(&param, "target")?;
620
621 if archive_name.ends_with(".pxar") {
622 let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);
623
624 println!("DOWNLOAD FILE {} to {}", path, target);
625
626 let target = PathBuf::from(target);
627 let writer = PxarDecodeWriter::new(&target, true)?;
628 client.download(&path, Box::new(writer)).wait()?;
629 } else {
630 bail!("unknown file extensions - unable to download '{}'", archive_name);
631 }
632
633 Ok(Value::Null)
634 }
635
636 fn download(
637 param: Value,
638 _info: &ApiMethod,
639 _rpcenv: &mut dyn RpcEnvironment,
640 ) -> Result<Value, Error> {
641
642 let repo_url = tools::required_string_param(&param, "repository")?;
643 let repo: BackupRepository = repo_url.parse()?;
644
645 let file_name = tools::required_string_param(&param, "file-name")?;
646
647 let mut client = HttpClient::new(repo.host(), repo.user())?;
648
649 record_repository(&repo);
650
651 let path = tools::required_string_param(&param, "snapshot")?;
652
653 let query;
654
655 if path.matches('/').count() == 1 {
656 let group = BackupGroup::parse(path)?;
657
658 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
659 let result = client.get(&path, Some(json!({
660 "backup-type": group.backup_type(),
661 "backup-id": group.backup_id(),
662 }))).wait()?;
663
664 let list = result["data"].as_array().unwrap();
665 if list.len() == 0 {
666 bail!("backup group '{}' does not contain any snapshots:", path);
667 }
668
669 query = tools::json_object_to_query(json!({
670 "backup-type": group.backup_type(),
671 "backup-id": group.backup_id(),
672 "backup-time": list[0]["backup-time"].as_i64().unwrap(),
673 "file-name": file_name,
674 }))?;
675 } else {
676 let snapshot = BackupDir::parse(path)?;
677
678 query = tools::json_object_to_query(json!({
679 "backup-type": snapshot.group().backup_type(),
680 "backup-id": snapshot.group().backup_id(),
681 "backup-time": snapshot.backup_time().timestamp(),
682 "file-name": file_name,
683 }))?;
684 }
685
686 let target = tools::required_string_param(&param, "target")?;
687
688 let path = format!("api2/json/admin/datastore/{}/download?{}", repo.store(), query);
689
690 println!("DOWNLOAD FILE {} to {}", path, target);
691
692 let target = PathBuf::from(target);
693 let writer = std::fs::OpenOptions::new()
694 .create(true)
695 .create_new(true)
696 .write(true)
697 .open(target)?;
698
699 client.download(&path, Box::new(writer)).wait()?;
700
701 Ok(Value::Null)
702 }
703
704 fn prune(
705 mut param: Value,
706 _info: &ApiMethod,
707 _rpcenv: &mut dyn RpcEnvironment,
708 ) -> Result<Value, Error> {
709
710 let repo_url = tools::required_string_param(&param, "repository")?;
711 let repo: BackupRepository = repo_url.parse()?;
712
713 let mut client = HttpClient::new(repo.host(), repo.user())?;
714
715 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
716
717 param.as_object_mut().unwrap().remove("repository");
718
719 let result = client.post(&path, Some(param)).wait()?;
720
721 record_repository(&repo);
722
723 Ok(result)
724 }
725
726 // like get, but simply ignore errors and return Null instead
727 fn try_get(repo: &BackupRepository, url: &str) -> Value {
728
729 let client = match HttpClient::new(repo.host(), repo.user()) {
730 Ok(v) => v,
731 _ => return Value::Null,
732 };
733
734 let mut resp = match client.get(url, None).wait() {
735 Ok(v) => v,
736 _ => return Value::Null,
737 };
738
739 if let Some(map) = resp.as_object_mut() {
740 if let Some(data) = map.remove("data") {
741 return data;
742 }
743 }
744 Value::Null
745 }
746
747 fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {
748
749 let repo_url = match param.get("repository") {
750 Some(v) => v,
751 _ => return None,
752 };
753
754 let repo: BackupRepository = match repo_url.parse() {
755 Ok(v) => v,
756 _ => return None,
757 };
758
759 Some(repo)
760 }
761
762 fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
763
764 let mut result = vec![];
765
766 let repo = match extract_repo(param) {
767 Some(v) => v,
768 _ => return result,
769 };
770
771 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
772
773 let data = try_get(&repo, &path);
774
775 if let Some(list) = data.as_array() {
776 for item in list {
777 if let (Some(backup_id), Some(backup_type)) =
778 (item["backup-id"].as_str(), item["backup-type"].as_str())
779 {
780 result.push(format!("{}/{}", backup_type, backup_id));
781 }
782 }
783 }
784
785 result
786 }
787
788 fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
789
790 let mut result = vec![];
791
792 let repo = match extract_repo(param) {
793 Some(v) => v,
794 _ => return result,
795 };
796
797 if arg.matches('/').count() < 2 {
798 let groups = complete_backup_group(arg, param);
799 for group in groups {
800 result.push(group.to_string());
801 result.push(format!("{}/", group));
802 }
803 return result;
804 }
805
806 let mut parts = arg.split('/');
807 let query = tools::json_object_to_query(json!({
808 "backup-type": parts.next().unwrap(),
809 "backup-id": parts.next().unwrap(),
810 })).unwrap();
811
812 let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
813
814 let data = try_get(&repo, &path);
815
816 if let Some(list) = data.as_array() {
817 for item in list {
818 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
819 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
820 {
821 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
822 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
823 }
824 }
825 }
826
827 result
828 }
829
830 fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
831
832 let mut result = vec![];
833
834 let repo = match extract_repo(param) {
835 Some(v) => v,
836 _ => return result,
837 };
838
839 let snapshot = match param.get("snapshot") {
840 Some(path) => {
841 match BackupDir::parse(path) {
842 Ok(v) => v,
843 _ => return result,
844 }
845 }
846 _ => return result,
847 };
848
849 let query = tools::json_object_to_query(json!({
850 "backup-type": snapshot.group().backup_type(),
851 "backup-id": snapshot.group().backup_id(),
852 "backup-time": snapshot.backup_time().timestamp(),
853 })).unwrap();
854
855 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
856
857 let data = try_get(&repo, &path);
858
859 if let Some(list) = data.as_array() {
860 for item in list {
861 if let Some(filename) = item.as_str() {
862 result.push(filename.to_owned());
863 }
864 }
865 }
866
867 result
868 }
869
870 fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
871
872 let result = complete_server_file_name(arg, param);
873
874 strip_server_file_expenstions(result)
875 }
876
/// Shell completion helper: suggest the supported chunk sizes (in KiB),
/// i.e. the powers of two from 64 up to and including 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut completions = vec![];

    let mut size = 64;
    while size <= 4096 {
        completions.push(size.to_string());
        size *= 2;
    }

    completions
}
890
891 fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
892
893 // fixme: implement other input methods
894
895 use std::env::VarError::*;
896 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
897 Ok(p) => return Ok(p.as_bytes().to_vec()),
898 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
899 Err(NotPresent) => {
900 // Try another method
901 }
902 }
903
904 // If we're on a TTY, query the user for a password
905 if crate::tools::tty::stdin_isatty() {
906 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
907 }
908
909 bail!("no password input mechanism available");
910 }
911
912 fn key_create(
913 param: Value,
914 _info: &ApiMethod,
915 _rpcenv: &mut dyn RpcEnvironment,
916 ) -> Result<Value, Error> {
917
918 let path = tools::required_string_param(&param, "path")?;
919 let path = PathBuf::from(path);
920
921 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
922
923 let key = proxmox::sys::linux::random_data(32)?;
924
925 if kdf == "scrypt" {
926 // always read passphrase from tty
927 if !crate::tools::tty::stdin_isatty() {
928 bail!("unable to read passphrase - no tty");
929 }
930
931 let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
932
933 let key_config = encrypt_key_with_passphrase(&key, &password)?;
934
935 store_key_config(&path, false, key_config)?;
936
937 Ok(Value::Null)
938 } else if kdf == "none" {
939 let created = Local.timestamp(Local::now().timestamp(), 0);
940
941 store_key_config(&path, false, KeyConfig {
942 kdf: None,
943 created,
944 modified: created,
945 data: key,
946 })?;
947
948 Ok(Value::Null)
949 } else {
950 unreachable!();
951 }
952 }
953
954 fn master_pubkey_path() -> Result<PathBuf, Error> {
955 let base = BaseDirectories::with_prefix("proxmox-backup")?;
956
957 // usually $HOME/.config/proxmox-backup/master-public.pem
958 let path = base.place_config_file("master-public.pem")?;
959
960 Ok(path)
961 }
962
963 fn key_import_master_pubkey(
964 param: Value,
965 _info: &ApiMethod,
966 _rpcenv: &mut dyn RpcEnvironment,
967 ) -> Result<Value, Error> {
968
969 let path = tools::required_string_param(&param, "path")?;
970 let path = PathBuf::from(path);
971
972 let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
973
974 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
975 bail!("Unable to decode PEM data - {}", err);
976 }
977
978 let target_path = master_pubkey_path()?;
979
980 proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
981
982 println!("Imported public master key to {:?}", target_path);
983
984 Ok(Value::Null)
985 }
986
987 fn key_create_master_key(
988 _param: Value,
989 _info: &ApiMethod,
990 _rpcenv: &mut dyn RpcEnvironment,
991 ) -> Result<Value, Error> {
992
993 // we need a TTY to query the new password
994 if !crate::tools::tty::stdin_isatty() {
995 bail!("unable to create master key - no tty");
996 }
997
998 let rsa = openssl::rsa::Rsa::generate(4096)?;
999 let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
1000
1001 let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
1002 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1003
1004 if new_pw != verify_pw {
1005 bail!("Password verification fail!");
1006 }
1007
1008 if new_pw.len() < 5 {
1009 bail!("Password is too short!");
1010 }
1011
1012 let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
1013 let filename_pub = "master-public.pem";
1014 println!("Writing public master key to {}", filename_pub);
1015 proxmox_backup::tools::file_set_contents(filename_pub, pub_key.as_slice(), None)?;
1016
1017 let cipher = openssl::symm::Cipher::aes_256_cbc();
1018 let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
1019
1020 let filename_priv = "master-private.pem";
1021 println!("Writing private master key to {}", filename_priv);
1022 proxmox_backup::tools::file_set_contents(filename_priv, priv_key.as_slice(), None)?;
1023
1024 Ok(Value::Null)
1025 }
1026
1027 fn key_change_passphrase(
1028 param: Value,
1029 _info: &ApiMethod,
1030 _rpcenv: &mut dyn RpcEnvironment,
1031 ) -> Result<Value, Error> {
1032
1033 let path = tools::required_string_param(&param, "path")?;
1034 let path = PathBuf::from(path);
1035
1036 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1037
1038 // we need a TTY to query the new password
1039 if !crate::tools::tty::stdin_isatty() {
1040 bail!("unable to change passphrase - no tty");
1041 }
1042
1043 let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
1044
1045 if kdf == "scrypt" {
1046
1047 let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
1048 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1049
1050 if new_pw != verify_pw {
1051 bail!("Password verification fail!");
1052 }
1053
1054 if new_pw.len() < 5 {
1055 bail!("Password is too short!");
1056 }
1057
1058 let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
1059 new_key_config.created = created; // keep original value
1060
1061 store_key_config(&path, true, new_key_config)?;
1062
1063 Ok(Value::Null)
1064 } else if kdf == "none" {
1065 let modified = Local.timestamp(Local::now().timestamp(), 0);
1066
1067 store_key_config(&path, true, KeyConfig {
1068 kdf: None,
1069 created, // keep original value
1070 modified,
1071 data: key.to_vec(),
1072 })?;
1073
1074 Ok(Value::Null)
1075 } else {
1076 unreachable!();
1077 }
1078 }
1079
/// Build the command map for the "key" sub-commands:
/// create, create-master-key, import-master-pubkey and change-passphrase.
fn key_mgmt_cli() -> CliCommandMap {

    // shared schema for the optional "kdf" parameter ("scrypt" or "none")
    let kdf_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
            .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
            .default("scrypt")
            .into()
    );

    let key_create_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create,
            ObjectSchema::new("Create a new encryption key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_change_passphrase,
            ObjectSchema::new("Change the passphrase required to decrypt the key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create_master_key,
            ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
        ));

    let key_import_master_pubkey_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_import_master_pubkey,
            ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
                .required("path", StringSchema::new("File system path."))
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("create".to_owned(), key_create_cmd_def.into())
        .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
        .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
        .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());

    cmd_def
}
1132
1133
1134 fn main() {
1135
1136 let backup_source_schema: Arc<Schema> = Arc::new(
1137 StringSchema::new("Backup source specification ([<label>:<path>]).")
1138 .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
1139 .into()
1140 );
1141
1142 let backup_cmd_def = CliCommand::new(
1143 ApiMethod::new(
1144 create_backup,
1145 ObjectSchema::new("Create (host) backup.")
1146 .required("repository", REPO_URL_SCHEMA.clone())
1147 .required(
1148 "backupspec",
1149 ArraySchema::new(
1150 "List of backup source specifications ([<label.ext>:<path>] ...)",
1151 backup_source_schema,
1152 ).min_length(1)
1153 )
1154 .optional(
1155 "keyfile",
1156 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
1157 .optional(
1158 "verbose",
1159 BooleanSchema::new("Verbose output.").default(false))
1160 .optional(
1161 "host-id",
1162 StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
1163 .optional(
1164 "chunk-size",
1165 IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
1166 .minimum(64)
1167 .maximum(4096)
1168 .default(4096)
1169 )
1170 ))
1171 .arg_param(vec!["repository", "backupspec"])
1172 .completion_cb("repository", complete_repository)
1173 .completion_cb("backupspec", complete_backup_source)
1174 .completion_cb("keyfile", tools::complete_file_name)
1175 .completion_cb("chunk-size", complete_chunk_size);
1176
1177 let list_cmd_def = CliCommand::new(
1178 ApiMethod::new(
1179 list_backup_groups,
1180 ObjectSchema::new("List backup groups.")
1181 .required("repository", REPO_URL_SCHEMA.clone())
1182 ))
1183 .arg_param(vec!["repository"])
1184 .completion_cb("repository", complete_repository);
1185
1186 let snapshots_cmd_def = CliCommand::new(
1187 ApiMethod::new(
1188 list_snapshots,
1189 ObjectSchema::new("List backup snapshots.")
1190 .required("repository", REPO_URL_SCHEMA.clone())
1191 .required("group", StringSchema::new("Backup group."))
1192 ))
1193 .arg_param(vec!["repository", "group"])
1194 .completion_cb("group", complete_backup_group)
1195 .completion_cb("repository", complete_repository);
1196
1197 let forget_cmd_def = CliCommand::new(
1198 ApiMethod::new(
1199 forget_snapshots,
1200 ObjectSchema::new("Forget (remove) backup snapshots.")
1201 .required("repository", REPO_URL_SCHEMA.clone())
1202 .required("snapshot", StringSchema::new("Snapshot path."))
1203 ))
1204 .arg_param(vec!["repository", "snapshot"])
1205 .completion_cb("repository", complete_repository)
1206 .completion_cb("snapshot", complete_group_or_snapshot);
1207
1208 let garbage_collect_cmd_def = CliCommand::new(
1209 ApiMethod::new(
1210 start_garbage_collection,
1211 ObjectSchema::new("Start garbage collection for a specific repository.")
1212 .required("repository", REPO_URL_SCHEMA.clone())
1213 ))
1214 .arg_param(vec!["repository"])
1215 .completion_cb("repository", complete_repository);
1216
1217 let download_cmd_def = CliCommand::new(
1218 ApiMethod::new(
1219 download,
1220 ObjectSchema::new("Download data from backup repository.")
1221 .required("repository", REPO_URL_SCHEMA.clone())
1222 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1223 .required("file-name", StringSchema::new("File name."))
1224 .required("target", StringSchema::new("Target directory path."))
1225 ))
1226 .arg_param(vec!["repository", "snapshot", "file-name", "target"])
1227 .completion_cb("repository", complete_repository)
1228 .completion_cb("snapshot", complete_group_or_snapshot)
1229 .completion_cb("file-name", complete_server_file_name)
1230 .completion_cb("target", tools::complete_file_name);
1231
1232 let restore_cmd_def = CliCommand::new(
1233 ApiMethod::new(
1234 restore,
1235 ObjectSchema::new("Restore backup repository.")
1236 .required("repository", REPO_URL_SCHEMA.clone())
1237 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1238 .required("archive-name", StringSchema::new("Backup archive name."))
1239 .required("target", StringSchema::new("Target directory path."))
1240 ))
1241 .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
1242 .completion_cb("repository", complete_repository)
1243 .completion_cb("snapshot", complete_group_or_snapshot)
1244 .completion_cb("archive-name", complete_archive_name)
1245 .completion_cb("target", tools::complete_file_name);
1246
1247 let prune_cmd_def = CliCommand::new(
1248 ApiMethod::new(
1249 prune,
1250 proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
1251 ObjectSchema::new("Prune backup repository.")
1252 .required("repository", REPO_URL_SCHEMA.clone())
1253 )
1254 ))
1255 .arg_param(vec!["repository"])
1256 .completion_cb("repository", complete_repository);
1257
1258 let cmd_def = CliCommandMap::new()
1259 .insert("backup".to_owned(), backup_cmd_def.into())
1260 .insert("forget".to_owned(), forget_cmd_def.into())
1261 .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
1262 .insert("list".to_owned(), list_cmd_def.into())
1263 .insert("prune".to_owned(), prune_cmd_def.into())
1264 .insert("download".to_owned(), download_cmd_def.into())
1265 .insert("restore".to_owned(), restore_cmd_def.into())
1266 .insert("snapshots".to_owned(), snapshots_cmd_def.into())
1267 .insert("key".to_owned(), key_mgmt_cli().into());
1268
1269 hyper::rt::run(futures::future::lazy(move || {
1270 run_cli_command(cmd_def.into());
1271 Ok(())
1272 }));
1273
1274 }