]> git.proxmox.com Git - proxmox-backup.git/blame_incremental - src/bin/proxmox-backup-client.rs
src/bin/proxmox-backup-client.rs - key API: pass kdf parameter
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
... / ...
CommitLineData
1//#[macro_use]
2extern crate proxmox_backup;
3
4use failure::*;
5//use std::os::unix::io::AsRawFd;
6use chrono::{Local, TimeZone};
7use std::path::{Path, PathBuf};
8use std::collections::HashMap;
9
10use proxmox_backup::tools;
11use proxmox_backup::cli::*;
12use proxmox_backup::api_schema::*;
13use proxmox_backup::api_schema::router::*;
14use proxmox_backup::client::*;
15use proxmox_backup::backup::*;
16//use proxmox_backup::backup::image_index::*;
17//use proxmox_backup::config::datastore;
18//use proxmox_backup::pxar::encoder::*;
19//use proxmox_backup::backup::datastore::*;
20
21use serde_json::{json, Value};
22//use hyper::Body;
23use std::sync::Arc;
24use regex::Regex;
25use xdg::BaseDirectories;
26
27use lazy_static::lazy_static;
28use futures::*;
29use tokio::sync::mpsc;
30
lazy_static! {
    // Matches one backup source specification: "<label>.<pxar|img|conf>:<path>".
    // Capture 1 is the archive name (with extension), capture 2 the path.
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();

    // Shared schema for the "repository" CLI parameter, reused by every command.
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
41
42
43fn record_repository(repo: &BackupRepository) {
44
45 let base = match BaseDirectories::with_prefix("proxmox-backup") {
46 Ok(v) => v,
47 _ => return,
48 };
49
50 // usually $HOME/.cache/proxmox-backup/repo-list
51 let path = match base.place_cache_file("repo-list") {
52 Ok(v) => v,
53 _ => return,
54 };
55
56 let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
57
58 let repo = repo.to_string();
59
60 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
61
62 let mut map = serde_json::map::Map::new();
63
64 loop {
65 let mut max_used = 0;
66 let mut max_repo = None;
67 for (repo, count) in data.as_object().unwrap() {
68 if map.contains_key(repo) { continue; }
69 if let Some(count) = count.as_i64() {
70 if count > max_used {
71 max_used = count;
72 max_repo = Some(repo);
73 }
74 }
75 }
76 if let Some(repo) = max_repo {
77 map.insert(repo.to_owned(), json!(max_used));
78 } else {
79 break;
80 }
81 if map.len() > 10 { // store max. 10 repos
82 break;
83 }
84 }
85
86 let new_data = json!(map);
87
88 let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
89}
90
91fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
92
93 let mut result = vec![];
94
95 let base = match BaseDirectories::with_prefix("proxmox-backup") {
96 Ok(v) => v,
97 _ => return result,
98 };
99
100 // usually $HOME/.cache/proxmox-backup/repo-list
101 let path = match base.place_cache_file("repo-list") {
102 Ok(v) => v,
103 _ => return result,
104 };
105
106 let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
107
108 if let Some(map) = data.as_object() {
109 for (repo, _count) in map {
110 result.push(repo.to_owned());
111 }
112 }
113
114 result
115}
116
/// Create a pxar archive of `dir_path` and upload it to the server as a
/// dynamically chunked stream named `archive_name`.
///
/// NOTE(review): this blocks via `.wait()`, so it must run inside a tokio
/// runtime (main wraps everything in `hyper::rt::run`).
fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    all_file_systems: bool,
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    // the channel carries Result items; flatten them back into a stream
    // that yields chunks and propagates errors
    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(())
}
146
/// Upload a disk image (regular file or block device) as a fixed-size
/// chunked stream named `archive_name`.
///
/// NOTE(review): blocks via `.wait()` - must run inside a tokio runtime.
fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).wait()?;

    // read the file as a raw byte stream
    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    // default chunk size is 4 MiB when none was given on the CLI
    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;

    Ok(())
}
170
/// Strip the chunk-index extensions (".didx" / ".fidx") from each file name.
///
/// Names without one of these extensions are passed through unchanged
/// (should not happen for well-formed server responses).
fn strip_chunked_file_expenstions(list: Vec<String>) -> Vec<String> {

    list.into_iter()
        .map(|file| {
            // ".didx" and ".fidx" are both 5 bytes long
            if file.ends_with(".didx") || file.ends_with(".fidx") {
                file[..file.len()-5].to_owned()
            } else {
                file // should not happen
            }
        })
        .collect()
}
187
188/* not used:
189fn list_backups(
190 param: Value,
191 _info: &ApiMethod,
192 _rpcenv: &mut dyn RpcEnvironment,
193) -> Result<Value, Error> {
194
195 let repo_url = tools::required_string_param(&param, "repository")?;
196 let repo: BackupRepository = repo_url.parse()?;
197
198 let mut client = HttpClient::new(repo.host(), repo.user())?;
199
200 let path = format!("api2/json/admin/datastore/{}/backups", repo.store());
201
202 let result = client.get(&path, None)?;
203
204 record_repository(&repo);
205
206 // fixme: implement and use output formatter instead ..
207 let list = result["data"].as_array().unwrap();
208
209 for item in list {
210
211 let id = item["backup-id"].as_str().unwrap();
212 let btype = item["backup-type"].as_str().unwrap();
213 let epoch = item["backup-time"].as_i64().unwrap();
214
215 let backup_dir = BackupDir::new(btype, id, epoch);
216
217 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
218 let files = strip_chunked_file_expenstions(files);
219
220 for filename in files {
221 let path = backup_dir.relative_path().to_str().unwrap().to_owned();
222 println!("{} | {}/{}", backup_dir.backup_time().format("%c"), path, filename);
223 }
224 }
225
226 //Ok(result)
227 Ok(Value::Null)
228}
229 */
230
231fn list_backup_groups(
232 param: Value,
233 _info: &ApiMethod,
234 _rpcenv: &mut dyn RpcEnvironment,
235) -> Result<Value, Error> {
236
237 let repo_url = tools::required_string_param(&param, "repository")?;
238 let repo: BackupRepository = repo_url.parse()?;
239
240 let client = HttpClient::new(repo.host(), repo.user())?;
241
242 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
243
244 let mut result = client.get(&path, None).wait()?;
245
246 record_repository(&repo);
247
248 // fixme: implement and use output formatter instead ..
249 let list = result["data"].as_array_mut().unwrap();
250
251 list.sort_unstable_by(|a, b| {
252 let a_id = a["backup-id"].as_str().unwrap();
253 let a_backup_type = a["backup-type"].as_str().unwrap();
254 let b_id = b["backup-id"].as_str().unwrap();
255 let b_backup_type = b["backup-type"].as_str().unwrap();
256
257 let type_order = a_backup_type.cmp(b_backup_type);
258 if type_order == std::cmp::Ordering::Equal {
259 a_id.cmp(b_id)
260 } else {
261 type_order
262 }
263 });
264
265 for item in list {
266
267 let id = item["backup-id"].as_str().unwrap();
268 let btype = item["backup-type"].as_str().unwrap();
269 let epoch = item["last-backup"].as_i64().unwrap();
270 let last_backup = Local.timestamp(epoch, 0);
271 let backup_count = item["backup-count"].as_u64().unwrap();
272
273 let group = BackupGroup::new(btype, id);
274
275 let path = group.group_path().to_str().unwrap().to_owned();
276
277 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
278 let files = strip_chunked_file_expenstions(files);
279
280 println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
281 backup_count, tools::join(&files, ' '));
282 }
283
284 //Ok(result)
285 Ok(Value::Null)
286}
287
288fn list_snapshots(
289 param: Value,
290 _info: &ApiMethod,
291 _rpcenv: &mut dyn RpcEnvironment,
292) -> Result<Value, Error> {
293
294 let repo_url = tools::required_string_param(&param, "repository")?;
295 let repo: BackupRepository = repo_url.parse()?;
296
297 let path = tools::required_string_param(&param, "group")?;
298 let group = BackupGroup::parse(path)?;
299
300 let client = HttpClient::new(repo.host(), repo.user())?;
301
302 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
303
304 let result = client.get(&path, Some(json!({
305 "backup-type": group.backup_type(),
306 "backup-id": group.backup_id(),
307 }))).wait()?;
308
309 record_repository(&repo);
310
311 // fixme: implement and use output formatter instead ..
312 let list = result["data"].as_array().unwrap();
313
314 for item in list {
315
316 let id = item["backup-id"].as_str().unwrap();
317 let btype = item["backup-type"].as_str().unwrap();
318 let epoch = item["backup-time"].as_i64().unwrap();
319
320 let snapshot = BackupDir::new(btype, id, epoch);
321
322 let path = snapshot.relative_path().to_str().unwrap().to_owned();
323
324 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
325 let files = strip_chunked_file_expenstions(files);
326
327 println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
328 }
329
330 Ok(Value::Null)
331}
332
333fn forget_snapshots(
334 param: Value,
335 _info: &ApiMethod,
336 _rpcenv: &mut dyn RpcEnvironment,
337) -> Result<Value, Error> {
338
339 let repo_url = tools::required_string_param(&param, "repository")?;
340 let repo: BackupRepository = repo_url.parse()?;
341
342 let path = tools::required_string_param(&param, "snapshot")?;
343 let snapshot = BackupDir::parse(path)?;
344
345 let mut client = HttpClient::new(repo.host(), repo.user())?;
346
347 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
348
349 let result = client.delete(&path, Some(json!({
350 "backup-type": snapshot.group().backup_type(),
351 "backup-id": snapshot.group().backup_id(),
352 "backup-time": snapshot.backup_time().timestamp(),
353 }))).wait()?;
354
355 record_repository(&repo);
356
357 Ok(result)
358}
359
360fn start_garbage_collection(
361 param: Value,
362 _info: &ApiMethod,
363 _rpcenv: &mut dyn RpcEnvironment,
364) -> Result<Value, Error> {
365
366 let repo_url = tools::required_string_param(&param, "repository")?;
367 let repo: BackupRepository = repo_url.parse()?;
368
369 let mut client = HttpClient::new(repo.host(), repo.user())?;
370
371 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
372
373 let result = client.post(&path, None).wait()?;
374
375 record_repository(&repo);
376
377 Ok(result)
378}
379
380fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
381
382 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
383 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
384 }
385 bail!("unable to parse directory specification '{}'", value);
386}
387
388fn create_backup(
389 param: Value,
390 _info: &ApiMethod,
391 _rpcenv: &mut dyn RpcEnvironment,
392) -> Result<Value, Error> {
393
394 let repo_url = tools::required_string_param(&param, "repository")?;
395
396 let backupspec_list = tools::required_array_param(&param, "backupspec")?;
397
398 let repo: BackupRepository = repo_url.parse()?;
399
400 let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
401
402 let verbose = param["verbose"].as_bool().unwrap_or(false);
403
404 let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
405
406 if let Some(size) = chunk_size_opt {
407 verify_chunk_size(size)?;
408 }
409
410 let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());
411
412 let mut upload_list = vec![];
413
414 enum BackupType { PXAR, IMAGE, CONFIG };
415
416 for backupspec in backupspec_list {
417 let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
418
419 use std::os::unix::fs::FileTypeExt;
420
421 let metadata = match std::fs::metadata(filename) {
422 Ok(m) => m,
423 Err(err) => bail!("unable to access '{}' - {}", filename, err),
424 };
425 let file_type = metadata.file_type();
426
427 let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();
428
429 match extension {
430 "pxar" => {
431 if !file_type.is_dir() {
432 bail!("got unexpected file type (expected directory)");
433 }
434 upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
435 }
436 "img" => {
437
438 if !(file_type.is_file() || file_type.is_block_device()) {
439 bail!("got unexpected file type (expected file or block device)");
440 }
441
442 let size = tools::image_size(&PathBuf::from(filename))?;
443
444 if size == 0 { bail!("got zero-sized file '{}'", filename); }
445
446 upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
447 }
448 "conf" => {
449 if !file_type.is_file() {
450 bail!("got unexpected file type (expected regular file)");
451 }
452 upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
453 }
454 _ => {
455 bail!("got unknown archive extension '{}'", extension);
456 }
457 }
458 }
459
460 let backup_time = Local.timestamp(Local::now().timestamp(), 0);
461
462 let client = HttpClient::new(repo.host(), repo.user())?;
463 record_repository(&repo);
464
465 println!("Starting backup");
466 println!("Client name: {}", tools::nodename());
467 println!("Start Time: {}", backup_time.to_rfc3339());
468
469 let crypt_config = None;
470
471 let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;
472
473 for (backup_type, filename, target, size) in upload_list {
474 match backup_type {
475 BackupType::CONFIG => {
476 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
477 client.upload_config(&filename, &target).wait()?;
478 }
479 BackupType::PXAR => {
480 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
481 backup_directory(
482 &client,
483 &filename,
484 &target,
485 chunk_size_opt,
486 all_file_systems,
487 verbose,
488 crypt_config.clone(),
489 )?;
490 }
491 BackupType::IMAGE => {
492 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
493 backup_image(
494 &client,
495 &filename,
496 &target,
497 size,
498 chunk_size_opt,
499 verbose,
500 crypt_config.clone(),
501 )?;
502 }
503 }
504 }
505
506 client.finish().wait()?;
507
508 let end_time = Local.timestamp(Local::now().timestamp(), 0);
509 let elapsed = end_time.signed_duration_since(backup_time);
510 println!("Duration: {}", elapsed);
511
512 println!("End Time: {}", end_time.to_rfc3339());
513
514 Ok(Value::Null)
515}
516
517fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
518
519 let mut result = vec![];
520
521 let data: Vec<&str> = arg.splitn(2, ':').collect();
522
523 if data.len() != 2 {
524 result.push(String::from("root.pxar:/"));
525 result.push(String::from("etc.pxar:/etc"));
526 return result;
527 }
528
529 let files = tools::complete_file_name(data[1], param);
530
531 for file in files {
532 result.push(format!("{}:{}", data[0], file));
533 }
534
535 result
536}
537
/// Restore (download) one archive from a backup snapshot.
///
/// If "snapshot" contains only one '/' it is treated as a group path
/// ("<type>/<id>") and the first snapshot the server lists is used.
/// Currently only ".pxar" archives can be restored.
fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let query;

    // a single '/' means we only got a group path - resolve it to a snapshot
    if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.len() == 0 {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        // NOTE(review): takes list[0] as "the" snapshot - this assumes the
        // server returns the wanted (presumably newest) snapshot first;
        // confirm against the snapshots API ordering
        query = tools::json_object_to_query(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": list[0]["backup-time"].as_i64().unwrap(),
            "archive-name": archive_name,
        }))?;
    } else {
        let snapshot = BackupDir::parse(path)?;

        query = tools::json_object_to_query(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
            "archive-name": archive_name,
        }))?;
    }

    let target = tools::required_string_param(&param, "target")?;

    if archive_name.ends_with(".pxar") {
        let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);

        println!("DOWNLOAD FILE {} to {}", path, target);

        let target = PathBuf::from(target);
        // decode the pxar stream directly into the target directory
        let writer = PxarDecodeWriter::new(&target, true)?;
        client.download(&path, Box::new(writer)).wait()?;
    } else {
        bail!("unknown file extensions - unable to download '{}'", archive_name);
    }

    Ok(Value::Null)
}
604
605fn prune(
606 mut param: Value,
607 _info: &ApiMethod,
608 _rpcenv: &mut dyn RpcEnvironment,
609) -> Result<Value, Error> {
610
611 let repo_url = tools::required_string_param(&param, "repository")?;
612 let repo: BackupRepository = repo_url.parse()?;
613
614 let mut client = HttpClient::new(repo.host(), repo.user())?;
615
616 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
617
618 param.as_object_mut().unwrap().remove("repository");
619
620 let result = client.post(&path, Some(param)).wait()?;
621
622 record_repository(&repo);
623
624 Ok(result)
625}
626
627// like get, but simply ignore errors and return Null instead
628fn try_get(repo: &BackupRepository, url: &str) -> Value {
629
630 let client = match HttpClient::new(repo.host(), repo.user()) {
631 Ok(v) => v,
632 _ => return Value::Null,
633 };
634
635 let mut resp = match client.get(url, None).wait() {
636 Ok(v) => v,
637 _ => return Value::Null,
638 };
639
640 if let Some(map) = resp.as_object_mut() {
641 if let Some(data) = map.remove("data") {
642 return data;
643 }
644 }
645 Value::Null
646}
647
648fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {
649
650 let repo_url = match param.get("repository") {
651 Some(v) => v,
652 _ => return None,
653 };
654
655 let repo: BackupRepository = match repo_url.parse() {
656 Ok(v) => v,
657 _ => return None,
658 };
659
660 Some(repo)
661}
662
663fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
664
665 let mut result = vec![];
666
667 let repo = match extract_repo(param) {
668 Some(v) => v,
669 _ => return result,
670 };
671
672 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
673
674 let data = try_get(&repo, &path);
675
676 if let Some(list) = data.as_array() {
677 for item in list {
678 if let (Some(backup_id), Some(backup_type)) =
679 (item["backup-id"].as_str(), item["backup-type"].as_str())
680 {
681 result.push(format!("{}/{}", backup_type, backup_id));
682 }
683 }
684 }
685
686 result
687}
688
689fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
690
691 let mut result = vec![];
692
693 let repo = match extract_repo(param) {
694 Some(v) => v,
695 _ => return result,
696 };
697
698 if arg.matches('/').count() < 2 {
699 let groups = complete_backup_group(arg, param);
700 for group in groups {
701 result.push(group.to_string());
702 result.push(format!("{}/", group));
703 }
704 return result;
705 }
706
707 let mut parts = arg.split('/');
708 let query = tools::json_object_to_query(json!({
709 "backup-type": parts.next().unwrap(),
710 "backup-id": parts.next().unwrap(),
711 })).unwrap();
712
713 let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
714
715 let data = try_get(&repo, &path);
716
717 if let Some(list) = data.as_array() {
718 for item in list {
719 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
720 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
721 {
722 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
723 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
724 }
725 }
726 }
727
728 result
729}
730
731fn complete_archive_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
732
733 let mut result = vec![];
734
735 let repo = match extract_repo(param) {
736 Some(v) => v,
737 _ => return result,
738 };
739
740 let snapshot = match param.get("snapshot") {
741 Some(path) => {
742 match BackupDir::parse(path) {
743 Ok(v) => v,
744 _ => return result,
745 }
746 }
747 _ => return result,
748 };
749
750 let query = tools::json_object_to_query(json!({
751 "backup-type": snapshot.group().backup_type(),
752 "backup-id": snapshot.group().backup_id(),
753 "backup-time": snapshot.backup_time().timestamp(),
754 })).unwrap();
755
756 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
757
758 let data = try_get(&repo, &path);
759
760 if let Some(list) = data.as_array() {
761 for item in list {
762 if let Some(filename) = item.as_str() {
763 result.push(filename.to_owned());
764 }
765 }
766 }
767
768 strip_chunked_file_expenstions(result)
769}
770
/// Shell-completion helper: suggest chunk sizes - powers of two from
/// 64 KB up to 4096 KB.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    // 2^6 = 64 .. 2^12 = 4096
    (6..13).map(|shift| (1u64 << shift).to_string()).collect()
}
784
/// Query the password used to decrypt the encryption key.
///
/// First checks the PBS_ENCRYPTION_PASSWORD environment variable; if that is
/// unset and stdin is a TTY, prompts the user interactively. Fails when
/// neither mechanism is available (e.g. non-interactive use without the
/// environment variable set).
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
805
806fn key_create(
807 param: Value,
808 _info: &ApiMethod,
809 _rpcenv: &mut dyn RpcEnvironment,
810) -> Result<Value, Error> {
811
812 let path = tools::required_string_param(&param, "path")?;
813 let path = PathBuf::from(path);
814
815 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
816
817 let key = proxmox::sys::linux::random_data(32)?;
818
819 if kdf == "scrypt" {
820 // always read passphrase from tty
821 if !crate::tools::tty::stdin_isatty() {
822 bail!("unable to read passphrase - no tty");
823 }
824
825 let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
826
827 store_key_with_passphrase(&path, &key, &password, false)?;
828
829 Ok(Value::Null)
830 } else if kdf == "none" {
831 let created = Local.timestamp(Local::now().timestamp(), 0);
832
833 store_key_config(&path, false, KeyConfig {
834 kdf: None,
835 created,
836 data: key,
837 })?;
838
839 Ok(Value::Null)
840 } else {
841 unreachable!();
842 }
843}
844
845
/// Change (or remove) the passphrase protecting an encryption key file.
///
/// Decrypts the key at `path` using the current password (see
/// get_encryption_key_password), then stores it again - either protected by
/// a newly queried passphrase (kdf == "scrypt") or unprotected
/// (kdf == "none").
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    // the parameter schema restricts "kdf" to "scrypt" or "none"
    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let key = load_and_decrtypt_key(&path, get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification fail!");
        }

        // minimal sanity check only - 5 characters
        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        store_key_with_passphrase(&path, &key, new_pw.as_bytes(), true)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // fixme: keep original creation time, add modified timestamp ??
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        // unreachable: schema only allows the two values handled above
        unreachable!();
    }
}
895
896fn key_mgmt_cli() -> CliCommandMap {
897
898 let kdf_schema: Arc<Schema> = Arc::new(
899 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
900 .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
901 .default("scrypt")
902 .into()
903 );
904
905 // fixme: change-passphrase, import, export, list
906 let key_create_cmd_def = CliCommand::new(
907 ApiMethod::new(
908 key_create,
909 ObjectSchema::new("Create a new encryption key.")
910 .required("path", StringSchema::new("File system path."))
911 .optional("kdf", kdf_schema.clone())
912 ))
913 .arg_param(vec!["path"])
914 .completion_cb("path", tools::complete_file_name);
915
916 let key_change_passphrase_cmd_def = CliCommand::new(
917 ApiMethod::new(
918 key_change_passphrase,
919 ObjectSchema::new("Change the passphrase required to decrypt the key.")
920 .required("path", StringSchema::new("File system path."))
921 .optional("kdf", kdf_schema.clone())
922 ))
923 .arg_param(vec!["path"])
924 .completion_cb("path", tools::complete_file_name);
925
926 let cmd_def = CliCommandMap::new()
927 .insert("create".to_owned(), key_create_cmd_def.into())
928 .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());
929
930 cmd_def
931}
932
933
/// CLI entry point: build the command map (backup, forget, garbage-collect,
/// list, prune, restore, snapshots, key) and run the selected command inside
/// a tokio/hyper runtime (required because the handlers block via `.wait()`).
fn main() {

    // schema for one element of the "backupspec" array parameter
    let backup_source_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Backup source specification ([<label>:<path>]).")
            .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
            .into()
    );

    let backup_cmd_def = CliCommand::new(
        ApiMethod::new(
            create_backup,
            ObjectSchema::new("Create (host) backup.")
                .required("repository", REPO_URL_SCHEMA.clone())
                .required(
                    "backupspec",
                    ArraySchema::new(
                        "List of backup source specifications ([<label.ext>:<path>] ...)",
                        backup_source_schema,
                    ).min_length(1)
                )
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false))
                .optional(
                    "host-id",
                    StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
                .optional(
                    "chunk-size",
                    IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                        .minimum(64)
                        .maximum(4096)
                        .default(4096)
                )
        ))
        .arg_param(vec!["repository", "backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("chunk-size", complete_chunk_size);

    let list_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_backup_groups,
            ObjectSchema::new("List backup groups.")
                .required("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshots,
            ObjectSchema::new("List backup snapshots.")
                .required("repository", REPO_URL_SCHEMA.clone())
                .required("group", StringSchema::new("Backup group."))
        ))
        .arg_param(vec!["repository", "group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(
        ApiMethod::new(
            forget_snapshots,
            ObjectSchema::new("Forget (remove) backup snapshots.")
                .required("repository", REPO_URL_SCHEMA.clone())
                .required("snapshot", StringSchema::new("Snapshot path."))
        ))
        .arg_param(vec!["repository", "snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(
        ApiMethod::new(
            start_garbage_collection,
            ObjectSchema::new("Start garbage collection for a specific repository.")
                .required("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(
        ApiMethod::new(
            restore,
            ObjectSchema::new("Restore backup repository.")
                .required("repository", REPO_URL_SCHEMA.clone())
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new("Target directory path."))
        ))
        .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    // prune shares its keep-* parameters with the server-side API definition
    let prune_cmd_def = CliCommand::new(
        ApiMethod::new(
            prune,
            proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
                ObjectSchema::new("Prune backup repository.")
                    .required("repository", REPO_URL_SCHEMA.clone())
            )
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into())
        .insert("key".to_owned(), key_mgmt_cli().into());

    // run inside a hyper/tokio runtime so the handlers may use `.wait()`
    hyper::rt::run(futures::future::lazy(move || {
        run_cli_command(cmd_def.into());
        Ok(())
    }));

}