]> git.proxmox.com Git - proxmox-backup.git/blob - src/bin/proxmox-backup-client.rs
src/pxar/encoder.rs: allow to pass list of devices
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
1 //#[macro_use]
2 extern crate proxmox_backup;
3
4 use failure::*;
5 //use std::os::unix::io::AsRawFd;
6 use chrono::{Local, Utc, TimeZone};
7 use std::path::{Path, PathBuf};
8 use std::collections::{HashSet, HashMap};
9 use std::io::Write;
10
11 use proxmox_backup::tools;
12 use proxmox_backup::cli::*;
13 use proxmox_backup::api_schema::*;
14 use proxmox_backup::api_schema::router::*;
15 use proxmox_backup::client::*;
16 use proxmox_backup::backup::*;
17 use proxmox_backup::pxar;
18
19 //use proxmox_backup::backup::image_index::*;
20 //use proxmox_backup::config::datastore;
21 //use proxmox_backup::pxar::encoder::*;
22 //use proxmox_backup::backup::datastore::*;
23
24 use serde_json::{json, Value};
25 //use hyper::Body;
26 use std::sync::Arc;
27 use regex::Regex;
28 use xdg::BaseDirectories;
29
30 use lazy_static::lazy_static;
31 use futures::*;
32 use tokio::sync::mpsc;
33
lazy_static! {
    // Matches a backup specification "<archive-name>.<pxar|img|conf>:<source-path>";
    // capture 1 = target archive name, capture 2 = local source path.
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();

    // Schema for the "repository" CLI parameter (validated against BACKUP_REPO_URL).
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
44
45
/// Returns the fallback repository taken from the `PBS_REPOSITORY`
/// environment variable, or `None` when the variable is unset or not
/// valid unicode.
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
49
50 fn extract_repository_from_value(
51 param: &Value,
52 ) -> Result<BackupRepository, Error> {
53
54 let repo_url = param["repository"]
55 .as_str()
56 .map(String::from)
57 .or_else(get_default_repository)
58 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
59
60 let repo: BackupRepository = repo_url.parse()?;
61
62 Ok(repo)
63 }
64
65 fn extract_repository_from_map(
66 param: &HashMap<String, String>,
67 ) -> Option<BackupRepository> {
68
69 param.get("repository")
70 .map(String::from)
71 .or_else(get_default_repository)
72 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
73 }
74
75 fn record_repository(repo: &BackupRepository) {
76
77 let base = match BaseDirectories::with_prefix("proxmox-backup") {
78 Ok(v) => v,
79 _ => return,
80 };
81
82 // usually $HOME/.cache/proxmox-backup/repo-list
83 let path = match base.place_cache_file("repo-list") {
84 Ok(v) => v,
85 _ => return,
86 };
87
88 let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
89
90 let repo = repo.to_string();
91
92 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
93
94 let mut map = serde_json::map::Map::new();
95
96 loop {
97 let mut max_used = 0;
98 let mut max_repo = None;
99 for (repo, count) in data.as_object().unwrap() {
100 if map.contains_key(repo) { continue; }
101 if let Some(count) = count.as_i64() {
102 if count > max_used {
103 max_used = count;
104 max_repo = Some(repo);
105 }
106 }
107 }
108 if let Some(repo) = max_repo {
109 map.insert(repo.to_owned(), json!(max_used));
110 } else {
111 break;
112 }
113 if map.len() > 10 { // store max. 10 repos
114 break;
115 }
116 }
117
118 let new_data = json!(map);
119
120 let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
121 }
122
123 fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
124
125 let mut result = vec![];
126
127 let base = match BaseDirectories::with_prefix("proxmox-backup") {
128 Ok(v) => v,
129 _ => return result,
130 };
131
132 // usually $HOME/.cache/proxmox-backup/repo-list
133 let path = match base.place_cache_file("repo-list") {
134 Ok(v) => v,
135 _ => return result,
136 };
137
138 let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
139
140 if let Some(map) = data.as_object() {
141 for (repo, _count) in map {
142 result.push(repo.to_owned());
143 }
144 }
145
146 result
147 }
148
/// Archives `dir_path` as a pxar stream, chunks it, and uploads it as a
/// dynamic-index archive named `archive_name`.
///
/// * `chunk_size` - optional chunker size hint forwarded to `ChunkStream`.
/// * `device_set` - when `Some`, only these device ids are descended into
///   (`None` presumably means all file systems - see `create_backup()`).
/// * `crypt_config` - when set, the upload machinery encrypts the chunks.
fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    // The channel carries Result items; and_then(|x| x) re-raises errors
    // that traveled through it as plain values.
    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    // (then(|r| Ok(r)) wraps chunker errors so they survive the channel)
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(())
}
178
/// Uploads a block image (regular file or block device) as a fixed-index
/// archive named `archive_name`.
///
/// * `image_size` - total size in bytes, forwarded to the server so it can
///   size the fixed index.
/// * `chunk_size` - chunk size in bytes; defaults to 4 MiB when `None`.
fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).wait()?;

    // Read the file as a raw byte stream.
    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    // Re-chunk into fixed-size pieces (default 4 MiB) for the fixed index.
    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;

    Ok(())
}
202
/// Strips the server-side index/blob extension (".didx", ".fidx" or
/// ".blob" - all exactly 5 bytes) from each file name, yielding the
/// client-side archive names. Names without a known extension are kept
/// unchanged (should not happen).
fn strip_server_file_expenstions(list: Vec<String>) -> Vec<String> {

    // All three server extensions share the same length, so a single
    // slice operation covers every case (replaces three duplicated branches).
    const SERVER_EXTENSIONS: [&str; 3] = [".didx", ".fidx", ".blob"];

    list.into_iter()
        .map(|file| {
            if SERVER_EXTENSIONS.iter().any(|ext| file.ends_with(ext)) {
                file[..file.len()-5].to_owned()
            } else {
                file // should not happen
            }
        })
        .collect()
}
221
/// Lists all backup groups of the datastore, sorted by (type, id).
/// Prints a table for the default "text" output format, otherwise
/// collects JSON rows and hands them to `format_and_print_result()`.
fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).wait()?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    // sort by backup-type first, then backup-id
    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    // rows for the non-text output formats
    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        // server file names -> client-side archive names
        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_server_file_expenstions(files);

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
297
298 fn list_snapshots(
299 param: Value,
300 _info: &ApiMethod,
301 _rpcenv: &mut dyn RpcEnvironment,
302 ) -> Result<Value, Error> {
303
304 let repo = extract_repository_from_value(&param)?;
305
306 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
307
308 let client = HttpClient::new(repo.host(), repo.user())?;
309
310 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
311
312 let mut args = json!({});
313 if let Some(path) = param["group"].as_str() {
314 let group = BackupGroup::parse(path)?;
315 args["backup-type"] = group.backup_type().into();
316 args["backup-id"] = group.backup_id().into();
317 }
318
319 let result = client.get(&path, Some(args)).wait()?;
320
321 record_repository(&repo);
322
323 let list = result["data"].as_array().unwrap();
324
325 let mut result = vec![];
326
327 for item in list {
328
329 let id = item["backup-id"].as_str().unwrap();
330 let btype = item["backup-type"].as_str().unwrap();
331 let epoch = item["backup-time"].as_i64().unwrap();
332
333 let snapshot = BackupDir::new(btype, id, epoch);
334
335 let path = snapshot.relative_path().to_str().unwrap().to_owned();
336
337 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
338 let files = strip_server_file_expenstions(files);
339
340 if output_format == "text" {
341 println!("{} | {}", path, tools::join(&files, ' '));
342 } else {
343 result.push(json!({
344 "backup-type": btype,
345 "backup-id": id,
346 "backup-time": epoch,
347 "files": files,
348 }));
349 }
350 }
351
352 if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
353
354 Ok(Value::Null)
355 }
356
357 fn forget_snapshots(
358 param: Value,
359 _info: &ApiMethod,
360 _rpcenv: &mut dyn RpcEnvironment,
361 ) -> Result<Value, Error> {
362
363 let repo = extract_repository_from_value(&param)?;
364
365 let path = tools::required_string_param(&param, "snapshot")?;
366 let snapshot = BackupDir::parse(path)?;
367
368 let mut client = HttpClient::new(repo.host(), repo.user())?;
369
370 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
371
372 let result = client.delete(&path, Some(json!({
373 "backup-type": snapshot.group().backup_type(),
374 "backup-id": snapshot.group().backup_id(),
375 "backup-time": snapshot.backup_time().timestamp(),
376 }))).wait()?;
377
378 record_repository(&repo);
379
380 Ok(result)
381 }
382
383 fn start_garbage_collection(
384 param: Value,
385 _info: &ApiMethod,
386 _rpcenv: &mut dyn RpcEnvironment,
387 ) -> Result<Value, Error> {
388
389 let repo = extract_repository_from_value(&param)?;
390
391 let mut client = HttpClient::new(repo.host(), repo.user())?;
392
393 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
394
395 let result = client.post(&path, None).wait()?;
396
397 record_repository(&repo);
398
399 Ok(result)
400 }
401
402 fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
403
404 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
405 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
406 }
407 bail!("unable to parse directory specification '{}'", value);
408 }
409
/// Creates a new backup: validates every backupspec, starts a backup
/// session, uploads the configured directories/images/config files, and
/// finally uploads the RSA-encrypted session key (when a master public
/// key is installed).
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    // "chunk-size" is given in KiB
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    // default backup id is the local host name
    let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());

    let include_dev = param["include-dev"].as_array();

    // None means "cross all file systems"; Some(set) restricts to these devices
    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        // resolve each path to its device id
        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            // NOTE(review): message says "fstat" but this calls stat()
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    // validate all sources up front, before starting the session
    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG };

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };
        let file_type = metadata.file_type();

        // the target extension decides the archive kind
        let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                // works for both regular files and block devices
                let size = tools::image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    // truncate to whole seconds
    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup");
    println!("Client name: {}", tools::nodename());
    println!("Start Time: {}", backup_time.to_rfc3339());

    // load the encryption key and, when a master public key is installed,
    // produce an RSA-encrypted copy of it for recovery purposes
    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    crypt_config.clone(),
                )?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
        }
    }

    // store the RSA-encrypted session key alongside the backup so the
    // master private key can recover the data if the keyfile is lost
    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        client.upload_blob_from_data(rsa_encrypted_key, target, None, false).wait()?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    client.finish().wait()?;

    let end_time = Utc.timestamp(Utc::now().timestamp(), 0);
    let elapsed = end_time.signed_duration_since(backup_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339());

    Ok(Value::Null)
}
589
590 fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
591
592 let mut result = vec![];
593
594 let data: Vec<&str> = arg.splitn(2, ':').collect();
595
596 if data.len() != 2 {
597 result.push(String::from("root.pxar:/"));
598 result.push(String::from("etc.pxar:/etc"));
599 return result;
600 }
601
602 let files = tools::complete_file_name(data[1], param);
603
604 for file in files {
605 result.push(format!("{}:{}", data[0], file));
606 }
607
608 result
609 }
610
/// Downloads and restores a single archive from a snapshot.
///
/// "snapshot" may be a full snapshot path, or a backup group - then the
/// first snapshot the server lists for that group is used.
/// "target" == "-" writes the raw data to stdout instead of the file system.
fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // a single '/' means a group ("host/myhost"); two means a full snapshot path
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.len() == 0 {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        // NOTE(review): takes the first returned snapshot - assumes the
        // server lists the most recent one first; confirm API ordering
        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    // None means: write to stdout
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // map the client-side archive name to the server-side file name
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;

    use std::os::unix::fs::OpenOptionsExt;

    // anonymous temporary file (O_TMPFILE) used to hold the downloaded index
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    if server_archive_name.ends_with(".blob") {

        // blobs are small enough to buffer completely in memory
        let writer = Vec::with_capacity(1024*1024);
        let blob_data = client.download(&server_archive_name, writer).wait()?;
        let blob = DataBlob::from_raw(blob_data)?;
        blob.verify_crc()?;

        let raw_data = match crypt_config {
            Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
            None => blob.decode(None)?,
        };

        if let Some(target) = target {
            crate::tools::file_set_contents(target, &raw_data, None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&raw_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {
        // dynamic index (pxar archive): download the index, then fetch
        // chunks on demand while decoding
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;

        // pre-cache the 8 most referenced chunks
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::CA_FORMAT_DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
                if verbose {
                    println!("{:?}", path);
                }
                Ok(())
            });

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            // no target: pipe the raw pxar archive to stdout
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {
        // fixed index (block image)
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedFixedReader::new(index, chunk_reader);

        if let Some(target) = target {
            // create_new: refuse to overwrite an existing file
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to store data - {}", err))?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else {
        bail!("unknown archive file extension (expected .pxar of .img)");
    }

    Ok(Value::Null)
}
768
769 fn prune(
770 mut param: Value,
771 _info: &ApiMethod,
772 _rpcenv: &mut dyn RpcEnvironment,
773 ) -> Result<Value, Error> {
774
775 let repo = extract_repository_from_value(&param)?;
776
777 let mut client = HttpClient::new(repo.host(), repo.user())?;
778
779 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
780
781 param.as_object_mut().unwrap().remove("repository");
782
783 let result = client.post(&path, Some(param)).wait()?;
784
785 record_repository(&repo);
786
787 Ok(result)
788 }
789
790 fn status(
791 param: Value,
792 _info: &ApiMethod,
793 _rpcenv: &mut dyn RpcEnvironment,
794 ) -> Result<Value, Error> {
795
796 let repo = extract_repository_from_value(&param)?;
797
798 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
799
800 let client = HttpClient::new(repo.host(), repo.user())?;
801
802 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
803
804 let result = client.get(&path, None).wait()?;
805 let data = &result["data"];
806
807 record_repository(&repo);
808
809 if output_format == "text" {
810 let total = data["total"].as_u64().unwrap();
811 let used = data["used"].as_u64().unwrap();
812 let avail = data["avail"].as_u64().unwrap();
813 let roundup = total/200;
814
815 println!(
816 "total: {} used: {} ({} %) available: {}",
817 total,
818 used,
819 ((used+roundup)*100)/total,
820 avail,
821 );
822 } else {
823 format_and_print_result(data, &output_format);
824 }
825
826 Ok(Value::Null)
827 }
828
829 // like get, but simply ignore errors and return Null instead
830 fn try_get(repo: &BackupRepository, url: &str) -> Value {
831
832 let client = match HttpClient::new(repo.host(), repo.user()) {
833 Ok(v) => v,
834 _ => return Value::Null,
835 };
836
837 let mut resp = match client.get(url, None).wait() {
838 Ok(v) => v,
839 _ => return Value::Null,
840 };
841
842 if let Some(map) = resp.as_object_mut() {
843 if let Some(data) = map.remove("data") {
844 return data;
845 }
846 }
847 Value::Null
848 }
849
850 fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
851
852 let mut result = vec![];
853
854 let repo = match extract_repository_from_map(param) {
855 Some(v) => v,
856 _ => return result,
857 };
858
859 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
860
861 let data = try_get(&repo, &path);
862
863 if let Some(list) = data.as_array() {
864 for item in list {
865 if let (Some(backup_id), Some(backup_type)) =
866 (item["backup-id"].as_str(), item["backup-type"].as_str())
867 {
868 result.push(format!("{}/{}", backup_type, backup_id));
869 }
870 }
871 }
872
873 result
874 }
875
876 fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
877
878 let mut result = vec![];
879
880 let repo = match extract_repository_from_map(param) {
881 Some(v) => v,
882 _ => return result,
883 };
884
885 if arg.matches('/').count() < 2 {
886 let groups = complete_backup_group(arg, param);
887 for group in groups {
888 result.push(group.to_string());
889 result.push(format!("{}/", group));
890 }
891 return result;
892 }
893
894 let mut parts = arg.split('/');
895 let query = tools::json_object_to_query(json!({
896 "backup-type": parts.next().unwrap(),
897 "backup-id": parts.next().unwrap(),
898 })).unwrap();
899
900 let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
901
902 let data = try_get(&repo, &path);
903
904 if let Some(list) = data.as_array() {
905 for item in list {
906 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
907 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
908 {
909 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
910 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
911 }
912 }
913 }
914
915 result
916 }
917
918 fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
919
920 let mut result = vec![];
921
922 let repo = match extract_repository_from_map(param) {
923 Some(v) => v,
924 _ => return result,
925 };
926
927 let snapshot = match param.get("snapshot") {
928 Some(path) => {
929 match BackupDir::parse(path) {
930 Ok(v) => v,
931 _ => return result,
932 }
933 }
934 _ => return result,
935 };
936
937 let query = tools::json_object_to_query(json!({
938 "backup-type": snapshot.group().backup_type(),
939 "backup-id": snapshot.group().backup_id(),
940 "backup-time": snapshot.backup_time().timestamp(),
941 })).unwrap();
942
943 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
944
945 let data = try_get(&repo, &path);
946
947 if let Some(list) = data.as_array() {
948 for item in list {
949 if let Some(filename) = item.as_str() {
950 result.push(filename.to_owned());
951 }
952 }
953 }
954
955 result
956 }
957
958 fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
959
960 let result = complete_server_file_name(arg, param);
961
962 strip_server_file_expenstions(result)
963 }
964
/// Shell completion: suggests chunk sizes (in KiB) as powers of two from
/// 64 up to and including 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut sizes = Vec::new();

    let mut current = 64;
    while current <= 4096 {
        sizes.push(current.to_string());
        current *= 2;
    }

    sizes
}
978
979 fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
980
981 // fixme: implement other input methods
982
983 use std::env::VarError::*;
984 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
985 Ok(p) => return Ok(p.as_bytes().to_vec()),
986 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
987 Err(NotPresent) => {
988 // Try another method
989 }
990 }
991
992 // If we're on a TTY, query the user for a password
993 if crate::tools::tty::stdin_isatty() {
994 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
995 }
996
997 bail!("no password input mechanism available");
998 }
999
1000 fn key_create(
1001 param: Value,
1002 _info: &ApiMethod,
1003 _rpcenv: &mut dyn RpcEnvironment,
1004 ) -> Result<Value, Error> {
1005
1006 let path = tools::required_string_param(&param, "path")?;
1007 let path = PathBuf::from(path);
1008
1009 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1010
1011 let key = proxmox::sys::linux::random_data(32)?;
1012
1013 if kdf == "scrypt" {
1014 // always read passphrase from tty
1015 if !crate::tools::tty::stdin_isatty() {
1016 bail!("unable to read passphrase - no tty");
1017 }
1018
1019 let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
1020
1021 let key_config = encrypt_key_with_passphrase(&key, &password)?;
1022
1023 store_key_config(&path, false, key_config)?;
1024
1025 Ok(Value::Null)
1026 } else if kdf == "none" {
1027 let created = Local.timestamp(Local::now().timestamp(), 0);
1028
1029 store_key_config(&path, false, KeyConfig {
1030 kdf: None,
1031 created,
1032 modified: created,
1033 data: key,
1034 })?;
1035
1036 Ok(Value::Null)
1037 } else {
1038 unreachable!();
1039 }
1040 }
1041
1042 fn master_pubkey_path() -> Result<PathBuf, Error> {
1043 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1044
1045 // usually $HOME/.config/proxmox-backup/master-public.pem
1046 let path = base.place_config_file("master-public.pem")?;
1047
1048 Ok(path)
1049 }
1050
1051 fn key_import_master_pubkey(
1052 param: Value,
1053 _info: &ApiMethod,
1054 _rpcenv: &mut dyn RpcEnvironment,
1055 ) -> Result<Value, Error> {
1056
1057 let path = tools::required_string_param(&param, "path")?;
1058 let path = PathBuf::from(path);
1059
1060 let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
1061
1062 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1063 bail!("Unable to decode PEM data - {}", err);
1064 }
1065
1066 let target_path = master_pubkey_path()?;
1067
1068 proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
1069
1070 println!("Imported public master key to {:?}", target_path);
1071
1072 Ok(Value::Null)
1073 }
1074
1075 fn key_create_master_key(
1076 _param: Value,
1077 _info: &ApiMethod,
1078 _rpcenv: &mut dyn RpcEnvironment,
1079 ) -> Result<Value, Error> {
1080
1081 // we need a TTY to query the new password
1082 if !crate::tools::tty::stdin_isatty() {
1083 bail!("unable to create master key - no tty");
1084 }
1085
1086 let rsa = openssl::rsa::Rsa::generate(4096)?;
1087 let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
1088
1089 let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
1090 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1091
1092 if new_pw != verify_pw {
1093 bail!("Password verification fail!");
1094 }
1095
1096 if new_pw.len() < 5 {
1097 bail!("Password is too short!");
1098 }
1099
1100 let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
1101 let filename_pub = "master-public.pem";
1102 println!("Writing public master key to {}", filename_pub);
1103 proxmox_backup::tools::file_set_contents(filename_pub, pub_key.as_slice(), None)?;
1104
1105 let cipher = openssl::symm::Cipher::aes_256_cbc();
1106 let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
1107
1108 let filename_priv = "master-private.pem";
1109 println!("Writing private master key to {}", filename_priv);
1110 proxmox_backup::tools::file_set_contents(filename_priv, priv_key.as_slice(), None)?;
1111
1112 Ok(Value::Null)
1113 }
1114
1115 fn key_change_passphrase(
1116 param: Value,
1117 _info: &ApiMethod,
1118 _rpcenv: &mut dyn RpcEnvironment,
1119 ) -> Result<Value, Error> {
1120
1121 let path = tools::required_string_param(&param, "path")?;
1122 let path = PathBuf::from(path);
1123
1124 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1125
1126 // we need a TTY to query the new password
1127 if !crate::tools::tty::stdin_isatty() {
1128 bail!("unable to change passphrase - no tty");
1129 }
1130
1131 let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
1132
1133 if kdf == "scrypt" {
1134
1135 let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
1136 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1137
1138 if new_pw != verify_pw {
1139 bail!("Password verification fail!");
1140 }
1141
1142 if new_pw.len() < 5 {
1143 bail!("Password is too short!");
1144 }
1145
1146 let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
1147 new_key_config.created = created; // keep original value
1148
1149 store_key_config(&path, true, new_key_config)?;
1150
1151 Ok(Value::Null)
1152 } else if kdf == "none" {
1153 let modified = Local.timestamp(Local::now().timestamp(), 0);
1154
1155 store_key_config(&path, true, KeyConfig {
1156 kdf: None,
1157 created, // keep original value
1158 modified,
1159 data: key.to_vec(),
1160 })?;
1161
1162 Ok(Value::Null)
1163 } else {
1164 unreachable!();
1165 }
1166 }
1167
1168 fn key_mgmt_cli() -> CliCommandMap {
1169
1170 let kdf_schema: Arc<Schema> = Arc::new(
1171 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
1172 .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
1173 .default("scrypt")
1174 .into()
1175 );
1176
1177 let key_create_cmd_def = CliCommand::new(
1178 ApiMethod::new(
1179 key_create,
1180 ObjectSchema::new("Create a new encryption key.")
1181 .required("path", StringSchema::new("File system path."))
1182 .optional("kdf", kdf_schema.clone())
1183 ))
1184 .arg_param(vec!["path"])
1185 .completion_cb("path", tools::complete_file_name);
1186
1187 let key_change_passphrase_cmd_def = CliCommand::new(
1188 ApiMethod::new(
1189 key_change_passphrase,
1190 ObjectSchema::new("Change the passphrase required to decrypt the key.")
1191 .required("path", StringSchema::new("File system path."))
1192 .optional("kdf", kdf_schema.clone())
1193 ))
1194 .arg_param(vec!["path"])
1195 .completion_cb("path", tools::complete_file_name);
1196
1197 let key_create_master_key_cmd_def = CliCommand::new(
1198 ApiMethod::new(
1199 key_create_master_key,
1200 ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
1201 ));
1202
1203 let key_import_master_pubkey_cmd_def = CliCommand::new(
1204 ApiMethod::new(
1205 key_import_master_pubkey,
1206 ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
1207 .required("path", StringSchema::new("File system path."))
1208 ))
1209 .arg_param(vec!["path"])
1210 .completion_cb("path", tools::complete_file_name);
1211
1212 let cmd_def = CliCommandMap::new()
1213 .insert("create".to_owned(), key_create_cmd_def.into())
1214 .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
1215 .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
1216 .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());
1217
1218 cmd_def
1219 }
1220
1221 fn main() {
1222
1223 let backup_source_schema: Arc<Schema> = Arc::new(
1224 StringSchema::new("Backup source specification ([<label>:<path>]).")
1225 .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
1226 .into()
1227 );
1228
1229 let backup_cmd_def = CliCommand::new(
1230 ApiMethod::new(
1231 create_backup,
1232 ObjectSchema::new("Create (host) backup.")
1233 .required(
1234 "backupspec",
1235 ArraySchema::new(
1236 "List of backup source specifications ([<label.ext>:<path>] ...)",
1237 backup_source_schema,
1238 ).min_length(1)
1239 )
1240 .optional("repository", REPO_URL_SCHEMA.clone())
1241 .optional(
1242 "include-dev",
1243 ArraySchema::new(
1244 "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
1245 StringSchema::new("Path to file.").into()
1246 )
1247 )
1248 .optional(
1249 "keyfile",
1250 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
1251 .optional(
1252 "verbose",
1253 BooleanSchema::new("Verbose output.").default(false))
1254 .optional(
1255 "host-id",
1256 StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
1257 .optional(
1258 "chunk-size",
1259 IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
1260 .minimum(64)
1261 .maximum(4096)
1262 .default(4096)
1263 )
1264 ))
1265 .arg_param(vec!["backupspec"])
1266 .completion_cb("repository", complete_repository)
1267 .completion_cb("backupspec", complete_backup_source)
1268 .completion_cb("keyfile", tools::complete_file_name)
1269 .completion_cb("chunk-size", complete_chunk_size);
1270
1271 let list_cmd_def = CliCommand::new(
1272 ApiMethod::new(
1273 list_backup_groups,
1274 ObjectSchema::new("List backup groups.")
1275 .optional("repository", REPO_URL_SCHEMA.clone())
1276 .optional("output-format", OUTPUT_FORMAT.clone())
1277 ))
1278 .completion_cb("repository", complete_repository);
1279
1280 let snapshots_cmd_def = CliCommand::new(
1281 ApiMethod::new(
1282 list_snapshots,
1283 ObjectSchema::new("List backup snapshots.")
1284 .optional("group", StringSchema::new("Backup group."))
1285 .optional("repository", REPO_URL_SCHEMA.clone())
1286 .optional("output-format", OUTPUT_FORMAT.clone())
1287 ))
1288 .arg_param(vec!["group"])
1289 .completion_cb("group", complete_backup_group)
1290 .completion_cb("repository", complete_repository);
1291
1292 let forget_cmd_def = CliCommand::new(
1293 ApiMethod::new(
1294 forget_snapshots,
1295 ObjectSchema::new("Forget (remove) backup snapshots.")
1296 .required("snapshot", StringSchema::new("Snapshot path."))
1297 .optional("repository", REPO_URL_SCHEMA.clone())
1298 ))
1299 .arg_param(vec!["snapshot"])
1300 .completion_cb("repository", complete_repository)
1301 .completion_cb("snapshot", complete_group_or_snapshot);
1302
1303 let garbage_collect_cmd_def = CliCommand::new(
1304 ApiMethod::new(
1305 start_garbage_collection,
1306 ObjectSchema::new("Start garbage collection for a specific repository.")
1307 .optional("repository", REPO_URL_SCHEMA.clone())
1308 ))
1309 .completion_cb("repository", complete_repository);
1310
1311 let restore_cmd_def = CliCommand::new(
1312 ApiMethod::new(
1313 restore,
1314 ObjectSchema::new("Restore backup repository.")
1315 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1316 .required("archive-name", StringSchema::new("Backup archive name."))
1317 .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to stdandard output.
1318
1319 We do not extraxt '.pxar' archives when writing to stdandard output.
1320
1321 "###
1322 ))
1323 .optional("repository", REPO_URL_SCHEMA.clone())
1324 .optional("keyfile", StringSchema::new("Path to encryption key."))
1325 .optional(
1326 "verbose",
1327 BooleanSchema::new("Verbose output.").default(false)
1328 )
1329 ))
1330 .arg_param(vec!["snapshot", "archive-name", "target"])
1331 .completion_cb("repository", complete_repository)
1332 .completion_cb("snapshot", complete_group_or_snapshot)
1333 .completion_cb("archive-name", complete_archive_name)
1334 .completion_cb("target", tools::complete_file_name);
1335
1336 let prune_cmd_def = CliCommand::new(
1337 ApiMethod::new(
1338 prune,
1339 proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
1340 ObjectSchema::new("Prune backup repository.")
1341 .optional("repository", REPO_URL_SCHEMA.clone())
1342 )
1343 ))
1344 .completion_cb("repository", complete_repository);
1345
1346 let status_cmd_def = CliCommand::new(
1347 ApiMethod::new(
1348 status,
1349 ObjectSchema::new("Get repository status.")
1350 .optional("repository", REPO_URL_SCHEMA.clone())
1351 .optional("output-format", OUTPUT_FORMAT.clone())
1352 ))
1353 .completion_cb("repository", complete_repository);
1354
1355 let cmd_def = CliCommandMap::new()
1356 .insert("backup".to_owned(), backup_cmd_def.into())
1357 .insert("forget".to_owned(), forget_cmd_def.into())
1358 .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
1359 .insert("list".to_owned(), list_cmd_def.into())
1360 .insert("prune".to_owned(), prune_cmd_def.into())
1361 .insert("restore".to_owned(), restore_cmd_def.into())
1362 .insert("snapshots".to_owned(), snapshots_cmd_def.into())
1363 .insert("status".to_owned(), status_cmd_def.into())
1364 .insert("key".to_owned(), key_mgmt_cli().into());
1365
1366 hyper::rt::run(futures::future::lazy(move || {
1367 run_cli_command(cmd_def.into());
1368 Ok(())
1369 }));
1370
1371 }