]> git.proxmox.com Git - proxmox-backup.git/blob - src/bin/proxmox-backup-client.rs
src/backup/fixed_index.rs: implement BufferedFixedReader
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
1 //#[macro_use]
2 extern crate proxmox_backup;
3
4 use failure::*;
5 //use std::os::unix::io::AsRawFd;
6 use chrono::{Local, TimeZone};
7 use std::path::{Path, PathBuf};
8 use std::collections::HashMap;
9
10 use proxmox_backup::tools;
11 use proxmox_backup::cli::*;
12 use proxmox_backup::api_schema::*;
13 use proxmox_backup::api_schema::router::*;
14 use proxmox_backup::client::*;
15 use proxmox_backup::backup::*;
16 use proxmox_backup::pxar;
17
18 //use proxmox_backup::backup::image_index::*;
19 //use proxmox_backup::config::datastore;
20 //use proxmox_backup::pxar::encoder::*;
21 //use proxmox_backup::backup::datastore::*;
22
23 use serde_json::{json, Value};
24 //use hyper::Body;
25 use std::sync::Arc;
26 use regex::Regex;
27 use xdg::BaseDirectories;
28
29 use lazy_static::lazy_static;
30 use futures::*;
31 use tokio::sync::mpsc;
32
lazy_static! {
    // Matches one "backupspec" CLI argument: an archive name with a known
    // extension (.pxar, .img or .conf), a colon, and the local source path.
    // Capture 1 = archive/target name, capture 2 = source path.
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();

    // Schema shared by all commands for the --repository parameter.
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
43
44
45 fn record_repository(repo: &BackupRepository) {
46
47 let base = match BaseDirectories::with_prefix("proxmox-backup") {
48 Ok(v) => v,
49 _ => return,
50 };
51
52 // usually $HOME/.cache/proxmox-backup/repo-list
53 let path = match base.place_cache_file("repo-list") {
54 Ok(v) => v,
55 _ => return,
56 };
57
58 let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
59
60 let repo = repo.to_string();
61
62 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
63
64 let mut map = serde_json::map::Map::new();
65
66 loop {
67 let mut max_used = 0;
68 let mut max_repo = None;
69 for (repo, count) in data.as_object().unwrap() {
70 if map.contains_key(repo) { continue; }
71 if let Some(count) = count.as_i64() {
72 if count > max_used {
73 max_used = count;
74 max_repo = Some(repo);
75 }
76 }
77 }
78 if let Some(repo) = max_repo {
79 map.insert(repo.to_owned(), json!(max_used));
80 } else {
81 break;
82 }
83 if map.len() > 10 { // store max. 10 repos
84 break;
85 }
86 }
87
88 let new_data = json!(map);
89
90 let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
91 }
92
93 fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
94
95 let mut result = vec![];
96
97 let base = match BaseDirectories::with_prefix("proxmox-backup") {
98 Ok(v) => v,
99 _ => return result,
100 };
101
102 // usually $HOME/.cache/proxmox-backup/repo-list
103 let path = match base.place_cache_file("repo-list") {
104 Ok(v) => v,
105 _ => return result,
106 };
107
108 let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
109
110 if let Some(map) = data.as_object() {
111 for (repo, _count) in map {
112 result.push(repo.to_owned());
113 }
114 }
115
116 result
117 }
118
/// Encode `dir_path` as a pxar archive and upload it through `client`
/// as a dynamically chunked archive named `archive_name`.
///
/// The pxar encoder output is split by `ChunkStream` (chunker size taken
/// from `chunk_size` when given) and piped through a bounded channel so
/// the chunker runs in its own tokio task, in parallel with the upload.
/// `all_file_systems` is forwarded to the pxar encoder; `crypt_config`
/// enables encryption when set. Blocks until the upload has finished.
fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    all_file_systems: bool,
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    // the channel carries Result items; flatten them back into a stream
    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(())
}
148
/// Upload a file or block device as a fixed-size chunked archive named
/// `archive_name` through `client`.
///
/// The file is read via tokio and cut into fixed chunks of `chunk_size`
/// bytes (default 4 MiB). `image_size` is the total size reported to the
/// server. `crypt_config` enables encryption when set. Blocks until the
/// upload has finished.
fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(), Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).wait()?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;

    Ok(())
}
172
/// Map server-side file names to client-side archive names by stripping
/// the server extension (".didx", ".fidx" or ".blob" - all 5 chars).
///
/// Names without a known extension are passed through unchanged (this
/// should not happen in practice).
fn strip_server_file_expenstions(list: Vec<String>) -> Vec<String> {

    list.into_iter()
        .map(|file| {
            let known = [".didx", ".fidx", ".blob"]
                .iter()
                .any(|ext| file.ends_with(ext));
            if known {
                file[..file.len()-5].to_owned()
            } else {
                file // should not happen
            }
        })
        .collect()
}
191
192 /* not used:
193 fn list_backups(
194 param: Value,
195 _info: &ApiMethod,
196 _rpcenv: &mut dyn RpcEnvironment,
197 ) -> Result<Value, Error> {
198
199 let repo_url = tools::required_string_param(&param, "repository")?;
200 let repo: BackupRepository = repo_url.parse()?;
201
202 let mut client = HttpClient::new(repo.host(), repo.user())?;
203
204 let path = format!("api2/json/admin/datastore/{}/backups", repo.store());
205
206 let result = client.get(&path, None)?;
207
208 record_repository(&repo);
209
210 // fixme: implement and use output formatter instead ..
211 let list = result["data"].as_array().unwrap();
212
213 for item in list {
214
215 let id = item["backup-id"].as_str().unwrap();
216 let btype = item["backup-type"].as_str().unwrap();
217 let epoch = item["backup-time"].as_i64().unwrap();
218
219 let backup_dir = BackupDir::new(btype, id, epoch);
220
221 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
222 let files = strip_server_file_expenstions(files);
223
224 for filename in files {
225 let path = backup_dir.relative_path().to_str().unwrap().to_owned();
226 println!("{} | {}/{}", backup_dir.backup_time().format("%c"), path, filename);
227 }
228 }
229
230 //Ok(result)
231 Ok(Value::Null)
232 }
233 */
234
235 fn list_backup_groups(
236 param: Value,
237 _info: &ApiMethod,
238 _rpcenv: &mut dyn RpcEnvironment,
239 ) -> Result<Value, Error> {
240
241 let repo_url = tools::required_string_param(&param, "repository")?;
242 let repo: BackupRepository = repo_url.parse()?;
243
244 let client = HttpClient::new(repo.host(), repo.user())?;
245
246 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
247
248 let mut result = client.get(&path, None).wait()?;
249
250 record_repository(&repo);
251
252 // fixme: implement and use output formatter instead ..
253 let list = result["data"].as_array_mut().unwrap();
254
255 list.sort_unstable_by(|a, b| {
256 let a_id = a["backup-id"].as_str().unwrap();
257 let a_backup_type = a["backup-type"].as_str().unwrap();
258 let b_id = b["backup-id"].as_str().unwrap();
259 let b_backup_type = b["backup-type"].as_str().unwrap();
260
261 let type_order = a_backup_type.cmp(b_backup_type);
262 if type_order == std::cmp::Ordering::Equal {
263 a_id.cmp(b_id)
264 } else {
265 type_order
266 }
267 });
268
269 for item in list {
270
271 let id = item["backup-id"].as_str().unwrap();
272 let btype = item["backup-type"].as_str().unwrap();
273 let epoch = item["last-backup"].as_i64().unwrap();
274 let last_backup = Local.timestamp(epoch, 0);
275 let backup_count = item["backup-count"].as_u64().unwrap();
276
277 let group = BackupGroup::new(btype, id);
278
279 let path = group.group_path().to_str().unwrap().to_owned();
280
281 let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
282 let files = strip_server_file_expenstions(files);
283
284 println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
285 backup_count, tools::join(&files, ' '));
286 }
287
288 //Ok(result)
289 Ok(Value::Null)
290 }
291
/// List the snapshots of one backup group, printing one line per
/// snapshot (relative path, backup time, file names).
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    // "group" has the form "<backup-type>/<backup-id>"
    let path = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(path)?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.get(&path, Some(json!({
        "backup-type": group.backup_type(),
        "backup-id": group.backup_id(),
    }))).wait()?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array().unwrap();

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_server_file_expenstions(files);

        println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
    }

    Ok(Value::Null)
}
336
337 fn forget_snapshots(
338 param: Value,
339 _info: &ApiMethod,
340 _rpcenv: &mut dyn RpcEnvironment,
341 ) -> Result<Value, Error> {
342
343 let repo_url = tools::required_string_param(&param, "repository")?;
344 let repo: BackupRepository = repo_url.parse()?;
345
346 let path = tools::required_string_param(&param, "snapshot")?;
347 let snapshot = BackupDir::parse(path)?;
348
349 let mut client = HttpClient::new(repo.host(), repo.user())?;
350
351 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
352
353 let result = client.delete(&path, Some(json!({
354 "backup-type": snapshot.group().backup_type(),
355 "backup-id": snapshot.group().backup_id(),
356 "backup-time": snapshot.backup_time().timestamp(),
357 }))).wait()?;
358
359 record_repository(&repo);
360
361 Ok(result)
362 }
363
364 fn start_garbage_collection(
365 param: Value,
366 _info: &ApiMethod,
367 _rpcenv: &mut dyn RpcEnvironment,
368 ) -> Result<Value, Error> {
369
370 let repo_url = tools::required_string_param(&param, "repository")?;
371 let repo: BackupRepository = repo_url.parse()?;
372
373 let mut client = HttpClient::new(repo.host(), repo.user())?;
374
375 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
376
377 let result = client.post(&path, None).wait()?;
378
379 record_repository(&repo);
380
381 Ok(result)
382 }
383
384 fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
385
386 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
387 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
388 }
389 bail!("unable to parse directory specification '{}'", value);
390 }
391
/// Run the `backup` command: create one backup snapshot containing all
/// requested archives.
///
/// Each "backupspec" ("<target>.<ext>:<source>") is validated up front
/// (.pxar needs a directory, .img a file or block device, .conf a regular
/// file) before the backup session is opened. When a keyfile is given the
/// data is encrypted; if a master public key is installed, the encryption
/// key is additionally uploaded RSA-encrypted as "rsa-encrypted.key".
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let repo: BackupRepository = repo_url.parse()?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    // "chunk-size" is given in KiB on the command line
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    // default the backup id to this host's node name
    let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG };

    // Validate every source *before* opening the backup session, so a bad
    // spec does not leave a half-written snapshot behind.
    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };
        let file_type = metadata.file_type();

        // BACKUPSPEC_REGEX guarantees the target carries an extension
        let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = tools::image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    // truncate to whole seconds (the server stores an epoch)
    let backup_time = Local.timestamp(Local::now().timestamp(), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup");
    println!("Client name: {}", tools::nodename());
    println!("Start Time: {}", backup_time.to_rfc3339());

    // Load the encryption key (if any). When a master public key exists,
    // also produce an RSA-encrypted copy of the key for recovery.
    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    // open the backup session (this client only creates "host" backups)
    let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    all_file_systems,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                )?;
            }
        }
    }

    // store the RSA-encrypted encryption key alongside the backup data
    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        client.upload_blob_from_data(rsa_encrypted_key, target, None, false).wait()?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    client.finish().wait()?;

    let end_time = Local.timestamp(Local::now().timestamp(), 0);
    let elapsed = end_time.signed_duration_since(backup_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339());

    Ok(Value::Null)
}
554
555 fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
556
557 let mut result = vec![];
558
559 let data: Vec<&str> = arg.splitn(2, ':').collect();
560
561 if data.len() != 2 {
562 result.push(String::from("root.pxar:/"));
563 result.push(String::from("etc.pxar:/etc"));
564 return result;
565 }
566
567 let files = tools::complete_file_name(data[1], param);
568
569 for file in files {
570 result.push(format!("{}:{}", data[0], file));
571 }
572
573 result
574 }
575
/// Run the `restore` command: download an archive and restore it locally.
///
/// "snapshot" may be a group path ("<type>/<id>") - then the first
/// snapshot returned by the server is used (presumably the newest;
/// TODO confirm server-side ordering) - or a full snapshot path.
/// ".pxar" archives are extracted into the target directory, ".img"
/// archives are written to the target file (which must not exist yet).
fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // exactly one '/' means a group path; resolve it to a concrete snapshot
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.len() == 0 {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        // picks list[0] - assumes the server lists the newest snapshot
        // first (TODO confirm)
        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Local.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // map the client-side archive name to the server-side index file
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        bail!("unknown archive file extension (expected .pxar of .img)");
    };

    let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config);

    use std::os::unix::fs::OpenOptionsExt;

    // anonymous temp file (O_TMPFILE) for the downloaded index
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    if server_archive_name.ends_with(".didx") {
        // dynamic index: decode the pxar stream into the target directory
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let feature_flags = pxar::CA_FORMAT_DEFAULT;
        let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
            if verbose {
                println!("{:?}", path);
            }
            Ok(())
        });

        decoder.restore(Path::new(target))?;

    } else if server_archive_name.ends_with(".fidx") {
        // fixed index: copy the raw image data into the target file
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;

        let mut reader = BufferedFixedReader::new(index, chunk_reader);

        // create_new: refuse to overwrite an existing file
        let mut writer = std::fs::OpenOptions::new()
            .write(true)
            .create(true)
            .create_new(true)
            .open(target)
            .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;

        std::io::copy(&mut reader, &mut writer)
            .map_err(|err| format_err!("unable to store data - {}", err))?;
    }

    Ok(Value::Null)
}
687
/// Run the `download` command: fetch one file of a snapshot and store it
/// at "target".
///
/// Like `restore`, "snapshot" may be a group path - then the first
/// snapshot returned by the server is used (presumably the newest; TODO
/// confirm ordering) - or a full snapshot path. Only ".blob" files are
/// implemented so far.
fn download(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let file_name = tools::required_string_param(&param, "file-name")?;

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(CryptConfig::new(key)?)
        }
    };

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let query;

    // exactly one '/' means a group path; resolve it to a concrete snapshot
    if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.len() == 0 {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        query = tools::json_object_to_query(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": list[0]["backup-time"].as_i64().unwrap(),
            "file-name": file_name,
        }))?;
    } else {
        let snapshot = BackupDir::parse(path)?;

        query = tools::json_object_to_query(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
            "file-name": file_name,
        }))?;
    }

    let target = tools::required_string_param(&param, "target")?;

    let path = format!("api2/json/admin/datastore/{}/download?{}", repo.store(), query);

    println!("DOWNLOAD FILE {} to {}", path, target);

    if file_name.ends_with(".blob") {
        // download into memory, verify CRC, decode (decrypt), then write out
        let writer = Vec::with_capacity(1024*1024);
        let blob_data = client.download(&path, writer).wait()?;
        let blob = DataBlob::from_raw(blob_data)?;
        blob.verify_crc()?;
        let raw_data = blob.decode(crypt_config.as_ref())?; // fixme

        crate::tools::file_set_contents(target, &raw_data, None)?;

    } else {
        unimplemented!();
    }

    Ok(Value::Null)
}
769
770 fn prune(
771 mut param: Value,
772 _info: &ApiMethod,
773 _rpcenv: &mut dyn RpcEnvironment,
774 ) -> Result<Value, Error> {
775
776 let repo_url = tools::required_string_param(&param, "repository")?;
777 let repo: BackupRepository = repo_url.parse()?;
778
779 let mut client = HttpClient::new(repo.host(), repo.user())?;
780
781 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
782
783 param.as_object_mut().unwrap().remove("repository");
784
785 let result = client.post(&path, Some(param)).wait()?;
786
787 record_repository(&repo);
788
789 Ok(result)
790 }
791
792 // like get, but simply ignore errors and return Null instead
793 fn try_get(repo: &BackupRepository, url: &str) -> Value {
794
795 let client = match HttpClient::new(repo.host(), repo.user()) {
796 Ok(v) => v,
797 _ => return Value::Null,
798 };
799
800 let mut resp = match client.get(url, None).wait() {
801 Ok(v) => v,
802 _ => return Value::Null,
803 };
804
805 if let Some(map) = resp.as_object_mut() {
806 if let Some(data) = map.remove("data") {
807 return data;
808 }
809 }
810 Value::Null
811 }
812
813 fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {
814
815 let repo_url = match param.get("repository") {
816 Some(v) => v,
817 _ => return None,
818 };
819
820 let repo: BackupRepository = match repo_url.parse() {
821 Ok(v) => v,
822 _ => return None,
823 };
824
825 Some(repo)
826 }
827
828 fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
829
830 let mut result = vec![];
831
832 let repo = match extract_repo(param) {
833 Some(v) => v,
834 _ => return result,
835 };
836
837 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
838
839 let data = try_get(&repo, &path);
840
841 if let Some(list) = data.as_array() {
842 for item in list {
843 if let (Some(backup_id), Some(backup_type)) =
844 (item["backup-id"].as_str(), item["backup-type"].as_str())
845 {
846 result.push(format!("{}/{}", backup_type, backup_id));
847 }
848 }
849 }
850
851 result
852 }
853
854 fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
855
856 let mut result = vec![];
857
858 let repo = match extract_repo(param) {
859 Some(v) => v,
860 _ => return result,
861 };
862
863 if arg.matches('/').count() < 2 {
864 let groups = complete_backup_group(arg, param);
865 for group in groups {
866 result.push(group.to_string());
867 result.push(format!("{}/", group));
868 }
869 return result;
870 }
871
872 let mut parts = arg.split('/');
873 let query = tools::json_object_to_query(json!({
874 "backup-type": parts.next().unwrap(),
875 "backup-id": parts.next().unwrap(),
876 })).unwrap();
877
878 let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
879
880 let data = try_get(&repo, &path);
881
882 if let Some(list) = data.as_array() {
883 for item in list {
884 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
885 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
886 {
887 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
888 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
889 }
890 }
891 }
892
893 result
894 }
895
896 fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
897
898 let mut result = vec![];
899
900 let repo = match extract_repo(param) {
901 Some(v) => v,
902 _ => return result,
903 };
904
905 let snapshot = match param.get("snapshot") {
906 Some(path) => {
907 match BackupDir::parse(path) {
908 Ok(v) => v,
909 _ => return result,
910 }
911 }
912 _ => return result,
913 };
914
915 let query = tools::json_object_to_query(json!({
916 "backup-type": snapshot.group().backup_type(),
917 "backup-id": snapshot.group().backup_id(),
918 "backup-time": snapshot.backup_time().timestamp(),
919 })).unwrap();
920
921 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
922
923 let data = try_get(&repo, &path);
924
925 if let Some(list) = data.as_array() {
926 for item in list {
927 if let Some(filename) = item.as_str() {
928 result.push(filename.to_owned());
929 }
930 }
931 }
932
933 result
934 }
935
936 fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
937
938 let result = complete_server_file_name(arg, param);
939
940 strip_server_file_expenstions(result)
941 }
942
/// Shell completion helper for --chunk-size: powers of two from 64 up to
/// and including 4096 (the value is interpreted as KiB elsewhere).
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut sizes = Vec::new();

    let mut size: u64 = 64;
    while size <= 4096 {
        sizes.push(size.to_string());
        size *= 2;
    }

    sizes
}
956
957 fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
958
959 // fixme: implement other input methods
960
961 use std::env::VarError::*;
962 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
963 Ok(p) => return Ok(p.as_bytes().to_vec()),
964 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
965 Err(NotPresent) => {
966 // Try another method
967 }
968 }
969
970 // If we're on a TTY, query the user for a password
971 if crate::tools::tty::stdin_isatty() {
972 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
973 }
974
975 bail!("no password input mechanism available");
976 }
977
978 fn key_create(
979 param: Value,
980 _info: &ApiMethod,
981 _rpcenv: &mut dyn RpcEnvironment,
982 ) -> Result<Value, Error> {
983
984 let path = tools::required_string_param(&param, "path")?;
985 let path = PathBuf::from(path);
986
987 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
988
989 let key = proxmox::sys::linux::random_data(32)?;
990
991 if kdf == "scrypt" {
992 // always read passphrase from tty
993 if !crate::tools::tty::stdin_isatty() {
994 bail!("unable to read passphrase - no tty");
995 }
996
997 let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
998
999 let key_config = encrypt_key_with_passphrase(&key, &password)?;
1000
1001 store_key_config(&path, false, key_config)?;
1002
1003 Ok(Value::Null)
1004 } else if kdf == "none" {
1005 let created = Local.timestamp(Local::now().timestamp(), 0);
1006
1007 store_key_config(&path, false, KeyConfig {
1008 kdf: None,
1009 created,
1010 modified: created,
1011 data: key,
1012 })?;
1013
1014 Ok(Value::Null)
1015 } else {
1016 unreachable!();
1017 }
1018 }
1019
1020 fn master_pubkey_path() -> Result<PathBuf, Error> {
1021 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1022
1023 // usually $HOME/.config/proxmox-backup/master-public.pem
1024 let path = base.place_config_file("master-public.pem")?;
1025
1026 Ok(path)
1027 }
1028
1029 fn key_import_master_pubkey(
1030 param: Value,
1031 _info: &ApiMethod,
1032 _rpcenv: &mut dyn RpcEnvironment,
1033 ) -> Result<Value, Error> {
1034
1035 let path = tools::required_string_param(&param, "path")?;
1036 let path = PathBuf::from(path);
1037
1038 let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
1039
1040 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1041 bail!("Unable to decode PEM data - {}", err);
1042 }
1043
1044 let target_path = master_pubkey_path()?;
1045
1046 proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
1047
1048 println!("Imported public master key to {:?}", target_path);
1049
1050 Ok(Value::Null)
1051 }
1052
/// Run the `key create-master-key` command: generate a 4096 bit RSA key
/// pair in the current directory.
///
/// The public key is written unencrypted to "master-public.pem"; the
/// private key is written AES-256-CBC passphrase-protected (PKCS#8) to
/// "master-private.pem". Requires a TTY to ask for the passphrase.
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    // ask twice to guard against typos
    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification fail!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    proxmox_backup::tools::file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    proxmox_backup::tools::file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}
1092
1093 fn key_change_passphrase(
1094 param: Value,
1095 _info: &ApiMethod,
1096 _rpcenv: &mut dyn RpcEnvironment,
1097 ) -> Result<Value, Error> {
1098
1099 let path = tools::required_string_param(&param, "path")?;
1100 let path = PathBuf::from(path);
1101
1102 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1103
1104 // we need a TTY to query the new password
1105 if !crate::tools::tty::stdin_isatty() {
1106 bail!("unable to change passphrase - no tty");
1107 }
1108
1109 let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
1110
1111 if kdf == "scrypt" {
1112
1113 let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
1114 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1115
1116 if new_pw != verify_pw {
1117 bail!("Password verification fail!");
1118 }
1119
1120 if new_pw.len() < 5 {
1121 bail!("Password is too short!");
1122 }
1123
1124 let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
1125 new_key_config.created = created; // keep original value
1126
1127 store_key_config(&path, true, new_key_config)?;
1128
1129 Ok(Value::Null)
1130 } else if kdf == "none" {
1131 let modified = Local.timestamp(Local::now().timestamp(), 0);
1132
1133 store_key_config(&path, true, KeyConfig {
1134 kdf: None,
1135 created, // keep original value
1136 modified,
1137 data: key.to_vec(),
1138 })?;
1139
1140 Ok(Value::Null)
1141 } else {
1142 unreachable!();
1143 }
1144 }
1145
1146 fn key_mgmt_cli() -> CliCommandMap {
1147
1148 let kdf_schema: Arc<Schema> = Arc::new(
1149 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
1150 .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
1151 .default("scrypt")
1152 .into()
1153 );
1154
1155 let key_create_cmd_def = CliCommand::new(
1156 ApiMethod::new(
1157 key_create,
1158 ObjectSchema::new("Create a new encryption key.")
1159 .required("path", StringSchema::new("File system path."))
1160 .optional("kdf", kdf_schema.clone())
1161 ))
1162 .arg_param(vec!["path"])
1163 .completion_cb("path", tools::complete_file_name);
1164
1165 let key_change_passphrase_cmd_def = CliCommand::new(
1166 ApiMethod::new(
1167 key_change_passphrase,
1168 ObjectSchema::new("Change the passphrase required to decrypt the key.")
1169 .required("path", StringSchema::new("File system path."))
1170 .optional("kdf", kdf_schema.clone())
1171 ))
1172 .arg_param(vec!["path"])
1173 .completion_cb("path", tools::complete_file_name);
1174
1175 let key_create_master_key_cmd_def = CliCommand::new(
1176 ApiMethod::new(
1177 key_create_master_key,
1178 ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
1179 ));
1180
1181 let key_import_master_pubkey_cmd_def = CliCommand::new(
1182 ApiMethod::new(
1183 key_import_master_pubkey,
1184 ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
1185 .required("path", StringSchema::new("File system path."))
1186 ))
1187 .arg_param(vec!["path"])
1188 .completion_cb("path", tools::complete_file_name);
1189
1190 let cmd_def = CliCommandMap::new()
1191 .insert("create".to_owned(), key_create_cmd_def.into())
1192 .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
1193 .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
1194 .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());
1195
1196 cmd_def
1197 }
1198
1199 fn main() {
1200
1201 let backup_source_schema: Arc<Schema> = Arc::new(
1202 StringSchema::new("Backup source specification ([<label>:<path>]).")
1203 .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
1204 .into()
1205 );
1206
1207 let backup_cmd_def = CliCommand::new(
1208 ApiMethod::new(
1209 create_backup,
1210 ObjectSchema::new("Create (host) backup.")
1211 .required("repository", REPO_URL_SCHEMA.clone())
1212 .required(
1213 "backupspec",
1214 ArraySchema::new(
1215 "List of backup source specifications ([<label.ext>:<path>] ...)",
1216 backup_source_schema,
1217 ).min_length(1)
1218 )
1219 .optional(
1220 "keyfile",
1221 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
1222 .optional(
1223 "verbose",
1224 BooleanSchema::new("Verbose output.").default(false))
1225 .optional(
1226 "host-id",
1227 StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
1228 .optional(
1229 "chunk-size",
1230 IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
1231 .minimum(64)
1232 .maximum(4096)
1233 .default(4096)
1234 )
1235 ))
1236 .arg_param(vec!["repository", "backupspec"])
1237 .completion_cb("repository", complete_repository)
1238 .completion_cb("backupspec", complete_backup_source)
1239 .completion_cb("keyfile", tools::complete_file_name)
1240 .completion_cb("chunk-size", complete_chunk_size);
1241
1242 let list_cmd_def = CliCommand::new(
1243 ApiMethod::new(
1244 list_backup_groups,
1245 ObjectSchema::new("List backup groups.")
1246 .required("repository", REPO_URL_SCHEMA.clone())
1247 ))
1248 .arg_param(vec!["repository"])
1249 .completion_cb("repository", complete_repository);
1250
1251 let snapshots_cmd_def = CliCommand::new(
1252 ApiMethod::new(
1253 list_snapshots,
1254 ObjectSchema::new("List backup snapshots.")
1255 .required("repository", REPO_URL_SCHEMA.clone())
1256 .required("group", StringSchema::new("Backup group."))
1257 ))
1258 .arg_param(vec!["repository", "group"])
1259 .completion_cb("group", complete_backup_group)
1260 .completion_cb("repository", complete_repository);
1261
1262 let forget_cmd_def = CliCommand::new(
1263 ApiMethod::new(
1264 forget_snapshots,
1265 ObjectSchema::new("Forget (remove) backup snapshots.")
1266 .required("repository", REPO_URL_SCHEMA.clone())
1267 .required("snapshot", StringSchema::new("Snapshot path."))
1268 ))
1269 .arg_param(vec!["repository", "snapshot"])
1270 .completion_cb("repository", complete_repository)
1271 .completion_cb("snapshot", complete_group_or_snapshot);
1272
1273 let garbage_collect_cmd_def = CliCommand::new(
1274 ApiMethod::new(
1275 start_garbage_collection,
1276 ObjectSchema::new("Start garbage collection for a specific repository.")
1277 .required("repository", REPO_URL_SCHEMA.clone())
1278 ))
1279 .arg_param(vec!["repository"])
1280 .completion_cb("repository", complete_repository);
1281
1282 let download_cmd_def = CliCommand::new(
1283 ApiMethod::new(
1284 download,
1285 ObjectSchema::new("Download data from backup repository.")
1286 .required("repository", REPO_URL_SCHEMA.clone())
1287 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1288 .required("file-name", StringSchema::new("File name."))
1289 .required("target", StringSchema::new("Target directory path."))
1290 .optional("keyfile", StringSchema::new("Path to encryption key."))
1291 ))
1292 .arg_param(vec!["repository", "snapshot", "file-name", "target"])
1293 .completion_cb("repository", complete_repository)
1294 .completion_cb("snapshot", complete_group_or_snapshot)
1295 .completion_cb("file-name", complete_server_file_name)
1296 .completion_cb("keyfile", tools::complete_file_name)
1297 .completion_cb("target", tools::complete_file_name);
1298
1299 let restore_cmd_def = CliCommand::new(
1300 ApiMethod::new(
1301 restore,
1302 ObjectSchema::new("Restore backup repository.")
1303 .required("repository", REPO_URL_SCHEMA.clone())
1304 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1305 .required("archive-name", StringSchema::new("Backup archive name."))
1306 .required("target", StringSchema::new("Target directory path."))
1307 .optional("keyfile", StringSchema::new("Path to encryption key."))
1308 .optional(
1309 "verbose",
1310 BooleanSchema::new("Verbose output.").default(false)
1311 )
1312 ))
1313 .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
1314 .completion_cb("repository", complete_repository)
1315 .completion_cb("snapshot", complete_group_or_snapshot)
1316 .completion_cb("archive-name", complete_archive_name)
1317 .completion_cb("target", tools::complete_file_name);
1318
1319 let prune_cmd_def = CliCommand::new(
1320 ApiMethod::new(
1321 prune,
1322 proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
1323 ObjectSchema::new("Prune backup repository.")
1324 .required("repository", REPO_URL_SCHEMA.clone())
1325 )
1326 ))
1327 .arg_param(vec!["repository"])
1328 .completion_cb("repository", complete_repository);
1329
1330 let cmd_def = CliCommandMap::new()
1331 .insert("backup".to_owned(), backup_cmd_def.into())
1332 .insert("forget".to_owned(), forget_cmd_def.into())
1333 .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
1334 .insert("list".to_owned(), list_cmd_def.into())
1335 .insert("prune".to_owned(), prune_cmd_def.into())
1336 .insert("download".to_owned(), download_cmd_def.into())
1337 .insert("restore".to_owned(), restore_cmd_def.into())
1338 .insert("snapshots".to_owned(), snapshots_cmd_def.into())
1339 .insert("key".to_owned(), key_mgmt_cli().into());
1340
1341 hyper::rt::run(futures::future::lazy(move || {
1342 run_cli_command(cmd_def.into());
1343 Ok(())
1344 }));
1345
1346 }