]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-backup-client.rs
add pxar.1 manual page
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
CommitLineData
826f309b 1//#[macro_use]
fe0e04c6 2extern crate proxmox_backup;
ff5d3707 3
4use failure::*;
70235f72
CE
5use nix::unistd::{fork, ForkResult, pipe};
6use std::os::unix::io::RawFd;
fa5d6977 7use chrono::{Local, Utc, TimeZone};
e9c9409a 8use std::path::{Path, PathBuf};
2eeaacb9 9use std::collections::{HashSet, HashMap};
70235f72 10use std::ffi::OsStr;
bb19af73 11use std::io::{Write, Seek, SeekFrom};
2761d6a4
DM
12use std::os::unix::fs::OpenOptionsExt;
13
e18a6c9e 14use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
ff5d3707 15
fe0e04c6 16use proxmox_backup::tools;
4de0e142 17use proxmox_backup::cli::*;
bbf9e7e9 18use proxmox_backup::api2::types::*;
ef2f2efb 19use proxmox_backup::api_schema::*;
dc9a007b 20use proxmox_backup::api_schema::router::*;
151c6ce2 21use proxmox_backup::client::*;
247cdbce 22use proxmox_backup::backup::*;
7926a3a1 23use proxmox_backup::pxar::{ self, catalog::* };
86eda3eb 24
fe0e04c6
DM
25//use proxmox_backup::backup::image_index::*;
26//use proxmox_backup::config::datastore;
8968258b 27//use proxmox_backup::pxar::encoder::*;
728797d0 28//use proxmox_backup::backup::datastore::*;
23bb8780 29
f5f13ebc 30use serde_json::{json, Value};
1c0472e8 31//use hyper::Body;
2761d6a4 32use std::sync::{Arc, Mutex};
ae0be2dd 33use regex::Regex;
d0a03d40 34use xdg::BaseDirectories;
ae0be2dd
DM
35
36use lazy_static::lazy_static;
5a2df000 37use futures::*;
c4ff3dce 38use tokio::sync::mpsc;
ae0be2dd
DM
39
lazy_static! {
    // Matches a backup specification "<archive-name>.<ext>:<path>", where
    // <ext> must be one of the supported archive types (pxar|img|conf|log).
    // Capture 1 = target name (including extension), capture 2 = local path.
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$").unwrap();

    // Shared schema for "repository" string parameters of the CLI commands;
    // format is validated against BACKUP_REPO_URL.
    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
33d64b81 50
d0a03d40 51
2665cef7
DM
/// Return the default backup repository taken from the `PBS_REPOSITORY`
/// environment variable, or `None` when the variable is not set (or not
/// valid unicode).
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
55
56fn extract_repository_from_value(
57 param: &Value,
58) -> Result<BackupRepository, Error> {
59
60 let repo_url = param["repository"]
61 .as_str()
62 .map(String::from)
63 .or_else(get_default_repository)
64 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
65
66 let repo: BackupRepository = repo_url.parse()?;
67
68 Ok(repo)
69}
70
71fn extract_repository_from_map(
72 param: &HashMap<String, String>,
73) -> Option<BackupRepository> {
74
75 param.get("repository")
76 .map(String::from)
77 .or_else(get_default_repository)
78 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
79}
80
d0a03d40
DM
81fn record_repository(repo: &BackupRepository) {
82
83 let base = match BaseDirectories::with_prefix("proxmox-backup") {
84 Ok(v) => v,
85 _ => return,
86 };
87
88 // usually $HOME/.cache/proxmox-backup/repo-list
89 let path = match base.place_cache_file("repo-list") {
90 Ok(v) => v,
91 _ => return,
92 };
93
11377a47 94 let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
95
96 let repo = repo.to_string();
97
98 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
99
100 let mut map = serde_json::map::Map::new();
101
102 loop {
103 let mut max_used = 0;
104 let mut max_repo = None;
105 for (repo, count) in data.as_object().unwrap() {
106 if map.contains_key(repo) { continue; }
107 if let Some(count) = count.as_i64() {
108 if count > max_used {
109 max_used = count;
110 max_repo = Some(repo);
111 }
112 }
113 }
114 if let Some(repo) = max_repo {
115 map.insert(repo.to_owned(), json!(max_used));
116 } else {
117 break;
118 }
119 if map.len() > 10 { // store max. 10 repos
120 break;
121 }
122 }
123
124 let new_data = json!(map);
125
e18a6c9e 126 let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
d0a03d40
DM
127}
128
49811347 129fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
d0a03d40
DM
130
131 let mut result = vec![];
132
133 let base = match BaseDirectories::with_prefix("proxmox-backup") {
134 Ok(v) => v,
135 _ => return result,
136 };
137
138 // usually $HOME/.cache/proxmox-backup/repo-list
139 let path = match base.place_cache_file("repo-list") {
140 Ok(v) => v,
141 _ => return result,
142 };
143
11377a47 144 let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
145
146 if let Some(map) = data.as_object() {
49811347 147 for (repo, _count) in map {
d0a03d40
DM
148 result.push(repo.to_owned());
149 }
150 }
151
152 result
153}
154
e9722f8b 155async fn backup_directory<P: AsRef<Path>>(
cf9271e2 156 client: &BackupWriter,
17d6979a 157 dir_path: P,
247cdbce 158 archive_name: &str,
36898ffc 159 chunk_size: Option<usize>,
2eeaacb9 160 device_set: Option<HashSet<u64>>,
219ef0e6 161 verbose: bool,
5b72c9b4 162 skip_lost_and_found: bool,
f98ac774 163 crypt_config: Option<Arc<CryptConfig>>,
bf6e3217 164 catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
2c3891d1 165) -> Result<BackupStats, Error> {
33d64b81 166
2761d6a4 167 let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
e9722f8b 168 let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
ff3d3100 169
e9722f8b 170 let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
5e7a09be 171
c4ff3dce 172 let stream = rx
e9722f8b 173 .map_err(Error::from);
17d6979a 174
c4ff3dce 175 // spawn chunker inside a separate task so that it can run parallel
e9722f8b
WB
176 tokio::spawn(async move {
177 let _ = tx.send_all(&mut chunk_stream).await;
178 });
17d6979a 179
e9722f8b
WB
180 let stats = client
181 .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
182 .await?;
bcd879cf 183
2c3891d1 184 Ok(stats)
bcd879cf
DM
185}
186
e9722f8b 187async fn backup_image<P: AsRef<Path>>(
cf9271e2 188 client: &BackupWriter,
6af905c1
DM
189 image_path: P,
190 archive_name: &str,
191 image_size: u64,
36898ffc 192 chunk_size: Option<usize>,
1c0472e8 193 _verbose: bool,
f98ac774 194 crypt_config: Option<Arc<CryptConfig>>,
2c3891d1 195) -> Result<BackupStats, Error> {
6af905c1 196
6af905c1
DM
197 let path = image_path.as_ref().to_owned();
198
e9722f8b 199 let file = tokio::fs::File::open(path).await?;
6af905c1
DM
200
201 let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
202 .map_err(Error::from);
203
36898ffc 204 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
6af905c1 205
e9722f8b
WB
206 let stats = client
207 .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
208 .await?;
6af905c1 209
2c3891d1 210 Ok(stats)
6af905c1
DM
211}
212
52c171e4
DM
/// Strip the server-side index/blob extension (".didx", ".fidx" or
/// ".blob") from an archive file name for display purposes.
///
/// A name without one of these extensions is returned unchanged
/// (should not happen for server-provided names).
///
/// Note: the name keeps an upstream typo ("expenstion") because callers
/// throughout this file reference it.
fn strip_server_file_expenstion(name: &str) -> String {

    const SERVER_EXTENSIONS: [&str; 3] = [".didx", ".fidx", ".blob"];

    for ext in &SERVER_EXTENSIONS {
        if name.ends_with(ext) {
            return name[..name.len() - ext.len()].to_owned();
        }
    }

    name.to_owned()
}
221
812c6f87
DM
/// CLI command: list the backup groups of a datastore
/// (GET api2/json/admin/datastore/{store}/groups).
///
/// With `output-format` == "text" (the default) a table is printed to
/// stdout; any other format is delegated to `format_and_print_result`.
fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = async_main(async move {
        client.get(&path, None).await
    })?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    // sort by backup type first, then by backup id
    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    // collects the JSON records for non-text output formats
    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        // strip server-side extensions (.didx/.fidx/.blob) for display
        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
299
184f17af
DM
/// CLI command: list the snapshots of a datastore
/// (GET api2/json/admin/datastore/{store}/snapshots), optionally
/// restricted to one backup group via the "group" parameter.
///
/// With `output-format` == "text" (the default) one line per snapshot is
/// printed; other formats are delegated to `format_and_print_result`.
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    // optional filter: only snapshots of the given group
    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = async_main(async move {
        client.get(&path, Some(args)).await
    })?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    // collects the JSON records for non-text output formats
    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        // strip server-side extensions (.didx/.fidx/.blob) for display
        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();

        if output_format == "text" {
            // "size" may be missing in the server response; print "-" then
            let size_str = if let Some(size) = item["size"].as_u64() {
                size.to_string()
            } else {
                String::from("-")
            };
            println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
        } else {
            let mut data = json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            });
            if let Some(size) = item["size"].as_u64() {
                data["size"] = size.into();
            }
            result.push(data);
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}
369
6f62c924
DM
370fn forget_snapshots(
371 param: Value,
372 _info: &ApiMethod,
dd5495d6 373 _rpcenv: &mut dyn RpcEnvironment,
6f62c924
DM
374) -> Result<Value, Error> {
375
2665cef7 376 let repo = extract_repository_from_value(&param)?;
6f62c924
DM
377
378 let path = tools::required_string_param(&param, "snapshot")?;
379 let snapshot = BackupDir::parse(path)?;
380
cc2ce4a9 381 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
6f62c924 382
9e391bb7 383 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
6f62c924 384
e9722f8b
WB
385 let result = async_main(async move {
386 client.delete(&path, Some(json!({
387 "backup-type": snapshot.group().backup_type(),
388 "backup-id": snapshot.group().backup_id(),
389 "backup-time": snapshot.backup_time().timestamp(),
390 }))).await
391 })?;
6f62c924 392
d0a03d40
DM
393 record_repository(&repo);
394
6f62c924
DM
395 Ok(result)
396}
397
e240d8be
DM
398fn api_login(
399 param: Value,
400 _info: &ApiMethod,
401 _rpcenv: &mut dyn RpcEnvironment,
402) -> Result<Value, Error> {
403
404 let repo = extract_repository_from_value(&param)?;
405
cc2ce4a9 406 let client = HttpClient::new(repo.host(), repo.user(), None)?;
e9722f8b 407 async_main(async move { client.login().await })?;
e240d8be
DM
408
409 record_repository(&repo);
410
411 Ok(Value::Null)
412}
413
414fn api_logout(
415 param: Value,
416 _info: &ApiMethod,
417 _rpcenv: &mut dyn RpcEnvironment,
418) -> Result<Value, Error> {
419
420 let repo = extract_repository_from_value(&param)?;
421
422 delete_ticket_info(repo.host(), repo.user())?;
423
424 Ok(Value::Null)
425}
426
9049a8cf
DM
/// CLI command: download the catalog of a snapshot and print its
/// contents to stdout.
///
/// The catalog is a dynamically chunked archive; it is streamed into an
/// anonymous temp file (O_TMPFILE) first because CatalogReader needs a
/// seekable input.
fn dump_catalog(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            // note: helper name carries an upstream typo ("decrtypt")
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    async_main(async move {
        let client = BackupReader::start(
            client,
            crypt_config.clone(),
            repo.store(),
            &snapshot.group().backup_type(),
            &snapshot.group().backup_id(),
            snapshot.backup_time(),
            true,
        ).await?;

        let manifest = client.download_manifest().await?;

        let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

        // cache the most frequently referenced chunks in the reader
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        // unnamed temporary file; disappears automatically on close
        let mut catalogfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        std::io::copy(&mut reader, &mut catalogfile)
            .map_err(|err| format_err!("unable to download catalog - {}", err))?;

        // rewind so CatalogReader parses from the beginning
        catalogfile.seek(SeekFrom::Start(0))?;

        let mut catalog_reader = CatalogReader::new(catalogfile);

        catalog_reader.dump()?;

        record_repository(&repo);

        Ok::<(), Error>(())
    })?;

    Ok(Value::Null)
}
493
52c171e4
DM
494fn list_snapshot_files(
495 param: Value,
496 _info: &ApiMethod,
497 _rpcenv: &mut dyn RpcEnvironment,
498) -> Result<Value, Error> {
499
500 let repo = extract_repository_from_value(&param)?;
501
502 let path = tools::required_string_param(&param, "snapshot")?;
503 let snapshot = BackupDir::parse(path)?;
504
505 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
506
cc2ce4a9 507 let client = HttpClient::new(repo.host(), repo.user(), None)?;
52c171e4
DM
508
509 let path = format!("api2/json/admin/datastore/{}/files", repo.store());
510
e9722f8b
WB
511 let mut result = async_main(async move {
512 client.get(&path, Some(json!({
513 "backup-type": snapshot.group().backup_type(),
514 "backup-id": snapshot.group().backup_id(),
515 "backup-time": snapshot.backup_time().timestamp(),
516 }))).await
517 })?;
52c171e4
DM
518
519 record_repository(&repo);
520
8c70e3eb 521 let list: Value = result["data"].take();
52c171e4
DM
522
523 if output_format == "text" {
8c70e3eb
DM
524 for item in list.as_array().unwrap().iter() {
525 println!(
526 "{} {}",
527 strip_server_file_expenstion(item["filename"].as_str().unwrap()),
528 item["size"].as_u64().unwrap_or(0),
529 );
52c171e4
DM
530 }
531 } else {
8c70e3eb 532 format_and_print_result(&list, &output_format);
52c171e4
DM
533 }
534
535 Ok(Value::Null)
536}
537
8cc0d6af
DM
538fn start_garbage_collection(
539 param: Value,
540 _info: &ApiMethod,
dd5495d6 541 _rpcenv: &mut dyn RpcEnvironment,
8cc0d6af
DM
542) -> Result<Value, Error> {
543
2665cef7 544 let repo = extract_repository_from_value(&param)?;
8cc0d6af 545
cc2ce4a9 546 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
8cc0d6af 547
d0a03d40 548 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
8cc0d6af 549
e9722f8b 550 let result = async_main(async move { client.post(&path, None).await })?;
8cc0d6af 551
d0a03d40
DM
552 record_repository(&repo);
553
8cc0d6af
DM
554 Ok(result)
555}
33d64b81 556
ae0be2dd
DM
557fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
558
559 if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
560 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
561 }
562 bail!("unable to parse directory specification '{}'", value);
563}
564
bf6e3217
DM
/// Set up the background upload of the backup catalog.
///
/// Returns a `CatalogWriter` (behind Arc<Mutex<..>>) that the caller
/// feeds while archiving, plus a oneshot receiver that eventually yields
/// the upload statistics. Catalog bytes flow: CatalogWriter -> mpsc
/// channel -> ChunkStream -> upload_stream, driven by a spawned task.
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<SenderWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
    let catalog_stream = catalog_rx.map_err(Error::from);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));

    // used to hand the final upload result back to the caller
    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            // abort the whole backup session on catalog failure
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
598
6049b71f
DM
/// CLI command: run a backup.
///
/// Parses the backupspec list ("<target>.<ext>:<path>"), classifies every
/// entry by extension (pxar directory, img image, conf, log), optionally
/// sets up encryption from `keyfile`, then uploads all archives plus the
/// catalog and the manifest (index.json) within one backup session.
fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    // chunk-size parameter is given in KiB
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    // None => cross all file systems; Some(set) => only these device ids
    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    // (kind, local filename, server target name, size) per backupspec
    let mut upload_list = vec![];

    // local tag describing what kind of upload each entry is
    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

    let mut upload_catalog = false;

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        let extension = target.rsplit('.').next()
            .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
                // only pxar archives contribute catalog entries
                upload_catalog = true;
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    // explicit backup-time wins, otherwise "now"
    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    // if a key file is given, derive the crypt config; additionally, if a
    // master public key exists, RSA-encrypt the key for later recovery
    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            // note: helper name carries an upstream typo ("decrtypt")
            let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    async_main(async move {
        let client = BackupWriter::start(
            client,
            repo.store(),
            backup_type,
            &backup_id,
            backup_time,
            verbose,
        ).await?;

        let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
        let mut manifest = BackupManifest::new(snapshot);

        // catalog upload runs concurrently in a background task
        let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;

        for (backup_type, filename, target, size) in upload_list {
            match backup_type {
                BackupType::CONFIG => {
                    println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                    println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = client
                        .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                        .await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
                BackupType::PXAR => {
                    println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                    // each pxar archive becomes one top-level catalog directory
                    catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                    let stats = backup_directory(
                        &client,
                        &filename,
                        &target,
                        chunk_size_opt,
                        devices.clone(),
                        verbose,
                        skip_lost_and_found,
                        crypt_config.clone(),
                        catalog.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                    catalog.lock().unwrap().end_directory()?;
                }
                BackupType::IMAGE => {
                    println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                    let stats = backup_image(
                        &client,
                        &filename,
                        &target,
                        size,
                        chunk_size_opt,
                        verbose,
                        crypt_config.clone(),
                    ).await?;
                    manifest.add_file(target, stats.size, stats.csum);
                }
            }
        }

        // finalize and upload catalog
        if upload_catalog {
            // all other Arc clones must be gone by now (uploads finished)
            let mutex = Arc::try_unwrap(catalog)
                .map_err(|_| format_err!("unable to get catalog (still used)"))?;
            let mut catalog = mutex.into_inner().unwrap();

            catalog.finish()?;

            drop(catalog); // close upload stream

            let stats = catalog_result_rx.await??;

            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
        }

        // store the RSA-encrypted copy of the encryption key alongside the
        // backup, so the master private key can recover it
        if let Some(rsa_encrypted_key) = rsa_encrypted_key {
            let target = "rsa-encrypted.key";
            println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
            let stats = client
                .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
                .await?;
            manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);

            // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
            /*
            let mut buffer2 = vec![0u8; rsa.size() as usize];
            let pem_data = file_get_contents("master-private.pem")?;
            let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
            let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
            println!("TEST {} {:?}", len, buffer2);
            */
        }

        // create manifest (index.json)
        let manifest = manifest.into_json();

        println!("Upload index.json to '{:?}'", repo);
        let manifest = serde_json::to_string_pretty(&manifest)?.into();
        client
            .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
            .await?;

        client.finish().await?;

        let end_time = Local::now();
        let elapsed = end_time.signed_duration_since(start_time);
        println!("Duration: {}", elapsed);

        println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

        Ok(Value::Null)
    })
}
853
d0a03d40 854fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
f98ea63d
DM
855
856 let mut result = vec![];
857
858 let data: Vec<&str> = arg.splitn(2, ':').collect();
859
bff11030 860 if data.len() != 2 {
8968258b
DM
861 result.push(String::from("root.pxar:/"));
862 result.push(String::from("etc.pxar:/etc"));
bff11030
DM
863 return result;
864 }
f98ea63d 865
496a6784 866 let files = tools::complete_file_name(data[1], param);
f98ea63d
DM
867
868 for file in files {
869 result.push(format!("{}:{}", data[0], file));
870 }
871
872 result
ff5d3707 873}
874
9f912493
DM
875fn restore(
876 param: Value,
877 _info: &ApiMethod,
dd5495d6 878 _rpcenv: &mut dyn RpcEnvironment,
9f912493 879) -> Result<Value, Error> {
e9722f8b
WB
880 async_main(restore_do(param))
881}
9f912493 882
88892ea8
DM
/// Restore a fixed-index image by writing all its chunks, in order, to
/// `writer`.
///
/// With `verbose` set, progress (percent, bytes, elapsed seconds) is
/// reported on stderr; a summary line is always printed at the end.
fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    // cache the most frequently referenced chunks in the reader
    let most_used = index.find_most_used_chunks(8);

    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader
    let mut per = 0;   // last progress percentage reported
    let mut bytes = 0; // total bytes written so far
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest)?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            // only emit a line when the integer percentage changes
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                    next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );


    Ok(())
}
927
e9722f8b 928async fn restore_do(param: Value) -> Result<Value, Error> {
2665cef7 929 let repo = extract_repository_from_value(&param)?;
9f912493 930
86eda3eb
DM
931 let verbose = param["verbose"].as_bool().unwrap_or(false);
932
46d5aa0a
DM
933 let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
934
d5c34d98
DM
935 let archive_name = tools::required_string_param(&param, "archive-name")?;
936
cc2ce4a9 937 let client = HttpClient::new(repo.host(), repo.user(), None)?;
d0a03d40 938
d0a03d40 939 record_repository(&repo);
d5c34d98 940
9f912493 941 let path = tools::required_string_param(&param, "snapshot")?;
9f912493 942
86eda3eb 943 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
d5c34d98 944 let group = BackupGroup::parse(path)?;
9f912493 945
9e391bb7
DM
946 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
947 let result = client.get(&path, Some(json!({
d5c34d98
DM
948 "backup-type": group.backup_type(),
949 "backup-id": group.backup_id(),
e9722f8b 950 }))).await?;
9f912493 951
d5c34d98 952 let list = result["data"].as_array().unwrap();
11377a47 953 if list.is_empty() {
d5c34d98
DM
954 bail!("backup group '{}' does not contain any snapshots:", path);
955 }
9f912493 956
86eda3eb 957 let epoch = list[0]["backup-time"].as_i64().unwrap();
fa5d6977 958 let backup_time = Utc.timestamp(epoch, 0);
86eda3eb 959 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
d5c34d98
DM
960 } else {
961 let snapshot = BackupDir::parse(path)?;
86eda3eb
DM
962 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
963 };
9f912493 964
d5c34d98 965 let target = tools::required_string_param(&param, "target")?;
bf125261 966 let target = if target == "-" { None } else { Some(target) };
2ae7d196 967
11377a47 968 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
2ae7d196 969
86eda3eb
DM
970 let crypt_config = match keyfile {
971 None => None,
972 Some(path) => {
a8f10f84 973 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
86eda3eb
DM
974 Some(Arc::new(CryptConfig::new(key)?))
975 }
976 };
d5c34d98 977
afb4cd28
DM
978 let server_archive_name = if archive_name.ends_with(".pxar") {
979 format!("{}.didx", archive_name)
980 } else if archive_name.ends_with(".img") {
981 format!("{}.fidx", archive_name)
982 } else {
f8100e96 983 format!("{}.blob", archive_name)
afb4cd28 984 };
9f912493 985
296c50ba
DM
986 let client = BackupReader::start(
987 client,
988 crypt_config.clone(),
989 repo.store(),
990 &backup_type,
991 &backup_id,
992 backup_time,
993 true,
994 ).await?;
86eda3eb 995
f06b820a 996 let manifest = client.download_manifest().await?;
02fcf372 997
ad6e5a6f 998 if server_archive_name == MANIFEST_BLOB_NAME {
f06b820a 999 let backup_index_data = manifest.into_json().to_string();
02fcf372 1000 if let Some(target) = target {
296c50ba 1001 file_set_contents(target, backup_index_data.as_bytes(), None)?;
02fcf372
DM
1002 } else {
1003 let stdout = std::io::stdout();
1004 let mut writer = stdout.lock();
296c50ba 1005 writer.write_all(backup_index_data.as_bytes())
02fcf372
DM
1006 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1007 }
1008
1009 } else if server_archive_name.ends_with(".blob") {
d2267b11 1010
bb19af73 1011 let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
f8100e96 1012
bf125261 1013 if let Some(target) = target {
0d986280
DM
1014 let mut writer = std::fs::OpenOptions::new()
1015 .write(true)
1016 .create(true)
1017 .create_new(true)
1018 .open(target)
1019 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
1020 std::io::copy(&mut reader, &mut writer)?;
bf125261
DM
1021 } else {
1022 let stdout = std::io::stdout();
1023 let mut writer = stdout.lock();
0d986280 1024 std::io::copy(&mut reader, &mut writer)
bf125261
DM
1025 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1026 }
f8100e96
DM
1027
1028 } else if server_archive_name.ends_with(".didx") {
86eda3eb 1029
c3d84a22 1030 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
df65bd3d 1031
f4bf7dfc
DM
1032 let most_used = index.find_most_used_chunks(8);
1033
1034 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1035
afb4cd28 1036 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
86eda3eb 1037
bf125261 1038 if let Some(target) = target {
86eda3eb 1039
47651f95 1040 let feature_flags = pxar::flags::DEFAULT;
bf125261
DM
1041 let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
1042 if verbose {
fd04ca7a 1043 eprintln!("{:?}", path);
bf125261
DM
1044 }
1045 Ok(())
1046 });
6a879109
CE
1047 decoder.set_allow_existing_dirs(allow_existing_dirs);
1048
fa7e957c 1049 decoder.restore(Path::new(target), &Vec::new())?;
bf125261 1050 } else {
88892ea8
DM
1051 let mut writer = std::fs::OpenOptions::new()
1052 .write(true)
1053 .open("/dev/stdout")
1054 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;
afb4cd28 1055
bf125261
DM
1056 std::io::copy(&mut reader, &mut writer)
1057 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1058 }
afb4cd28 1059 } else if server_archive_name.ends_with(".fidx") {
afb4cd28 1060
72050500 1061 let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
df65bd3d 1062
88892ea8
DM
1063 let mut writer = if let Some(target) = target {
1064 std::fs::OpenOptions::new()
bf125261
DM
1065 .write(true)
1066 .create(true)
1067 .create_new(true)
1068 .open(target)
88892ea8 1069 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
bf125261 1070 } else {
88892ea8
DM
1071 std::fs::OpenOptions::new()
1072 .write(true)
1073 .open("/dev/stdout")
1074 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
1075 };
afb4cd28 1076
fd04ca7a 1077 dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
88892ea8
DM
1078
1079 } else {
f8100e96 1080 bail!("unknown archive file extension (expected .pxar of .img)");
3031e44c 1081 }
fef44d4f
DM
1082
1083 Ok(Value::Null)
45db6f89
DM
1084}
1085
ec34f7eb
DM
1086fn upload_log(
1087 param: Value,
1088 _info: &ApiMethod,
1089 _rpcenv: &mut dyn RpcEnvironment,
1090) -> Result<Value, Error> {
1091
1092 let logfile = tools::required_string_param(&param, "logfile")?;
1093 let repo = extract_repository_from_value(&param)?;
1094
1095 let snapshot = tools::required_string_param(&param, "snapshot")?;
1096 let snapshot = BackupDir::parse(snapshot)?;
1097
cc2ce4a9 1098 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
ec34f7eb 1099
11377a47 1100 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
ec34f7eb
DM
1101
1102 let crypt_config = match keyfile {
1103 None => None,
1104 Some(path) => {
a8f10f84 1105 let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
ec34f7eb 1106 let crypt_config = CryptConfig::new(key)?;
9025312a 1107 Some(Arc::new(crypt_config))
ec34f7eb
DM
1108 }
1109 };
1110
e18a6c9e 1111 let data = file_get_contents(logfile)?;
ec34f7eb 1112
7123ff7d 1113 let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
ec34f7eb
DM
1114
1115 let raw_data = blob.into_inner();
1116
1117 let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
1118
1119 let args = json!({
1120 "backup-type": snapshot.group().backup_type(),
1121 "backup-id": snapshot.group().backup_id(),
1122 "backup-time": snapshot.backup_time().timestamp(),
1123 });
1124
1125 let body = hyper::Body::from(raw_data);
1126
e9722f8b
WB
1127 async_main(async move {
1128 client.upload("application/octet-stream", body, &path, Some(args)).await
1129 })
ec34f7eb
DM
1130}
1131
83b7db02 1132fn prune(
ea7a7ef2 1133 mut param: Value,
83b7db02 1134 _info: &ApiMethod,
dd5495d6 1135 _rpcenv: &mut dyn RpcEnvironment,
83b7db02
DM
1136) -> Result<Value, Error> {
1137
2665cef7 1138 let repo = extract_repository_from_value(&param)?;
83b7db02 1139
cc2ce4a9 1140 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
83b7db02 1141
d0a03d40 1142 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
83b7db02 1143
9fdc3ef4
DM
1144 let group = tools::required_string_param(&param, "group")?;
1145 let group = BackupGroup::parse(group)?;
1146
ea7a7ef2
DM
1147 param.as_object_mut().unwrap().remove("repository");
1148 param.as_object_mut().unwrap().remove("group");
1149
1150 param["backup-type"] = group.backup_type().into();
1151 param["backup-id"] = group.backup_id().into();
83b7db02 1152
e9722f8b 1153 let _result = async_main(async move { client.post(&path, Some(param)).await })?;
83b7db02 1154
d0a03d40
DM
1155 record_repository(&repo);
1156
43a406fd 1157 Ok(Value::Null)
83b7db02
DM
1158}
1159
34a816cc
DM
1160fn status(
1161 param: Value,
1162 _info: &ApiMethod,
1163 _rpcenv: &mut dyn RpcEnvironment,
1164) -> Result<Value, Error> {
1165
1166 let repo = extract_repository_from_value(&param)?;
1167
1168 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
1169
cc2ce4a9 1170 let client = HttpClient::new(repo.host(), repo.user(), None)?;
34a816cc
DM
1171
1172 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
1173
e9722f8b 1174 let result = async_main(async move { client.get(&path, None).await })?;
34a816cc
DM
1175 let data = &result["data"];
1176
1177 record_repository(&repo);
1178
1179 if output_format == "text" {
1180 let total = data["total"].as_u64().unwrap();
1181 let used = data["used"].as_u64().unwrap();
1182 let avail = data["avail"].as_u64().unwrap();
1183 let roundup = total/200;
1184
1185 println!(
1186 "total: {} used: {} ({} %) available: {}",
1187 total,
1188 used,
1189 ((used+roundup)*100)/total,
1190 avail,
1191 );
1192 } else {
f6ede796 1193 format_and_print_result(data, &output_format);
34a816cc
DM
1194 }
1195
1196 Ok(Value::Null)
1197}
1198
5a2df000 1199// like get, but simply ignore errors and return Null instead
e9722f8b 1200async fn try_get(repo: &BackupRepository, url: &str) -> Value {
024f11bb 1201
cc2ce4a9 1202 let client = match HttpClient::new(repo.host(), repo.user(), None) {
45cdce06
DM
1203 Ok(v) => v,
1204 _ => return Value::Null,
1205 };
b2388518 1206
e9722f8b 1207 let mut resp = match client.get(url, None).await {
b2388518
DM
1208 Ok(v) => v,
1209 _ => return Value::Null,
1210 };
1211
1212 if let Some(map) = resp.as_object_mut() {
1213 if let Some(data) = map.remove("data") {
1214 return data;
1215 }
1216 }
1217 Value::Null
1218}
1219
b2388518 1220fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1221 async_main(async { complete_backup_group_do(param).await })
1222}
1223
1224async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
024f11bb 1225
b2388518
DM
1226 let mut result = vec![];
1227
2665cef7 1228 let repo = match extract_repository_from_map(param) {
b2388518 1229 Some(v) => v,
024f11bb
DM
1230 _ => return result,
1231 };
1232
b2388518
DM
1233 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
1234
e9722f8b 1235 let data = try_get(&repo, &path).await;
b2388518
DM
1236
1237 if let Some(list) = data.as_array() {
024f11bb 1238 for item in list {
98f0b972
DM
1239 if let (Some(backup_id), Some(backup_type)) =
1240 (item["backup-id"].as_str(), item["backup-type"].as_str())
1241 {
1242 result.push(format!("{}/{}", backup_type, backup_id));
024f11bb
DM
1243 }
1244 }
1245 }
1246
1247 result
1248}
1249
b2388518 1250fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1251 async_main(async { complete_group_or_snapshot_do(arg, param).await })
1252}
1253
1254async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
b2388518 1255
b2388518 1256 if arg.matches('/').count() < 2 {
e9722f8b 1257 let groups = complete_backup_group_do(param).await;
543a260f 1258 let mut result = vec![];
b2388518
DM
1259 for group in groups {
1260 result.push(group.to_string());
1261 result.push(format!("{}/", group));
1262 }
1263 return result;
1264 }
1265
e9722f8b 1266 complete_backup_snapshot_do(param).await
543a260f 1267}
b2388518 1268
3fb53e07 1269fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1270 async_main(async { complete_backup_snapshot_do(param).await })
1271}
1272
1273async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
543a260f
DM
1274
1275 let mut result = vec![];
1276
1277 let repo = match extract_repository_from_map(param) {
1278 Some(v) => v,
1279 _ => return result,
1280 };
1281
1282 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
b2388518 1283
e9722f8b 1284 let data = try_get(&repo, &path).await;
b2388518
DM
1285
1286 if let Some(list) = data.as_array() {
1287 for item in list {
1288 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
1289 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
1290 {
1291 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
1292 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
1293 }
1294 }
1295 }
1296
1297 result
1298}
1299
45db6f89 1300fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1301 async_main(async { complete_server_file_name_do(param).await })
1302}
1303
1304async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
08dc340a
DM
1305
1306 let mut result = vec![];
1307
2665cef7 1308 let repo = match extract_repository_from_map(param) {
08dc340a
DM
1309 Some(v) => v,
1310 _ => return result,
1311 };
1312
1313 let snapshot = match param.get("snapshot") {
1314 Some(path) => {
1315 match BackupDir::parse(path) {
1316 Ok(v) => v,
1317 _ => return result,
1318 }
1319 }
1320 _ => return result,
1321 };
1322
1323 let query = tools::json_object_to_query(json!({
1324 "backup-type": snapshot.group().backup_type(),
1325 "backup-id": snapshot.group().backup_id(),
1326 "backup-time": snapshot.backup_time().timestamp(),
1327 })).unwrap();
1328
1329 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
1330
e9722f8b 1331 let data = try_get(&repo, &path).await;
08dc340a
DM
1332
1333 if let Some(list) = data.as_array() {
1334 for item in list {
c4f025eb 1335 if let Some(filename) = item["filename"].as_str() {
08dc340a
DM
1336 result.push(filename.to_owned());
1337 }
1338 }
1339 }
1340
45db6f89
DM
1341 result
1342}
1343
1344fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
52c171e4 1345 complete_server_file_name(arg, param)
e9722f8b
WB
1346 .iter()
1347 .map(|v| strip_server_file_expenstion(&v))
1348 .collect()
08dc340a
DM
1349}
1350
49811347
DM
/// Shell completion helper: power-of-two chunk sizes from 64 to 4096 (KB).
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    // 64 << 0 .. 64 << 6  ==  64, 128, ..., 4096
    (0..=6u32).map(|shift| (64u64 << shift).to_string()).collect()
}
1364
826f309b 1365fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
ff5d3707 1366
f2401311
DM
1367 // fixme: implement other input methods
1368
1369 use std::env::VarError::*;
1370 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
826f309b 1371 Ok(p) => return Ok(p.as_bytes().to_vec()),
f2401311
DM
1372 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
1373 Err(NotPresent) => {
1374 // Try another method
1375 }
1376 }
1377
1378 // If we're on a TTY, query the user for a password
1379 if crate::tools::tty::stdin_isatty() {
826f309b 1380 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
f2401311
DM
1381 }
1382
1383 bail!("no password input mechanism available");
1384}
1385
ac716234
DM
1386fn key_create(
1387 param: Value,
1388 _info: &ApiMethod,
1389 _rpcenv: &mut dyn RpcEnvironment,
1390) -> Result<Value, Error> {
1391
9b06db45
DM
1392 let path = tools::required_string_param(&param, "path")?;
1393 let path = PathBuf::from(path);
ac716234 1394
181f097a 1395 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
ac716234
DM
1396
1397 let key = proxmox::sys::linux::random_data(32)?;
1398
181f097a
DM
1399 if kdf == "scrypt" {
1400 // always read passphrase from tty
1401 if !crate::tools::tty::stdin_isatty() {
1402 bail!("unable to read passphrase - no tty");
1403 }
ac716234 1404
181f097a
DM
1405 let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
1406
ab44acff 1407 let key_config = encrypt_key_with_passphrase(&key, &password)?;
37c5a175 1408
ab44acff 1409 store_key_config(&path, false, key_config)?;
181f097a
DM
1410
1411 Ok(Value::Null)
1412 } else if kdf == "none" {
1413 let created = Local.timestamp(Local::now().timestamp(), 0);
1414
1415 store_key_config(&path, false, KeyConfig {
1416 kdf: None,
1417 created,
ab44acff 1418 modified: created,
181f097a
DM
1419 data: key,
1420 })?;
1421
1422 Ok(Value::Null)
1423 } else {
1424 unreachable!();
1425 }
ac716234
DM
1426}
1427
9f46c7de
DM
1428fn master_pubkey_path() -> Result<PathBuf, Error> {
1429 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1430
1431 // usually $HOME/.config/proxmox-backup/master-public.pem
1432 let path = base.place_config_file("master-public.pem")?;
1433
1434 Ok(path)
1435}
1436
3ea8bfc9
DM
1437fn key_import_master_pubkey(
1438 param: Value,
1439 _info: &ApiMethod,
1440 _rpcenv: &mut dyn RpcEnvironment,
1441) -> Result<Value, Error> {
1442
1443 let path = tools::required_string_param(&param, "path")?;
1444 let path = PathBuf::from(path);
1445
e18a6c9e 1446 let pem_data = file_get_contents(&path)?;
3ea8bfc9
DM
1447
1448 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1449 bail!("Unable to decode PEM data - {}", err);
1450 }
1451
9f46c7de 1452 let target_path = master_pubkey_path()?;
3ea8bfc9 1453
e18a6c9e 1454 file_set_contents(&target_path, &pem_data, None)?;
3ea8bfc9
DM
1455
1456 println!("Imported public master key to {:?}", target_path);
1457
1458 Ok(Value::Null)
1459}
1460
37c5a175
DM
1461fn key_create_master_key(
1462 _param: Value,
1463 _info: &ApiMethod,
1464 _rpcenv: &mut dyn RpcEnvironment,
1465) -> Result<Value, Error> {
1466
1467 // we need a TTY to query the new password
1468 if !crate::tools::tty::stdin_isatty() {
1469 bail!("unable to create master key - no tty");
1470 }
1471
1472 let rsa = openssl::rsa::Rsa::generate(4096)?;
1473 let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
1474
1475 let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
1476 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
1477
1478 if new_pw != verify_pw {
1479 bail!("Password verification fail!");
1480 }
1481
1482 if new_pw.len() < 5 {
1483 bail!("Password is too short!");
1484 }
1485
1486 let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
1487 let filename_pub = "master-public.pem";
1488 println!("Writing public master key to {}", filename_pub);
e18a6c9e 1489 file_set_contents(filename_pub, pub_key.as_slice(), None)?;
37c5a175
DM
1490
1491 let cipher = openssl::symm::Cipher::aes_256_cbc();
1492 let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
1493
1494 let filename_priv = "master-private.pem";
1495 println!("Writing private master key to {}", filename_priv);
e18a6c9e 1496 file_set_contents(filename_priv, priv_key.as_slice(), None)?;
37c5a175
DM
1497
1498 Ok(Value::Null)
1499}
ac716234
DM
1500
1501fn key_change_passphrase(
1502 param: Value,
1503 _info: &ApiMethod,
1504 _rpcenv: &mut dyn RpcEnvironment,
1505) -> Result<Value, Error> {
1506
9b06db45
DM
1507 let path = tools::required_string_param(&param, "path")?;
1508 let path = PathBuf::from(path);
ac716234 1509
181f097a
DM
1510 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1511
ac716234
DM
1512 // we need a TTY to query the new password
1513 if !crate::tools::tty::stdin_isatty() {
1514 bail!("unable to change passphrase - no tty");
1515 }
1516
a8f10f84 1517 let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
ac716234 1518
181f097a 1519 if kdf == "scrypt" {
ac716234 1520
181f097a
DM
1521 let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
1522 let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
ac716234 1523
181f097a
DM
1524 if new_pw != verify_pw {
1525 bail!("Password verification fail!");
1526 }
1527
1528 if new_pw.len() < 5 {
1529 bail!("Password is too short!");
1530 }
ac716234 1531
ab44acff
DM
1532 let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
1533 new_key_config.created = created; // keep original value
1534
1535 store_key_config(&path, true, new_key_config)?;
ac716234 1536
181f097a
DM
1537 Ok(Value::Null)
1538 } else if kdf == "none" {
ab44acff 1539 let modified = Local.timestamp(Local::now().timestamp(), 0);
181f097a
DM
1540
1541 store_key_config(&path, true, KeyConfig {
1542 kdf: None,
ab44acff
DM
1543 created, // keep original value
1544 modified,
6d0983db 1545 data: key.to_vec(),
181f097a
DM
1546 })?;
1547
1548 Ok(Value::Null)
1549 } else {
1550 unreachable!();
1551 }
f2401311
DM
1552}
1553
1554fn key_mgmt_cli() -> CliCommandMap {
1555
181f097a
DM
1556 let kdf_schema: Arc<Schema> = Arc::new(
1557 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
1558 .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
1559 .default("scrypt")
1560 .into()
1561 );
1562
f2401311
DM
1563 let key_create_cmd_def = CliCommand::new(
1564 ApiMethod::new(
1565 key_create,
1566 ObjectSchema::new("Create a new encryption key.")
9b06db45 1567 .required("path", StringSchema::new("File system path."))
181f097a 1568 .optional("kdf", kdf_schema.clone())
f2401311 1569 ))
9b06db45
DM
1570 .arg_param(vec!["path"])
1571 .completion_cb("path", tools::complete_file_name);
f2401311 1572
ac716234
DM
1573 let key_change_passphrase_cmd_def = CliCommand::new(
1574 ApiMethod::new(
1575 key_change_passphrase,
1576 ObjectSchema::new("Change the passphrase required to decrypt the key.")
9b06db45 1577 .required("path", StringSchema::new("File system path."))
181f097a 1578 .optional("kdf", kdf_schema.clone())
9b06db45
DM
1579 ))
1580 .arg_param(vec!["path"])
1581 .completion_cb("path", tools::complete_file_name);
ac716234 1582
37c5a175
DM
1583 let key_create_master_key_cmd_def = CliCommand::new(
1584 ApiMethod::new(
1585 key_create_master_key,
1586 ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
1587 ));
1588
3ea8bfc9
DM
1589 let key_import_master_pubkey_cmd_def = CliCommand::new(
1590 ApiMethod::new(
1591 key_import_master_pubkey,
1592 ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
1593 .required("path", StringSchema::new("File system path."))
1594 ))
1595 .arg_param(vec!["path"])
1596 .completion_cb("path", tools::complete_file_name);
1597
11377a47 1598 CliCommandMap::new()
ac716234 1599 .insert("create".to_owned(), key_create_cmd_def.into())
37c5a175 1600 .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
3ea8bfc9 1601 .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
11377a47 1602 .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into())
f2401311
DM
1603}
1604
70235f72
CE
1605
1606fn mount(
1607 param: Value,
1608 _info: &ApiMethod,
1609 _rpcenv: &mut dyn RpcEnvironment,
1610) -> Result<Value, Error> {
1611 let verbose = param["verbose"].as_bool().unwrap_or(false);
1612 if verbose {
1613 // This will stay in foreground with debug output enabled as None is
1614 // passed for the RawFd.
1615 return async_main(mount_do(param, None));
1616 }
1617
1618 // Process should be deamonized.
1619 // Make sure to fork before the async runtime is instantiated to avoid troubles.
1620 let pipe = pipe()?;
1621 match fork() {
11377a47 1622 Ok(ForkResult::Parent { .. }) => {
70235f72
CE
1623 nix::unistd::close(pipe.1).unwrap();
1624 // Blocks the parent process until we are ready to go in the child
1625 let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
1626 Ok(Value::Null)
1627 }
1628 Ok(ForkResult::Child) => {
1629 nix::unistd::close(pipe.0).unwrap();
1630 nix::unistd::setsid().unwrap();
1631 async_main(mount_do(param, Some(pipe.1)))
1632 }
1633 Err(_) => bail!("failed to daemonize process"),
1634 }
1635}
1636
1637async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
1638 let repo = extract_repository_from_value(&param)?;
1639 let archive_name = tools::required_string_param(&param, "archive-name")?;
1640 let target = tools::required_string_param(&param, "target")?;
1641 let client = HttpClient::new(repo.host(), repo.user(), None)?;
1642
1643 record_repository(&repo);
1644
1645 let path = tools::required_string_param(&param, "snapshot")?;
1646 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
1647 let group = BackupGroup::parse(path)?;
1648
1649 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
1650 let result = client.get(&path, Some(json!({
1651 "backup-type": group.backup_type(),
1652 "backup-id": group.backup_id(),
1653 }))).await?;
1654
1655 let list = result["data"].as_array().unwrap();
11377a47 1656 if list.is_empty() {
70235f72
CE
1657 bail!("backup group '{}' does not contain any snapshots:", path);
1658 }
1659
1660 let epoch = list[0]["backup-time"].as_i64().unwrap();
1661 let backup_time = Utc.timestamp(epoch, 0);
1662 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
1663 } else {
1664 let snapshot = BackupDir::parse(path)?;
1665 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
1666 };
1667
11377a47 1668 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
70235f72
CE
1669 let crypt_config = match keyfile {
1670 None => None,
1671 Some(path) => {
a8f10f84 1672 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
70235f72
CE
1673 Some(Arc::new(CryptConfig::new(key)?))
1674 }
1675 };
1676
1677 let server_archive_name = if archive_name.ends_with(".pxar") {
1678 format!("{}.didx", archive_name)
1679 } else {
1680 bail!("Can only mount pxar archives.");
1681 };
1682
296c50ba
DM
1683 let client = BackupReader::start(
1684 client,
1685 crypt_config.clone(),
1686 repo.store(),
1687 &backup_type,
1688 &backup_id,
1689 backup_time,
1690 true,
1691 ).await?;
70235f72 1692
f06b820a 1693 let manifest = client.download_manifest().await?;
296c50ba 1694
70235f72 1695 if server_archive_name.ends_with(".didx") {
c3d84a22 1696 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
70235f72
CE
1697 let most_used = index.find_most_used_chunks(8);
1698 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1699 let reader = BufferedDynamicReader::new(index, chunk_reader);
1700 let decoder =
1701 pxar::Decoder::<Box<dyn pxar::fuse::ReadSeek>, fn(&Path) -> Result<(), Error>>::new(
1702 Box::new(reader),
1703 |_| Ok(()),
1704 )?;
1705 let options = OsStr::new("ro,default_permissions");
1706 let mut session = pxar::fuse::Session::from_decoder(decoder, &options, pipe.is_none())
1707 .map_err(|err| format_err!("pxar mount failed: {}", err))?;
1708
1709 // Mount the session but not call fuse deamonize as this will cause
1710 // issues with the runtime after the fork
1711 let deamonize = false;
1712 session.mount(&Path::new(target), deamonize)?;
1713
1714 if let Some(pipe) = pipe {
1715 nix::unistd::chdir(Path::new("/")).unwrap();
1716 // Finish creation of deamon by redirecting filedescriptors.
1717 let nullfd = nix::fcntl::open(
1718 "/dev/null",
1719 nix::fcntl::OFlag::O_RDWR,
1720 nix::sys::stat::Mode::empty(),
1721 ).unwrap();
1722 nix::unistd::dup2(nullfd, 0).unwrap();
1723 nix::unistd::dup2(nullfd, 1).unwrap();
1724 nix::unistd::dup2(nullfd, 2).unwrap();
1725 if nullfd > 2 {
1726 nix::unistd::close(nullfd).unwrap();
1727 }
1728 // Signal the parent process that we are done with the setup and it can
1729 // terminate.
11377a47 1730 nix::unistd::write(pipe, &[0u8])?;
70235f72
CE
1731 nix::unistd::close(pipe).unwrap();
1732 }
1733
1734 let multithreaded = true;
1735 session.run_loop(multithreaded)?;
1736 } else {
1737 bail!("unknown archive file extension (expected .pxar)");
1738 }
1739
1740 Ok(Value::Null)
1741}
1742
f2401311 1743fn main() {
33d64b81 1744
25f1650b
DM
1745 let backup_source_schema: Arc<Schema> = Arc::new(
1746 StringSchema::new("Backup source specification ([<label>:<path>]).")
1747 .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
1748 .into()
1749 );
1750
597a9203 1751 let backup_cmd_def = CliCommand::new(
ff5d3707 1752 ApiMethod::new(
bcd879cf 1753 create_backup,
597a9203 1754 ObjectSchema::new("Create (host) backup.")
ae0be2dd
DM
1755 .required(
1756 "backupspec",
1757 ArraySchema::new(
74cdb521 1758 "List of backup source specifications ([<label.ext>:<path>] ...)",
25f1650b 1759 backup_source_schema,
ae0be2dd
DM
1760 ).min_length(1)
1761 )
2665cef7 1762 .optional("repository", REPO_URL_SCHEMA.clone())
2eeaacb9
DM
1763 .optional(
1764 "include-dev",
1765 ArraySchema::new(
1766 "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
1767 StringSchema::new("Path to file.").into()
1768 )
1769 )
6d0983db
DM
1770 .optional(
1771 "keyfile",
1772 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
219ef0e6
DM
1773 .optional(
1774 "verbose",
1775 BooleanSchema::new("Verbose output.").default(false))
5b72c9b4
DM
1776 .optional(
1777 "skip-lost-and-found",
1778 BooleanSchema::new("Skip lost+found directory").default(false))
fba30411 1779 .optional(
bbf9e7e9
DM
1780 "backup-type",
1781 BACKUP_TYPE_SCHEMA.clone()
1782 )
1783 .optional(
1784 "backup-id",
1785 BACKUP_ID_SCHEMA.clone()
1786 )
ca5d0b61
DM
1787 .optional(
1788 "backup-time",
bbf9e7e9 1789 BACKUP_TIME_SCHEMA.clone()
ca5d0b61 1790 )
2d9d143a
DM
1791 .optional(
1792 "chunk-size",
1793 IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
1794 .minimum(64)
1795 .maximum(4096)
1796 .default(4096)
1797 )
ff5d3707 1798 ))
2665cef7 1799 .arg_param(vec!["backupspec"])
d0a03d40 1800 .completion_cb("repository", complete_repository)
49811347 1801 .completion_cb("backupspec", complete_backup_source)
6d0983db 1802 .completion_cb("keyfile", tools::complete_file_name)
49811347 1803 .completion_cb("chunk-size", complete_chunk_size);
f8838fe9 1804
ec34f7eb
DM
1805 let upload_log_cmd_def = CliCommand::new(
1806 ApiMethod::new(
1807 upload_log,
1808 ObjectSchema::new("Upload backup log file.")
1809 .required("snapshot", StringSchema::new("Snapshot path."))
1810 .required("logfile", StringSchema::new("The path to the log file you want to upload."))
1811 .optional("repository", REPO_URL_SCHEMA.clone())
1812 .optional(
1813 "keyfile",
1814 StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
1815 ))
1816 .arg_param(vec!["snapshot", "logfile"])
543a260f 1817 .completion_cb("snapshot", complete_backup_snapshot)
ec34f7eb
DM
1818 .completion_cb("logfile", tools::complete_file_name)
1819 .completion_cb("keyfile", tools::complete_file_name)
1820 .completion_cb("repository", complete_repository);
1821
41c039e1
DM
1822 let list_cmd_def = CliCommand::new(
1823 ApiMethod::new(
812c6f87
DM
1824 list_backup_groups,
1825 ObjectSchema::new("List backup groups.")
2665cef7 1826 .optional("repository", REPO_URL_SCHEMA.clone())
34a816cc 1827 .optional("output-format", OUTPUT_FORMAT.clone())
41c039e1 1828 ))
d0a03d40 1829 .completion_cb("repository", complete_repository);
41c039e1 1830
184f17af
DM
1831 let snapshots_cmd_def = CliCommand::new(
1832 ApiMethod::new(
1833 list_snapshots,
1834 ObjectSchema::new("List backup snapshots.")
15c847f1 1835 .optional("group", StringSchema::new("Backup group."))
2665cef7 1836 .optional("repository", REPO_URL_SCHEMA.clone())
34a816cc 1837 .optional("output-format", OUTPUT_FORMAT.clone())
184f17af 1838 ))
2665cef7 1839 .arg_param(vec!["group"])
024f11bb 1840 .completion_cb("group", complete_backup_group)
d0a03d40 1841 .completion_cb("repository", complete_repository);
184f17af 1842
6f62c924
DM
1843 let forget_cmd_def = CliCommand::new(
1844 ApiMethod::new(
1845 forget_snapshots,
1846 ObjectSchema::new("Forget (remove) backup snapshots.")
6f62c924 1847 .required("snapshot", StringSchema::new("Snapshot path."))
2665cef7 1848 .optional("repository", REPO_URL_SCHEMA.clone())
6f62c924 1849 ))
2665cef7 1850 .arg_param(vec!["snapshot"])
b2388518 1851 .completion_cb("repository", complete_repository)
543a260f 1852 .completion_cb("snapshot", complete_backup_snapshot);
6f62c924 1853
8cc0d6af
DM
1854 let garbage_collect_cmd_def = CliCommand::new(
1855 ApiMethod::new(
1856 start_garbage_collection,
1857 ObjectSchema::new("Start garbage collection for a specific repository.")
2665cef7 1858 .optional("repository", REPO_URL_SCHEMA.clone())
8cc0d6af 1859 ))
d0a03d40 1860 .completion_cb("repository", complete_repository);
8cc0d6af 1861
9f912493
DM
1862 let restore_cmd_def = CliCommand::new(
1863 ApiMethod::new(
1864 restore,
1865 ObjectSchema::new("Restore backup repository.")
d5c34d98
DM
1866 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1867 .required("archive-name", StringSchema::new("Backup archive name."))
bf125261
DM
1868 .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to stdandard output.
1869
1870We do not extraxt '.pxar' archives when writing to stdandard output.
1871
1872"###
1873 ))
46d5aa0a
DM
1874 .optional(
1875 "allow-existing-dirs",
1876 BooleanSchema::new("Do not fail if directories already exists.").default(false))
2665cef7 1877 .optional("repository", REPO_URL_SCHEMA.clone())
86eda3eb
DM
1878 .optional("keyfile", StringSchema::new("Path to encryption key."))
1879 .optional(
1880 "verbose",
1881 BooleanSchema::new("Verbose output.").default(false)
1882 )
9f912493 1883 ))
2665cef7 1884 .arg_param(vec!["snapshot", "archive-name", "target"])
b2388518 1885 .completion_cb("repository", complete_repository)
08dc340a
DM
1886 .completion_cb("snapshot", complete_group_or_snapshot)
1887 .completion_cb("archive-name", complete_archive_name)
1888 .completion_cb("target", tools::complete_file_name);
9f912493 1889
52c171e4
DM
1890 let files_cmd_def = CliCommand::new(
1891 ApiMethod::new(
1892 list_snapshot_files,
1893 ObjectSchema::new("List snapshot files.")
1894 .required("snapshot", StringSchema::new("Snapshot path."))
cec17a3e 1895 .optional("repository", REPO_URL_SCHEMA.clone())
52c171e4
DM
1896 .optional("output-format", OUTPUT_FORMAT.clone())
1897 ))
1898 .arg_param(vec!["snapshot"])
1899 .completion_cb("repository", complete_repository)
543a260f 1900 .completion_cb("snapshot", complete_backup_snapshot);
52c171e4 1901
9049a8cf
DM
1902 let catalog_cmd_def = CliCommand::new(
1903 ApiMethod::new(
1904 dump_catalog,
1905 ObjectSchema::new("Dump catalog.")
1906 .required("snapshot", StringSchema::new("Snapshot path."))
1907 .optional("repository", REPO_URL_SCHEMA.clone())
1908 ))
1909 .arg_param(vec!["snapshot"])
1910 .completion_cb("repository", complete_repository)
1911 .completion_cb("snapshot", complete_backup_snapshot);
1912
83b7db02
DM
1913 let prune_cmd_def = CliCommand::new(
1914 ApiMethod::new(
1915 prune,
1916 proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
1917 ObjectSchema::new("Prune backup repository.")
9fdc3ef4 1918 .required("group", StringSchema::new("Backup group."))
2665cef7 1919 .optional("repository", REPO_URL_SCHEMA.clone())
83b7db02
DM
1920 )
1921 ))
9fdc3ef4
DM
1922 .arg_param(vec!["group"])
1923 .completion_cb("group", complete_backup_group)
d0a03d40 1924 .completion_cb("repository", complete_repository);
9f912493 1925
34a816cc
DM
1926 let status_cmd_def = CliCommand::new(
1927 ApiMethod::new(
1928 status,
1929 ObjectSchema::new("Get repository status.")
1930 .optional("repository", REPO_URL_SCHEMA.clone())
1931 .optional("output-format", OUTPUT_FORMAT.clone())
1932 ))
1933 .completion_cb("repository", complete_repository);
1934
e240d8be
DM
1935 let login_cmd_def = CliCommand::new(
1936 ApiMethod::new(
1937 api_login,
1938 ObjectSchema::new("Try to login. If successful, store ticket.")
1939 .optional("repository", REPO_URL_SCHEMA.clone())
1940 ))
1941 .completion_cb("repository", complete_repository);
1942
1943 let logout_cmd_def = CliCommand::new(
1944 ApiMethod::new(
1945 api_logout,
1946 ObjectSchema::new("Logout (delete stored ticket).")
1947 .optional("repository", REPO_URL_SCHEMA.clone())
1948 ))
1949 .completion_cb("repository", complete_repository);
32efac1c 1950
70235f72
CE
1951 let mount_cmd_def = CliCommand::new(
1952 ApiMethod::new(
1953 mount,
1954 ObjectSchema::new("Mount pxar archive.")
1955 .required("snapshot", StringSchema::new("Group/Snapshot path."))
1956 .required("archive-name", StringSchema::new("Backup archive name."))
1957 .required("target", StringSchema::new("Target directory path."))
1958 .optional("repository", REPO_URL_SCHEMA.clone())
1959 .optional("keyfile", StringSchema::new("Path to encryption key."))
1960 .optional("verbose", BooleanSchema::new("Verbose output.").default(false))
1961 ))
1962 .arg_param(vec!["snapshot", "archive-name", "target"])
1963 .completion_cb("repository", complete_repository)
1964 .completion_cb("snapshot", complete_group_or_snapshot)
1965 .completion_cb("archive-name", complete_archive_name)
1966 .completion_cb("target", tools::complete_file_name);
e240d8be 1967
41c039e1 1968 let cmd_def = CliCommandMap::new()
597a9203 1969 .insert("backup".to_owned(), backup_cmd_def.into())
ec34f7eb 1970 .insert("upload-log".to_owned(), upload_log_cmd_def.into())
6f62c924 1971 .insert("forget".to_owned(), forget_cmd_def.into())
9049a8cf 1972 .insert("catalog".to_owned(), catalog_cmd_def.into())
8cc0d6af 1973 .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
83b7db02 1974 .insert("list".to_owned(), list_cmd_def.into())
e240d8be
DM
1975 .insert("login".to_owned(), login_cmd_def.into())
1976 .insert("logout".to_owned(), logout_cmd_def.into())
184f17af 1977 .insert("prune".to_owned(), prune_cmd_def.into())
9f912493 1978 .insert("restore".to_owned(), restore_cmd_def.into())
f2401311 1979 .insert("snapshots".to_owned(), snapshots_cmd_def.into())
52c171e4 1980 .insert("files".to_owned(), files_cmd_def.into())
34a816cc 1981 .insert("status".to_owned(), status_cmd_def.into())
70235f72
CE
1982 .insert("key".to_owned(), key_mgmt_cli().into())
1983 .insert("mount".to_owned(), mount_cmd_def.into());
a914a774 1984
e9722f8b
WB
1985 run_cli_command(cmd_def.into());
1986}
496a6784 1987
e9722f8b
WB
1988fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
1989 let rt = tokio::runtime::Runtime::new().unwrap();
1990 let ret = rt.block_on(fut);
1991 rt.shutdown_now();
1992 ret
ff5d3707 1993}