]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-backup-client.rs
src/bin/proxmox-backup-client.rs: remove unnecessary .to_owned()
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
CommitLineData
ff5d3707 1use failure::*;
70235f72
CE
2use nix::unistd::{fork, ForkResult, pipe};
3use std::os::unix::io::RawFd;
fa5d6977 4use chrono::{Local, Utc, TimeZone};
e9c9409a 5use std::path::{Path, PathBuf};
2eeaacb9 6use std::collections::{HashSet, HashMap};
70235f72 7use std::ffi::OsStr;
bb19af73 8use std::io::{Write, Seek, SeekFrom};
2761d6a4
DM
9use std::os::unix::fs::OpenOptionsExt;
10
552c2259 11use proxmox::{sortable, identity};
e18a6c9e 12use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
3d482025
WB
13use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
14use proxmox::api::schema::*;
7eea56ca 15use proxmox::api::cli::*;
ff5d3707 16
fe0e04c6 17use proxmox_backup::tools;
bbf9e7e9 18use proxmox_backup::api2::types::*;
151c6ce2 19use proxmox_backup::client::*;
247cdbce 20use proxmox_backup::backup::*;
7926a3a1 21use proxmox_backup::pxar::{ self, catalog::* };
86eda3eb 22
fe0e04c6
DM
23//use proxmox_backup::backup::image_index::*;
24//use proxmox_backup::config::datastore;
8968258b 25//use proxmox_backup::pxar::encoder::*;
728797d0 26//use proxmox_backup::backup::datastore::*;
23bb8780 27
f5f13ebc 28use serde_json::{json, Value};
1c0472e8 29//use hyper::Body;
2761d6a4 30use std::sync::{Arc, Mutex};
255f378a 31//use regex::Regex;
d0a03d40 32use xdg::BaseDirectories;
ae0be2dd 33
5a2df000 34use futures::*;
c4ff3dce 35use tokio::sync::mpsc;
ae0be2dd 36
3d482025 37proxmox::api::const_regex! {
255f378a 38 BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
ae0be2dd 39}
33d64b81 40
255f378a
DM
41const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
42 .format(&BACKUP_REPO_URL)
43 .max_length(256)
44 .schema();
d0a03d40 45
2665cef7
DM
/// Default repository taken from the PBS_REPOSITORY environment variable
/// (None when the variable is unset or not valid unicode).
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
49
50fn extract_repository_from_value(
51 param: &Value,
52) -> Result<BackupRepository, Error> {
53
54 let repo_url = param["repository"]
55 .as_str()
56 .map(String::from)
57 .or_else(get_default_repository)
58 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
59
60 let repo: BackupRepository = repo_url.parse()?;
61
62 Ok(repo)
63}
64
65fn extract_repository_from_map(
66 param: &HashMap<String, String>,
67) -> Option<BackupRepository> {
68
69 param.get("repository")
70 .map(String::from)
71 .or_else(get_default_repository)
72 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
73}
74
d0a03d40
DM
75fn record_repository(repo: &BackupRepository) {
76
77 let base = match BaseDirectories::with_prefix("proxmox-backup") {
78 Ok(v) => v,
79 _ => return,
80 };
81
82 // usually $HOME/.cache/proxmox-backup/repo-list
83 let path = match base.place_cache_file("repo-list") {
84 Ok(v) => v,
85 _ => return,
86 };
87
11377a47 88 let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
89
90 let repo = repo.to_string();
91
92 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
93
94 let mut map = serde_json::map::Map::new();
95
96 loop {
97 let mut max_used = 0;
98 let mut max_repo = None;
99 for (repo, count) in data.as_object().unwrap() {
100 if map.contains_key(repo) { continue; }
101 if let Some(count) = count.as_i64() {
102 if count > max_used {
103 max_used = count;
104 max_repo = Some(repo);
105 }
106 }
107 }
108 if let Some(repo) = max_repo {
109 map.insert(repo.to_owned(), json!(max_used));
110 } else {
111 break;
112 }
113 if map.len() > 10 { // store max. 10 repos
114 break;
115 }
116 }
117
118 let new_data = json!(map);
119
e18a6c9e 120 let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
d0a03d40
DM
121}
122
49811347 123fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
d0a03d40
DM
124
125 let mut result = vec![];
126
127 let base = match BaseDirectories::with_prefix("proxmox-backup") {
128 Ok(v) => v,
129 _ => return result,
130 };
131
132 // usually $HOME/.cache/proxmox-backup/repo-list
133 let path = match base.place_cache_file("repo-list") {
134 Ok(v) => v,
135 _ => return result,
136 };
137
11377a47 138 let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
139
140 if let Some(map) = data.as_object() {
49811347 141 for (repo, _count) in map {
d0a03d40
DM
142 result.push(repo.to_owned());
143 }
144 }
145
146 result
147}
148
e9722f8b 149async fn backup_directory<P: AsRef<Path>>(
cf9271e2 150 client: &BackupWriter,
17d6979a 151 dir_path: P,
247cdbce 152 archive_name: &str,
36898ffc 153 chunk_size: Option<usize>,
2eeaacb9 154 device_set: Option<HashSet<u64>>,
219ef0e6 155 verbose: bool,
5b72c9b4 156 skip_lost_and_found: bool,
f98ac774 157 crypt_config: Option<Arc<CryptConfig>>,
bf6e3217 158 catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
2c3891d1 159) -> Result<BackupStats, Error> {
33d64b81 160
2761d6a4 161 let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
e9722f8b 162 let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
ff3d3100 163
e9722f8b 164 let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
5e7a09be 165
c4ff3dce 166 let stream = rx
e9722f8b 167 .map_err(Error::from);
17d6979a 168
c4ff3dce 169 // spawn chunker inside a separate task so that it can run parallel
e9722f8b
WB
170 tokio::spawn(async move {
171 let _ = tx.send_all(&mut chunk_stream).await;
172 });
17d6979a 173
e9722f8b
WB
174 let stats = client
175 .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
176 .await?;
bcd879cf 177
2c3891d1 178 Ok(stats)
bcd879cf
DM
179}
180
e9722f8b 181async fn backup_image<P: AsRef<Path>>(
cf9271e2 182 client: &BackupWriter,
6af905c1
DM
183 image_path: P,
184 archive_name: &str,
185 image_size: u64,
36898ffc 186 chunk_size: Option<usize>,
1c0472e8 187 _verbose: bool,
f98ac774 188 crypt_config: Option<Arc<CryptConfig>>,
2c3891d1 189) -> Result<BackupStats, Error> {
6af905c1 190
6af905c1
DM
191 let path = image_path.as_ref().to_owned();
192
e9722f8b 193 let file = tokio::fs::File::open(path).await?;
6af905c1
DM
194
195 let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
196 .map_err(Error::from);
197
36898ffc 198 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
6af905c1 199
e9722f8b
WB
200 let stats = client
201 .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
202 .await?;
6af905c1 203
2c3891d1 204 Ok(stats)
6af905c1
DM
205}
206
52c171e4
DM
/// Strip the server-side file extension (".didx", ".fidx" or ".blob") from
/// an archive file name; unknown extensions are returned unchanged.
fn strip_server_file_expenstion(name: &str) -> String {

    // all known server extensions are 5 bytes long (including the dot)
    for ext in &[".didx", ".fidx", ".blob"] {
        if name.ends_with(ext) {
            return name[..name.len() - ext.len()].to_owned();
        }
    }

    name.to_owned() // should not happen
}
215
812c6f87
DM
216fn list_backup_groups(
217 param: Value,
218 _info: &ApiMethod,
dd5495d6 219 _rpcenv: &mut dyn RpcEnvironment,
812c6f87
DM
220) -> Result<Value, Error> {
221
2665cef7 222 let repo = extract_repository_from_value(&param)?;
812c6f87 223
cc2ce4a9 224 let client = HttpClient::new(repo.host(), repo.user(), None)?;
812c6f87 225
d0a03d40 226 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
812c6f87 227
e9722f8b
WB
228 let mut result = async_main(async move {
229 client.get(&path, None).await
230 })?;
812c6f87 231
d0a03d40
DM
232 record_repository(&repo);
233
812c6f87 234 // fixme: implement and use output formatter instead ..
80822b95
DM
235 let list = result["data"].as_array_mut().unwrap();
236
237 list.sort_unstable_by(|a, b| {
238 let a_id = a["backup-id"].as_str().unwrap();
239 let a_backup_type = a["backup-type"].as_str().unwrap();
240 let b_id = b["backup-id"].as_str().unwrap();
241 let b_backup_type = b["backup-type"].as_str().unwrap();
242
243 let type_order = a_backup_type.cmp(b_backup_type);
244 if type_order == std::cmp::Ordering::Equal {
245 a_id.cmp(b_id)
246 } else {
247 type_order
248 }
249 });
812c6f87 250
34a816cc
DM
251 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
252
253 let mut result = vec![];
254
812c6f87
DM
255 for item in list {
256
ad20d198
DM
257 let id = item["backup-id"].as_str().unwrap();
258 let btype = item["backup-type"].as_str().unwrap();
259 let epoch = item["last-backup"].as_i64().unwrap();
fa5d6977 260 let last_backup = Utc.timestamp(epoch, 0);
ad20d198 261 let backup_count = item["backup-count"].as_u64().unwrap();
812c6f87 262
1e9a94e5 263 let group = BackupGroup::new(btype, id);
812c6f87
DM
264
265 let path = group.group_path().to_str().unwrap().to_owned();
ad20d198 266
52c171e4
DM
267 let files = item["files"].as_array().unwrap().iter()
268 .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
ad20d198 269
34a816cc 270 if output_format == "text" {
fa5d6977
DM
271 println!(
272 "{:20} | {} | {:5} | {}",
273 path,
274 BackupDir::backup_time_to_string(last_backup),
275 backup_count,
276 tools::join(&files, ' '),
277 );
34a816cc
DM
278 } else {
279 result.push(json!({
280 "backup-type": btype,
281 "backup-id": id,
282 "last-backup": epoch,
283 "backup-count": backup_count,
284 "files": files,
285 }));
286 }
812c6f87
DM
287 }
288
9aa3f682 289 if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
34a816cc 290
812c6f87
DM
291 Ok(Value::Null)
292}
293
184f17af
DM
294fn list_snapshots(
295 param: Value,
296 _info: &ApiMethod,
dd5495d6 297 _rpcenv: &mut dyn RpcEnvironment,
184f17af
DM
298) -> Result<Value, Error> {
299
2665cef7 300 let repo = extract_repository_from_value(&param)?;
184f17af 301
34a816cc
DM
302 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
303
cc2ce4a9 304 let client = HttpClient::new(repo.host(), repo.user(), None)?;
184f17af 305
9e391bb7 306 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
184f17af 307
15c847f1
DM
308 let mut args = json!({});
309 if let Some(path) = param["group"].as_str() {
310 let group = BackupGroup::parse(path)?;
311 args["backup-type"] = group.backup_type().into();
312 args["backup-id"] = group.backup_id().into();
313 }
314
e9722f8b
WB
315 let result = async_main(async move {
316 client.get(&path, Some(args)).await
317 })?;
184f17af 318
d0a03d40
DM
319 record_repository(&repo);
320
184f17af
DM
321 let list = result["data"].as_array().unwrap();
322
34a816cc
DM
323 let mut result = vec![];
324
184f17af
DM
325 for item in list {
326
327 let id = item["backup-id"].as_str().unwrap();
328 let btype = item["backup-type"].as_str().unwrap();
329 let epoch = item["backup-time"].as_i64().unwrap();
184f17af 330
391d3107 331 let snapshot = BackupDir::new(btype, id, epoch);
184f17af
DM
332
333 let path = snapshot.relative_path().to_str().unwrap().to_owned();
334
52c171e4
DM
335 let files = item["files"].as_array().unwrap().iter()
336 .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
184f17af 337
34a816cc 338 if output_format == "text" {
a17a0e7a
DM
339 let size_str = if let Some(size) = item["size"].as_u64() {
340 size.to_string()
341 } else {
342 String::from("-")
343 };
344 println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
34a816cc 345 } else {
a17a0e7a 346 let mut data = json!({
34a816cc
DM
347 "backup-type": btype,
348 "backup-id": id,
349 "backup-time": epoch,
350 "files": files,
a17a0e7a
DM
351 });
352 if let Some(size) = item["size"].as_u64() {
353 data["size"] = size.into();
354 }
355 result.push(data);
34a816cc 356 }
184f17af
DM
357 }
358
f6ede796 359 if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
34a816cc 360
184f17af
DM
361 Ok(Value::Null)
362}
363
6f62c924
DM
364fn forget_snapshots(
365 param: Value,
366 _info: &ApiMethod,
dd5495d6 367 _rpcenv: &mut dyn RpcEnvironment,
6f62c924
DM
368) -> Result<Value, Error> {
369
2665cef7 370 let repo = extract_repository_from_value(&param)?;
6f62c924
DM
371
372 let path = tools::required_string_param(&param, "snapshot")?;
373 let snapshot = BackupDir::parse(path)?;
374
cc2ce4a9 375 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
6f62c924 376
9e391bb7 377 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
6f62c924 378
e9722f8b
WB
379 let result = async_main(async move {
380 client.delete(&path, Some(json!({
381 "backup-type": snapshot.group().backup_type(),
382 "backup-id": snapshot.group().backup_id(),
383 "backup-time": snapshot.backup_time().timestamp(),
384 }))).await
385 })?;
6f62c924 386
d0a03d40
DM
387 record_repository(&repo);
388
6f62c924
DM
389 Ok(result)
390}
391
e240d8be
DM
392fn api_login(
393 param: Value,
394 _info: &ApiMethod,
395 _rpcenv: &mut dyn RpcEnvironment,
396) -> Result<Value, Error> {
397
398 let repo = extract_repository_from_value(&param)?;
399
cc2ce4a9 400 let client = HttpClient::new(repo.host(), repo.user(), None)?;
e9722f8b 401 async_main(async move { client.login().await })?;
e240d8be
DM
402
403 record_repository(&repo);
404
405 Ok(Value::Null)
406}
407
408fn api_logout(
409 param: Value,
410 _info: &ApiMethod,
411 _rpcenv: &mut dyn RpcEnvironment,
412) -> Result<Value, Error> {
413
414 let repo = extract_repository_from_value(&param)?;
415
416 delete_ticket_info(repo.host(), repo.user())?;
417
418 Ok(Value::Null)
419}
420
9049a8cf
DM
421fn dump_catalog(
422 param: Value,
423 _info: &ApiMethod,
424 _rpcenv: &mut dyn RpcEnvironment,
425) -> Result<Value, Error> {
426
427 let repo = extract_repository_from_value(&param)?;
428
429 let path = tools::required_string_param(&param, "snapshot")?;
430 let snapshot = BackupDir::parse(path)?;
431
11377a47 432 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
9049a8cf
DM
433
434 let crypt_config = match keyfile {
435 None => None,
436 Some(path) => {
a8f10f84 437 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
9025312a 438 Some(Arc::new(CryptConfig::new(key)?))
9049a8cf
DM
439 }
440 };
441
cc2ce4a9 442 let client = HttpClient::new(repo.host(), repo.user(), None)?;
9049a8cf 443
e9722f8b 444 async_main(async move {
9e490a74
DM
445 let client = BackupReader::start(
446 client,
296c50ba 447 crypt_config.clone(),
e9722f8b
WB
448 repo.store(),
449 &snapshot.group().backup_type(),
450 &snapshot.group().backup_id(),
9e490a74
DM
451 snapshot.backup_time(),
452 true,
453 ).await?;
9049a8cf 454
f06b820a 455 let manifest = client.download_manifest().await?;
d2267b11 456
c3d84a22 457 let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
bf6e3217
DM
458
459 let most_used = index.find_most_used_chunks(8);
460
461 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
462
463 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
464
465 let mut catalogfile = std::fs::OpenOptions::new()
e9722f8b 466 .write(true)
bf6e3217 467 .read(true)
e9722f8b
WB
468 .custom_flags(libc::O_TMPFILE)
469 .open("/tmp")?;
9049a8cf 470
bf6e3217
DM
471 std::io::copy(&mut reader, &mut catalogfile)
472 .map_err(|err| format_err!("unable to download catalog - {}", err))?;
d2267b11 473
bf6e3217 474 catalogfile.seek(SeekFrom::Start(0))?;
a84ef4c2 475
bf6e3217 476 let mut catalog_reader = CatalogReader::new(catalogfile);
9049a8cf 477
e9722f8b 478 catalog_reader.dump()?;
9049a8cf 479
e9722f8b
WB
480 record_repository(&repo);
481
482 Ok::<(), Error>(())
483 })?;
9049a8cf
DM
484
485 Ok(Value::Null)
486}
487
52c171e4
DM
488fn list_snapshot_files(
489 param: Value,
490 _info: &ApiMethod,
491 _rpcenv: &mut dyn RpcEnvironment,
492) -> Result<Value, Error> {
493
494 let repo = extract_repository_from_value(&param)?;
495
496 let path = tools::required_string_param(&param, "snapshot")?;
497 let snapshot = BackupDir::parse(path)?;
498
499 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
500
cc2ce4a9 501 let client = HttpClient::new(repo.host(), repo.user(), None)?;
52c171e4
DM
502
503 let path = format!("api2/json/admin/datastore/{}/files", repo.store());
504
e9722f8b
WB
505 let mut result = async_main(async move {
506 client.get(&path, Some(json!({
507 "backup-type": snapshot.group().backup_type(),
508 "backup-id": snapshot.group().backup_id(),
509 "backup-time": snapshot.backup_time().timestamp(),
510 }))).await
511 })?;
52c171e4
DM
512
513 record_repository(&repo);
514
8c70e3eb 515 let list: Value = result["data"].take();
52c171e4
DM
516
517 if output_format == "text" {
8c70e3eb
DM
518 for item in list.as_array().unwrap().iter() {
519 println!(
520 "{} {}",
521 strip_server_file_expenstion(item["filename"].as_str().unwrap()),
522 item["size"].as_u64().unwrap_or(0),
523 );
52c171e4
DM
524 }
525 } else {
8c70e3eb 526 format_and_print_result(&list, &output_format);
52c171e4
DM
527 }
528
529 Ok(Value::Null)
530}
531
8cc0d6af
DM
532fn start_garbage_collection(
533 param: Value,
534 _info: &ApiMethod,
dd5495d6 535 _rpcenv: &mut dyn RpcEnvironment,
8cc0d6af
DM
536) -> Result<Value, Error> {
537
2665cef7 538 let repo = extract_repository_from_value(&param)?;
e5f7def4 539 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
8cc0d6af 540
cc2ce4a9 541 let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
8cc0d6af 542
d0a03d40 543 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
8cc0d6af 544
e5f7def4
DM
545 async_main(async {
546 let result = client.post(&path, None).await?;
8cc0d6af 547
e5f7def4 548 record_repository(&repo);
d0a03d40 549
e5f7def4
DM
550 let data = &result["data"];
551 if output_format == "text" {
552 if let Some(upid) = data.as_str() {
553 display_task_log(client, upid, true).await?;
554 }
555 } else {
556 format_and_print_result(&data, &output_format);
557 }
558
559 Ok::<_, Error>(())
560 })?;
561
562
563 Ok(Value::Null)
8cc0d6af 564}
33d64b81 565
ae0be2dd
DM
566fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
567
255f378a 568 if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
ae0be2dd
DM
569 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
570 }
571 bail!("unable to parse directory specification '{}'", value);
572}
573
bf6e3217
DM
574fn spawn_catalog_upload(
575 client: Arc<BackupWriter>,
576 crypt_config: Option<Arc<CryptConfig>>,
577) -> Result<
578 (
579 Arc<Mutex<CatalogWriter<SenderWriter>>>,
580 tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
581 ), Error>
582{
583 let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
584 let catalog_stream = catalog_rx.map_err(Error::from);
585 let catalog_chunk_size = 512*1024;
586 let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
587
588 let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));
589
590 let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();
591
592 tokio::spawn(async move {
593 let catalog_upload_result = client
594 .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
595 .await;
596
597 if let Err(ref err) = catalog_upload_result {
598 eprintln!("catalog upload error - {}", err);
599 client.cancel();
600 }
601
602 let _ = catalog_result_tx.send(catalog_upload_result);
603 });
604
605 Ok((catalog, catalog_result_rx))
606}
607
6049b71f
DM
608fn create_backup(
609 param: Value,
610 _info: &ApiMethod,
dd5495d6 611 _rpcenv: &mut dyn RpcEnvironment,
6049b71f 612) -> Result<Value, Error> {
ff5d3707 613
2665cef7 614 let repo = extract_repository_from_value(&param)?;
ae0be2dd
DM
615
616 let backupspec_list = tools::required_array_param(&param, "backupspec")?;
a914a774 617
eed6db39
DM
618 let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
619
5b72c9b4
DM
620 let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);
621
219ef0e6
DM
622 let verbose = param["verbose"].as_bool().unwrap_or(false);
623
ca5d0b61
DM
624 let backup_time_opt = param["backup-time"].as_i64();
625
36898ffc 626 let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
2d9d143a 627
247cdbce
DM
628 if let Some(size) = chunk_size_opt {
629 verify_chunk_size(size)?;
2d9d143a
DM
630 }
631
11377a47 632 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
6d0983db 633
f69adc81 634 let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
fba30411 635
bbf9e7e9 636 let backup_type = param["backup-type"].as_str().unwrap_or("host");
ca5d0b61 637
2eeaacb9
DM
638 let include_dev = param["include-dev"].as_array();
639
640 let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
641
642 if let Some(include_dev) = include_dev {
643 if all_file_systems {
644 bail!("option 'all-file-systems' conflicts with option 'include-dev'");
645 }
646
647 let mut set = HashSet::new();
648 for path in include_dev {
649 let path = path.as_str().unwrap();
650 let stat = nix::sys::stat::stat(path)
651 .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
652 set.insert(stat.st_dev);
653 }
654 devices = Some(set);
655 }
656
ae0be2dd 657 let mut upload_list = vec![];
a914a774 658
79679c2d 659 enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
6af905c1 660
bf6e3217
DM
661 let mut upload_catalog = false;
662
ae0be2dd
DM
663 for backupspec in backupspec_list {
664 let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
bcd879cf 665
eb1804c5
DM
666 use std::os::unix::fs::FileTypeExt;
667
3fa71727
CE
668 let metadata = std::fs::metadata(filename)
669 .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
eb1804c5 670 let file_type = metadata.file_type();
23bb8780 671
4af0ee05 672 let extension = target.rsplit('.').next()
11377a47 673 .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;
bcd879cf 674
ec8a9bb9
DM
675 match extension {
676 "pxar" => {
677 if !file_type.is_dir() {
678 bail!("got unexpected file type (expected directory)");
679 }
4af0ee05 680 upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
bf6e3217 681 upload_catalog = true;
ec8a9bb9
DM
682 }
683 "img" => {
eb1804c5 684
ec8a9bb9
DM
685 if !(file_type.is_file() || file_type.is_block_device()) {
686 bail!("got unexpected file type (expected file or block device)");
687 }
eb1804c5 688
e18a6c9e 689 let size = image_size(&PathBuf::from(filename))?;
23bb8780 690
ec8a9bb9 691 if size == 0 { bail!("got zero-sized file '{}'", filename); }
ae0be2dd 692
4af0ee05 693 upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
ec8a9bb9
DM
694 }
695 "conf" => {
696 if !file_type.is_file() {
697 bail!("got unexpected file type (expected regular file)");
698 }
4af0ee05 699 upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
ec8a9bb9 700 }
79679c2d
DM
701 "log" => {
702 if !file_type.is_file() {
703 bail!("got unexpected file type (expected regular file)");
704 }
4af0ee05 705 upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
79679c2d 706 }
ec8a9bb9
DM
707 _ => {
708 bail!("got unknown archive extension '{}'", extension);
709 }
ae0be2dd
DM
710 }
711 }
712
11377a47 713 let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
ae0be2dd 714
cc2ce4a9 715 let client = HttpClient::new(repo.host(), repo.user(), None)?;
d0a03d40
DM
716 record_repository(&repo);
717
ca5d0b61
DM
718 println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
719
f69adc81 720 println!("Client name: {}", proxmox::tools::nodename());
ca5d0b61
DM
721
722 let start_time = Local::now();
723
7a6cfbd9 724 println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
51144821 725
bb823140
DM
726 let (crypt_config, rsa_encrypted_key) = match keyfile {
727 None => (None, None),
6d0983db 728 Some(path) => {
a8f10f84 729 let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
bb823140
DM
730
731 let crypt_config = CryptConfig::new(key)?;
732
733 let path = master_pubkey_path()?;
734 if path.exists() {
e18a6c9e 735 let pem_data = file_get_contents(&path)?;
bb823140
DM
736 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
737 let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
738 (Some(Arc::new(crypt_config)), Some(enc_key))
739 } else {
740 (Some(Arc::new(crypt_config)), None)
741 }
6d0983db
DM
742 }
743 };
f98ac774 744
e9722f8b 745 async_main(async move {
cf9271e2
DM
746 let client = BackupWriter::start(
747 client,
748 repo.store(),
749 backup_type,
750 &backup_id,
751 backup_time,
752 verbose,
753 ).await?;
e9722f8b 754
59e9ba01
DM
755 let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
756 let mut manifest = BackupManifest::new(snapshot);
e9722f8b 757
bf6e3217 758 let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
e9722f8b
WB
759
760 for (backup_type, filename, target, size) in upload_list {
761 match backup_type {
762 BackupType::CONFIG => {
763 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
764 let stats = client
765 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
766 .await?;
59e9ba01 767 manifest.add_file(target, stats.size, stats.csum);
e9722f8b
WB
768 }
769 BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
770 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
771 let stats = client
772 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
773 .await?;
59e9ba01 774 manifest.add_file(target, stats.size, stats.csum);
e9722f8b
WB
775 }
776 BackupType::PXAR => {
e9722f8b
WB
777 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
778 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
779 let stats = backup_directory(
780 &client,
781 &filename,
782 &target,
783 chunk_size_opt,
784 devices.clone(),
785 verbose,
786 skip_lost_and_found,
787 crypt_config.clone(),
788 catalog.clone(),
789 ).await?;
59e9ba01 790 manifest.add_file(target, stats.size, stats.csum);
e9722f8b
WB
791 catalog.lock().unwrap().end_directory()?;
792 }
793 BackupType::IMAGE => {
794 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
795 let stats = backup_image(
796 &client,
797 &filename,
798 &target,
799 size,
800 chunk_size_opt,
801 verbose,
802 crypt_config.clone(),
803 ).await?;
59e9ba01 804 manifest.add_file(target, stats.size, stats.csum);
e9722f8b 805 }
6af905c1
DM
806 }
807 }
4818c8b6 808
e9722f8b
WB
809 // finalize and upload catalog
810 if upload_catalog {
811 let mutex = Arc::try_unwrap(catalog)
812 .map_err(|_| format_err!("unable to get catalog (still used)"))?;
bf6e3217
DM
813 let mut catalog = mutex.into_inner().unwrap();
814
815 catalog.finish()?;
2761d6a4 816
bf6e3217 817 drop(catalog); // close upload stream
2761d6a4 818
bf6e3217 819 let stats = catalog_result_rx.await??;
9d135fe6 820
bf6e3217 821 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
e9722f8b 822 }
2761d6a4 823
e9722f8b
WB
824 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
825 let target = "rsa-encrypted.key";
826 println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
827 let stats = client
828 .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
829 .await?;
59e9ba01 830 manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);
e9722f8b
WB
831
832 // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
833 /*
834 let mut buffer2 = vec![0u8; rsa.size() as usize];
835 let pem_data = file_get_contents("master-private.pem")?;
836 let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
837 let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
838 println!("TEST {} {:?}", len, buffer2);
839 */
840 }
9f46c7de 841
59e9ba01
DM
842 // create manifest (index.json)
843 let manifest = manifest.into_json();
2c3891d1 844
e9722f8b 845 println!("Upload index.json to '{:?}'", repo);
59e9ba01 846 let manifest = serde_json::to_string_pretty(&manifest)?.into();
e9722f8b 847 client
59e9ba01 848 .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
e9722f8b 849 .await?;
2c3891d1 850
e9722f8b 851 client.finish().await?;
c4ff3dce 852
e9722f8b
WB
853 let end_time = Local::now();
854 let elapsed = end_time.signed_duration_since(start_time);
855 println!("Duration: {}", elapsed);
3ec3ec3f 856
e9722f8b 857 println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
3d5c11e5 858
e9722f8b
WB
859 Ok(Value::Null)
860 })
f98ea63d
DM
861}
862
d0a03d40 863fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
f98ea63d
DM
864
865 let mut result = vec![];
866
867 let data: Vec<&str> = arg.splitn(2, ':').collect();
868
bff11030 869 if data.len() != 2 {
8968258b
DM
870 result.push(String::from("root.pxar:/"));
871 result.push(String::from("etc.pxar:/etc"));
bff11030
DM
872 return result;
873 }
f98ea63d 874
496a6784 875 let files = tools::complete_file_name(data[1], param);
f98ea63d
DM
876
877 for file in files {
878 result.push(format!("{}:{}", data[0], file));
879 }
880
881 result
ff5d3707 882}
883
9f912493
DM
884fn restore(
885 param: Value,
886 _info: &ApiMethod,
dd5495d6 887 _rpcenv: &mut dyn RpcEnvironment,
9f912493 888) -> Result<Value, Error> {
e9722f8b
WB
889 async_main(restore_do(param))
890}
9f912493 891
88892ea8
DM
892fn dump_image<W: Write>(
893 client: Arc<BackupReader>,
894 crypt_config: Option<Arc<CryptConfig>>,
895 index: FixedIndexReader,
896 mut writer: W,
fd04ca7a 897 verbose: bool,
88892ea8
DM
898) -> Result<(), Error> {
899
900 let most_used = index.find_most_used_chunks(8);
901
902 let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
903
904 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
905 // and thus slows down reading. Instead, directly use RemoteChunkReader
fd04ca7a
DM
906 let mut per = 0;
907 let mut bytes = 0;
908 let start_time = std::time::Instant::now();
909
88892ea8
DM
910 for pos in 0..index.index_count() {
911 let digest = index.index_digest(pos).unwrap();
912 let raw_data = chunk_reader.read_chunk(&digest)?;
913 writer.write_all(&raw_data)?;
fd04ca7a
DM
914 bytes += raw_data.len();
915 if verbose {
916 let next_per = ((pos+1)*100)/index.index_count();
917 if per != next_per {
918 eprintln!("progress {}% (read {} bytes, duration {} sec)",
919 next_per, bytes, start_time.elapsed().as_secs());
920 per = next_per;
921 }
922 }
88892ea8
DM
923 }
924
fd04ca7a
DM
925 let end_time = std::time::Instant::now();
926 let elapsed = end_time.duration_since(start_time);
927 eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
928 bytes,
929 elapsed.as_secs_f64(),
930 bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
931 );
932
933
88892ea8
DM
934 Ok(())
935}
936
e9722f8b 937async fn restore_do(param: Value) -> Result<Value, Error> {
2665cef7 938 let repo = extract_repository_from_value(&param)?;
9f912493 939
86eda3eb
DM
940 let verbose = param["verbose"].as_bool().unwrap_or(false);
941
46d5aa0a
DM
942 let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
943
d5c34d98
DM
944 let archive_name = tools::required_string_param(&param, "archive-name")?;
945
cc2ce4a9 946 let client = HttpClient::new(repo.host(), repo.user(), None)?;
d0a03d40 947
d0a03d40 948 record_repository(&repo);
d5c34d98 949
9f912493 950 let path = tools::required_string_param(&param, "snapshot")?;
9f912493 951
86eda3eb 952 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
d5c34d98 953 let group = BackupGroup::parse(path)?;
9f912493 954
9e391bb7
DM
955 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
956 let result = client.get(&path, Some(json!({
d5c34d98
DM
957 "backup-type": group.backup_type(),
958 "backup-id": group.backup_id(),
e9722f8b 959 }))).await?;
9f912493 960
d5c34d98 961 let list = result["data"].as_array().unwrap();
11377a47 962 if list.is_empty() {
d5c34d98
DM
963 bail!("backup group '{}' does not contain any snapshots:", path);
964 }
9f912493 965
86eda3eb 966 let epoch = list[0]["backup-time"].as_i64().unwrap();
fa5d6977 967 let backup_time = Utc.timestamp(epoch, 0);
86eda3eb 968 (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
d5c34d98
DM
969 } else {
970 let snapshot = BackupDir::parse(path)?;
86eda3eb
DM
971 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
972 };
9f912493 973
d5c34d98 974 let target = tools::required_string_param(&param, "target")?;
bf125261 975 let target = if target == "-" { None } else { Some(target) };
2ae7d196 976
11377a47 977 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
2ae7d196 978
86eda3eb
DM
979 let crypt_config = match keyfile {
980 None => None,
981 Some(path) => {
a8f10f84 982 let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
86eda3eb
DM
983 Some(Arc::new(CryptConfig::new(key)?))
984 }
985 };
d5c34d98 986
afb4cd28
DM
987 let server_archive_name = if archive_name.ends_with(".pxar") {
988 format!("{}.didx", archive_name)
989 } else if archive_name.ends_with(".img") {
990 format!("{}.fidx", archive_name)
991 } else {
f8100e96 992 format!("{}.blob", archive_name)
afb4cd28 993 };
9f912493 994
296c50ba
DM
995 let client = BackupReader::start(
996 client,
997 crypt_config.clone(),
998 repo.store(),
999 &backup_type,
1000 &backup_id,
1001 backup_time,
1002 true,
1003 ).await?;
86eda3eb 1004
f06b820a 1005 let manifest = client.download_manifest().await?;
02fcf372 1006
ad6e5a6f 1007 if server_archive_name == MANIFEST_BLOB_NAME {
f06b820a 1008 let backup_index_data = manifest.into_json().to_string();
02fcf372 1009 if let Some(target) = target {
296c50ba 1010 file_set_contents(target, backup_index_data.as_bytes(), None)?;
02fcf372
DM
1011 } else {
1012 let stdout = std::io::stdout();
1013 let mut writer = stdout.lock();
296c50ba 1014 writer.write_all(backup_index_data.as_bytes())
02fcf372
DM
1015 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1016 }
1017
1018 } else if server_archive_name.ends_with(".blob") {
d2267b11 1019
bb19af73 1020 let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
f8100e96 1021
bf125261 1022 if let Some(target) = target {
0d986280
DM
1023 let mut writer = std::fs::OpenOptions::new()
1024 .write(true)
1025 .create(true)
1026 .create_new(true)
1027 .open(target)
1028 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
1029 std::io::copy(&mut reader, &mut writer)?;
bf125261
DM
1030 } else {
1031 let stdout = std::io::stdout();
1032 let mut writer = stdout.lock();
0d986280 1033 std::io::copy(&mut reader, &mut writer)
bf125261
DM
1034 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1035 }
f8100e96
DM
1036
1037 } else if server_archive_name.ends_with(".didx") {
86eda3eb 1038
c3d84a22 1039 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
df65bd3d 1040
f4bf7dfc
DM
1041 let most_used = index.find_most_used_chunks(8);
1042
1043 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1044
afb4cd28 1045 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
86eda3eb 1046
bf125261 1047 if let Some(target) = target {
86eda3eb 1048
47651f95 1049 let feature_flags = pxar::flags::DEFAULT;
f701d033
DM
1050 let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
1051 decoder.set_callback(move |path| {
bf125261 1052 if verbose {
fd04ca7a 1053 eprintln!("{:?}", path);
bf125261
DM
1054 }
1055 Ok(())
1056 });
6a879109
CE
1057 decoder.set_allow_existing_dirs(allow_existing_dirs);
1058
fa7e957c 1059 decoder.restore(Path::new(target), &Vec::new())?;
bf125261 1060 } else {
88892ea8
DM
1061 let mut writer = std::fs::OpenOptions::new()
1062 .write(true)
1063 .open("/dev/stdout")
1064 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;
afb4cd28 1065
bf125261
DM
1066 std::io::copy(&mut reader, &mut writer)
1067 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1068 }
afb4cd28 1069 } else if server_archive_name.ends_with(".fidx") {
afb4cd28 1070
72050500 1071 let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
df65bd3d 1072
88892ea8
DM
1073 let mut writer = if let Some(target) = target {
1074 std::fs::OpenOptions::new()
bf125261
DM
1075 .write(true)
1076 .create(true)
1077 .create_new(true)
1078 .open(target)
88892ea8 1079 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
bf125261 1080 } else {
88892ea8
DM
1081 std::fs::OpenOptions::new()
1082 .write(true)
1083 .open("/dev/stdout")
1084 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
1085 };
afb4cd28 1086
fd04ca7a 1087 dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
88892ea8
DM
1088
1089 } else {
f8100e96 1090 bail!("unknown archive file extension (expected .pxar of .img)");
3031e44c 1091 }
fef44d4f
DM
1092
1093 Ok(Value::Null)
45db6f89
DM
1094}
1095
ec34f7eb
DM
/// Upload a client log file for an existing backup snapshot.
///
/// Reads `logfile`, wraps it in a (optionally encrypted) `DataBlob` and
/// POSTs it to the datastore's `upload-backup-log` endpoint for the
/// snapshot given via the `snapshot` parameter.
fn upload_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    // With a keyfile the blob is encrypted with the derived key;
    // the creation timestamp returned alongside the key is not needed here.
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    // third argument `true`: compress the blob before (optional) encryption
    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    // Snapshot coordinates are passed as query arguments, the blob as body.
    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    // upload_log itself is synchronous; drive the async upload to completion
    async_main(async move {
        client.upload("application/octet-stream", body, &path, Some(args)).await
    })
}
1141
/// Start a prune job for a backup group on the server.
///
/// Forwards all remaining CLI parameters (the prune options) to the
/// datastore's `prune` API call and, in text mode, follows the resulting
/// task log until the job finishes.
fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;
    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    // Strip client-only parameters; whatever remains in `param` is sent
    // verbatim to the server as prune options.
    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");

    // Replace the "group" string with its two server-side components.
    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    async_main(async {
        let result = client.post(&path, Some(param)).await?;

        record_repository(&repo);

        let data = &result["data"];
        if output_format == "text" {
            // In text mode the API returns a task UPID; stream its log.
            if let Some(upid) = data.as_str() {
                display_task_log(client, upid, true).await?;
            }
        } else {
            format_and_print_result(&data, &output_format);
        }

        // turbofish pins the error type of the async block for `?` above
        Ok::<_, Error>(())
    })?;

    Ok(Value::Null)
}
1184
34a816cc
DM
1185fn status(
1186 param: Value,
1187 _info: &ApiMethod,
1188 _rpcenv: &mut dyn RpcEnvironment,
1189) -> Result<Value, Error> {
1190
1191 let repo = extract_repository_from_value(&param)?;
1192
1193 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
1194
cc2ce4a9 1195 let client = HttpClient::new(repo.host(), repo.user(), None)?;
34a816cc
DM
1196
1197 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
1198
e9722f8b 1199 let result = async_main(async move { client.get(&path, None).await })?;
34a816cc
DM
1200 let data = &result["data"];
1201
1202 record_repository(&repo);
1203
1204 if output_format == "text" {
1205 let total = data["total"].as_u64().unwrap();
1206 let used = data["used"].as_u64().unwrap();
1207 let avail = data["avail"].as_u64().unwrap();
1208 let roundup = total/200;
1209
1210 println!(
1211 "total: {} used: {} ({} %) available: {}",
1212 total,
1213 used,
1214 ((used+roundup)*100)/total,
1215 avail,
1216 );
1217 } else {
f6ede796 1218 format_and_print_result(data, &output_format);
34a816cc
DM
1219 }
1220
1221 Ok(Value::Null)
1222}
1223
5a2df000 1224// like get, but simply ignore errors and return Null instead
e9722f8b 1225async fn try_get(repo: &BackupRepository, url: &str) -> Value {
024f11bb 1226
cc2ce4a9 1227 let client = match HttpClient::new(repo.host(), repo.user(), None) {
45cdce06
DM
1228 Ok(v) => v,
1229 _ => return Value::Null,
1230 };
b2388518 1231
e9722f8b 1232 let mut resp = match client.get(url, None).await {
b2388518
DM
1233 Ok(v) => v,
1234 _ => return Value::Null,
1235 };
1236
1237 if let Some(map) = resp.as_object_mut() {
1238 if let Some(data) = map.remove("data") {
1239 return data;
1240 }
1241 }
1242 Value::Null
1243}
1244
b2388518 1245fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1246 async_main(async { complete_backup_group_do(param).await })
1247}
1248
1249async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
024f11bb 1250
b2388518
DM
1251 let mut result = vec![];
1252
2665cef7 1253 let repo = match extract_repository_from_map(param) {
b2388518 1254 Some(v) => v,
024f11bb
DM
1255 _ => return result,
1256 };
1257
b2388518
DM
1258 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
1259
e9722f8b 1260 let data = try_get(&repo, &path).await;
b2388518
DM
1261
1262 if let Some(list) = data.as_array() {
024f11bb 1263 for item in list {
98f0b972
DM
1264 if let (Some(backup_id), Some(backup_type)) =
1265 (item["backup-id"].as_str(), item["backup-type"].as_str())
1266 {
1267 result.push(format!("{}/{}", backup_type, backup_id));
024f11bb
DM
1268 }
1269 }
1270 }
1271
1272 result
1273}
1274
b2388518 1275fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1276 async_main(async { complete_group_or_snapshot_do(arg, param).await })
1277}
1278
1279async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
b2388518 1280
b2388518 1281 if arg.matches('/').count() < 2 {
e9722f8b 1282 let groups = complete_backup_group_do(param).await;
543a260f 1283 let mut result = vec![];
b2388518
DM
1284 for group in groups {
1285 result.push(group.to_string());
1286 result.push(format!("{}/", group));
1287 }
1288 return result;
1289 }
1290
e9722f8b 1291 complete_backup_snapshot_do(param).await
543a260f 1292}
b2388518 1293
3fb53e07 1294fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1295 async_main(async { complete_backup_snapshot_do(param).await })
1296}
1297
1298async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
543a260f
DM
1299
1300 let mut result = vec![];
1301
1302 let repo = match extract_repository_from_map(param) {
1303 Some(v) => v,
1304 _ => return result,
1305 };
1306
1307 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
b2388518 1308
e9722f8b 1309 let data = try_get(&repo, &path).await;
b2388518
DM
1310
1311 if let Some(list) = data.as_array() {
1312 for item in list {
1313 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
1314 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
1315 {
1316 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
1317 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
1318 }
1319 }
1320 }
1321
1322 result
1323}
1324
45db6f89 1325fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
e9722f8b
WB
1326 async_main(async { complete_server_file_name_do(param).await })
1327}
1328
1329async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
08dc340a
DM
1330
1331 let mut result = vec![];
1332
2665cef7 1333 let repo = match extract_repository_from_map(param) {
08dc340a
DM
1334 Some(v) => v,
1335 _ => return result,
1336 };
1337
1338 let snapshot = match param.get("snapshot") {
1339 Some(path) => {
1340 match BackupDir::parse(path) {
1341 Ok(v) => v,
1342 _ => return result,
1343 }
1344 }
1345 _ => return result,
1346 };
1347
1348 let query = tools::json_object_to_query(json!({
1349 "backup-type": snapshot.group().backup_type(),
1350 "backup-id": snapshot.group().backup_id(),
1351 "backup-time": snapshot.backup_time().timestamp(),
1352 })).unwrap();
1353
1354 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
1355
e9722f8b 1356 let data = try_get(&repo, &path).await;
08dc340a
DM
1357
1358 if let Some(list) = data.as_array() {
1359 for item in list {
c4f025eb 1360 if let Some(filename) = item["filename"].as_str() {
08dc340a
DM
1361 result.push(filename.to_owned());
1362 }
1363 }
1364 }
1365
45db6f89
DM
1366 result
1367}
1368
1369fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
52c171e4 1370 complete_server_file_name(arg, param)
e9722f8b
WB
1371 .iter()
1372 .map(|v| strip_server_file_expenstion(&v))
1373 .collect()
08dc340a
DM
1374}
1375
0ec9e1b0
DM
1376fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
1377 complete_server_file_name(arg, param)
1378 .iter()
1379 .filter_map(|v| {
1380 let name = strip_server_file_expenstion(&v);
1381 if name.ends_with(".pxar") {
1382 Some(name)
1383 } else {
1384 None
1385 }
1386 })
1387 .collect()
1388}
1389
49811347
DM
/// Shell completion helper: suggest chunk sizes.
///
/// Yields the powers of two from 64 (2^6) up to 4096 (2^12), as strings.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    (6u32..=12).map(|exp| (1u64 << exp).to_string()).collect()
}
1403
/// Obtain the password protecting the encryption key.
///
/// Sources, in order: the `PBS_ENCRYPTION_PASSWORD` environment variable,
/// then an interactive TTY prompt. Fails if neither is available or the
/// environment variable is not valid unicode.
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
1424
ac716234
DM
/// Create a new 32-byte random encryption key and store it at `path`.
///
/// With `kdf == "scrypt"` (the default) the key is encrypted with a
/// passphrase read from the TTY; with `kdf == "none"` it is stored
/// unprotected. Other values are rejected by the schema, hence
/// `unreachable!()`.
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // 32 bytes of kernel randomness form the actual encryption key
    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        // second argument `false`: do not replace an existing key file
        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // truncate to whole seconds for a stable timestamp
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
1466
9f46c7de
DM
1467fn master_pubkey_path() -> Result<PathBuf, Error> {
1468 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1469
1470 // usually $HOME/.config/proxmox-backup/master-public.pem
1471 let path = base.place_config_file("master-public.pem")?;
1472
1473 Ok(path)
1474}
1475
3ea8bfc9
DM
/// Import an RSA public key (PEM format) as the master key.
///
/// Validates that the file parses as a PEM public key, then copies it to
/// the per-user config location returned by `master_pubkey_path()`.
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    // reject files that do not parse as a PEM public key before installing
    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
1499
37c5a175
DM
/// Generate a new 4096-bit RSA master key pair.
///
/// Writes `master-public.pem` (plain) and `master-private.pem` (AES-256-CBC
/// encrypted with a TTY-queried passphrase) into the current directory.
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification fail!");
    }

    // minimal sanity check only; no further password policy is enforced
    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    // the private key is written passphrase-protected (PKCS#8, AES-256-CBC)
    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}
ac716234
DM
1539
/// Change (or remove) the passphrase protecting an encryption key file.
///
/// Decrypts the key at `path` using the current password, then re-stores
/// it either scrypt-protected with a new TTY-queried passphrase
/// (`kdf == "scrypt"`, the default) or unprotected (`kdf == "none"`).
/// The original creation timestamp is preserved in both cases.
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification fail!");
        }

        // minimal sanity check only; no further password policy is enforced
        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        // second argument `true`: replace the existing key file
        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // truncate to whole seconds for a stable timestamp
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        // other kdf values are rejected by the schema
        unreachable!();
    }
}
1592
1593fn key_mgmt_cli() -> CliCommandMap {
1594
255f378a 1595 const KDF_SCHEMA: Schema =
181f097a 1596 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
255f378a
DM
1597 .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
1598 .default("scrypt")
1599 .schema();
1600
552c2259 1601 #[sortable]
255f378a
DM
1602 const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
1603 &ApiHandler::Sync(&key_create),
1604 &ObjectSchema::new(
1605 "Create a new encryption key.",
552c2259 1606 &sorted!([
255f378a
DM
1607 ("path", false, &StringSchema::new("File system path.").schema()),
1608 ("kdf", true, &KDF_SCHEMA),
552c2259 1609 ]),
255f378a 1610 )
181f097a 1611 );
7074a0b3 1612
255f378a 1613 let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
49fddd98 1614 .arg_param(&["path"])
9b06db45 1615 .completion_cb("path", tools::complete_file_name);
f2401311 1616
552c2259 1617 #[sortable]
255f378a
DM
1618 const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
1619 &ApiHandler::Sync(&key_change_passphrase),
1620 &ObjectSchema::new(
1621 "Change the passphrase required to decrypt the key.",
552c2259 1622 &sorted!([
255f378a
DM
1623 ("path", false, &StringSchema::new("File system path.").schema()),
1624 ("kdf", true, &KDF_SCHEMA),
552c2259 1625 ]),
255f378a
DM
1626 )
1627 );
7074a0b3 1628
255f378a 1629 let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
49fddd98 1630 .arg_param(&["path"])
9b06db45 1631 .completion_cb("path", tools::complete_file_name);
ac716234 1632
255f378a
DM
1633 const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
1634 &ApiHandler::Sync(&key_create_master_key),
1635 &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
1636 );
7074a0b3 1637
255f378a
DM
1638 let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
1639
552c2259 1640 #[sortable]
255f378a
DM
1641 const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
1642 &ApiHandler::Sync(&key_import_master_pubkey),
1643 &ObjectSchema::new(
1644 "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
552c2259 1645 &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
255f378a
DM
1646 )
1647 );
7074a0b3 1648
255f378a 1649 let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
49fddd98 1650 .arg_param(&["path"])
3ea8bfc9
DM
1651 .completion_cb("path", tools::complete_file_name);
1652
11377a47 1653 CliCommandMap::new()
eb7e2ee0
DM
1654 .insert("create", key_create_cmd_def.into())
1655 .insert("create-master-key", key_create_master_key_cmd_def.into())
1656 .insert("import-master-pubkey", key_import_master_pubkey_cmd_def.into())
1657 .insert("change-passphrase", key_change_passphrase_cmd_def.into())
f2401311
DM
1658}
1659
70235f72
CE
/// Mount a pxar archive via FUSE.
///
/// In verbose mode the process stays in the foreground; otherwise it
/// daemonizes: the parent blocks on a pipe until the forked child has
/// finished mount setup, then exits. Note the fork happens BEFORE any
/// async runtime exists - forking after runtime creation is unsafe.
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return async_main(mount_do(param, None));
    }

    // Process should be deamonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            // parent only reads: close the write end immediately
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            // child only writes: close the read end, detach from the session
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            async_main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}
1690
/// Async part of `mount`: resolve the snapshot, download the dynamic
/// index of the pxar archive and run a FUSE session over it.
///
/// `pipe`: write end used to signal the (daemonizing) parent once the
/// mount is set up; `None` means foreground/verbose operation.
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    // a single '/' means a group path: use its latest snapshot
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        // NOTE(review): this inner `path` shadows the user-supplied snapshot
        // path, so the bail! below prints the API URL instead of the group.
        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // only pxar archives (stored as dynamic indexes) can be mounted
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder = pxar::Decoder::new(reader)?;
        let options = OsStr::new("ro,default_permissions");
        // verbose (pipe.is_none()) also controls fuse debug output
        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session but not call fuse deamonize as this will cause
        // issues with the runtime after the fork
        let deamonize = false;
        session.mount(&Path::new(target), deamonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of deamon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            // nullfd may already be 0-2 if those were closed; only close extras
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        // blocks until the filesystem is unmounted
        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
1792
/// Entry point for the interactive catalog shell (sync wrapper around
/// `catalog_shell_async`).
fn catalog_shell(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    async_main(catalog_shell_async(param))
}
1800
/// Download the catalog of a snapshot plus the dynamic index of a pxar
/// archive and start an interactive shell to inspect/restore from it.
async fn catalog_shell_async(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = HttpClient::new(repo.host(), repo.user(), None)?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    // a single '/' means a group path: use its latest snapshot
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        // NOTE(review): this inner `path` shadows the user-supplied snapshot
        // path, so the bail! below prints the API URL instead of the group.
        let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).await?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots:", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // the shell only works on pxar archives (stored as dynamic indexes)
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    // anonymous temp file (O_TMPFILE): vanishes automatically on close
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    // reader over the pxar archive itself (used by the shell for restores)
    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut decoder = pxar::Decoder::new(reader)?;
    decoder.set_callback(|path| {
        println!("{:?}", path);
        Ok(())
    });

    // download the catalog index into the anonymous temp file
    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, computed them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    // stream the catalog contents into a second anonymous temp file
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    // rewind so the catalog reader starts at the beginning
    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    )?;

    println!("Starting interactive shell");
    // blocks until the user exits the shell
    state.shell()?;

    record_repository(&repo);

    Ok(Value::Null)
}
1907
1c6ad6ef
DM
1908fn catalog_mgmt_cli() -> CliCommandMap {
1909
1910 #[sortable]
1911 const API_METHOD_SHELL: ApiMethod = ApiMethod::new(
1912 &ApiHandler::Sync(&catalog_shell),
1913 &ObjectSchema::new(
1914 "Shell to interactively inspect and restore snapshots.",
1915 &sorted!([
1916 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
1917 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
1918 ("repository", true, &REPO_URL_SCHEMA),
1919 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
1920 ]),
1921 )
1922 );
1923
1924 let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_SHELL)
1925 .arg_param(&["snapshot", "archive-name"])
1926 .completion_cb("repository", complete_repository)
0ec9e1b0 1927 .completion_cb("archive-name", complete_pxar_archive_name)
1c6ad6ef
DM
1928 .completion_cb("snapshot", complete_group_or_snapshot);
1929
1930 #[sortable]
1931 const API_METHOD_DUMP_CATALOG: ApiMethod = ApiMethod::new(
1932 &ApiHandler::Sync(&dump_catalog),
1933 &ObjectSchema::new(
1934 "Dump catalog.",
1935 &sorted!([
1936 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
1937 ("repository", true, &REPO_URL_SCHEMA),
1938 ]),
1939 )
1940 );
1941
1942 let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
1943 .arg_param(&["snapshot"])
1944 .completion_cb("repository", complete_repository)
1945 .completion_cb("snapshot", complete_backup_snapshot);
1946
1947 CliCommandMap::new()
eb7e2ee0
DM
1948 .insert("dump", catalog_dump_cmd_def.into())
1949 .insert("shell", catalog_shell_cmd_def.into())
1c6ad6ef
DM
1950}
1951
1952
f2401311 1953fn main() {
33d64b81 1954
255f378a
DM
1955 const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new("Backup source specification ([<label>:<path>]).")
1956 .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
1957 .schema();
25f1650b 1958
552c2259 1959 #[sortable]
255f378a
DM
1960 const API_METHOD_CREATE_BACKUP: ApiMethod = ApiMethod::new(
1961 &ApiHandler::Sync(&create_backup),
1962 &ObjectSchema::new(
1963 "Create (host) backup.",
552c2259 1964 &sorted!([
255f378a 1965 (
ae0be2dd 1966 "backupspec",
255f378a
DM
1967 false,
1968 &ArraySchema::new(
74cdb521 1969 "List of backup source specifications ([<label.ext>:<path>] ...)",
255f378a
DM
1970 &BACKUP_SOURCE_SCHEMA,
1971 ).min_length(1).schema()
1972 ),
1973 (
1974 "repository",
1975 true,
1976 &REPO_URL_SCHEMA
1977 ),
1978 (
2eeaacb9 1979 "include-dev",
255f378a
DM
1980 true,
1981 &ArraySchema::new(
2eeaacb9 1982 "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
255f378a
DM
1983 &StringSchema::new("Path to file.").schema()
1984 ).schema()
1985 ),
1986 (
6d0983db 1987 "keyfile",
255f378a
DM
1988 true,
1989 &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()
1990 ),
1991 (
219ef0e6 1992 "verbose",
255f378a
DM
1993 true,
1994 &BooleanSchema::new("Verbose output.")
1995 .default(false)
1996 .schema()
1997 ),
1998 (
5b72c9b4 1999 "skip-lost-and-found",
255f378a
DM
2000 true,
2001 &BooleanSchema::new("Skip lost+found directory")
2002 .default(false)
2003 .schema()
2004 ),
2005 (
bbf9e7e9 2006 "backup-type",
255f378a
DM
2007 true,
2008 &BACKUP_TYPE_SCHEMA,
2009 ),
2010 (
bbf9e7e9 2011 "backup-id",
255f378a
DM
2012 true,
2013 &BACKUP_ID_SCHEMA
2014 ),
2015 (
ca5d0b61 2016 "backup-time",
255f378a
DM
2017 true,
2018 &BACKUP_TIME_SCHEMA
2019 ),
2020 (
2d9d143a 2021 "chunk-size",
255f378a
DM
2022 true,
2023 &IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
2d9d143a
DM
2024 .minimum(64)
2025 .maximum(4096)
2026 .default(4096)
255f378a
DM
2027 .schema()
2028 ),
552c2259 2029 ]),
255f378a
DM
2030 )
2031 );
7074a0b3 2032
255f378a 2033 let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
49fddd98 2034 .arg_param(&["backupspec"])
d0a03d40 2035 .completion_cb("repository", complete_repository)
49811347 2036 .completion_cb("backupspec", complete_backup_source)
6d0983db 2037 .completion_cb("keyfile", tools::complete_file_name)
49811347 2038 .completion_cb("chunk-size", complete_chunk_size);
f8838fe9 2039
552c2259 2040 #[sortable]
255f378a
DM
2041 const API_METHOD_UPLOAD_LOG: ApiMethod = ApiMethod::new(
2042 &ApiHandler::Sync(&upload_log),
2043 &ObjectSchema::new(
2044 "Upload backup log file.",
552c2259 2045 &sorted!([
255f378a
DM
2046 (
2047 "snapshot",
2048 false,
2049 &StringSchema::new("Snapshot path.").schema()
2050 ),
2051 (
2052 "logfile",
2053 false,
2054 &StringSchema::new("The path to the log file you want to upload.").schema()
2055 ),
2056 (
2057 "repository",
2058 true,
2059 &REPO_URL_SCHEMA
2060 ),
2061 (
ec34f7eb 2062 "keyfile",
255f378a
DM
2063 true,
2064 &StringSchema::new("Path to encryption key. All data will be encrypted using this key.").schema()
2065 ),
552c2259 2066 ]),
255f378a
DM
2067 )
2068 );
7074a0b3 2069
255f378a 2070 let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
49fddd98 2071 .arg_param(&["snapshot", "logfile"])
543a260f 2072 .completion_cb("snapshot", complete_backup_snapshot)
ec34f7eb
DM
2073 .completion_cb("logfile", tools::complete_file_name)
2074 .completion_cb("keyfile", tools::complete_file_name)
2075 .completion_cb("repository", complete_repository);
2076
552c2259 2077 #[sortable]
255f378a
DM
2078 const API_METHOD_LIST_BACKUP_GROUPS: ApiMethod = ApiMethod::new(
2079 &ApiHandler::Sync(&list_backup_groups),
2080 &ObjectSchema::new(
2081 "List backup groups.",
552c2259 2082 &sorted!([
255f378a
DM
2083 ("repository", true, &REPO_URL_SCHEMA),
2084 ("output-format", true, &OUTPUT_FORMAT),
552c2259 2085 ]),
255f378a
DM
2086 )
2087 );
7074a0b3 2088
255f378a 2089 let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
d0a03d40 2090 .completion_cb("repository", complete_repository);
41c039e1 2091
552c2259 2092 #[sortable]
255f378a
DM
2093 const API_METHOD_LIST_SNAPSHOTS: ApiMethod = ApiMethod::new(
2094 &ApiHandler::Sync(&list_snapshots),
2095 &ObjectSchema::new(
2096 "List backup snapshots.",
552c2259 2097 &sorted!([
255f378a
DM
2098 ("group", true, &StringSchema::new("Backup group.").schema()),
2099 ("repository", true, &REPO_URL_SCHEMA),
2100 ("output-format", true, &OUTPUT_FORMAT),
552c2259 2101 ]),
255f378a
DM
2102 )
2103 );
7074a0b3 2104
255f378a 2105 let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
49fddd98 2106 .arg_param(&["group"])
024f11bb 2107 .completion_cb("group", complete_backup_group)
d0a03d40 2108 .completion_cb("repository", complete_repository);
184f17af 2109
552c2259 2110 #[sortable]
255f378a
DM
2111 const API_METHOD_FORGET_SNAPSHOTS: ApiMethod = ApiMethod::new(
2112 &ApiHandler::Sync(&forget_snapshots),
2113 &ObjectSchema::new(
2114 "Forget (remove) backup snapshots.",
552c2259 2115 &sorted!([
255f378a
DM
2116 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
2117 ("repository", true, &REPO_URL_SCHEMA),
552c2259 2118 ]),
255f378a
DM
2119 )
2120 );
7074a0b3 2121
255f378a 2122 let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
49fddd98 2123 .arg_param(&["snapshot"])
b2388518 2124 .completion_cb("repository", complete_repository)
543a260f 2125 .completion_cb("snapshot", complete_backup_snapshot);
6f62c924 2126
552c2259 2127 #[sortable]
255f378a
DM
2128 const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
2129 &ApiHandler::Sync(&start_garbage_collection),
2130 &ObjectSchema::new(
2131 "Start garbage collection for a specific repository.",
552c2259 2132 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
255f378a
DM
2133 )
2134 );
7074a0b3 2135
255f378a 2136 let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
d0a03d40 2137 .completion_cb("repository", complete_repository);
8cc0d6af 2138
552c2259 2139 #[sortable]
255f378a
DM
2140 const API_METHOD_RESTORE: ApiMethod = ApiMethod::new(
2141 &ApiHandler::Sync(&restore),
2142 &ObjectSchema::new(
2143 "Restore backup repository.",
552c2259 2144 &sorted!([
255f378a
DM
2145 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2146 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2147 (
2148 "target",
2149 false,
2150 &StringSchema::new(
2151 r###"Target directory path. Use '-' to write to stdandard output.
bf125261
DM
2152
2153We do not extraxt '.pxar' archives when writing to stdandard output.
2154
2155"###
255f378a
DM
2156 ).schema()
2157 ),
2158 (
46d5aa0a 2159 "allow-existing-dirs",
255f378a
DM
2160 true,
2161 &BooleanSchema::new("Do not fail if directories already exists.")
2162 .default(false)
2163 .schema()
2164 ),
2165 ("repository", true, &REPO_URL_SCHEMA),
2166 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2167 (
86eda3eb 2168 "verbose",
7074a0b3 2169 true,
255f378a
DM
2170 &BooleanSchema::new("Verbose output.")
2171 .default(false)
2172 .schema()
2173 ),
552c2259 2174 ]),
255f378a
DM
2175 )
2176 );
7074a0b3 2177
255f378a 2178 let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
49fddd98 2179 .arg_param(&["snapshot", "archive-name", "target"])
b2388518 2180 .completion_cb("repository", complete_repository)
08dc340a
DM
2181 .completion_cb("snapshot", complete_group_or_snapshot)
2182 .completion_cb("archive-name", complete_archive_name)
2183 .completion_cb("target", tools::complete_file_name);
9f912493 2184
552c2259 2185 #[sortable]
255f378a
DM
2186 const API_METHOD_LIST_SNAPSHOT_FILES: ApiMethod = ApiMethod::new(
2187 &ApiHandler::Sync(&list_snapshot_files),
2188 &ObjectSchema::new(
2189 "List snapshot files.",
552c2259 2190 &sorted!([
255f378a
DM
2191 ("snapshot", false, &StringSchema::new("Snapshot path.").schema()),
2192 ("repository", true, &REPO_URL_SCHEMA),
2193 ("output-format", true, &OUTPUT_FORMAT),
552c2259 2194 ]),
255f378a
DM
2195 )
2196 );
7074a0b3 2197
255f378a 2198 let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
49fddd98 2199 .arg_param(&["snapshot"])
52c171e4 2200 .completion_cb("repository", complete_repository)
543a260f 2201 .completion_cb("snapshot", complete_backup_snapshot);
52c171e4 2202
255f378a
DM
2203 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
2204 &ApiHandler::Sync(&prune),
2205 &ObjectSchema::new(
2206 "Prune backup repository.",
552c2259 2207 &proxmox_backup::add_common_prune_prameters!([
74fa81b8
DM
2208 ("dry-run", true, &BooleanSchema::new(
2209 "Just show what prune would do, but do not delete anything.")
2210 .schema()),
255f378a 2211 ("group", false, &StringSchema::new("Backup group.").schema()),
552c2259 2212 ], [
163e9bbe 2213 ("output-format", true, &OUTPUT_FORMAT),
255f378a 2214 ("repository", true, &REPO_URL_SCHEMA),
552c2259 2215 ])
255f378a
DM
2216 )
2217 );
7074a0b3 2218
255f378a 2219 let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
49fddd98 2220 .arg_param(&["group"])
9fdc3ef4 2221 .completion_cb("group", complete_backup_group)
d0a03d40 2222 .completion_cb("repository", complete_repository);
9f912493 2223
552c2259 2224 #[sortable]
255f378a
DM
2225 const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
2226 &ApiHandler::Sync(&status),
2227 &ObjectSchema::new(
2228 "Get repository status.",
552c2259 2229 &sorted!([
255f378a
DM
2230 ("repository", true, &REPO_URL_SCHEMA),
2231 ("output-format", true, &OUTPUT_FORMAT),
552c2259 2232 ]),
255f378a
DM
2233 )
2234 );
7074a0b3 2235
255f378a 2236 let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
34a816cc
DM
2237 .completion_cb("repository", complete_repository);
2238
552c2259 2239 #[sortable]
255f378a
DM
2240 const API_METHOD_API_LOGIN: ApiMethod = ApiMethod::new(
2241 &ApiHandler::Sync(&api_login),
2242 &ObjectSchema::new(
2243 "Try to login. If successful, store ticket.",
552c2259 2244 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
255f378a
DM
2245 )
2246 );
7074a0b3 2247
255f378a 2248 let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
e240d8be
DM
2249 .completion_cb("repository", complete_repository);
2250
552c2259 2251 #[sortable]
255f378a
DM
2252 const API_METHOD_API_LOGOUT: ApiMethod = ApiMethod::new(
2253 &ApiHandler::Sync(&api_logout),
2254 &ObjectSchema::new(
2255 "Logout (delete stored ticket).",
552c2259 2256 &sorted!([ ("repository", true, &REPO_URL_SCHEMA) ]),
255f378a
DM
2257 )
2258 );
7074a0b3 2259
255f378a 2260 let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
e240d8be 2261 .completion_cb("repository", complete_repository);
32efac1c 2262
552c2259 2263 #[sortable]
255f378a
DM
2264 const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
2265 &ApiHandler::Sync(&mount),
2266 &ObjectSchema::new(
2267 "Mount pxar archive.",
552c2259 2268 &sorted!([
255f378a
DM
2269 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2270 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2271 ("target", false, &StringSchema::new("Target directory path.").schema()),
2272 ("repository", true, &REPO_URL_SCHEMA),
2273 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2274 ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
552c2259 2275 ]),
255f378a
DM
2276 )
2277 );
7074a0b3 2278
255f378a 2279 let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
49fddd98 2280 .arg_param(&["snapshot", "archive-name", "target"])
70235f72
CE
2281 .completion_cb("repository", complete_repository)
2282 .completion_cb("snapshot", complete_group_or_snapshot)
0ec9e1b0 2283 .completion_cb("archive-name", complete_pxar_archive_name)
70235f72 2284 .completion_cb("target", tools::complete_file_name);
e240d8be 2285
3cf73c4e 2286
41c039e1 2287 let cmd_def = CliCommandMap::new()
eb7e2ee0
DM
2288 .insert("backup", backup_cmd_def.into())
2289 .insert("upload-log", upload_log_cmd_def.into())
2290 .insert("forget", forget_cmd_def.into())
2291 .insert("garbage-collect", garbage_collect_cmd_def.into())
2292 .insert("list", list_cmd_def.into())
2293 .insert("login", login_cmd_def.into())
2294 .insert("logout", logout_cmd_def.into())
2295 .insert("prune", prune_cmd_def.into())
2296 .insert("restore", restore_cmd_def.into())
2297 .insert("snapshots", snapshots_cmd_def.into())
2298 .insert("files", files_cmd_def.into())
2299 .insert("status", status_cmd_def.into())
2300 .insert("key", key_mgmt_cli().into())
2301 .insert("mount", mount_cmd_def.into())
2302 .insert("catalog", catalog_mgmt_cli().into());
a914a774 2303
e9722f8b
WB
2304 run_cli_command(cmd_def.into());
2305}
496a6784 2306
e9722f8b
WB
2307fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
2308 let rt = tokio::runtime::Runtime::new().unwrap();
2309 let ret = rt.block_on(fut);
2310 rt.shutdown_now();
2311 ret
ff5d3707 2312}